blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
53e4d40086d979605c7d3f61d7a9345a066b9bbd
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/SMITIDvisu/examples/updateTimeLine.Rd.R
|
76bc5d9cb453e0b7ed67a89de832e48d408dce47
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 421
|
r
|
updateTimeLine.Rd.R
|
# Extracted example from the SMITIDvisu package documentation for
# updateTimeLine(). The body is wrapped in "Not run" (##D lines) because
# it only works inside a running Shiny session.
library(SMITIDvisu)
### Name: updateTimeLine
### Title: updateTimeLine
### Aliases: updateTimeLine
### ** Examples
## Not run:
##D ## server.R
##D ## output server variable
##D output$timeline <- renderTimeLine({
##D timeLine(data.frame(), "")
##D })
##D ## ui.R
##D timeLineOutput("timeline")
##D ## server.R
##D timeLineProxy("timeline") %>% updateTimeLine(newtimeline, "newId")
## End(Not run)
|
702e72177e6edbe9b20f19ef5a1cc9699c810102
|
12a74036fd925b7a7087fec6d773b8f0061a6500
|
/R/create_ggtheme.R
|
37f9e90064aef0e08c582a3c94ca092522a71bb7
|
[
"MIT"
] |
permissive
|
jmhome/rPlotter
|
283a1af741995b263f9841dccaa9d6740be894a4
|
7b47b9ba0897a55a86d35760acf2edca3cc7da9d
|
refs/heads/master
| 2021-01-24T16:52:00.867998
| 2014-09-27T00:32:40
| 2014-09-27T00:32:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,477
|
r
|
create_ggtheme.R
|
#' Create customised themes for ggplot2 object
#'
#' This function creates a ggplot2 theme object based on my favourite templates.
#'
#' @param theme The name of the temaplate (blank, xkcd, more to come ...)
#'
#' @examples
#' theme_blank <- create_ggtheme("blank")
#' theme_xkcd <- create_ggtheme("xkcd")
#' @export
#' @import ggplot2 stringr
#' Create customised themes for ggplot2 objects
#'
#' Builds a ggplot2 theme object from a named template.
#'
#' @param theme The name of the template: "blank" (default) or "xkcd".
#' @return A ggplot2 theme object.
#' @examples
#' theme_blank <- create_ggtheme("blank")
#' theme_xkcd <- create_ggtheme("xkcd")
#' @export
#' @import ggplot2 stringr
create_ggtheme <- function(theme = "blank") {
  # Validate up front: previously an unknown template fell through both
  # branches and failed with "object 'output_theme' not found" at return.
  theme <- match.arg(theme, c("blank", "xkcd"))
  if (theme == "xkcd") {
    # Hand-drawn look: white panel, no ticks/grid, comic-style font.
    # NOTE(review): requires the "Humor Sans" font to be installed.
    output_theme <- theme(
      panel.background = element_rect(fill = "white"),
      axis.ticks = element_line(colour = NA),
      panel.grid = element_line(colour = "white"),
      axis.text.y = element_text(colour = NA),
      axis.text.x = element_text(colour = "black"),
      text = element_text(size = 16, family = "Humor Sans")
    )
  } else {
    # Start from theme_bw() and blank out every decorative element.
    output_theme <- theme_bw()
    output_theme$line <- element_blank()
    output_theme$rect <- element_blank()
    output_theme$strip.text <- element_blank()
    output_theme$axis.text <- element_blank()
    output_theme$plot.title <- element_blank()
    output_theme$axis.title <- element_blank()
    # Negative margins trim the dead space left by the blanked elements.
    output_theme$plot.margin <- structure(c(0, 0, -1, -1),
                                          unit = "lines",
                                          valid.unit = 3L,
                                          class = "unit")
  }
  output_theme
}
|
9489f96ada175478c62b68261c098c6b89d66fce
|
3b3168707c67aefbd85934bae572b6f686b479e0
|
/inst/shiny-examples/simpleVis/ui.R
|
99a558edbf93a8e21fc07f76ad8a46633aa8d175
|
[] |
no_license
|
qenvio/hicvidere
|
e114869c279da21e5de178b7961432fbded5a191
|
65df8c6a2d5a9ba9d7e138da776afd3b47ab7f6e
|
refs/heads/master
| 2021-01-10T04:44:26.737390
| 2016-01-25T15:56:53
| 2016-01-25T15:56:53
| 43,735,401
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 354
|
r
|
ui.R
|
# Shiny UI for the HiCvideRe app: a sidebar with a TSV file chooser and a
# server-rendered selection control, plus a main panel showing the matrix plot.
ui <- shinyUI(fluidPage(
  titlePanel("HiCvideRe"),
  sidebarLayout(
    sidebarPanel(
      # Restrict the file picker to tab-separated files.
      fileInput('file1',
                'Choose TSV File',
                accept=c("text/tab-separated-values",
                         ".tsv")),
      # Rendered server-side (output$selection), so choices can depend on
      # the uploaded file.
      uiOutput('selection')
    ),
    mainPanel(
      # BUG FIX: use TRUE rather than T — T is an ordinary variable that
      # can be reassigned, silently breaking this flag.
      plotOutput('plot_matrix', inline = TRUE)
    )
  )
))
|
94f569e79a02daac37dcc917dcac00a14bf5d879
|
79526a578700b558b4b9d269afb9adcbcd53d570
|
/cachematrix.R
|
fcbc03eb8446cc6b8f21a66d84485fcdb5bef332
|
[] |
no_license
|
MSFlk921/ProgrammingAssignment2
|
e4b301b2d9d1881a696011c212cf51f96a692794
|
67de5ca4dcf5736bf566da36d11002f183cf1f76
|
refs/heads/master
| 2021-01-18T10:40:44.092917
| 2015-04-23T22:35:52
| 2015-04-23T22:35:52
| 34,482,254
| 0
| 0
| null | 2015-04-23T21:22:46
| 2015-04-23T21:22:43
| null |
UTF-8
|
R
| false
| false
| 878
|
r
|
cachematrix.R
|
## The first function builds the matrix container and the second caches the
## value of its inverse.
## For example, you can build the container with
## m <- makeCacheMatrix(matrix(1:4,2,2))
## and then get the catch of the inverse
## cacheSolve(m)
## [,1] [,2]
## [1,] -2 1.5
## [2,] 1 -0.5
##
## This function creates the matrix container; its value can be replaced via set()
## Create a cache-enabled matrix container.
##
## Returns a list of four accessors closing over the matrix `x` and its
## cached inverse:
##   set(y)        -- replace the stored matrix and invalidate the cache
##   get()         -- return the stored matrix
##   setmatrix(m)  -- store a computed inverse (called by cacheSolve)
##   getmatrix()   -- return the cached inverse, or NULL if none is cached
makeCacheMatrix <- function(x = matrix()) {
  # BUG FIX: default was numeric(), but this container holds a matrix and
  # solve() would fail on a numeric vector; matrix() is the right default.
  inv <- NULL
  set <- function(y) {
    x <<- y
    inv <<- NULL  # a new matrix invalidates any previously cached inverse
  }
  get <- function() x
  setmatrix <- function(solve) inv <<- solve
  getmatrix <- function() inv
  list(set = set,
       get = get,
       setmatrix = setmatrix,
       getmatrix = getmatrix)
}
## This function returns the inverse of the matrix, using the cache when available
## Return the inverse of a cache-enabled matrix built by makeCacheMatrix().
## Serves the cached inverse when one exists; otherwise computes it with
## solve(), stores it back into the cache, and returns it.
## Extra arguments in `...` are forwarded to solve().
cacheSolve <- function(x = matrix(), ...) {
  cached <- x$getmatrix()
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  stored <- x$get()
  inverse <- solve(stored, ...)
  x$setmatrix(inverse)
  inverse
}
|
411b0142c34b8f468cb25fc039f7f117cc4d4d2d
|
bb16a53ad0ffafa4f8be4d98081b5513b004704e
|
/intro to R class 2.R
|
236370d7e8f16535b348c461cf03e6e02eeecdcc
|
[] |
no_license
|
mosesaron/Intro-to-data-Analysis-using-R
|
00359fc07a6a6f42df639ef209d67fc6f5165659
|
4e48c3605950aa208d05674ecb9a8b225aa0d918
|
refs/heads/master
| 2022-11-23T00:39:12.004362
| 2020-07-30T12:05:17
| 2020-07-30T12:05:17
| 283,734,881
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,758
|
r
|
intro to R class 2.R
|
# Intro to R -- class 2: data import, descriptive statistics, dplyr
# wrangling, and a ggplot2 bar chart.
# NOTE(review): setwd() and rm(list = ls()) are kept for the class
# walkthrough but are discouraged in reusable scripts.
# We need to set our working directory
setwd("~/Desktop/Intro to R")
# remove everything in my workspace
rm(list =ls())
# import the data but we have to install the necessary packages first
#install.packages("readxl")
library(readxl)
#install.packages("haven")
library(haven)
#install.packages("tidyverse")
library(tidyverse)
#install.packages("kableExtra")
library(kableExtra)
food <- read_excel("food.xlsx")
wash <- read_sav("wash.sav")
table(wash$how_often_treat_water)
# to view
#View(food)
#View(wash)
# descriptive statistics
# to know the variable names
colnames(food)
# get the summary statistics
summary(food$`Income in 2012`)
colnames(wash)
summary(wash$age_respondent)
# get the frequency table
table(wash$edu_level)
# get the percentage and round to 1 decimal place
round (prop.table (table(wash$edu_level))*100, 1)
# how to tell the data type
class(food$`District Name`)
class(food$`Income in 2012`)
# The most portable format to use in R is a .csv file
#
write_csv(wash, "wash.csv")
Madzi <- read_csv("wash.csv")
# Keep only the columns needed for the analysis.
WASH <- select(Madzi,household_id,gender_repondent,age_respondent,edu_level,household_head,total_household_members)
# Shorten the column names.
WASH1 <- rename(WASH,
id = household_id,
sex = gender_repondent,
age = age_respondent,
edu = edu_level,
hh = household_head,
hh_members = total_household_members)
summary(WASH1$age)
# Restrict to respondents aged 15-60.
WASH2 <- filter(WASH1, age >= 15 & age <=60)
summary(WASH2$age)
table(WASH2$sex)
# Recode numeric codes into labels -- assumed codebook: 0 = Male, 1 = Female;
# TODO confirm against the survey documentation.
WASH2$sex[WASH2$sex == 0] <- "Male"
WASH2$sex[WASH2$sex == 1] <- "Female"
table(WASH2$sex)
WASH2$edu[WASH2$edu == 0] <- "None"
WASH2$edu[WASH2$edu == 1] <- "Primary"
WASH2$edu[WASH2$edu == 2] <- "Secondary"
WASH2$edu[WASH2$edu == 3] <- "Tertiary"
table(WASH2$edu)
table(WASH2$hh)
WASH2$hh[WASH2$hh == 0] <- "No"
WASH2$hh[WASH2$hh == 1] <- "Yes"
WASH2$hh[WASH2$hh == 2] <- "Yes"
table(WASH2$hh)
# Age bands via mutate() + nested ifelse().
WASH3 <- mutate(WASH2,
age_cat = ifelse(age %in% 15:30, "15-30",
ifelse(age %in% 31:45, "31-45",
"46+")))
# Same age bands built with base-R ifelse(); stored on WASH2 for comparison.
age_cat <- ifelse(WASH2$age >=15 & WASH2$age <=30, "15-30",
ifelse(WASH2$age >=31 & WASH2$age <=45, "31-45",
ifelse(WASH2$age >=46, "46+", NA)))
WASH2$age_cat2 <- age_cat
table(WASH3$age_cat)
table(WASH3$edu, WASH3$sex)
# Grouped bar chart of education by sex with count labels.
# NOTE(review): ..count.. is deprecated in ggplot2 >= 3.4; after_stat(count)
# is the modern equivalent.
ggplot(data = WASH3, aes(fill =sex, x=edu))+geom_bar(position = "dodge") +ggtitle("Distribution of respondent level of Education by sex") +xlab("Highest level of Education") + geom_text(aes(label=..count..),stat = "count", position = position_dodge(0.9), vjust =-0.2)
# Summary table of counts and age statistics by sex, styled with kableExtra.
kkk<-WASH3 %>%
group_by(sex) %>%
summarise(n = n(),
mean = mean (age,),
min = min (age))
kkk %>% kable() %>% kable_styling()
|
aca935f4a2a67e463f1ee87ca1fcd369fc1fc331
|
99144fe0beb697c124e5271a1d395ab6477d405a
|
/man/conditionalize.data.frame.Rd
|
11529da1b7f165e4065597667bdb14f2071657b8
|
[] |
no_license
|
cran/yamlet
|
233e29fc38d75205d4cc04db5a81af49dc05a5d5
|
3f494a19ab2e1cdb426606af40304309c78603ca
|
refs/heads/master
| 2023-09-04T00:52:18.417901
| 2023-08-24T05:00:02
| 2023-08-24T06:31:30
| 236,960,454
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,951
|
rd
|
conditionalize.data.frame.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/conditionalize.R
\name{conditionalize.data.frame}
\alias{conditionalize.data.frame}
\title{Conditionalize Attributes of Data Frame}
\usage{
\method{conditionalize}{data.frame}(x, column, attribute, test, value, ...)
}
\arguments{
\item{x}{data.frame}
\item{column}{unquoted name of column to conditionalize}
\item{attribute}{unquoted name of attribute to create for column}
\item{test}{unquoted name of column to test}
\item{value}{unquoted name of column supplying attribute value}
\item{...}{ignored arguments}
}
\value{
class 'decorated' 'data.frame'
}
\description{
Conditionalizes attributes of data.frame.
Creates a conditional \code{attribute} definition
for \code{column} by mapping \code{value} to
\code{test}. Only considers records where
both \code{test} and \code{value} are defined,
and gives an error if there is not one-to-one mapping.
Can be used with write methods as an alternative
to hand-coding conditional metadata.
}
\details{
If the test column is character, individual
elements should not contain both single and
double quotes. For the conditional expressions,
these values will be single-quoted by default,
or double-quoted if they contain single quotes.
}
\examples{
library(magrittr)
library(dplyr)
library(csv)
file <- system.file(package = 'yamlet', 'extdata','phenobarb.csv')
x <- as.csv(file)
head(x,3)
# suppose we have an event label stored as a column:
x \%<>\% mutate(evid = ifelse(
event == 'dose',
'dose of drug administered',
'serum phenobarbital concentration'
)
)
# We can define a conditional label for 'value'
# by mapping evid to event:
x \%<>\% conditionalize(value, label, event, evid)
x \%>\% as_yamlet
x \%>\% write_yamlet
}
\seealso{
Other conditionalize:
\code{\link{conditionalize}()}
}
\concept{conditionalize}
\keyword{internal}
|
d2ce771cb6835599598abf3e36052d8d02145c16
|
b1da3e6d4a851428d48314e848e7d865ecbff895
|
/man/Oyster_plot_cross_section.Rd
|
5e6df3257f28e7bd70e4bcbfeeb8cc60e79b8448
|
[] |
no_license
|
cran/shelltrace
|
ee2476844d44cce51e1c9738c1ae9f156bf6087c
|
c7156d5f8d7583bc29192fdd9541f3bd727f95a5
|
refs/heads/master
| 2021-07-07T15:30:42.634309
| 2017-10-06T10:35:20
| 2017-10-06T10:35:20
| 105,997,965
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,543
|
rd
|
Oyster_plot_cross_section.Rd
|
\name{Oyster_plot_cross_section}
\alias{Oyster_plot_cross_section}
\title{Plot the converted shell cross section}
\description{Simple function that returns a plot of the shell cross section
after it has been converted to a common X-axis.
de Winter, N. J. (2017) <doi:10.5194/gmd-2017-137>}
\usage{
Oyster_plot_cross_section(cross_section)
}
\arguments{
\item{cross_section}{Digitized cross section of the shell with shell top,
bottom and growth increments relative to a common X-axis}
}
\details{Plotting of digitized cross section after first modelling step to
verify the correct digitization of the shell increments}
\value{Opens a new plotting window to plot the shell cross section based on
its X- and Y-coordinates
}
\references{de Winter, N. J.: ShellTrace v1.0 - A new approach for modelling
growth and trace element uptake in marine bivalve shells: Model
verification on pacific oyster shells (Crassostrea gigas), Geosci.
Model Dev. Discuss., https://doi.org/10.5194/gmd-2017-137, in review,
2017.}
\author{Niels J. de Winter}
\note{Please cite Geoscientific Model Development paper dealing with the
ShellTrace model}
\source{
\href{https://github.com/nielsjdewinter/ShellTrace}{GitHub} \cr
\href{https://doi.org/10.5194/gmd-2017-137}{Manuscript} \cr
\href{https://doi.org/10.5194/gmd-2017-137-supplement}{Supplementary data} \cr
\href{http://nidewint.wixsite.com/nielsdewinter}{Author website}
}
\examples{
Oyster_plot_cross_section(cross_section)
}
|
2dbc8848b9af526392e3ebdedede667fa980c491
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/galgo/examples/bestFitness.Galgo.Rd.R
|
f987d1fb6a631adb34bcdd33a868d85b8b8dbe2a
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 734
|
r
|
bestFitness.Galgo.Rd.R
|
# Extracted example from the galgo package documentation for
# bestFitness.Galgo: builds a small genetic-algorithm world, evolves it
# toward a goal fitness, and inspects the best/maximum chromosomes.
library(galgo)
### Name: bestFitness.Galgo
### Title: Returns the fitness of the best chromosome
### Aliases: bestFitness.Galgo Galgo.bestFitness bestFitness.Galgo
### bestFitness,Galgo-method bestFitness
### Keywords: methods internal methods
### ** Examples
# World of 2 populations x 2 niches, each niche holding 10 chromosomes
# of 5 genes -- values per the galgo constructors above.
wo <- World(niches=newRandomCollection(Niche(chromosomes=newRandomCollection(
Chromosome(genes=newCollection(Gene(shape1=1, shape2=100),5)), 10),2),2))
# Stop evolving once fitness reaches 0.75; plot progress each generation.
ga <- Galgo(populations=newRandomCollection(wo,1), goalFitness = 0.75,
callBackFunc=plot,
fitnessFunc=function(chr, parent) 5/sd(as.numeric(chr)))
evolve(ga)
best(ga)
max(ga) # the Maximum chromosome may be different to the best
bestFitness(ga)
maxFitness(ga)
|
204282a6019d7a552758ad7ca5017b43c13e6630
|
7aee837d5d59a606d8a8eebc7567982bab96300c
|
/plot1.R
|
d3023ca81459eb00e987080c7a0202fd2c3ec2b5
|
[] |
no_license
|
jorditejedor/ExDA_4
|
4e07b74a28094025842394df112613f7e6cc04a8
|
b3a2bcc3e4bde411dfa62e31a24b33c77cfe2ea4
|
refs/heads/master
| 2023-08-14T17:12:35.427075
| 2021-10-21T17:30:46
| 2021-10-21T17:30:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 589
|
r
|
plot1.R
|
# Exploratory Data Analysis course, plot 1: total US PM2.5 emissions by year.
# Relies on get_pm25() defined in the sourced DownloadData.R script.
source("~/R/ExDA_4/DownloadData.R")
# Load the data only if it is not already in the workspace (loading is slow).
if(!("pm25" %in% ls())) {
pm25 <- get_pm25()
}
# Question 1
# Have total emissions from PM2.5 decreased in the United States from 1999
# to 2008?
# Using the base plotting system, make a plot showing the total PM2.5 emission
# from all sources for each of the years 1999, 2002, 2005, and 2008.
# Sum emissions within each year, ignoring missing values.
totalem <-with(pm25,tapply(Emissions,year,sum,na.rm=TRUE))
# Write the line plot to a PNG; custom x-axis ticks every 3 years.
png("plot1.png")
plot(names(totalem),totalem,type="o",xlab="Year",ylab="Emissions",xaxt="n")
axis(1,at=seq(1999,2008,by=3))
title(main="Total Emissions per Year in US")
dev.off()
rm(totalem)
|
c80cba9c1bf9ccdd752e95cfdb552d8ab36156bd
|
59c608e9f0ea550529a90f52356a3a7bb3a63df9
|
/code/GTFS files - Combining all databases.R
|
956296f2c66089cbcc2bbff7c5a0b27838056373
|
[] |
no_license
|
cgettings/NYC-Subway-Data
|
bd7d29bdf6a6549676119460e8d5fac82e36f4f0
|
a210d7224f6152ebf5fe53dba69dd910883bff66
|
refs/heads/master
| 2021-04-26T22:53:15.928032
| 2018-03-05T09:10:18
| 2018-03-05T09:10:18
| 123,891,295
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,298
|
r
|
GTFS files - Combining all databases.R
|
############################################################################-
############################################################################-
##
## NYC Subway GTFS Real Time Archive - Combining all monthly databases ----
##
############################################################################-
############################################################################-
#=========================#
#### Loading packages ####
#=========================#
library(magrittr)
library(dplyr)
library(purrr)
library(lubridate)
library(stringr)
library(iterators)
library(readr)
library(DBI)
library(dbplyr)
library(glue)
# setwd("D:/Files/BASP/R analyses/NYC/MTA/Subway/Subway Time/GTFS Files/2016-2017")
#=========================#
#### Setting up ####
#=========================#
# Open one monthly database and capture zero-row "templates" of both tables
# (head(0) + collect() yields an empty data frame carrying the schema).
# NOTE(review): the templates are not used later in the visible script --
# possibly leftovers from an earlier approach; confirm before removing.
subway_gtfs_2017_05 <- dbConnect(RSQLite::SQLite(), "./2016-2017/subway_gtfs_2017_05_db.sqlite3")
trip_updates_template <-
subway_gtfs_2017_05 %>%
tbl("trip_updates") %>%
head(0) %>%
collect()
vehicle_position_template <-
subway_gtfs_2017_05 %>%
tbl("vehicle_position") %>%
head(0) %>%
collect()
dbDisconnect(subway_gtfs_2017_05)
#==========================#
#### Folder structures ####
#==========================#
# base <- "D:/Files/BASP/R analyses/NYC/MTA/Subway/Subway Time/GTFS Files/2016-2017"
base <- "./2016-2017/"
# List the monthly .sqlite3 database files to be merged.
# NOTE(review): "." is a regex wildcard in str_subset(); "\\.sqlite3$" would
# be stricter and avoid matching e.g. "xsqlite3" names.
file_names <-
list.files(
base,
full.names = FALSE,
recursive = FALSE
) %>%
str_subset(".sqlite3")
#==========================#
#### Looping the loop ####
#==========================#
# Destination database that accumulates every month's rows.
subway_gtfs_2016_2017 <- dbConnect(RSQLite::SQLite(), "./2016-2017/subway_gtfs_2016_2017_db.sqlite3")
# Append each monthly database's tables into the combined 2016-2017 db.
# Rows are streamed in 100k-row chunks to bound memory use; a counter is
# printed per chunk as a progress indicator.
for (i in seq_along(file_names)) {
  subway_gtfs_res_db <- dbConnect(RSQLite::SQLite(), glue("./2016-2017/{file_names[i]}"))

  # ---- trip_updates ----
  i_trip_updates_res <- icount()
  trip_updates_res <-
    dbSendQuery(
      conn = subway_gtfs_res_db,
      statement = "SELECT * FROM trip_updates")
  while (!dbHasCompleted(trip_updates_res)) {
    cat("trip_updates_res: ", nextElem(i_trip_updates_res), ", ", sep = "")
    trip_updates_chunk <- dbFetch(trip_updates_res, n = 100000)
    dbWriteTable(
      subway_gtfs_2016_2017,
      "trip_updates",
      value = trip_updates_chunk,
      append = TRUE,
      temporary = FALSE
    )
  }
  # Release the result set before running the next query on this connection.
  dbClearResult(trip_updates_res)
  cat("\n")
  cat("\n")

  # ---- vehicle_position ----
  i_vehicle_position_res <- icount()
  vehicle_position_res <-
    dbSendQuery(
      conn = subway_gtfs_res_db,
      statement = "SELECT * FROM vehicle_position")
  cat("\n-------------------------------\n")
  while (!dbHasCompleted(vehicle_position_res)) {
    # BUG FIX: the progress counter previously advanced i_trip_updates_res
    # here, leaving i_vehicle_position_res unused and the printed chunk
    # numbers wrong.
    cat("vehicle_position_res: ", nextElem(i_vehicle_position_res), ", ", sep = "")
    vehicle_position_chunk <- dbFetch(vehicle_position_res, n = 100000)
    dbWriteTable(
      subway_gtfs_2016_2017,
      "vehicle_position",
      value = vehicle_position_chunk,
      append = TRUE,
      temporary = FALSE
    )
  }
  dbClearResult(vehicle_position_res)
  cat("\n-------------------------------\n")
  cat("\n")
  gc()
  dbDisconnect(subway_gtfs_res_db)
}
#==========================#
#### Creating indexes
#==========================#
db_create_index(subway_gtfs_2016_2017, "trip_updates", "trip_id")
db_create_index(subway_gtfs_2016_2017, "trip_updates", "route_id")
db_create_index(subway_gtfs_2016_2017, "trip_updates", "stop_sequence")
db_create_index(subway_gtfs_2016_2017, "trip_updates", "stop_id")
db_create_index(subway_gtfs_2016_2017, "vehicle_position", "trip_id")
db_create_index(subway_gtfs_2016_2017, "vehicle_position", "route_id")
db_create_index(subway_gtfs_2016_2017, "vehicle_position", "vehicle_id")
###########################################################################################
###########################################################################################
n <- 10000000
# arrival_delay <-
# tbl(subway_gtfs_2016_2017, "trip_updates") %>% head(n) %>% pull(arrival_delay)
#
# start_date <-
# tbl(subway_gtfs_2016_2017, "trip_updates") %>% head(n) %>% pull(start_date)
random_1 <- runif(n, 0, 9) %>% round() #%>% as.integer()
random_2 <- runif(n, 1000000, 9999999) %>% round() #%>% as.integer()
###########################################################################################
###########################################################################################
gdata::object.size(random_1)
gdata::object.size(random_2)
###########################################################################################
###########################################################################################
rows <- integer(0L)
trip_updates_res <-
dbSendQuery(
conn = subway_gtfs_2016_2017,
statement = "SELECT arrival_delay FROM trip_updates")
i_trip_updates_res <- icount()
while (!dbHasCompleted(trip_updates_res)) {
cat(nextElem(i_trip_updates_res), ", ", sep = "")
trip_updates_chunk <- dbFetch(trip_updates_res, n = 1e+6)
rows <- append(rows, nrow(trip_updates_chunk))
gc()
}
# write_lines(nrow(trip_updates_chunk), "rows.csv", append = TRUE)
dbClearResult(trip_updates_res)
###########################################################################################
###########################################################################################
n <- 1e6
trip_updates_1e6 <-
tbl(subway_gtfs_2016_2017, "trip_updates") %>%
head(n) %>%
collect()
trip_updates_1e6_2 <- distinct(trip_updates_1e6)
###########################################################################################
###########################################################################################
subway_gtfs_2016_2017 <-
dbConnect(RSQLite::SQLite(), "./2016-2017/subway_gtfs_2016_2017_db.sqlite3")
subway_gtfs_2016_2017_distinct <-
dbConnect(RSQLite::SQLite(), "./2016-2017/subway_gtfs_2016_2017_distinct_db.sqlite3")
###########################################################################################
###########################################################################################
trip_updates_res <-
dbSendQuery(
conn = subway_gtfs_2016_2017,
statement = "SELECT * FROM trip_updates")
rows <- integer(0L)
rows_distinct <- integer(0L)
i_trip_updates_res <- icount()
###----###----###----###----###----###----###----###----###----###----###
while (!dbHasCompleted(trip_updates_res)) {
cat(nextElem(i_trip_updates_res), ", ", sep = "")
###----###----###----###----###----###----###----###----###----###----###
trip_updates_chunk <- dbFetch(trip_updates_res, n = 1e6)
trip_updates_chunk_distinct <- distinct(trip_updates_chunk)
rows <- append(rows, nrow(trip_updates_chunk))
rows_distinct <- append(rows_distinct, nrow(trip_updates_chunk_distinct))
###----###----###----###----###----###----###----###----###----###----###
dbWriteTable(
subway_gtfs_2016_2017_distinct,
"trip_updates",
value = trip_updates_chunk_distinct,
append = TRUE,
temporary = FALSE
)
gc()
}
write_csv(data_frame(rows = rows, rows_distinct = rows_distinct), "rows.csv", append = FALSE)
###########################################################################################
###########################################################################################
dbClearResult(trip_updates_res)
dbDisconnect(subway_gtfs_2016_2017)
dbDisconnect(subway_gtfs_2016_2017_distinct)
###########################################################################################
###########################################################################################
subway_gtfs_2016_2017_distinct <-
dbConnect(RSQLite::SQLite(), "./2016-2017/subway_gtfs_2016_2017_distinct_db.sqlite3")
db_create_index(subway_gtfs_2016_2017_distinct, "trip_updates", "trip_id")
db_create_index(subway_gtfs_2016_2017_distinct, "trip_updates", "route_id")
db_create_index(subway_gtfs_2016_2017_distinct, "trip_updates", "stop_sequence")
db_create_index(subway_gtfs_2016_2017_distinct, "trip_updates", "stop_id")
###########################################################################################
###########################################################################################
subway_gtfs_2016_2017_distinct <-
dbConnect(RSQLite::SQLite(), "./2016-2017/subway_gtfs_2016_2017_distinct_db.sqlite3")
###########################################################################################
###########################################################################################
vehicle_position_res <-
dbSendQuery(
conn = subway_gtfs_2016_2017,
statement = "SELECT * FROM vehicle_position")
rows <- integer(0L)
rows_distinct <- integer(0L)
i_vehicle_position_res <- icount()
###----###----###----###----###----###----###----###----###----###----###
while (!dbHasCompleted(vehicle_position_res)) {
cat(nextElem(i_vehicle_position_res), ", ", sep = "")
###----###----###----###----###----###----###----###----###----###----###
vehicle_position_chunk <- dbFetch(vehicle_position_res, n = 1e6)
vehicle_position_chunk_distinct <- distinct(vehicle_position_chunk)
rows <- append(rows, nrow(vehicle_position_chunk))
rows_distinct <- append(rows_distinct, nrow(vehicle_position_chunk_distinct))
###----###----###----###----###----###----###----###----###----###----###
dbWriteTable(
subway_gtfs_2016_2017_distinct,
"vehicle_position",
value = vehicle_position_chunk_distinct,
append = TRUE,
temporary = FALSE
)
gc()
}
write_csv(data_frame(rows = rows, rows_distinct = rows_distinct), "rows2.csv", append = FALSE)
###########################################################################################
###########################################################################################
dbClearResult(vehicle_position_res)
dbDisconnect(subway_gtfs_2016_2017_distinct)
###########################################################################################
###########################################################################################
# Reconnect and index the deduplicated vehicle_position table.
subway_gtfs_2016_2017_distinct <-
  dbConnect(RSQLite::SQLite(), "./2016-2017/subway_gtfs_2016_2017_distinct_db.sqlite3")
# BUG FIX: these indexes were being created on subway_gtfs_2016_2017 (already
# disconnected above); mirroring the trip_updates section, they belong on the
# *_distinct database that was just reconnected.
db_create_index(subway_gtfs_2016_2017_distinct, "vehicle_position", "trip_id")
db_create_index(subway_gtfs_2016_2017_distinct, "vehicle_position", "route_id")
db_create_index(subway_gtfs_2016_2017_distinct, "vehicle_position", "vehicle_id")
###########################################################################################
###########################################################################################
tbl(subway_gtfs_2016_2017_distinct, "trip_updates") %>%
pull(arrival_time) %>%
length()
|
8b41dadcdf4d36b0f055761ada9d6237299566af
|
54149d7b01a2aa6f5715d8425a7137747b182f3e
|
/getBrazil.R
|
acf501df2c51a0f37c8da3aa8805261a3d41afe4
|
[
"MIT"
] |
permissive
|
benflips/nCovForecast
|
9c70d1499a156a6a8304e60ee17caface97b282c
|
35988f40bf7772869ec6e5b9a16f216e66f34e78
|
refs/heads/master
| 2022-04-30T16:56:07.367007
| 2022-03-24T00:48:47
| 2022-03-24T00:48:47
| 246,886,253
| 51
| 43
|
MIT
| 2022-03-24T00:48:48
| 2020-03-12T16:52:44
|
R
|
UTF-8
|
R
| false
| false
| 76
|
r
|
getBrazil.R
|
# Fetch COVID-19 data for Brazil via the shared helper in getDataGeneral.R.
# NOTE(review): the "WithoutRecovered" variant is used -- presumably the
# covid19datahub feed lacks recovered counts for Brazil; confirm against
# getDataGeneral.R.
source('getDataGeneral.R')
getDataCovid19datahubWithoutRecovered('Brazil')
|
0c0f4bd0089d044390949f41e5c73504352ee9d1
|
701f69c6fe2f51801d1ebd12c4ae82d7b077989e
|
/ncbi_functions.R
|
b626c54656ec89887707205e538b99b876f09c0f
|
[] |
no_license
|
lmjakt/ncbi_R
|
ddac4b43be3558e7ed4ef32550dbcd8e9de3678c
|
675819076e15b84f3ad0217a250197020a2a853f
|
refs/heads/main
| 2023-07-02T22:23:20.623602
| 2021-08-06T08:07:36
| 2021-08-06T08:07:36
| 393,291,327
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,568
|
r
|
ncbi_functions.R
|
## in R we can simply use
## readLines to read in a web page. We can presumably also use
## open(), readLine to process bigger web-pages. But here we simply get the simplest
## ones.
## Seems wrong to define variables here; instead we will make a function
## that returns a list of base urls and other useful things.
## Base URL and endpoint suffixes for the NCBI E-utilities API, bundled
## into a single list so the other functions can take them as one argument.
urlInfo <- function(){
  eutils_base <- "https://eutils.ncbi.nlm.nih.gov/entrez/eutils/"
  list(
    base = eutils_base,
    search_suffix = "esearch.fcgi?",
    summary_suffix = "esummary.fcgi?",
    data_suffix = "efetch.fcgi?"
  )
}
## other functions to take this list as a an argument
## terms is a character vector that will be combined
## into a single string
## Query the NCBI esearch endpoint and return the raw response lines.
## `url` is the list from urlInfo(); `terms` is a character vector joined
## with "+" into one query string. Performs a network request via readLines().
search.ncbi <- function(url, db="pubmed", terms, type="id", max=0){
  query <- paste0(url$base, url$search_suffix, "db=", db, "&",
                  "term=", paste(terms, collapse="+"), "&rettype=", type)
  if (max && max > 0) {
    query <- paste0(query, "&retmax=", max)
  }
  readLines(query)
}
## Run one esearch per year, restricting each query with a "[pdat]"
## (publication date) term. Returns a list of response-line vectors,
## one element per year. NOTE(review): the ".py" in the name appears to
## be historical and does not indicate Python.
search.ncbi.py <- function(url, years, db="pubmed", terms, type="id", max=0){
  joined_terms <- paste(terms, collapse="+")
  yearly_queries <- paste(joined_terms,
                          paste(years, "[pdat]", sep=""),
                          sep="+AND+")
  lapply(yearly_queries, function(q){
    search.ncbi(url, db=db, terms=q, type=type, max=max)
  })
}
## Pull the numeric IDs out of <Id>...</Id> lines in an esearch response.
## Returns a character vector of the digits from each matching line.
extract.ids <- function(lines){
  id_lines <- grep("<Id>([0-9]+)</Id>$", lines, value=TRUE)
  gsub("[^0-9]", "", id_lines)
}
## Return the result count from an esearch response as a numeric scalar.
## Parses the first <Count>...</Count> element found in `lines`;
## returns NA if no such element is present.
extract.count <- function(lines){
  # BUG FIX: the original grepped a global `tmp[[1]]` instead of the
  # `lines` argument, so the function ignored its input. Also use `.*`
  # instead of `.+` around the capture so a line consisting of exactly
  # "<Count>N</Count>" still parses.
  count_lines <- grep("<Count>[0-9]+</Count>", lines, value=TRUE)
  as.numeric(sub(".*?<Count>([0-9]+)</Count>.*", "\\1", count_lines)[1])
}
|
c65dfaa8502e5e1143dfdfc24cac3d369b95cbe9
|
c20629bc224ad88e47849943e99fe8bc6ccb1f17
|
/2018-10-05_rms-package-test-drive.R
|
b1b2acd26ca6a8aef9ca0ade7dea730d25d585a0
|
[] |
no_license
|
nayefahmad/R-vocab-and-experiments
|
71a99e4d3ff0414d1306a5c7cabfd79b49df17f9
|
4384d3d473b0a9d28d86c3d36b0b06a1f91b862e
|
refs/heads/master
| 2022-12-13T01:12:11.134952
| 2020-08-24T04:59:52
| 2020-08-24T04:59:52
| 103,333,690
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 610
|
r
|
2018-10-05_rms-package-test-drive.R
|
#*************************************************************
# RMS PACKAGE TEST DRIVE
#*************************************************************
# Scratch script exercising rms::lrm with the bootcov/bootBCa bootstrap
# utilities on simulated data.
library(rms) # regression modelling strategies
## Not run:
# Two uniform predictors and a random binary outcome.
x1 <- runif(100); x2 <- runif(100); y <- sample(0:1, 100, TRUE)
# x=TRUE, y=TRUE retain the design matrix/response, required by bootcov().
f <- lrm(y ~ x1 + x2, x=TRUE, y=TRUE)
# Save the RNG state so bootBCa() can reproduce the bootstrap.
seed <- .Random.seed
b <- bootcov(f)
# Get estimated log odds at x1=.4, x2=.6
# NOTE(review): the second row (x1=2, x2=3) lies outside the simulated
# [0, 1] predictor range and contradicts the comment above -- confirm
# whether it is intentional.
X <- cbind(c(1,1), x1=c(.4,2), x2=c(.6,3))
est <- X
ests <- t(X)
bootBCa(est, ests, n=100, seed=seed)
bootBCa(est, ests, type='bca', n=100, seed=seed)
bootBCa(est, ests, type='basic', n=100, seed=seed)
|
9f362389f5a094b5df4604b4c6494ee7315c7812
|
97f4a0ad6a483715336ecb53fc31abedec149b00
|
/5_Visualization_ggplot2/7_statistical_transformations.R
|
b4b8bd027e4d0636a9635f5cae7c5d9c3fd89d98
|
[] |
no_license
|
cosimino81/R-biginner-tutorial
|
3c2d28f89f5812fdde80c0933df02d24265c43af
|
5763ff4d3d5e398885fe19d99d60bec3d2e4ab0b
|
refs/heads/master
| 2020-03-28T06:18:49.105979
| 2018-09-07T13:52:28
| 2018-09-07T13:52:28
| 147,826,956
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,026
|
r
|
7_statistical_transformations.R
|
# ggplot2 tutorial: aesthetics, layers, mapping vs setting, histograms,
# density charts, and statistical transformations (Movie-Ratings data).
# Load the data
getwd()
setwd("/home/CURIACOSI1/Cosimo_Projects/Cosimo_RTesting/data")
df <- read.csv('Movie-Ratings.csv')
head(df)
# Change the column names
colnames(df) <- c("Film", "Genre", "CriticRating", "AudienceRating", "BudgetMillions", "Year")
head(df)
# Look at the structure
# In a data frame, "Factor" columns are categorical variables stored as
# integer codes
str(df)
# Summary of the df
summary(df)
# As we can see, the "Year" variable is not treated as a factor, so we will
# convert this variable to factor
df$Year <- factor(df$Year)
str(df)
# ------------------------ Aesthetics --------------------
# Aesthetics: how the data are mapped in order to be visualized
# Load the package
library(ggplot2)
# 1st step: aesthetic
ggplot(data = df, aes(x = CriticRating, y = AudienceRating))
# 2nd: geom
ggplot(data = df, aes(x = CriticRating, y = AudienceRating)) +
geom_point()
# 3rd: add colors
ggplot(data = df, aes(x = CriticRating, y = AudienceRating, colour= Genre)) +
geom_point()
# 4th: add size
ggplot(data = df, aes(x = CriticRating, y = AudienceRating, colour= Genre, size=BudgetMillions,
alpha= 0.5)) +
geom_point()
# --------------------- Plotting with Layer ------------
# Using layers we combine objects sequentially
p <- ggplot(data = df, aes(x = CriticRating, y = AudienceRating, colour= Genre, alpha= 0.5))
# second layer
p + geom_point()
# third layer
p + geom_point() + geom_line()
# ------------------- Overriding Aesthetics ---------------
q <- ggplot(data = df, aes(x= CriticRating, y=AudienceRating, color= Genre))
q + geom_point()
# overriding aes
q + geom_point(aes(size= BudgetMillions, alpha = 0.5))
# overriding the axis
q + geom_point(aes(x= BudgetMillions)) + xlab("Budget Millions $$$")
# more examples
q + geom_point() + geom_line()
# line size
# NOTE(review): geom_line(size=) is deprecated in ggplot2 >= 3.4 in favour
# of linewidth=.
q + geom_point() + geom_line(size = 1)
# ---------------- Mapping vs Setting
r <- ggplot(data = df, aes(x= CriticRating, y=AudienceRating))
r + geom_point()
# Add color
# If we want to "map" a color we use the function aes()
# If we want to "set" a color we don't use it
# 1. By mapping (what we have done so far)
r + geom_point(aes(color=Genre))
# 2. By setting
r + geom_point(color="red")
# 1. mapping the size
r + geom_point(aes(size= BudgetMillions))
# 2. setting the size
r + geom_point(size = 5)
# 1. mapping the transparency and color
r + geom_point(aes(alpha = 0.5, color= Genre))
# 2. setting the transparency and color
r + geom_point(alpha = 0.5, color= "red")
# ------------------------- Histogram and Density Charts --------------------
s <- ggplot(data = df, aes(x = BudgetMillions))
s + geom_histogram(binwidth = 10)
# adding the color by filling the bars
s + geom_histogram(binwidth = 10, aes(fill= Genre))
# adding the color on the borders
s + geom_histogram(binwidth = 10, aes(fill= Genre), color= "Black")
# ------------------------- Density Chart -------------------------------
s + geom_density(aes(fill = Genre), alpha =0.6, position = "stack")
# ------------------- Starting Layer ---------------------
# NOTE(review): `t` shadows base::t (matrix transpose) in this session.
t <- ggplot(data = df, aes(x = AudienceRating ))
t + geom_histogram(binwidth = 10, fill= "White", color= "Blue")
# Another way: override the aesthetic
t <- ggplot(data = df)
t + geom_histogram(binwidth = 10, aes(x = AudienceRating ), fill= "White", color= "Blue")
# Critic rating
t <- ggplot(data = df)
t + geom_histogram(binwidth = 10, aes(x = CriticRating ), fill= "White", color= "Blue")
# -------------------------- Statistical transformation ----------------
?geom_smooth()
u <- ggplot(data = df, aes(x = CriticRating, y = AudienceRating, color= Genre))
u + geom_point(size=1) + geom_smooth(fill = NA, size = 0.5)
# boxplot
u <- ggplot(data = df, aes(x = Genre, y = AudienceRating, color = Genre))
u + geom_jitter(size=0.5) + geom_boxplot(alpha=0.5)
# my box plot
s <- ggplot(data = df, aes(x = Year, y= BudgetMillions, color= Year))
s + geom_jitter(size= 0.5) + geom_boxplot(size = 0.5, alpha = 0.5)
|
4a7ea34411eb2478318544c823fe05c9dc8036dc
|
34226aea0b34f7ae2132165b5496f358be18ac1b
|
/R/equation_Comparison.R
|
b7920324be3eb5224ad64e9bb5ab41735f3320d1
|
[] |
no_license
|
gahoo/Codes
|
fa343bd30d76aca2c6ec82033495cb267f4f00fd
|
c150642c01e5b5224e2db856a096a631c93bf4f5
|
refs/heads/master
| 2021-01-01T18:54:07.598936
| 2012-02-17T08:43:08
| 2012-02-17T08:43:08
| 1,549,969
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,781
|
r
|
equation_Comparison.R
|
# Hypergeometric pmf: probability of exactly x successes when n items are
# sampled without replacement from a population of m items containing k
# successes. Equivalent to stats::dhyper(x, k, m - k, n).
Hypergeometric <- function(x, m, k, n) {
  successes <- choose(k, x)
  failures <- choose(m - k, n - x)
  successes * failures / choose(m, n)
}
# Cumulative hypergeometric probability: sums Hypergeometric(i, M, K, N)
# over i = 1..x.
# NOTE(review): the i = 0 term is never included, so this is P(1 <= X <= x),
# not the full CDF — confirm that is the intended quantity.
HypergeometricCDF <- function(x, M, K, N) {
  total <- 0
  # seq_len() is safe when x == 0 (the original 1:x would yield c(1, 0)).
  for (i in seq_len(x)) {
    # BUG FIX: the original summed hype(i, M, K, N), but hype() is not
    # defined anywhere in this file; Hypergeometric() (defined just above,
    # same argument order) is clearly the intended pmf.
    total <- total + Hypergeometric(i, M, K, N)
  }
  total
}
#######################################
# New Version
# More Concise
# Hypergeometric pmf, gene-set overlap parameterization: probability that two
# sets of sizes m1 and m2, drawn from a universe of N genes, share exactly i
# members. Equivalent to stats::dhyper(i, m1, N - m1, m2).
Hypergeometric <- function(i, m1, m2, N) {
  ways_overlap <- choose(m1, i)
  ways_rest <- choose(N - m1, m2 - i)
  ways_overlap * ways_rest / choose(N, m2)
}
# Upper-tail hypergeometric probability for a gene-set overlap test.
# Despite the "CDF" name, this sums Hypergeometric(i, m1, m2, N) for
# i = x .. min(m1, m2), i.e. P(overlap >= x) — the usual enrichment p-value.
# Arguments may be supplied either individually or packed into `pars`
# as c(x, m1, m2, N); a non-NULL `pars` overrides the named arguments
# (this shape lets the function be used with apply() over matrix rows).
HypergeometricCDF<-function(pars=NULL,x,m1,m2,N){
if(!is.null(pars)){
x=pars[1]
m1=pars[2]
m2=pars[3]
N=pars[4]
}
# Sum the pmf over all achievable overlap sizes from x up to min(m1, m2);
# relies on the sibling Hypergeometric(i, m1, m2, N) defined above.
sum(sapply(x:min(m1,m2),Hypergeometric,m1,m2,N))
}
#######################################
# Coidx
# Cooperation index of a 2x2 contingency table with cells a, b (first row)
# and c, d (second row): (a*d - b*c) / sqrt of the product of all four
# marginal totals (the phi-coefficient formula).
# The cells may be passed individually or packed into `pars` as c(a, b, c, d);
# a non-NULL `pars` overrides the named arguments (apply()-friendly).
cooperationindex <- function(pars = NULL, a, b, c, d) {
  if (!is.null(pars)) {
    a <- pars[1]
    b <- pars[2]
    c <- pars[3]
    d <- pars[4]
  }
  row1 <- a + b
  row2 <- c + d
  col1 <- a + c
  col2 <- b + d
  (a * d - b * c) / sqrt(row1 * row2 * col1 * col2)
}
#######################################
# Randomize Matrix
# !Needs modify, otherwise NaN
# Draw a single uniform value on [0, maximum) and truncate it to an integer,
# giving a pseudo-random integer in 0 .. maximum - 1 (0 when maximum is 0).
runint <- function(maximum) {
  draw <- runif(1, 0, maximum)
  as.integer(draw)
}
# Build a `num` x 4 matrix (columns a, b, c, d) of random non-negative
# entries whose rows each sum to `maximum`: a is drawn first, then b from
# what remains, then c, and d takes the remainder.
# (Original author note retained: needs modification, otherwise NaN can
# appear downstream — presumably in the index computed from these cells.)
randMatrix <- function(num, maximum) {
  # One truncated-uniform integer in 0 .. upper - 1 (inlined from runint()).
  draw_int <- function(upper) as.integer(runif(1, 0, upper))
  a <- as.integer(runif(num, 0, maximum))
  remaining <- maximum - a
  b <- vapply(remaining, draw_int, integer(1))
  remaining <- remaining - b
  c <- vapply(remaining, draw_int, integer(1))
  d <- remaining - c
  cbind(a, b, c, d)
}
##################
# Benchmark helper: generate `num` random 2x2 tables (rows summing to
# `maximum`) and time the two competing statistics over all rows.
# Returns c(cooperation-index time, hypergeometric-tail time) in seconds
# (first element of system.time(), i.e. user CPU time).
# Depends on the sibling functions randMatrix(), cooperationindex() and
# HypergeometricCDF() defined above; both statistics accept a packed
# 4-vector, which is why apply(mm, 1, ...) works here.
time_test<-function(num,maximum){
mm=randMatrix(num,maximum)
co_time<-system.time(coidx<-apply(mm,1,cooperationindex))[[1]]
hy_time<-system.time(hypecdf<-apply(mm,1,HypergeometricCDF))[[1]]
c(co_time,hy_time)
}
################
# Test
cooperationindex(a=303,b=1012,c=787,d=3174)
Hypergeometric(i=20,m1=60,m2=80,N=800)
HypergeometricCDF(x=02,m1=60,m2=80,N=800)
mm=randMatrix(100,1000)
apply(mm,1,sum)
system.time(coidx<-apply(mm,1,cooperationindex))
system.time(hypecdf<-apply(mm,1,HypergeometricCDF))
cbind(mm,coidx,hypecdf)
tobetest<-c(1:20)*500
result<-sapply(tobetest,time_test,1000);result
plot(tobetest,result[1,])
plot(tobetest,result[2,])
plot(result[1,],result[2,])
|
76ed1dc57ef2f6552f08dcb7dc4b6e841cce41df
|
bad538073a6ed5bd9f8f0653db02fdd433137380
|
/tests/test_pkgzip_create_from_bioc.R
|
b436d8d7d10304b6d8262c8b3978ed0b21f7a604
|
[
"Apache-2.0"
] |
permissive
|
WLOGSolutions/RSuite
|
5372cfc031b518e38c765b574a99944324a46275
|
da56a5d1f4a835c9b84b6cebc1df6046044edebe
|
refs/heads/master
| 2021-06-08T14:07:27.322986
| 2021-05-03T14:09:11
| 2021-05-03T14:09:11
| 104,214,650
| 157
| 11
|
Apache-2.0
| 2021-05-03T08:27:54
| 2017-09-20T12:48:44
|
R
|
UTF-8
|
R
| false
| false
| 995
|
r
|
test_pkgzip_create_from_bioc.R
|
#----------------------------------------------------------------------------
# RSuite
# Copyright (c) 2017, WLOG Solutions
#----------------------------------------------------------------------------
context("Testing if creation of PKGZIP from Bioconductor works properly [test_pkgzip_create_from_bioc]")
library(RSuite)
library(testthat)
source("R/test_utils.R")
source("R/project_management.R")
source("R/pkgzip_management.R")
test_that_managed("Create PKGZIP out of sources on Bioc (basic)", {
rver_40plus <- utils::compareVersion(RSuite:::current_rver(), "4.0") >= 0
skip_if_not(rver_40plus) # BiocGenerics needs R4.0 up; remotes does not properly handles Bioc branches
prj <- init_test_project(repo_adapters = c("CRAN"))
pkgzip <- init_test_pkgzip()
RSuite::pkgzip_build_bioc_package("BiocGenerics",
prj = prj, pkg_type = "source", path = pkgzip$path)
expect_that_pkgzip_contains("BiocGenerics", type = "source", pkgzip = pkgzip)
})
|
f9710a595b598265d8bd4fb77402e35cd1dccd88
|
319a13e48a7e26e5ab660c9dba30521834203dab
|
/RFiles/vector-prod2.R
|
72189083ffe89de13aad50c008d65f26dcc83bdb
|
[] |
no_license
|
marco-tn/StaticticsBasic
|
9b4914bec56571754b54481e53f389654178cb3b
|
6dba8d4ac01759cd1e7af302386b9a34f3594475
|
refs/heads/master
| 2020-08-03T13:06:57.357421
| 2019-09-30T03:00:07
| 2019-09-30T03:00:07
| 211,762,589
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 310
|
r
|
vector-prod2.R
|
# Demonstration of vector products in R (comments translated from Japanese).
a <- 1:3
b <- 4:6
a %*% b # inner product (the result is a 1x1 matrix)
try(a %*% (1:6)) # mismatched lengths raise an error (try() lets the script continue past it)
a * b # element-wise product
a * 1:6 # when lengths differ, the shorter vector is recycled periodically
a/b # division is also element-wise
|
d255643a3af3d53d835f2a7887630b4cc1454e3f
|
de1b2fcb453aa87d28d0bceb17b016a2b971f0e3
|
/cachematrix.R
|
2116b7079cda615237979d65b411fe24f1e61947
|
[] |
no_license
|
SorokaVladimir/ProgrammingAssignment2
|
a52bfc30dffd1dc31a7baf0d351bc54de44c7cd3
|
84cfd52069783cd94e9044ed96e63cd218e4a374
|
refs/heads/master
| 2022-04-18T12:20:44.650545
| 2020-04-18T10:36:11
| 2020-04-18T10:36:11
| 255,166,075
| 0
| 0
| null | 2020-04-12T20:39:07
| 2020-04-12T20:39:06
| null |
UTF-8
|
R
| false
| false
| 1,180
|
r
|
cachematrix.R
|
## Put comments here that give an overall description of what your
## functions do
## Write a short comment describing this function
#The function above is supposed to set and get the values of both the matrix and the inverse of the matrix
# Create a caching wrapper around a matrix. The returned list exposes:
#   set(y)            - replace the stored matrix (and drop any cached inverse)
#   get()             - retrieve the stored matrix
#   setInverse(inverse) - store a computed inverse in the cache
#   getInverse()      - retrieve the cached inverse, or NULL if not yet set
# State lives in this function's closure; cacheSolve() consumes this object.
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  set <- function(y) {
    x <<- y
    cached_inverse <<- NULL  # a new matrix invalidates the old inverse
  }
  get <- function() {
    x
  }
  setInverse <- function(inverse) {
    cached_inverse <<- inverse
  }
  getInverse <- function() {
    cached_inverse
  }
  list(set = set, get = get,
       setInverse = setInverse,
       getInverse = getInverse)
}
##Please include your own comment to explain your code (Required in Rubric)
#Function 2;cacheSolve is supposed to calculate the inverse of the matrix created using the first function
# Return the inverse of the matrix held in `x`, a cache object produced by
# makeCacheMatrix(). On a cache hit the stored inverse is returned (after a
# "getting cached data" message); on a miss the inverse is computed with
# solve(), stored back into the cache, and returned. Extra arguments in `...`
# are forwarded to solve().
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  cached <- x$getInverse()
  if (is.null(cached)) {
    fresh <- solve(x$get(), ...)
    x$setInverse(fresh)
    return(fresh)
  }
  message("getting cached data")
  cached
}
cacheMatrix <- makeCacheMatrix(matrix(1:4, ncol = 2))
cacheSolve(cacheMatrix)
#The code above will calculate the inverse of the matrix incase none is present within the cache
|
a7f54c14731a8adfd8618e487554e28a42c92fe7
|
73ad4f470c092807e2afe31b762ca3a17eca7eb7
|
/man/check_package.Rd
|
49c26c16cd1810b0b9bdcd884aad6f9d9024a269
|
[] |
no_license
|
cran/cleanr
|
252ac1819ecb2433a3a37985c009ecf9328d92a5
|
446f237b7c0e79eeca4dbe37edcfb77f7145334e
|
refs/heads/master
| 2023-06-28T04:37:30.608072
| 2023-06-16T07:30:24
| 2023-06-16T07:30:24
| 77,044,691
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,494
|
rd
|
check_package.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/wrappers.R
\name{check_package}
\alias{check_package}
\title{Check a Package}
\usage{
check_package(path, pattern = "\\\\.[rR]$", ...)
}
\arguments{
\item{path}{Path to the package to be checked.}
\item{pattern}{A pattern to search files with, see \code{\link{list.files}}.}
\item{...}{Arguments to be passed to \code{\link{check_file}}.}
}
\value{
\code{\link[base:invisible]{Invisibly}} \code{\link{TRUE}},
but see \emph{Details}.
}
\description{
Run \code{\link{check_file}} on a package's source.
}
\details{
The function catches the messages of "cleanr"-conditions
\code{\link{throw}}n by \code{\link{check_file}} and, if it caught any,
\code{\link{throw}}s them.
}
\examples{
# create a fake package first:
package_path <- file.path(tempdir(), "fake")
usethis::create_package(package_path, fields = NULL,
rstudio = FALSE, open = FALSE)
directory <- system.file("runit_tests", "source", "R_s4",
package = "cleanr")
file.copy(list.files(directory, full.names = TRUE), file.path(package_path,
"R"))
RUnit::checkTrue(cleanr::check_package(package_path, check_return = FALSE))
}
\seealso{
Other wrappers:
\code{\link{check_directory}()},
\code{\link{check_file_layout}()},
\code{\link{check_file}()},
\code{\link{check_function_layout}()},
\code{\link{check_functions_in_file}()}
}
\concept{wrappers}
|
1aaf51a15bce9a186f6ec3c429cde4481df7f35b
|
81d05363ac3532715a3076d94560b5b01d82010b
|
/preprocess_SNMFN.R
|
c9ceb82d9991cfe019d3f4439cc1c4e52615f8df
|
[] |
no_license
|
lihan-hub/DDCMNMF
|
c26ca74a9866a19eb9e9d5824524dab1f320c240
|
45efaf53eff182159f8edf1ec12e62ce5e18218c
|
refs/heads/main
| 2023-07-17T04:45:29.125543
| 2021-08-18T06:38:54
| 2021-08-18T06:38:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,164
|
r
|
preprocess_SNMFN.R
|
# library(R.matlab)
#load('InfluenceGraph_2017.Rdata')
# CC=read.csv('benchmarks/lung_subtype_benchmarks.csv',header = T)
patMutMatrix=read.table('Lung(102)106/LUSC_Mutation.txt')
patMutMatrix=t(patMutMatrix)
row.names(patMutMatrix)=gsub('\\.','-',row.names(patMutMatrix))
#inter=intersect(CC[,1],row.names(patMutMatrix))
#patMutMatrix=patMutMatrix[inter,]
patOutMatrix=read.table('Lung(102)106/LUNG_Gene_Expression.txt')
patOutMatrix=t(patOutMatrix)
row.names(patOutMatrix)=gsub('\\.','-',row.names(patOutMatrix))
#inter=intersect(CC[,1],row.names(patOutMatrix))
#patOutMatrix=patOutMatrix[inter,]
# Load candidate cancer driver genes from the NCG6 annotation file and subset
# to a cancer type of interest (columns 2 and 6 of the CSV: gene symbol and
# cancer-type annotation, presumably — TODO confirm against the file).
# NOTE(review): the three `drivers` assignments below overwrite each other,
# so only the LAST one (breast / pan-cancer) takes effect and is returned
# invisibly; the lung and colorectal lines look like leftover scratch
# toggles for switching cohorts. Confirm which cohort is intended before use.
# NOTE(review): reads 'NCG6_cancergenes.tsv.csv' from the working directory;
# `header = T` would be safer spelled `header = TRUE`.
NCG_drivers=function()
{
benchmarking=read.csv('NCG6_cancergenes.tsv.csv',header = T,na.strings = 'NA',sep=',')[c(2,6)]
# Lung + pan-cancer subset (overwritten below).
drivers=benchmarking[grep(pattern='.lung|lung|pan-cancer|.pan-cancer',benchmarking[,2]),]
####drivers=benchmarking[grep(pattern='.lung|lung',benchmarking[,2]),]
# Colorectal + pan-cancer subset (overwritten below).
drivers=benchmarking[grep(pattern='.colorectal_adenocarcinoma|colorectal_adenocarcinoma|pan-cancer|.pan-cancer',benchmarking[,2]),]
# Breast + pan-cancer subset — the only assignment that survives.
drivers=benchmarking[grep(pattern='.breast|breast|pan-cancer|.pan-cancer',benchmarking[,2]),]
}
drivers=read.table('Tamborero.txt',header = T,sep = '\t')
# Assemble the SNMNMF input structure and write it to a .mat file.
# Relies on globals defined at the top of this script: patMutMatrix,
# patOutMatrix, InfluenceGraph and drivers (no arguments are taken).
# Pipeline: filter/binarize the data via Generate_infG(), network-smooth the
# mutation matrix via smooth_matrix(), build a combined adjacency matrix over
# expression + mutation features, record per-block column index ranges, and
# export everything with R.matlab::writeMat().
main_function=function()
{
# Filtered mutation/expression matrices plus the gene-gene influence
# submatrix connecting them.
res=Generate_infG(patMutMatrix,patOutMatrix,InfluenceGraph,drivers)
# Network-propagation smoothing of the mutation profile (alpha = 0.5).
patMut=smooth_matrix(patMutMatrix,res$patMutMatrix,InfluenceGraph,0.5)
patOut=res$patOutMatrix
#patOut=smooth_matrix(patOutMatrix,res$patOutMatrix,InfluenceGraph,0.5)
# Within-layer influence submatrices for mutation and expression features.
Mutmut=InfluenceGraph[intersect(row.names(InfluenceGraph),colnames(patMut)),intersect(row.names(InfluenceGraph),colnames(patMut))]
Outout=InfluenceGraph[intersect(row.names(InfluenceGraph),colnames(res$patOutMatrix)),intersect(row.names(InfluenceGraph),colnames(res$patOutMatrix))]
# Block adjacency: [expression-expression, expression-mutation;
#                   mutation-expression, mutation-mutation].
finals=rbind(cbind(Outout,res$infG),cbind(t(res$infG),Mutmut))
# Feature matrix: expression columns first, then mutation columns.
data=cbind(patOut,patMut)
cc=c(ncol(patOut),ncol(patMut))
# Build [start, end] column ranges for each feature block.
XBlockInd=c()
count=1
for(i in cc)
{
if(count==1)
{temp=cc[count]
XBlockInd=rbind(XBlockInd,c(1,temp))
}
else
{
XBlockInd=rbind(XBlockInd,c(temp+1,temp+cc[count]))
temp=XBlockInd[count,2]
}
count=count+1
}
YBlockInd=NULL
SampleLabel=row.names(data)
FeatureLabel=colnames(data)
# Algorithm parameters; NCluster is cohort-specific (see trailing note).
params=list(NCluster=4,thrd_module=matrix(rep(c(1,0.5),3),nrow = 3,ncol = 2,byrow = T),nloop=5,maxiter=100,tol=10^(-6),thrNet11=0.0001,thrNet12=0.01,threNet22=0.0001,thrXr=10,thrXc=10)###lung 4 cluster,Breast 5 cluster,COLON 4
FeatureType=t(c('expression_data','driver_data'))
Input=list(data=data,XBlockInd=XBlockInd,SampleLabel=SampleLabel,FeatureLabel=FeatureLabel,FeatureType=FeatureType,netAdj=finals,params=params)
# Side effect: writes the MATLAB input file; hard-coded relative path.
writeMat('SNMNMF/zhang_input_SNMNMF/test/subsamples/COLON_SNMNMF.mat',Input=Input)
}
# Align the mutation matrix, the expression matrix and the gene-gene
# influence graph onto common gene sets.
#   patMutMatrix   - patients x genes binary mutation matrix
#   patOutMatrix   - patients x genes expression matrix
#   InfluenceGraph - gene x gene influence/adjacency matrix
#   drivers        - data frame whose first column lists driver gene symbols
# Returns a list: patMutMatrix / patOutMatrix restricted to graph-connected
# genes, infG (the expression-gene x mutation-gene influence submatrix with
# all-zero rows/columns dropped), and dysregulate (the binarized z-scored
# expression matrix).
Generate_infG=function(patMutMatrix,patOutMatrix,InfluenceGraph,drivers)
{
# Keep only known driver genes, then drop genes mutated in no patient.
patMutMatrix=patMutMatrix[,intersect(colnames(patMutMatrix),drivers[,1])]
if(length(which(colSums(patMutMatrix)==0))>0)
{
patMutMatrix=patMutMatrix[,-which(colSums(patMutMatrix)==0)]
}
# Restrict mutation genes to those present in the influence graph.
inter_mut=intersect(colnames(InfluenceGraph),colnames(patMutMatrix))
patMut=patMutMatrix[,inter_mut]
#process the expression data
# Column-wise z-score, then binarize: |z| >= 2 -> 1 (dysregulated), else 0.
filter_patOutMatrix=apply(patOutMatrix,2,scale)
index=which(filter_patOutMatrix<=(-2)|filter_patOutMatrix>=2)
# NOTE(review): if `index` is empty, `[-index]` selects ALL elements and
# the next line would zero the 1s as well — confirm inputs always contain
# at least one |z| >= 2 entry.
filter_patOutMatrix[index]=1
filter_patOutMatrix[-index]=0
# Drop expression genes dysregulated in no patient.
if(length(which(colSums(filter_patOutMatrix)==0))>0)
{
filter_patOutMatrix=filter_patOutMatrix[,-which(colSums(filter_patOutMatrix)==0)]
}
# apply() dropped the row names; restore the patient IDs.
row.names(filter_patOutMatrix)=row.names(patOutMatrix)
# Restrict expression genes to graph rows and cut the cross submatrix.
inter_out=intersect(row.names(InfluenceGraph),colnames(filter_patOutMatrix))
patOut=filter_patOutMatrix[,inter_out]
infG=InfluenceGraph[inter_out,inter_mut]
# Remove graph rows/columns with no connections at all.
if(length(which(rowSums(infG)==0))>0)
{
infG=infG[-which(rowSums(infG)==0),]
}
if(length(which(colSums(infG)==0))>0)
{
infG=infG[,-which(colSums(infG)==0)]
}
# Re-select the ORIGINAL (non-binarized) matrices on the surviving genes.
patMut=patMutMatrix[,intersect(colnames(infG),colnames(patMut))]
patOut=patOutMatrix[,intersect(row.names(infG),colnames(patOut))]
filter_patOutMatrix=filter_patOutMatrix[,intersect(colnames(filter_patOutMatrix),colnames(patOut))]
#patOut=patOut[,intersect(row.names(infG),colnames(patOut))]
return(list(patMutMatrix=patMut,patOutMatrix=patOut,infG=infG,dysregulate=filter_patOutMatrix))
}
# Network-propagation smoothing of a patient x gene mutation matrix over the
# gene-gene influence graph: three iterations of
#   F(t+1) = (1 - a) * F(0) + a * F(t) %*% W_rownorm
# where W_rownorm is the row-normalized influence graph and `a` is the
# diffusion weight (default 0.5). Columns that end up all-zero are dropped.
#   original_patMut - kept for the commented-out frequency-weighting variant
#                     below; not otherwise used in the live code.
smooth_matrix=function(original_patMut,patMutMatrix,InfluenceGraph,a=0.5)
{
#########below merge the mutation frequency into the patMutMatrix ###bad results
# freq=rowSums(original_patMut)
# original_patMut=original_patMut[1:nrow(original_patMut),]/freq
# patMutMatrix=original_patMut[,colnames(patMutMatrix)]
#####################################################
# Align genes between the mutation matrix and the graph.
inter=intersect(colnames(patMutMatrix),row.names(InfluenceGraph))
patMutMatrix=patMutMatrix[,inter]
InfluenceGraph=InfluenceGraph[inter,inter]
# Row-normalize the graph; rows with no edges are left untouched to avoid
# division by zero.
rowsum=rowSums(InfluenceGraph)
normalize_InfluenceGraph=InfluenceGraph
for(i in 1:length(rowsum))
{
if(rowsum[i]!=0)
{normalize_InfluenceGraph[i,]=InfluenceGraph[i,]/rowsum[i]}
}
# Fixed three propagation steps (no convergence check).
Ft=patMutMatrix
for(iter in 1:3)
{
Ft1=(1-a)*patMutMatrix+a*Ft%*%normalize_InfluenceGraph
Ft=Ft1
}
# if(length(which(rowSums(Ft1)==0))>0)
# {Ft1=Ft1[-which(rowSums(Ft1)==0),]
# }
# Drop genes whose smoothed signal is zero across all patients.
if(length(which(colSums(Ft1)==0))>0)
{
Ft1=Ft1[,-which(colSums(Ft1)==0)]
}
Ft1
}
|
4e58a8df691b030b6cadf642b243d2368466e3d9
|
ad8a6bd93c52bbd77de571c9ccd1b5faefde7ae5
|
/tests/testthat/test_characteristic_functions.R
|
71354e32e0f5e617c3388ca95f853590ae703688
|
[] |
no_license
|
randomchars42/eenv
|
eea304c3291811aa0c55d2539cc140e195a3f56d
|
be7adf9033d3802e5fdc066b074574d1b1bf6bf8
|
refs/heads/master
| 2021-10-24T11:43:46.627320
| 2019-03-25T19:15:47
| 2019-03-25T19:15:47
| 106,001,898
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,107
|
r
|
test_characteristic_functions.R
|
library(eenv)
context("Test characteristic functions")
test_that("characteristics are calculated correctly", {
data_test <- tibble::tibble(
id = 1 : 12,
sex = c("f", "f", "f", "m", "m", "f", "f", "m", "m", "f", "m", "f"), # 12/7/5 (t/f/m)
group = c("a", "b", "a", "b", "a", "b", "a", "b", "a", "b", "a", "b"), # a: 6/3/3 (t/f/m), b: 6/4/2 (t/f/m)
group2 = c("a", "b", "c", "a", "b", "c", "a", "b", "c", "a", "b", "c"), # a: 4/3/1 (t/f/m), b: 4/1/3 (t/f/m) c: 4/3/1(t/f/m)
value = c(2, 5, 6, 3, 9, 12, 2, 5, 6, 3, 9, 12))
expect_equal(
unname(characteristic_calc_count(
data = data_test$sex,
result = c(),
events = "f")),
7)
expect_equal(
unname(characteristic_calc_mean(
data = c(3, 5, 5, 3, 5, 5, 5, 3, 3, 3),
result = c())),
4)
#characteristic_get(data = data_test, characteristic = sex, group, group2, events = c("f"), template = "%c (%pt)")
#characteristic_get(data = data_test, characteristic = value, group, group2, quantiles = c("q10", "q5", "q25", "q50", "q75", "q75"), template = "%q50 (%q25 - %q75)", decimals = 0)
})
|
2499a43e7ce3f138a1cbd762457cb0fe1d2cf764
|
9f32f5e41ba10f68bbf2d60df3ef992031501eb4
|
/common/radar.R
|
39825734c79aff86a63522d3bd3536b80acd115f
|
[] |
no_license
|
chinamrwu/AMS
|
6c84380562b91c4a4e00a56c30914d378081f1b5
|
c3c0c5f3fe22752a1ec9651e190f3efd8dfa3334
|
refs/heads/master
| 2022-01-29T02:02:41.906857
| 2019-06-25T07:25:28
| 2019-06-25T07:25:28
| 188,001,820
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 488
|
r
|
radar.R
|
#devtools::install_github('neuhausi/canvasXpress')
drawradar <- function(data,strTitle="radar plot"){
library(canvasXpress)
c <- canvasXpress(
data=data,
circularArc=360,
circularRotate=0,
circularType="radar",
colorScheme="Bootstrap",
graphType="Circular",
legendPosition="top",
ringGraphType=list("area"),
showTransition=TRUE,
smpLabelScaleFontFactor =1.5,
title=strTitle,
transitionStep=50,
transitionTime=1500
)
c
}
|
2b6110b4d64ccbfb73ef589293872e3316da8196
|
c97be4ad1dcfaa08be495f46ea5f4f51b2d1a280
|
/Bonus_CorrelationAnalysis/CorrelationAnalysis_MeanValues_ScaledLog_FilterN.R
|
b1671289b9884ee114f26832f01e444da5ee1973
|
[] |
no_license
|
mlocardpaulet/TCR_ABStim2018
|
7d9c78da34ddbf4e7eb2074aca03c28a073dda53
|
16b76aa11c7b82fbd67da8d7f527553fd8d3c3fc
|
refs/heads/master
| 2022-06-27T08:32:39.491643
| 2022-05-11T11:30:26
| 2022-05-11T11:30:26
| 145,111,305
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,807
|
r
|
CorrelationAnalysis_MeanValues_ScaledLog_FilterN.R
|
################################################################################
# Correlation analysis of the Merged samples.
################################################################################
################################################################################
###### Import table
################################################################################
load("RData/08_StatResults_Phospho.RData")
################################################################################
###### Packages
################################################################################
library(reshape2)
library(Hmisc) # Pearson correlation
library(matrixStats) # rowMax()
library(ape) # as.phylo()
library(gplots)
################################################################################
# For this analysis I replace missing values with the mean values of 200 loops. See the Scripts in the folder "StatisticalAnalysis" for more information.
################################################################################
tab <- export[,grepl("MeanLoops", names(export))]
tabtech <- export[,grepl("MeanTech", names(export))]
row.names(tab) <- export$psiteID
# Keep only the sites that have a minimum of 5 measured values in a minimum of one data set (TiO2 or pY-IP).
k <- sapply(seq_len(nrow(tabtech)), function(x) {
length(tabtech[x,][!is.na(tabtech[x,])])
})
tab <- tab[k >= 5,]
########################################################################
# I perform the correlation calculations.
# I keep only the regulated phosphorylation sites:
mat <- tab[row.names(tab) %in% as.character(export$psiteID[export$Regulation == "TRUE"]),]
mat2 <- t(scale(t(mat))) # rowwise scaling
# Combine the pY and the TiO2 data:
pY <- mat2[,grepl("pTyr", colnames(mat2))]
pY <- pY[sapply(seq_len(nrow(pY)), function(x) {length(pY[x,][!is.na(pY[x,])]) > 0}),]
TiO2 <- mat2[,grepl("TiO2", colnames(mat2))]
TiO2 <- TiO2[sapply(seq_len(nrow(TiO2)), function(x) {length(TiO2[x,][!is.na(TiO2[x,])]) > 0}),]
colnames(pY) <- gsub("_pTyr", "", colnames(pY), fixed = T)
colnames(TiO2) <- gsub("_TiO2", "", colnames(TiO2), fixed = T)
pY <- pY[,order(colnames(pY))]
TiO2 <- TiO2[,order(colnames(TiO2))]
pY <- cbind(pY[,1:6], matrix(NA, ncol = 6, nrow = nrow(pY)), pY[,7:18])
colnames(pY) <- colnames(TiO2)
mat2 <- rbind(pY, TiO2)
# Perform the pearson correlation between phosphorylation sites:
pearsCor <- rcorr(t(mat2), type="pearson")
matR <- pearsCor[[1]]
matP <- pearsCor[[3]]
save(list = c("matR", "matP"), file = "RData/09_Correlations.Rdata")
colfunclight <- colorRampPalette(c("darkblue", "blue", "deepskyblue", "white", "yellow", "red", "darkred"))
pdf("Figures/Correlation.pdf", useDingbats=FALSE, 14, 12)
heatmap.2(matR, trace = "n", col = colfunclight(100), cexRow = 0.25, cexCol = 0.25)
dev.off()
################################################################################
# Export for cytoscape
matR <- melt(matR)
matP <- melt(matP)
matRP <- cbind(matR, matP[,ncol(matP)])
names(matRP) <- c("Psite1", "Psite2", "R", "pvalue")
#matRP <- matRP[matRP$pvalue<=0.05,]
#matRP <- matRP[abs(matRP$R)>=0.9,]
matRP <- matRP[!is.na(matRP$Psite1),]
# For CytoscapeFiltering:
matRP$PvalUnder0.01 <- matRP$pvalue <= 0.01
matRP <- data.frame(matRP, "-log10pval" = -(log(matRP$pvalue,10)))
matRP <- matRP[matRP$PvalUnder0.01 != FALSE,]
# Remove duplicated connections:
matRP <- matRP[!is.na(matRP$R),]
for(el in as.character(unique(matRP$Psite1))) {
for (el2 in as.character(unique(matRP$Psite2[matRP$Psite1==el]))) {
matRP <- matRP[!(matRP$Psite1==el2 & matRP$Psite2==el),]
}
}
matRP <- matRP[!duplicated(matRP),]
write.table(matRP[!is.na(matRP$R),], "SupTables/Cytoscape_KineticsCorrelation.txt", sep = "\t", row.names = F, quote = F)
|
035e55bfb920a1adbd17ac07f31e36dd3f80512b
|
573ad01c9f26062287748332b1872b0a99ee83d8
|
/ExploratModelling.R
|
28159b82606cb90706e999e78bba53fdbd355d8f
|
[] |
no_license
|
duncanobunge/DataWrangling
|
fbaf7dffbd50190804acefedd73a1a70d84dcfed
|
b01e547474874900df181894f0ef5733dd7c3063
|
refs/heads/master
| 2020-04-16T23:07:17.607013
| 2019-01-17T07:18:57
| 2019-01-17T07:18:57
| 165,998,638
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 34
|
r
|
ExploratModelling.R
|
#scripts for exploratory modelling
|
8467b87c03c3a64fa14e82e77d0350d0379ad6ae
|
902e96b788bfd6e50c8d8a9d5d7aa5e3534c5666
|
/002_ThugChem/functions/002_PreLaTeX.R
|
4c7247863828ccc87930447249a24681dd8c2d3c
|
[] |
no_license
|
legion949/App2Show
|
860b09f403645d758175af92011ce87cc4f0e164
|
bb502940e3d8d2a18326413439c08161ac6c9212
|
refs/heads/main
| 2023-06-15T04:47:45.942659
| 2021-06-28T13:43:38
| 2021-06-28T13:43:38
| 380,904,645
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,956
|
r
|
002_PreLaTeX.R
|
# este_simbolo = 2, este_sub = 2
# Funcion "empaquetadora1"
# Format one element as a LaTeX-style "Symbol[subscript]" fragment.
#   este_simbolo - chemical symbol (e.g. "H", "Fe")
#   este_sub     - its subscript; by chemical convention a subscript of 1
#                  is omitted and the bare symbol is returned
# Returns the formatted string, or the (Spanish) error string used elsewhere
# in this app when either input is NA or empty.
# Inputs are expected to be scalars; `||` is used accordingly.
empaquetadora1 <- function(este_simbolo, este_sub) {
  # Validate both inputs before formatting anything.
  todo_OK <- TRUE
  if (is.na(este_simbolo) || este_simbolo == "") todo_OK <- FALSE
  if (is.na(este_sub) || este_sub == "") todo_OK <- FALSE
  # Same error string the original returned — callers may match on it.
  if (!todo_OK) return("Algo mal en la empaquetadora de oxidos... \n")
  # Subscript 1 is implicit: emit the bare symbol.
  if (este_sub == 1) {
    return(paste(este_simbolo, collapse = ""))
  }
  paste(c(este_simbolo, "[", este_sub, "]"), collapse = "")
}
# Funcion "empaquetadora_oxido"
# Build the LaTeX-style formula of an oxide from two elements:
#   este_simbolo1/este_sub1/este_tipo1 - first element's symbol, subscript
#                                        and category (e.g. "Gas Noble")
#   este_simbolo2/este_sub2            - second element's symbol and subscript
# Output format depends on the first element: "A[i]*B[j]" for ordinary
# elements, just "O[i]" when the first element is oxygen alone, and
# "A[i] + B[j]" for noble gases. Subscripts equal to 1 are omitted.
# Returns the formatted string, or the (Spanish) error string on bad input.
empaquetadora_oxido <- function(este_simbolo1, este_sub1, este_tipo1,
este_simbolo2, este_sub2){
# Validate every input: reject NA or empty strings.
todo_OK <- TRUE
if (is.na(este_simbolo1) | este_simbolo1 == "") todo_OK <- FALSE
if (is.na(este_sub1) | este_sub1 == "") todo_OK <- FALSE
if (is.na(este_tipo1) | este_tipo1 == "") todo_OK <- FALSE
if (is.na(este_simbolo2) | este_simbolo2 == "") todo_OK <- FALSE
if (is.na(este_sub2) | este_sub2 == "") todo_OK <- FALSE
# Exception: oxygen alone is valid without a second element.
# NOTE(review): the `!is.na(x) | x != ""` conditions below evaluate to NA
# (and then error at runtime) when the argument IS NA — `&` was probably
# intended instead of `|`. Left as-is; confirm with the callers.
if (todo_OK == FALSE) {
if (este_simbolo1 =="O") # If the first element is oxygen...
if (!is.na(este_sub1) | este_sub1 != "") # ...and we have subscript 1...
if (!is.na(este_tipo1) | este_tipo1 != "") # ...and we know element 1's category...
if (is.na(este_simbolo2) | este_simbolo2 == "") # ...and there is no 2nd symbol...
todo_OK <- TRUE # ...then everything is OK after all.
}
# With valid input, pick the output shape by the first element's nature.
if (todo_OK) {
# 1) Ordinary element (not a noble gas, not oxygen): "A[i]*B[j]".
if (este_tipo1 != "Gas Noble" && este_simbolo1 != "O") {
# First half of the oxide (subscript 1 is implicit).
p1_oxido <- paste(c(este_simbolo1, "[", este_sub1, "]"), collapse="")
if (este_sub1 == 1) p1_oxido <- paste(este_simbolo1, collapse="")
# Second half of the oxide.
p2_oxido <- paste(c(este_simbolo2, "[", este_sub2, "]"), collapse="")
if (este_sub2 == 1) p2_oxido <- paste(este_simbolo2, collapse="")
# Assembled oxide, LaTeX-style.
armado_oxido <- paste(p1_oxido, p2_oxido, sep="*")
} # End of case 1)
########################################################################
# 2) Oxygen alone: only the first half is emitted.
if (este_simbolo1 == "O") {
# First half of the oxide (subscript 1 is implicit).
p1_oxido <- paste(c(este_simbolo1, "[", este_sub1, "]"), collapse="")
if (este_sub1 == 1) p1_oxido <- paste(este_simbolo1, collapse="")
# Second half is empty for lone oxygen.
p2_oxido <- ""
# Assembled oxide, LaTeX-style.
armado_oxido <- paste(p1_oxido, sep="")
} # End of case 2)
########################################################################
# 3) Noble gas: the two parts do not combine, join with " + ".
if (este_tipo1 == "Gas Noble") {
# First half of the oxide (subscript 1 is implicit).
p1_oxido <- paste(c(este_simbolo1, "[", este_sub1, "]"), collapse="")
if (este_sub1 == 1) p1_oxido <- paste(este_simbolo1, collapse="")
# Second half of the oxide.
p2_oxido <- paste(c(este_simbolo2, "[", este_sub2, "]"), collapse="")
if (este_sub2 == 1) p2_oxido <- paste(este_simbolo2, collapse="")
# Assembled oxide, LaTeX-style.
armado_oxido <- paste(p1_oxido, " + ", p2_oxido, sep="")
} # End of case 3)
########################################################################
return(armado_oxido)
} # End of the assembly branch.
else return("Algo mal en la empaquetadora_oxido()... \n")
} # End of function empaquetadora_oxido***
|
d422854df944d8979a18d3643207ef732bd40943
|
b75900de18d763ded6dab35b0dd42436ab4b7446
|
/man/access_import.Rd
|
feb1ee9b916016334ea426aa3d1a9863048d60ba
|
[
"MIT"
] |
permissive
|
ove-ut3/impexp
|
081910fa6c9cca3717c54dc3e02a9c88249a37c2
|
d23bf96afa95b73c532e4634fd001097daa05e22
|
refs/heads/master
| 2021-01-06T13:15:10.478366
| 2020-05-12T08:30:33
| 2020-05-12T08:30:33
| 241,339,115
| 0
| 0
|
NOASSERTION
| 2020-05-12T08:30:34
| 2020-02-18T11:00:49
| null |
UTF-8
|
R
| false
| true
| 523
|
rd
|
access_import.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/access.R
\name{access_import}
\alias{access_import}
\title{Import a Microsoft Access database table.}
\usage{
access_import(table, path)
}
\arguments{
\item{table}{Name of the table to import as a character.}
\item{path}{Path to the Access database.}
}
\value{
A data frame.
}
\description{
Import a Microsoft Access database table.
}
\examples{
impexp::access_import("Table_impexp", system.file("extdata/impexp.accdb", package = "impexp"))
}
|
5e551e493f8e78a247d68aab32514938bd33a777
|
33fa7ba1b68ea3401812fa958bb4122d81098826
|
/PyTorch_SAT6/sat_6_conf_matrix.R
|
6974b28664f12b78f265f3579427c40bf7690bd9
|
[] |
no_license
|
carlos-alberto-silva/wvview_geodl_examples
|
7a415e3e52155804fb7a6c494cbba772aae21e7e
|
b70eb4540105fac07f6be5091ef97047b961b237
|
refs/heads/master
| 2023-06-21T08:56:26.064154
| 2021-07-28T14:27:14
| 2021-07-28T14:27:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 418
|
r
|
sat_6_conf_matrix.R
|
# Build a confusion matrix for SAT-6 classification results exported from
# PyTorch. NOTE(review): dplyr is attached but not used in the visible code;
# the absolute Windows path below makes the script machine-specific.
library(dplyr)
library(caret)
# Read in results CSV
result <- read.csv("C:/Maxwell_Data/archive/chips2/test_result4.csv")
# Set reference and predicted columns to factors
# (confusionMatrix() requires factors with matching levels)
result$class <- as.factor(result$class)
result$predicted <- as.factor(result$predicted)
# Use caret to create confusion matrix
# mode="everything" adds precision/recall/F1 to the standard statistics
cm <- confusionMatrix(data=result$predicted, reference=result$class, mode="everything")
# Print confusion matrix
cm
|
147f5456661d82545fe21152038729b6c9ba4c37
|
3c5fee29a00ec142ccd009fa61a5b90acb7fa4a4
|
/Problem_Set_1.R
|
b8865c984337c68212f12864ea7c2fb58a594073
|
[] |
no_license
|
Steffen333701/Econometrics_Applications
|
44de341fc1cdefb72e818e5b2e4b29b789e64424
|
1524e99d8fd98eee9585697afb51a5ee648cdc83
|
refs/heads/master
| 2020-03-10T08:40:06.560689
| 2018-04-21T18:25:21
| 2018-04-21T18:25:21
| 129,291,169
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,286
|
r
|
Problem_Set_1.R
|
# Problem set 1
# Exploratory analysis and wage regressions on the NLS extract (ps1_nls.csv).
# NOTE(review): rm(list = ls()) and a hard-coded setwd() are kept from the
# original workflow, but both make the script non-portable; prefer
# project-relative paths.
ls()
rm(list = ls())
setwd("C:/Users/Steffen_Laptop/Documents/R/Econometrics_applications/Econometrics_Applications")
# Packages
library(tidyverse)
library(psych)
library(skimr)
########################################################################
# load the data
datps1 <- read.csv(file = "ps1_nls.csv")
# 1 -- descriptive statistics (several equivalent summaries)
summary(datps1)
sapply(datps1, sd, na.rm = TRUE)
describe(datps1)
skim(datps1)
# One row per variable, one column per statistic.
# Fixes vs. original: 'separate' was misspelled ('serarate'), funs() is
# deprecated in favour of list(), and dcast() belongs to reshape2/data.table,
# which is never attached here -- tidyr::pivot_wider() performs the same
# reshape and ships with the tidyverse.
datps1 %>%
  summarise_all(.funs = list(mean = mean, sd = sd, min = min, max = max)) %>%
  gather() %>%
  separate(key, into = c("variable", "stat"), sep = "_") %>%
  pivot_wider(names_from = stat, values_from = value)
# iq of 50 is kind of low?
# 2 -- histograms (base graphics, then ggplot2)
hist(datps1$logWage)
hist(datps1$educ)
hist(datps1$age)
hist(datps1$iq)
# aes() should reference columns, not datps1$... vectors
ggplot(data = datps1, aes(x = logWage)) + geom_histogram(binwidth = 0.03)
ggplot(data = datps1, aes(x = educ)) + geom_histogram(binwidth = 1)
ggplot(data = datps1, aes(x = age)) + geom_histogram(binwidth = 1)
ggplot(data = datps1, aes(x = iq)) + geom_histogram(binwidth = 1)
# 3 -- conditional expectation of logWage given years of education
# Fix vs. original: NA was misspelled 'Na' (R is case-sensitive), which
# aborted the script with "object 'Na' not found".
conditexp <- data.frame(educ = NA, mean.of.logwage = NA)
for (i in min(datps1$educ):max(datps1$educ)) {
  conditexp[i, 1] <- i
  conditexp[i, 2] <- mean(datps1[datps1$educ == i, "logWage"])
}
ggplot(data = conditexp[conditexp$educ >= min(datps1$educ), ], aes(x = educ, y = mean.of.logwage)) + geom_line()
# 4 -- simple regression of logWage on education, overlaid on the CEF
reg.model <- lm(logWage ~ educ, data = datps1)
summary(reg.model)
ggplot(data = conditexp[conditexp$educ >= min(datps1$educ), ], aes(x = educ, y = mean.of.logwage)) +
  geom_line(color = "blue", size = 2) +
  geom_abline(intercept = reg.model$coefficients[1], slope = reg.model$coefficients[2], color = "red", size = 2)
## 5 ====================
# one more year of education increases the logwage by 6%
# two more years of education increase logwage by 12%
## 6 ====================
# potential labour-market experience: age minus years of schooling minus 6
datps1$exper <- datps1$age - datps1$educ - 6
## 7 ====================
reg.model.2 <- lm(logWage ~ educ + exper + I(exper^2), data = datps1)
summary(reg.model.2)
## 8 ====================
# predicted log-wage difference at age 40 for 9 vs. 13 years of education
coeff.8 <- data.frame(matrix(unlist(reg.model.2[1]), nrow = 4, byrow = TRUE))
ern.8a <- coeff.8[1, 1] + coeff.8[2, 1] * 9 + coeff.8[3, 1] * (40 - 9 - 6) + coeff.8[4, 1] * (40 - 9 - 6)^2
ern.8b <- coeff.8[1, 1] + coeff.8[2, 1] * 13 + coeff.8[3, 1] * (40 - 13 - 6) + coeff.8[4, 1] * (40 - 13 - 6)^2
ern.8.diff <- ern.8a - ern.8b
## 9 ====================
# same model after a linear reparameterisation of the regressors
dat.ps1.refactored <- data.frame(datps1$logWage, datps1$educ, datps1$educ + datps1$exper, datps1$educ * 46 + datps1$exper^2)
colnames(dat.ps1.refactored) <- c("A", "B", "C", "D")
reg.model.5 <- lm(A ~ B + C + D, data = dat.ps1.refactored)
summary(reg.model.5)
coeff.9 <- data.frame(matrix(unlist(reg.model.5[1]), nrow = 4, byrow = TRUE))
ern.9.diff <- coeff.9[2, 1] * (9 - 13)
## 10 ===================
reg.model.3 <- lm(logWage ~ educ + exper + I(exper^2) + age, data = datps1)
summary(reg.model.3)
# Here one can see perfect multicollinearity (age = educ + exper + 6 by construction)
## 11 ===================
# Omitted variable bias:
# expect coefficient on education to become smaller, since
# education has a positive influence on earnings and i expect education and iq to be positively correlated
## 12 ===================
reg.model.4 <- lm(logWage ~ educ + iq, data = datps1)
summary(reg.model.4)
# i expect a 10 point iq increase to increase earnings by 4%
|
3576d6c33bed0f1ce5ab4dab931b3769b2761d0d
|
29ece7358a11b69690cf09c2b82fdc98c63531b0
|
/man/GraphsVIPOPLSDA.Rd
|
eee830fd6dd0c8ba059855f71e7cbb6a15c09629
|
[] |
no_license
|
AlzbetaG/Metabol
|
4bfe6ee161f262f5c97770bc276cbde2a7a61b61
|
5c513d3d3902610b9893123bcb918be2a85b2b65
|
refs/heads/master
| 2020-05-28T06:19:23.034258
| 2019-05-30T20:50:00
| 2019-05-30T20:50:00
| 188,906,403
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,353
|
rd
|
GraphsVIPOPLSDA.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/GraphsVIPOPLSDA.R
\name{GraphsVIPOPLSDA}
\alias{GraphsVIPOPLSDA}
\title{Orthogonal partial least squares - discriminant analysis (OPLS-DA)}
\usage{
GraphsVIPOPLSDA(data, name, groupnames, tsf = "clr", top = 30,
qu = 0.75, QCs = FALSE)
}
\arguments{
\item{data}{Data table with variables (metabolites) in columns. Samples in rows are sorted according to specific groups.}
\item{name}{A character string or expression indicating a name of data set. It occurs in names of every output.}
\item{groupnames}{A character vector defining specific groups in data. Every string must be specific for each group and they must not overlap.}
\item{tsf}{Data transformation must be defined by "clr" (default), "log", "log10", "PQN", "lnPQN", "pareto" or "none". See Details.}
\item{top}{How many most important variables (in absolute values) should be highlighted in s-plot? The default is 30.}
\item{qu}{Which quantile of the important variables (in absolute values) should be highlighted in s-plot? The default is 0.75.}
\item{QCs}{logical. If FALSE (default) quality control samples (QCs) are automatically distinguished and skipped.}
}
\value{
Score plot and s-plots of OPLS-DA.
Excel file with s-plot summary for every variable.
}
\description{
Makes orthogonal partial least squares - discriminant analysis (OPLS-DA), displays score plots and s-plots.
}
\details{
Data transformation: with "clr" clr trasformation is used (see References), with "log" natural logarithm is used, with "log10" decadic logarithm is used, with "pareto" data are only scaled by Pareto scaling, with "PQN" probabilistic quotient normalization is done, with "lnPQN" natural logarithm of PQN transformed data is done, with "none" no tranformation is done.
S-plots can be used only for comparison of two groups. If there are more than two groups in the data, all possible combinations of pairs are evaluated.
Up to twenty different groups can be distinguished in data (including QCs).
}
\references{
Aitchison, J. (1986) The Statistical Analysis of Compositional Data Monographs on Statistics and Applied Probability. Chapman & Hall Ltd., London (UK). p. 416.
Gaude, E.et al. (2012) muma: Metabolomics Univariate and Multivariate Analysis. R package version 1.4. \url{https://CRAN.R-project.org/package=muma}
}
|
19931d908cac239e082dac357af5df76ea33a338
|
4460d2c7adbb48baf7e2cd84024c2514ed1b1cb9
|
/scripts/scran.R
|
22ff90335df8ce392fe2cd6e990b6eaca864d1c3
|
[] |
no_license
|
DimitriTernovoj/SingleCellProject
|
0dbda9ca78ab72513b19d20c33148ecbd58444bb
|
4bec8bee7e630ed2b6908989c422dced53ccd902
|
refs/heads/master
| 2023-06-01T08:21:47.491796
| 2020-10-24T13:45:58
| 2020-10-24T13:45:58
| 307,434,846
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 446
|
r
|
scran.R
|
# Compute scran deconvolution size factors for a single-cell count matrix and
# write them back out as CSV (presumably for consumption by an external
# pipeline that produced data_mat.csv / input_groups.csv -- TODO confirm).
library(scran)
#read in data: counts as a headerless CSV, coerced to a numeric matrix
data_mat_df <- read.csv("file_dir/data_mat.csv", header=FALSE)
data_mat <- data.matrix(data_mat_df)
# pre-computed cluster assignments (column 'groups'), one entry per cell
input_groups <- read.csv("file_dir/input_groups.csv", header=TRUE)
#calculate sizefactors by pooling within the supplied clusters;
# min.mean = 0.1 filters low-abundance genes out of the pooling step
size_factors <- computeSumFactors(data_mat, clusters=input_groups$groups, min.mean=0.1)
#export sizefactors as a one-column CSV with header "size_factors"
write.table(size_factors, file= "file_dir/size_factors.csv", sep= ",", row.names = FALSE, col.names="size_factors")
|
025bfddc2ecae8f8cce5318433b4a3c296604bea
|
abcb78ddbbc03256a9e40fbd7f4790a735b91a52
|
/tests/testthat/test-arrange-SpatVector.R
|
649803180587a49d00b0befafd5a9807796d6922
|
[
"MIT"
] |
permissive
|
dieghernan/tidyterra
|
f94f0b8544b24f231022570fa51e2f2acc09238a
|
b0214a95186c1d63eb89ce92f7704ab0396d0c3b
|
refs/heads/main
| 2023-09-04T11:47:01.063904
| 2023-08-16T06:19:58
| 2023-08-16T06:19:58
| 488,274,429
| 134
| 4
|
NOASSERTION
| 2023-09-10T07:27:12
| 2022-05-03T15:59:17
|
R
|
UTF-8
|
R
| false
| false
| 1,544
|
r
|
test-arrange-SpatVector.R
|
# Tests for the SpatVector method of dplyr::arrange() provided by tidyterra.
# The reference behaviour throughout is "arrange the attribute table as a
# plain tibble"; the SpatVector result must match it row for row.
test_that("Arrange with SpatVector", {
  f <- system.file("extdata/cyl.gpkg", package = "tidyterra")
  v <- terra::vect(f)
  v2 <- v %>% arrange(dplyr::desc(cpro))
  # expected ordering computed on the plain attribute tibble
  tab <- v %>%
    as_tibble() %>%
    arrange(dplyr::desc(cpro))
  expect_true(nrow(v2) == nrow(tab))
  expect_s4_class(v2, "SpatVector")
  expect_identical(as_tibble(v2), tab)
})
test_that("Arrange with 2 vars SpatVector", {
  f <- system.file("extdata/cyl.gpkg", package = "tidyterra")
  v <- terra::vect(f)
  # add a numeric and a categorical sort key
  v$area_end <- terra::expanse(v)
  v$cat <- ifelse(v$cpro < "30", "B", "A")
  v2 <- v %>% arrange(cat, dplyr::desc(area_end))
  tab <- v %>%
    as_tibble() %>%
    arrange(cat, dplyr::desc(area_end))
  expect_true(nrow(v2) == nrow(tab))
  expect_s4_class(v2, "SpatVector")
  expect_identical(as_tibble(v2), tab)
})
# From dplyr
test_that("grouped arrange ignores group, unless requested with .by_group", {
  df <- data.frame(g = c(2, 1, 2, 1), x = 4:1)
  df <- terra::vect(df, geom = c("g", "x"), keepgeom = TRUE)
  gf <- group_by(df, g)
  # default: sort by x across the whole table, ignoring groups
  expect_equal(
    as_tibble(arrange(gf, x)),
    as_tibble(gf)[4:1, ]
  )
  # .by_group = TRUE: sort by group first, then x within group
  expect_equal(
    as_tibble(arrange(gf, x, .by_group = TRUE)),
    as_tibble(gf)[c(4, 2, 3, 1), , ]
  )
})
test_that("arrange updates the grouping structure", {
  df <- data.frame(g = c(2, 2, 1, 1), x = c(1, 3, 2, 4))
  df <- terra::vect(df, geom = c("g", "x"), keepgeom = TRUE)
  res <- df %>%
    group_by(g) %>%
    arrange(x)
  expect_s4_class(res, "SpatVector")
  # group_rows must reflect the post-sort row positions
  expect_equal(as.list(group_rows(res)), list(c(2L, 4L), c(1L, 3L)))
})
|
50ba6598ece44e2ee59e5b4a0d719a9346092df8
|
3c135a96e70cbc0c457b00d4a8e4fa2df44e7682
|
/Datastorytelling and Visualization Analysis.R
|
f12c9610fe03ac787b9878d8966a4177ec4807a9
|
[] |
no_license
|
shekharsingh8811/DataStorytellingAndVisualization
|
694753e65c7b052271db0b4d89ae163c67bb987c
|
a36a68fa39a8e1699b088c6e091351288bf2dfb3
|
refs/heads/master
| 2020-07-26T07:59:53.977570
| 2019-11-26T21:10:55
| 2019-11-26T21:10:55
| 208,584,389
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,228
|
r
|
Datastorytelling and Visualization Analysis.R
|
# Exploratory analysis of the FIFA 2019 player data set: basic descriptive
# checks followed by linear regressions of player ratings on skill attributes.
getwd()
# NOTE(review): hard-coded setwd() makes the script machine-specific
setwd("/Users/shekharsingh/Documents/Big Data and Business Analytics/Analytics1/RStudio")
dataset <- read.csv("FIFA_2019.csv", header=TRUE)
View(dataset)
# structure / summary sanity checks
dim(dataset)
summary(dataset)
str(dataset)
head(dataset)
#fitting linear regression
# Model 1: Potential rating explained by all individual skill attributes
fit.lr = lm(formula = Potential ~ Crossing+Finishing+HeadingAccuracy+ShortPassing+Volleys+Dribbling+Curve+FKAccuracy+LongPassing+BallControl+Acceleration+SprintSpeed+Agility+Reactions+Balance+ShotPower+Jumping+Stamina+Strength+LongShots+Aggression+Interceptions+Positioning+Vision+Penalties+Composure+Marking+StandingTackle+SlidingTackle+GKDiving+GKHandling+GKKicking+GKPositioning+GKReflexes,
data = dataset)
summary(fit.lr)
# Model 2: same regressors, Overall rating as the response
fit.lr2 = lm(formula = Overall ~ Crossing+Finishing+HeadingAccuracy+ShortPassing+Volleys+Dribbling+Curve+FKAccuracy+LongPassing+BallControl+Acceleration+SprintSpeed+Agility+Reactions+Balance+ShotPower+Jumping+Stamina+Strength+LongShots+Aggression+Interceptions+Positioning+Vision+Penalties+Composure+Marking+StandingTackle+SlidingTackle+GKDiving+GKHandling+GKKicking+GKPositioning+GKReflexes,
data = dataset)
summary(fit.lr2)
# Model 3: Overall rating on Age alone
fit.lr3 = lm(formula = Overall ~ Age,
data = dataset)
summary(fit.lr3)
|
4dd3f6e62a2d95b1d960a885d9587e198ea1ef60
|
b4cc2e543a3822cd9e03a660348b3da6c47a8a14
|
/man/summariseRjpp.Rd
|
e10d33c7d633933d3f4d901df675b69b2656d9ec
|
[] |
no_license
|
hferg/BTprocessR
|
09e078ed5f97c6f63db877794f74c39003ed4fd5
|
ce8ecd4cc422605438c98787ad326b0a077a1eb7
|
refs/heads/master
| 2021-05-11T06:25:43.805695
| 2020-03-04T16:36:11
| 2020-03-04T16:36:11
| 117,988,586
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,356
|
rd
|
summariseRjpp.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rjpp.R
\name{summariseRjpp}
\alias{summariseRjpp}
\title{summariseRjpp
This function takes the somewhat massive output of the rjpp function and
pares it down to the scalars and/or rates the user is interested in.
QUESTION: Should this be limited to a single scalar type? That would include
the rates and the origins?
QUESTION: Is threshold important here? Or could that be in the autoplot
method?}
\usage{
summariseRjpp(PP, scalar)
}
\arguments{
\item{scalar}{The scalar to summarise the results of. Either:
node_scalar, branch_scalar, rate, lambda, delta, kappa or node_branch}
\item{pp}{An object of class "rjpp" - typically the output of the rjpp
function.}
\item{threshold}{The probability threshold over which scalars will be show.
When equal to zero ALL scalars in the posterior will be returned. When equal
to 0.5 only scalars present in greater than 50% of posterior samples will be
returned, and so on.}
}
\description{
summariseRjpp
This function takes the somewhat massive output of the rjpp function and
pares it down to the scalars and/or rates the user is interested in.
QUESTION: Should this be limited to a single scalar type? That would include
the rates and the origins?
QUESTION: Is threshold important here? Or could that be in the autoplot
method?
}
|
69f2b3966908fec603c1689f78da12961c37b0b8
|
6dedc1aa6d3d83a86f818ea4667a44fae1c57b49
|
/inst/testdata/generate_test_data.R
|
fb0b65bc3f25478ed2d3140db020aa3a7df8421e
|
[
"MIT"
] |
permissive
|
GarettHeineck/platypus
|
bbf2e5119b6614b2862b86be4933896dc147506f
|
080e3d3649eabd4317d2ce5d761ab086c2b8ef88
|
refs/heads/master
| 2022-12-30T19:01:07.949779
| 2020-10-17T21:33:21
| 2020-10-17T21:33:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,455
|
r
|
generate_test_data.R
|
# Generate tiny PNG fixtures (paired images and masks) for the package tests.
# Fix vs. original: grid.raster() is provided by the 'grid' package, which is
# not attached in a fresh R session -- load it explicitly so the script is
# standalone instead of relying on leftover session state.
library(grid)

# Each mask is a 2x2 RGB array (dim = c(2, 2, 3)); all three channels carry
# the same values, so every pixel is a grey level in {0, 111, 222, 255}.
test_mask_1 <- array(c(255, 111, 0, 222,
                       255, 111, 0, 222,
                       255, 111, 0, 222), dim = c(2, 2, 3))
test_mask_2 <- array(c(255, 111, 222, 0,
                       255, 111, 222, 0,
                       255, 111, 222, 0), dim = c(2, 2, 3))
test_mask_3 <- array(c(222, 111, 0, 255,
                       222, 111, 0, 255,
                       222, 111, 0, 255), dim = c(2, 2, 3))
# Flat layout: one images/ and one masks/ directory.  The "image" PNG is
# rendered from the same array as the mask -- presumably only the directory
# layout matters for these fixtures (TODO confirm against the tests).
for (i in 1:3) {
  filename_img <- paste0("inst/testdata/dir/images/test_image_", i, ".png")
  filename_mask <- paste0("inst/testdata/dir/masks/test_mask_", i, ".png")
  png(filename = filename_img)
  grid.raster(get(paste0("test_mask_", i)) / 255)  # rescale 0-255 to [0, 1]
  dev.off()
  png(filename = filename_mask)
  grid.raster(get(paste0("test_mask_", i)) / 255)
  dev.off()
}
# Per-class masks: test_mask_<i>_<j> isolates the j-th grey level of mask i,
# with everything else set to background (0).
test_mask_1_1 <- array(c(0, 111, 0, 0,
                         0, 111, 0, 0,
                         0, 111, 0, 0), dim = c(2, 2, 3))
test_mask_1_2 <- array(c(0, 0, 0, 222,
                         0, 0, 0, 222,
                         0, 0, 0, 222), dim = c(2, 2, 3))
test_mask_1_3 <- array(c(255, 0, 0, 0,
                         255, 0, 0, 0,
                         255, 0, 0, 0), dim = c(2, 2, 3))
test_mask_2_1 <- array(c(0, 111, 0, 0,
                         0, 111, 0, 0,
                         0, 111, 0, 0), dim = c(2, 2, 3))
test_mask_2_2 <- array(c(0, 0, 222, 0,
                         0, 0, 222, 0,
                         0, 0, 222, 0), dim = c(2, 2, 3))
test_mask_2_3 <- array(c(255, 0, 0, 0,
                         255, 0, 0, 0,
                         255, 0, 0, 0), dim = c(2, 2, 3))
test_mask_3_1 <- array(c(0, 111, 0, 0,
                         0, 111, 0, 0,
                         0, 111, 0, 0), dim = c(2, 2, 3))
test_mask_3_2 <- array(c(222, 0, 0, 0,
                         222, 0, 0, 0,
                         222, 0, 0, 0), dim = c(2, 2, 3))
test_mask_3_3 <- array(c(0, 0, 0, 255,
                         0, 0, 0, 255,
                         0, 0, 0, 255), dim = c(2, 2, 3))
# Nested layout: one directory per image, each holding images/ with the full
# image and masks/ with one PNG per class.
for (i in 1:3) {
  filename_img <- paste0("inst/testdata/nested_dirs/image_", i, "/images/test_image_", i, ".png")
  png(filename = filename_img)
  grid.raster(get(paste0("test_mask_", i)) / 255)
  dev.off()
  for (j in 1:3) {
    filename_mask <- paste0("inst/testdata/nested_dirs/image_", i, "/masks/test_mask_", i, "_", j, ".png")
    png(filename = filename_mask)
    grid.raster(get(paste0("test_mask_", i, "_", j)) / 255)
    dev.off()
  }
}
|
b7ba38a3ffe3c33a9d109190090bc5a08981df07
|
b599e97542c6df5add3e4b53586097705b10ce74
|
/man/onelump_varenv.Rd
|
00d44e08a13a672eee1cdfdbb6e299a241985905
|
[] |
no_license
|
ajijohn/NicheMapR
|
09c435107b9e4aa0fd5b7982510a65e76680f1ed
|
98386659036cc55df840df7339af519a766b88d2
|
refs/heads/master
| 2021-01-09T05:34:23.544532
| 2017-01-28T07:29:16
| 2017-01-28T07:29:16
| 80,757,456
| 0
| 1
| null | 2017-02-02T18:51:23
| 2017-02-02T18:51:23
| null |
UTF-8
|
R
| false
| true
| 6,848
|
rd
|
onelump_varenv.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/onelump_varenv.R
\name{onelump_varenv}
\alias{onelump_varenv}
\title{One-lump Transient Heat Budget for Variable Environment}
\usage{
onelump_varenv(t, Tc_init, thresh, AMASS, lometry, Tairf, Tradf, velf, Qsolf, Zenf, ...)
}
\arguments{
\item{t}{= seq(1,3600,60), time intervals (s) at which output is required}
\item{time}{= 1, time (s) at beginning of spline of environmental conditions}
\item{Tc_init}{= 5, initial core temperature of the animal (degrees C)}
\item{thresh}{= 29, threshold temperature (degrees C) at which summary statistics are wanted}
\item{AMASS}{= 500, animal mass (g)}
\item{lometry}{= 2, Organism shape, 0-5, Determines whether standard or custom shapes/surface area/volume relationships are used: 0=plate, 1=cyl, 2=ellips, 3=lizard (desert iguana), 4=frog (leopard frog), 5=custom (see parameter 'customallom')}
\item{Tairf}{air temperature function with time, generated by 'approxfun' (degrees C)}
\item{Tradf}{radiant temperature function with time, generated by 'approxfun'(degrees C), averaging ground and sky}
\item{velf}{wind speed function with time, generated by 'approxfun' (m/s)}
\item{Qsolf}{radiation function with time, generated by 'approxfun' (W/m2)}
\item{Zenf}{zenith angle of sun function with time, generated by 'approxfun' (90 is below horizon), degrees}
\item{Flshcond}{= 0.5, Thermal conductivity of flesh (W/mK, range: 0.412-2.8)}
\item{q}{= 0, metabolic heat production rate W/m3}
\item{Spheat}{= 0.85, Specific heat of flesh J/(kg-K)}
\item{EMISAN}{= 0.95, Emissivity of animal (0-1)}
\item{ABS}{= 0.85, solar absorptivity, decimal percent}
\item{customallom}{= c(10.4713,.688,0.425,0.85,3.798,.683,0.694,.743), Custom allometry coefficients. Operates if lometry=5, and consists of 4 pairs of values representing the parameters a and b of a relationship AREA=a*mass^b, where AREA is in cm2 and mass is in g. The first pair are a and b for total surface area, then a and b for ventral area, then for sillhouette area normal to the sun, then sillhouette area perpendicular to the sun}
\item{shape_a}{= 1., Proportionality factor (-) for going from volume to area, keep this 1 (redundant parameter that should be removed)}
\item{shape_b}{= 3, Proportionality factor (-) for going from volume to area, represents ratio of width:height for a plate, length:diameter for cylinder, b axis:a axis for ellipsoid}
\item{shape_c}{= 0.6666666667, Proportionality factor (-) for going from volume to area, represents ratio of length:height for a plate, c axis:a axis for ellipsoid}
\item{posture}{= 'n' pointing normal 'n', parallel 'p' to the sun's rays, or 'b' in between?}
\item{FATOSK}{= 0.4, Configuration factor to sky (-) for infrared calculations}
\item{FATOSB}{= 0.4, Configuration factor to subsrate for infrared calculations}
\item{sub_reflect}{= 0.2, substrate solar reflectivity, decimal percent}
\item{PCTDIF}{= 0.1, proportion of solar energy that is diffuse (rather than direct beam)}
\item{press}{= 101325, air pressure (Pa)}
\item{VTMIN}{= 24, Voluntary thermal minimum, degrees C (lower body temperature for foraging)}
}
\value{
Tc Core temperature (deg C)
Tcf Final (steady state) temperature (deg C), if conditions remained constant indefinately
tau Time constant (s)
dTc Rate of change of core temperature (deg C/s)
abs2 solar absorptivity
}
\description{
Transient, 'one-lump', heat budget for computing rate of change of temperature
under environmental conditions that vary with time, using interpolation functions to
estimate environmental conditions at particular time intervals.
Michael Kearney & Warren Porter developed this R function in July 2014.
}
\examples{
loc="Alice Springs, Australia"
micro<-micro_global(loc=loc) # run the model with default location and settings
metout<-as.data.frame(micro$metout) # above ground microclimatic conditions, min shade
shadmet<-as.data.frame(micro$shadmet) # above ground microclimatic conditions, max shade
soil<-as.data.frame(micro$soil) # soil temperatures, minimum shade
shadsoil<-as.data.frame(micro$shadsoil) # soil temperatures, maximum shade
# append dates
days<-rep(seq(1,12),24)
days<-days[order(days)]
dates<-days+metout$TIME/60/24-1 # dates for hourly output
dates2<-seq(1,12,1) # dates for daily output
metout<-cbind(dates,metout)
soil<-cbind(dates,soil)
shadmet<-cbind(dates,shadmet)
shadsoil<-cbind(dates,shadsoil)
# combine relevant input fields
micro_sun_all<-cbind(metout[,1:5],metout[,8],soil[,4],metout[,13:15],metout[,6])
colnames(micro_sun_all)<-c('dates','JULDAY','TIME','TALOC','TA1.2m','VLOC','TS','ZEN','SOLR','TSKYC','RHLOC')
micro_shd_all<-cbind(shadmet[,1:5],shadmet[,8],shadsoil[,4],shadmet[,13:15],shadmet[,6])
colnames(micro_shd_all)<-c('dates','JULDAY','TIME','TALOC','TA1.2m','VLOC','TS','ZEN','SOLR','TSKYC','RHLOC')
# loop through middle day of each month
juldays=c(15,46,74,105,135,166,196,227,258,288,319,349)
mons=c("January","February","March","April","May","June","July","August","September","October","November","December")
for(i in 1:length(juldays)){
simday=juldays[i]
micro_sun<-subset(micro_sun_all, micro_sun_all$JULDAY==simday)
micro_shd<-subset(micro_shd_all,micro_shd_all$JULDAY==simday)
# use approxfun to create interpolations for the required environmental variables
time<-seq(0,60*24,60) #60 minute intervals from microclimate output
time<-time*60 # minutes to seconds
Qsolfun_sun<- approxfun(time, c(micro_sun[,9],(micro_sun[1,9]+micro_sun[24,9])/2), rule = 2)
Tradfun_sun<- approxfun(time, rowMeans(cbind(c(micro_sun[,7],(micro_sun[1,7]+micro_sun[24,7])/24),c(micro_sun[,10],(micro_sun[1,10]+micro_sun[24,10])/24)),na.rm=TRUE), rule = 2)
Qsolfun_shd<- approxfun(time, c(micro_shd[,9],(micro_shd[1,9]+micro_shd[24,9])/2)*(1-micro$maxshade), rule = 2)
Tradfun_shd<- approxfun(time, rowMeans(cbind(c(micro_shd[,7],(micro_shd[1,7]+micro_shd[24,7])/24),c(micro_shd[,10],(micro_shd[1,10]+micro_shd[24,10])/24)),na.rm=TRUE), rule = 2)
velfun<- approxfun(time, c(micro_sun[,6],(micro_sun[1,6]+micro_sun[24,6])/2), rule = 2)
Tairfun_sun<- approxfun(time, c(micro_sun[,4],(micro_sun[1,4]+micro_sun[24,4])/2), rule = 2)
Tairfun_shd<- approxfun(time, c(micro_shd[,4],(micro_shd[1,4]+micro_shd[24,4])/2), rule = 2)
Zenfun<- approxfun(time, c(micro_sun[,8],90), rule = 2)
# choose full sun environment
Tairfun<-Tairfun_sun
Tradfun<-Tradfun_sun
Qsolfun<-Qsolfun_sun
t=seq(1,3600*24,60) # sequence of times for predictions (1 min intervals)
Tbs<-onelump_varenv(t=t) # run the model
plot(t/3600,Tbs$Tc,type='l',ylim=c(-10,70),ylab='Temperature, deg C',xlab='hour of day', main=paste(loc,mons[i]))
}
}
|
36ed2f8f2361e8d01e24a73cbb393e33d381bd0a
|
83289e8da8ccb731bd9c37964f3ce2d62c5beabf
|
/script-5_modules.R
|
1e0c7798ff78af78e0d61e57009dbbc32b228083
|
[
"MIT"
] |
permissive
|
moldach/gene-set-enrichment-R
|
52782dd687db07cf5b2490648747a2a6799fe7ed
|
6b8ce73e3b53873c2577849de5a240a510fb8909
|
refs/heads/master
| 2020-03-18T10:39:17.506602
| 2019-10-10T20:05:40
| 2019-10-10T20:05:40
| 134,625,671
| 6
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,882
|
r
|
script-5_modules.R
|
# Module / clustering tutorial on the Kaforou TB expression data:
# differential expression with limma, manual gene clustering (hclust on
# euclidean and correlation distances), and WGCNA module detection, each
# followed by tmod enrichment of the resulting clusters.
load("kaforou.rda")
## When using read.csv() make sure to use stringsAsFactors=FALSE
## to assure you the character strings are not factors
## google global .Rprofile to change this so you will never accidently forget
## to turn character strings into factors
## some sanitizing
kaforou$genes <- kaforou$genes[ , c("ID", "Symbol", "Definition", "Entrez_Gene_ID", "RefSeq_ID") ]
kaforou$genes$Symbol <- as.character(kaforou$genes$Symbol)
kaforou$genes$ID <- as.character(kaforou$genes$ID)
kaforou$genes$Entrez_Gene_ID <- as.character(kaforou$genes$Entrez_Gene_ID) # R converted characters into factors with load() so lets turn them back into characters
kaforou$targets$cg <- gsub("kaforou\\.", "", kaforou$targets$cg)
## basic differential analysis - just to look what is there
d <- model.matrix(~ 0 + cg, data=kaforou$targets)
colnames(d) <- gsub("cg", "", colnames(d))
c <- makeContrasts(Malawi="(Malawi.TB-Malawi.LTB)", SA="(SA.TB-SA.LTB)",
  TBvsLTBI="((Malawi.TB-Malawi.LTB)+(SA.TB-SA.LTB))/2", levels=d)
fit <- eBayes(contrasts.fit(lmFit(kaforou, d), c))
topTable(fit, coef="TBvsLTBI")
library(tmod)
res <- tmodLimmaTest(fit, fit$genes$Symbol)
tmodPanelPlot(res, filter.rows.pval = 1e-5)
# We see type 1 interferon response which we expect, B cells as well
## first, a small example
## we manually inspect correlations for some of the genes
i <- "ILMN_1799848"
#Ctrl + Shift + 2
kaforou$genes[i,]
## absolute correlation coefficients
cc <- abs(cor(kaforou$E[i, ], t(kaforou$E))) # Making a correlation between this one gene and all the other genes
dim(cc) # 1 row, many columns
cc <- t(cc)[,1] # Transpose this very long row into one column
head(sort(cc, decreasing=TRUE)) # Sort from largest to smallest
## what are these genes?
kaforou$genes[ names(head(sort(cc, decreasing=TRUE))), ]
## plot showing correlation between two genes
j <- "ILMN_2261600"
plot(kaforou$E[i, ], kaforou$E[j,], pch=19, col=factor(kaforou$targets$group),
  bty="n", xlab=kaforou$genes[i, "Symbol"], ylab=kaforou$genes[j, "Symbol"])
legend("topleft", levels(factor(kaforou$targets$group)), pch=19, col=1:2, bty="n")
## 1. primitive clustering
## A rudimentary clustering can be directly produced when creating a heatmap.
## for a heatmap, we want to select genes that are significantly regulated
tt <- topTable(fit, coef="TBvsLTBI", number=Inf, sort.by="p")
X <- kaforou$E[tt$ID[1:150], ]
head(X[,1:5])
## select a color scheme
## the fu is a *function* that can be used to generate colors.
## for example, try fu(5) and fu(15)
## also note how R can understand colors: using the HTML hexadecimal notation style, "#320A4B"
library(RColorBrewer)
fu <- colorRampPalette(c("purple", "black", "cyan"))
fu(10) # If you use an uneven number you get black in the middle
## heatmap
## trace: no idea who uses this option, ever
## scale: the expression of a gene will be converted to a z-score.
## dendrogram: row, col or both [default].
library(gplots)
group.col <- c("cyan", "purple")[ factor(kaforou$targets$group)]
colnames(X) <- paste0(kaforou$targets$group, ".", colnames(X))
par(bg="#eeeeee")
heatmap.2(X, trace="n", scale="r", col=fu,
  labCol=kaforou$targets$group,
  ColSideColors = group.col)
# We see how many modules we would be able to identify (vertically) 7 bars
## a second version of the same figure. Here, we do not want to reorder the samples and
## calculate a dendrogram for the columns, but simply show the samples grouped
ordo <- order(kaforou$targets$group)
heatmap.2(X[,ordo], trace="n", scale="r", dendrogram="row", col=fu, Colv=NULL,
  labCol=kaforou$targets$group[ordo],
  ColSideColors = group.col[ordo])
# Correlation clusters take ^2 more compute resources than genes (e.g. 40,000genes * 40,000)
plotDensities(kaforou, legend = FALSE)
axis(1)
axis(side=1,at=1:15)
## 2. Simple clustering.
## First, select genes whose upper quartile of expression exceeds 7
## (arbitrary threshold), then keep the 20% with the largest IQR.
uq <- apply(kaforou$E, 1, function(x) quantile(x, 0.75))
sel <- uq > 7
ks <- kaforou[sel, ]
iqrs <- apply(ks$E, 1, IQR)
sel <- iqrs > quantile(iqrs, 0.8)
sum(sel)
ks <- ks[sel, ]
X <- ks$E
## Clustering of the samples with hclust (fewer samples than genes, so the
## dendrogram is easy to inspect).  dist() defaults to euclidean distance.
dd <- dist(t(X))
hc <- hclust(dd)
plot(hc, labels=ks$targets$group)
# what if we choose three clusters -- cut heights chosen by eye
clusts <- cutree(hc, h = 101)
clusts <- cutree(hc, h = 180)
## clusts is a vector; names are the sample IDs,
## we can ask how these clusters relate to our known phenotype
table(clusts, ks$targets$group) # first cluster mostly TB, the others mostly LTB
table(clusts, ks$targets$Cohort) # clusters also track the cohorts (Malawi / SA)
## We can do a similar thing for genes
## first, with euclidean distances
dd <- dist(X)
hc <- hclust(dd)
plot(hc, labels=FALSE)
# cut height is rather arbitrary here
abline(h=40)
clusts <- cutree(hc, h = 40)
uc <- unique(clusts)
uc
head(clusts)
## gene symbols per cluster, plus a small summary data frame
m2g <- sapply(uc, function(i) unique(ks$genes[ names(clusts[ clusts == i ]), "Symbol" ]), simplify=FALSE)
m <- data.frame(ID=paste0("M.", uc), Title=uc, N=sapply(uc, function(i) sum(clusts == i)))
names(m2g) <- m$ID
library(tmod)
## hypergeometric test of each cluster against the background of this
## *reduced* data set (not the full array)
res <- sapply(m2g, function(cl) tmodHGtest(cl, ks$genes$Symbol), simplify=FALSE)
res # NULLs where no enrichment could be computed
res <- res[ !sapply(res, is.null) ]
res <- res[ sapply(res, nrow) > 0 ]
res <- sapply(res, function(x) { x$AUC <- log(x$E) ; x }, simplify=FALSE) # tmodPanelPlot expects an AUC column
names(res) <- paste0("M.", seq_along(res))
tmodPanelPlot(res)
tmodPanelPlot(res, pval.thr = 0.05)
## now, we will use correlation as distance metrics
cc <- cor(t(ks$E))
cc <- 1 - abs(cc)
cd <- as.dist(cc)
hc <- hclust(cd)
plot(hc, labels=ks$targets$group)
clusts <- cutree(hc, h = 0.5)
uc <- unique(clusts)
m2g <- sapply(uc, function(i) ks$genes[ names(clusts[ clusts == i ]), "Symbol" ], simplify=FALSE)
m <- data.frame(ID=paste0("M.", uc), Title=uc, N=sapply(uc, function(i) sum(clusts == i)))
names(m2g) <- m$ID
## keep only clusters with more than 30 genes
sel <- m$N > 30
sum(sel)
m2g <- m2g[sel]
m <- m[sel,]
res <- sapply(m2g, function(cl) tmodHGtest(cl, ks$genes$Symbol), simplify=FALSE)
res <- res[ !sapply(res, is.null) ]
res <- res[ sapply(res, nrow) > 0 ]
res <- sapply(res, function(x) { x$AUC <- log(x$E) ; x }, simplify=FALSE)
tmodPanelPlot(res)
## 3. WGCNA module detection
library(WGCNA)
## uncomment the following in terminal R, but not in Rstudio
# allowWGCNAThreads()
## we test a range of the "power" parameter
powers <- c(c(1:5), seq(from = 6, to=30, by=3))
s <- pickSoftThreshold(t(ks$E), powerVector=powers, verbose=5)
with(s$fitIndices, plot(Power, -sign(slope) * SFT.R.sq, type="n",
  xlab="Soft Threshold (power)", ylab="Scale Free Topology Model Fit,signed R^2",
  main = "Scale independence"))
with(s$fitIndices, text(Power, -sign(slope) * SFT.R.sq, Power, col="red"))
abline(h=0.90,col="red")
s <- 16
adjacency <- adjacency(t(ks$E), power = s)
TOM <- TOMsimilarity(adjacency)
d.t <- 1 - TOM
geneTree <- hclust(as.dist(d.t), method = "average")
plot(geneTree, labels=FALSE, hang=0.4)
# Fix vs. original: the dissimilarity matrix is named d.t above, but the
# original passed the undefined variable 'dissTOM' (the name used in the
# WGCNA tutorials), which aborted the script here.
m <- cutreeDynamic(dendro=geneTree, distM=d.t, deepSplit=2, minClusterSize=30, pamRespectsDendro=FALSE)
col <- labels2colors(m)
plotDendroAndColors(geneTree, col)
length(m)
library(tmod)
head(ks$genes)
# NOTE(review): this iterates over unique(names(m)); confirm that
# cutreeDynamic() returns a *named* vector here -- otherwise unique(m)
# would be the intended grouping.
res <- sapply(unique(names(m)), function(i) tmodHGtest(fg=ks$genes$Symbol[m == i], bg=ks$genes$Symbol), simplify=FALSE)
names(res) <- paste0("M", names(res))
res <- res[ sapply(res, nrow) > 0 ]
# rename the effect-size column so tmodPanelPlot finds its AUC column
res <- sapply(res, function(x) { colnames(x)[7] <- "AUC" ; x }, simplify=FALSE)
tmodPanelPlot(res)
|
1143685a1dc3bd5ecb54284d090e793f45c61b03
|
f1d4d986bbfe4d08026fb1c7f5e921becfb8895d
|
/man/addOrRestoreVariable.Rd
|
fc97292aaf047e192cb39df9de41f0701f77c2fe
|
[
"Apache-2.0"
] |
permissive
|
mickash/Adaptive-Bayesian-Networks
|
988d3f2fcfeed921055035437e1f4c52f5f89660
|
56611cf9f8562ebcbfa17d876d2a7d27c201b67a
|
refs/heads/master
| 2020-09-09T14:51:25.727845
| 2019-11-13T14:20:59
| 2019-11-13T14:20:59
| 221,476,013
| 1
| 0
| null | 2019-11-13T14:21:01
| 2019-11-13T14:18:43
|
R
|
UTF-8
|
R
| false
| false
| 868
|
rd
|
addOrRestoreVariable.Rd
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/AddNode.R
\name{addOrRestoreVariable}
\alias{addOrRestoreVariable}
\title{Add or restore variable to data tables}
\usage{
addOrRestoreVariable(cn, tableinfo, newNode, newvalues)
}
\arguments{
\item{cn}{An RODBC connection}
\item{tableinfo}{A table information object}
\item{newNode}{The name of the new variable}
\item{newValues}{The number of values the new variable has}
}
\value{
The updated tableinfo object
}
\description{
Add or restore variable to data tables.
Checks if the new variable to be added is already present in the master data tables.
If so, and it is valid (the desired values match stored values) it is 'restored' which
simply means adding it to the datamap.
Otherwise the variable is added to the master data tables and then to the map.
}
\keyword{internal}
|
3075a299e4945eace3cf3fbcccd78dd2a191dbd6
|
5c4d81b75c6c49cff7470bf7cb914e0fbf3a3015
|
/R/syschange.R
|
b547d28f7b88ccd9c355bd26788e5022ded1b2db
|
[] |
no_license
|
cran/svenssonm
|
0ce3fded9f92ba4040caf1ba795664ba228b0b89
|
12bdf4e80a630e54540ed9ac995d2fc3f6f183a2
|
refs/heads/master
| 2020-12-30T10:11:01.956263
| 2017-08-03T13:32:01
| 2017-08-03T13:32:01
| 99,242,938
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,899
|
r
|
syschange.R
|
#' Systematic Change
#'
#' The value and the standard error of relative position (RP), the systematic change in
#' position between the two ordered categorical classification.
#' Also, the value and the standard error of relative concentration (RC), a comprehensive
#' evaluation of the systematic change.
#'
#' @param t The contingency table for Svensson's method, a two-dimension matrix.
#' @name syschange
#' @return \code{rp} and \code{rc} give the RP and RC value. \code{rpse} and \code{rcse}
#' give the standard error of RP and RC.
#' @seealso \code{\link{con_ta}} for generating contingency table. \code{\link{indichange}}
#' for individual change. \code{\link{sresult}} for summary of Svensson's method analysis.
#' @examples
#' x <- c (1:5,5:1)
#' y <- c(1:5,1,1,5,4,1)
#' z <- con_ta(x,y,)
#' rp(z)
#' rpse(z)
#' rc(z)
#' rcse(z)
NULL
# > NULL
#' @rdname syschange
#' @export
rp <- function(t) {
  # Relative position (RP) for a square contingency table t:
  # RP = P(X > Y) - P(X < Y) for the two ordered classifications, computed
  # from the reversed cumulative row sums and the cumulative column sums.
  k <- nrow(t)
  n <- sum(t)
  row_rev <- rev(rowSums(t))            # y_i, categories in reversed order
  col_tot <- colSums(t)                 # x_i
  row_below <- c(0, cumsum(row_rev)[-k])  # C(y)_{i-1}
  col_below <- c(0, cumsum(col_tot)[-k])  # C(x)_{i-1}
  p0 <- sum(row_rev * col_below) / n^2
  p1 <- sum(row_below * col_tot) / n^2
  p0 - p1
}
#' @rdname syschange
#' @export
rpse <- function(t) {
    # Jackknife standard error of the relative position (RP).
    # rpk[i, j] holds RP recomputed with one observation removed from cell
    # (i, j); cells with zero count keep the placeholder value 0.
    rpk <- function(t) {
        l = nrow(t)
        y = t
        h = matrix(0, l, l)
        for (i in 1:l) {
            for (j in 1:l) {
                if (y[i, j] != 0) {
                  y[i, j] = y[i, j] - 1  # delete one observation
                  h[i, j] = rp(y)        # RP with that observation deleted
                }
                y = t                    # restore the full table
            }
        }
        return(h)
    }
    n = sum(t)
    rpkvec = c(rpk(t))
    tvec = c(t)  # flatten the table column-wise, matching c(rpk(t))
    rpd = sum(rpkvec * tvec)/n  # count-weighted mean of the jackknife values
    # Empty cells contribute tvec * (...)^2 = 0, so summing over ALL cells
    # equals the intended sum over occupied cells.  Fixes vs. original:
    # (a) grep(0, tvec) did substring matching on the printed counts, wrongly
    #     excluding any cell whose count contains the digit 0 (10, 20, 105, ...);
    # (b) when the table had no empty cells, tvec[-integer(0)] selected
    #     *nothing*, so the standard error came out as 0.
    jvarrp = (n - 1)/n * sum(tvec * (rpkvec - rpd)^2)
    jserp = sqrt(jvarrp)
    RPSE = (n - 1) * jserp/n
    return(RPSE)
}
#' @rdname syschange
#' @export
rc <- function(t) {
  # Relative concentration (RC) for a square contingency table t: a summary
  # of the systematic change, normalised by M = min(P0 - P0^2, P1 - P1^2).
  k <- nrow(t)
  n <- sum(t)
  row_rev <- rev(rowSums(t))        # y_i, categories in reversed order
  col_tot <- colSums(t)             # x_i
  row_cum <- cumsum(row_rev)        # C(y)_i
  col_cum <- cumsum(col_tot)        # C(x)_i
  row_below <- c(0, row_cum[-k])    # C(y)_{i-1}
  col_below <- c(0, col_cum[-k])    # C(x)_{i-1}
  p0 <- sum(row_rev * col_below) / n^2
  p1 <- sum(row_below * col_tot) / n^2
  m <- min(p0 - p0^2, p1 - p1^2)
  numerator <- sum(row_rev * col_below * (n - col_cum) -
                     col_tot * row_below * (n - row_cum))
  numerator / (m * n^3)
}
#' @rdname syschange
#' @export
rcse <- function(t) {
    # Jackknife standard error of the relative concentration (RC).
    # rck[i, j] holds RC recomputed with one observation removed from cell
    # (i, j); cells with zero count keep the placeholder value 0.
    rck <- function(t) {
        l = nrow(t)
        y = t
        h = matrix(0, l, l)
        for (i in 1:l) {
            for (j in 1:l) {
                if (y[i, j] != 0) {
                  y[i, j] = y[i, j] - 1  # delete one observation
                  h[i, j] = rc(y)        # RC with that observation deleted
                }
                y = t                    # restore the full table
            }
        }
        return(h)
    }
    n = sum(t)
    rckvec = c(rck(t))
    tvec = c(t)  # flatten the table column-wise, matching c(rck(t))
    rcd = sum(rckvec * tvec)/n  # count-weighted mean of the jackknife values
    # Empty cells contribute tvec * (...)^2 = 0, so summing over ALL cells
    # equals the intended sum over occupied cells.  Fixes vs. original
    # (same defects as in rpse): grep(0, tvec) substring-matched printed
    # counts, and tvec[-integer(0)] selected nothing when no cell was empty.
    jvarrc = (n - 1)/n * sum(tvec * (rckvec - rcd)^2)
    jserc = sqrt(jvarrc)
    RCSE = (n - 1) * jserc/n
    return(RCSE)
}
|
94e2ecbd387d642bff5d9565440b8a34b3f11667
|
5f0cfcec5194f11137db76056ef2b3836ab80ff8
|
/R/boxcoxEstimation.R
|
5dd8dc843489c3a9656e11588464e65c25177432
|
[] |
no_license
|
JakeJing/treevo
|
54d341655f1e6ddac5ab73df38c890be557e7d17
|
3429ba37e8dc7c79cf441361d07c000f07423b6e
|
refs/heads/master
| 2021-01-12T01:20:10.296046
| 2016-10-03T01:09:15
| 2016-10-03T01:09:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,631
|
r
|
boxcoxEstimation.R
|
#' Box-Cox Estimation
#'
#' This function Box-Cox transforms summary values
#'
#'
#' @param summaryValuesMatrix Matrix of summary statistics from simulations
#' @return Returns a matrix of Box-Cox transformed summary statistics with the
#' same dimensions as summaryValuesMatrix.
#' @author Brian O'Meara and Barb Banbury
#' @references O'Meara and Banbury, unpublished; Bates et al. 2009
#' @keywords boxcoxEstimation Box-Cox
#' @examples
#'
#' #data(res)
#'
#' #boxcoxEstimation(summaryValuesMatrix)
#'
boxcoxEstimation<-function (summaryValuesMatrix) {
    # Box-Cox transform each column (one summary statistic per column).
    # For every column:
    #   1. shift it so all values are positive (Box-Cox requires > 0),
    #   2. estimate the Box-Cox lambda with car::powerTransform, falling back
    #      to lambda = 1 (no transformation) when the column is constant or
    #      estimation fails,
    #   3. replace the column with (shifted values)^lambda.
    # Returns a list with the per-column shifts, lambdas, and the matrix.
    #library("car", quietly = T)
    boxcoxLambda <- rep(NA, dim(summaryValuesMatrix)[2])
    boxcoxAddition <- rep(NA, dim(summaryValuesMatrix)[2])
    for (summaryValueIndex in 1:dim(summaryValuesMatrix)[2]) {
        boxcoxAddition[summaryValueIndex] <- 0
        # shift reference point: 4 standard deviations below the column minimum
        lowValue <- min(summaryValuesMatrix[, summaryValueIndex]) - 4 * sd(summaryValuesMatrix[, summaryValueIndex])
        if (lowValue <= 0) {
            boxcoxAddition[summaryValueIndex] <- 4 * abs(lowValue)
        }
        summary <- summaryValuesMatrix[, summaryValueIndex] + boxcoxAddition[summaryValueIndex]
        boxcoxLambda[summaryValueIndex] <- 1
        # constant columns (sd == 0) are left untransformed (lambda stays 1)
        if (sd(summaryValuesMatrix[, summaryValueIndex]) > 0) {
            # NOTE(review): if powerTransform() errors, try() yields a
            # "try-error" object and as.numeric() coerces it to NA (with a
            # warning), so the is.na() guard below keeps lambda = 1.
            newLambda <- as.numeric(try(powerTransform(summary, method = "Nelder-Mead")$lambda))
            if (!is.na(newLambda)) {
                boxcoxLambda[summaryValueIndex] <- newLambda
            }
        }
        summaryValuesMatrix[, summaryValueIndex] <- summary^boxcoxLambda[summaryValueIndex]
    }
    return(list(boxcoxAddition = boxcoxAddition, boxcoxLambda = boxcoxLambda,
        boxcoxSummaryValuesMatrix = summaryValuesMatrix))
}
|
f16825c68b34255f40d69517ff703a53060c4bad
|
ba2383526b3a5401bc6619708b9689ab021ef33d
|
/snow17_10.18inprog.R
|
cce7341cbd62a39b197d36a32d25b065ecd79d17
|
[] |
no_license
|
dbo99/snow17tests
|
840e346652ee9fe1460e3e1bf4dcb9506136b446
|
38983cf9df6591f8dac9bb0700da2871d6975f9c
|
refs/heads/master
| 2020-03-31T12:01:16.009564
| 2020-02-19T19:46:22
| 2020-02-19T19:46:22
| 152,200,736
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 13,406
|
r
|
snow17_10.18inprog.R
|
# ---------------------------------------------------------------------------
# Snow-17 (Anderson) temperature-index snow accumulation/ablation model,
# driven with daily St. Croix forcing; currently returns and plots the
# simulated areal extent of snow cover (aesc), not melt-and-rain.
# NOTE(review): rm(list = ls()) and setwd() mark this as an interactive
# scratch script; avoid both in reusable code.
# ---------------------------------------------------------------------------
{rm(list = ls())
setwd("~/Documents/Rscripts/snow17")
library(tidyverse)
library(lubridate) #doesn't always automatically read-in with tidyverse
library(data.table)
#library(plotly)
# Forcing data: daily precipitation [mm] and min/max temperature [C];
# average temperature and day-of-year are derived columns.
df <- read.csv("stcroix.csv")
df <- df %>% mutate(date = mdy(date), tavg_c = ((tmax_c + tmin_c)/2), doy = yday(date)) %>%
  filter(date > "1982-8-16", date < "1988-8-16")
prcp <- df$p_mm
tavg <- df$tavg_c
doy <- df$doy
# input timestep interval in hours, current setup requires each to be the same
ts_t <- 24 # temperature [C]
ts_p <- 24 # precipitation [mm]
# NOTE(review): the 'par' and 'elev' arguments are immediately overwritten
# inside the function body, so whatever is passed for them is ignored
# (lazy evaluation means they are never even evaluated) -- presumably a
# work-in-progress calibration hook; confirm before reuse.
snow17 <- function(par, prcp, tavg, elev, doy,
                   ini.tstep.state =
                     c(0, 0, 0, 0, 0, 0)) {
  #we_solid[mm] #we_liquid[mm] #ati #heatdeficit[mm] #swe #si_tempcover
  # set parameters (major or minor as assigned by model creator E. Anderson)
  elev <- 1768 #representative mean areal elevation [m]
  scf <- 0.97 #(major) correction for snow gage deficiency, eg under reporting snow depth from wind/sublimation [unitless]
  mfmax <- 0.68 #(major) max melt factor during non-rain periods [mm/C/timestep] (varies with forest type/aspect, prevailing wind, etc)
  mfmin <- 0.15 #(major) min melt factor during non-rain periods [mm/C/timestep] (varies with forest type/aspect, etc)
  uadj <- 0.09 #(major) avg wind function during rain on snow events [mm/mb/C]
  si <- 750 #(major) #threshold above which there's always 100% snow cover [mm]
  pxtemp <- 1.0 #(minor) snow/rain threshold temp [C]
  mbase <- 0.5 #(minor) base temperature for snowmelt calcs [C]
  tipm <- 0.1 #(minor) antecedent temperature index [unitless], intended to represent temperature inside the snow cover but near the surface, for a gradient
  plwhc <- 0.05 #(minor) max amount of liquid water able to be held by snowpack (percent of liquid water holding capacity) [unitless]
  nmf <- 0.3 #(minor) maximum negative melt factor [mm/C/timestep]
  daygm <- 0.3 #(minor) constant melt rate at snow-soil interface [mm/timestep]
  hsnof <- 0.2 # minimum qualifying hourly snow fall rate to leave depletion curve
  par <- c(scf, mfmax, mfmin, uadj, si, pxtemp, mbase, tipm, plwhc, nmf, daygm)
  ## set basin-specific areal depletion curve
  #meanarealwe_to_ai_x <- c(0.05, 0.05, .09, .15, .23, .37, .56, .72, .86, .93, 1)
  meanarealwe_to_ai_x <- c(0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1)
  percentarealsnow_y <- c(0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1)
  dt_arealdeplete <- data.table(meanarealwe_to_ai_x, percentarealsnow_y)
  # Output accumulators, one value per forcing timestep.
  meltandrain <- vector(mode = "numeric", length = length(prcp))
  aesc <- vector(mode = "numeric", length = length(prcp))
  # loop through each timestep
  for (i in 1:length(prcp)) {
    # set initial states (update at loop end)
    # State is carried between iterations through ini.tstep.state, which is
    # reassigned at the bottom of the loop body.
    we_solid <- ini.tstep.state[1]
    we_liquid <- ini.tstep.state[2]
    ati <- ini.tstep.state[3]
    heatdef <- ini.tstep.state[4]
    swe <- ini.tstep.state[5]
    si_tempcover <- ini.tstep.state[6]
    # set current ground temperature and precipitation
    grndairtemp <- tavg[i] # mean air temperature of time step [C]
    precip <- prcp[i] # total precipitation for time step [mm]
    # set binary precipitation form & amounts (original fortran version has other choices)
    if (grndairtemp <= pxtemp) {
      # snowing
      we_newsnow <- precip
      rain <- 0
    } else {
      # raining
      we_newsnow <- 0
      rain <- precip
    }
    # new snow swe [mm]
    we_newsnow_gadj <- we_newsnow * scf # (bias correct the gage(s))
    # if 'significant' snow fall (0.2 mm/hr, save last time step's swe & % area cover) ('hsnof' = 0.2)
    #if (we_newsnow_gadj >= hsnof * ts_p) {
    #swe_b4newsnow <- swe # "SB" in manual
    #fracarealcover_b4newsnow <- fracarealcover
    #}
    # new snow density [g/cm^3]
    if (grndairtemp <= -15) { #snow density assumed about the same below ~15 C
      den_newsnow <- 0.05
    }
    else {
      den_newsnow <- 0.05 + 0.0017 * grndairtemp^1.5 #manual's snow density scaling increase with temperature
    }
    # new snow depth [cm] #centimeters for output convenience
    depth_newsnow = (0.1 * we_newsnow_gadj)/den_newsnow
    # new snow temperature
    t_newsnow <- min(0, grndairtemp)
    ## areal extent curve
    # to apply heat exchange only to snow covered area, also implicitly reduces melt factor rate as snow recedes
    swe <- swe + we_newsnow_gadj
    # define temp cover index
    if (swe >= hsnof * ts_p) {
      si_tempcover <- swe + 0.75 * we_newsnow_gadj
    } #[mm]
    else {
      si_tempcover <- si_tempcover + 0.75 * we_newsnow_gadj } #[mm]
    max_we_ap <- swe #[mm] #max water equivalent during accumulation period - revisit
    arealindex <- max(1.0e-100, min(max_we_ap, si)) #[mm]
    meanarealwe <- swe # [mm] #swe before any melt below ##same as max_we_ap, right?
    meanarealwe_to_ai <- min(1, meanarealwe/si) #max(0, min(1, meanarealwe/arealindex))
    # Linear interpolation on the areal depletion curve gives the snow
    # covered fraction for the current relative water equivalent.
    fracarealcover <- with(dt_arealdeplete, approx(meanarealwe_to_ai_x, percentarealsnow_y, xout = meanarealwe_to_ai)) %>% tail(1) %>% unlist() %>% unname()
    aesc[i] <- fracarealcover
    # energy exchange at snow/air surface when no surface melt
    #..
    # change (increase) in the heat deficit due to new snowfall [mm] (heat amount needed to heat new snow to 0C)
    # 80 cal/g: latent heat of fusion
    # 0.5 cal/g/C: specific heat of ice
    heatdefincreasefromnewsnow <- - (t_newsnow * we_newsnow_gadj)/(80/0.5)
    # define/update antecedent temperature index (represents near surface snow temp from past snow & temp history),
    # most recent air temps weighed by decreasing amounts
    # if 'significant' new snow (>1.5 mm/hr), use new snow's temp, otherwise compute ati from last ati as variable going into representation of shallow snow temp evolution
    if (we_newsnow_gadj > 1.5 * ts_p) {
      ati <- t_newsnow
    } else {
      tipm_ts_t <- 1 - ((1 - tipm)^(ts_t/6))
      ati <- ati + tipm_ts_t * (grndairtemp - ati)
    }
    ati <- min(ati, 0) #ati can't exceed 0
    # define sinusoidal seasonal variation in the non-rain melt factor (assume 365 day year)
    N_Mar21 <- doy[i] - 80
    sp <- (0.5 * sin((N_Mar21 * 2 * pi)/365)) + 0.5 # the seasonal pattern assumed
    sp_adj <- 1 # Seasonal variation adjustment, none when latitude is below ~54N
    mf <- ts_t/6 * ((sp * sp_adj * (mfmax - mfmin)) + mfmin) # the seasonally varying non-rain melt factor
    t_snowsurf <- min(0, grndairtemp)
    # now with melt factor for the day of year and the snow surface temperature, get the temperature gradient (increase or decrease)
    ## driven change in heat deficit due to the temperature gradient within the upper part of the snowpack [mm], for only the snow covered fraction:
    heatdefchangefromprofilegradient <- nmf * ts_p/6 * mf/mfmax * (ati - t_snowsurf) * fracarealcover
    # solar & atmospheric snow melt
    t_rain <- max(grndairtemp, 0) # rain temp is air temp as long as it's above 0C
    if (rain > 0.25 * ts_p) { # if 'significant' rate of 0.25 mm/hr
      # rain-on-snow melt #assumed overcast, high humidity (>90%), emissivity of 1 at cloud elev temp, which is close to ground temp
      stefan_bolt <- 6.12 * (10^(-10)) # Stefan-Boltzman constant [mm/K/hr]
      e_sat <- 2.7489 * (10^8) * exp((-4278.63/(grndairtemp + 242.792))) # saturated vapor pressure at grndairtemp [mb]
      p_atm <- 33.86 * (29.9 - (0.335 * (elev/100)) + (0.00022 * ((elev/100)^2.4))) # elevation is in hundreds of meters (incorrect in snow17 manual)
      term1 <- stefan_bolt * ts_p * (((grndairtemp + 273)^4) - (273^4))
      term2 <- 0.0125 * rain * t_rain
      term3 <- 8.5 * uadj * (ts_p/6) * ((0.9 * e_sat - 6.11) + (0.00057 * p_atm * grndairtemp))
      melt_satmos <- term1 + term2 + term3
      melt_satmos <- max(melt_satmos, 0) # enforces positive melt
      melt_satmos <- melt_satmos * fracarealcover # only snow covered fraction can melt
    }
    else if ((rain <= 0.25 * ts_p) && (grndairtemp > mbase)) { # if insignificant rain and air temp is above snowmelt threshold (usually 0C)
      # non-rain or very little rain melt - melt factor driven and accomodates heat from small rain amounts
      melt_satmos <- (mf * (grndairtemp - mbase) * (ts_p/ts_t)) + (0.0125 * rain * t_rain)
      melt_satmos <- max(melt_satmos, 0) # melt can't be negative
      melt_satmos <- melt_satmos * fracarealcover # only snow covered area can melt
    } else {
      melt_satmos <- 0 #otherwise, no solar/atmospheric melt without significant rain or temps (listed above)
    }
    # update ice water equivalent with bias-corrected new snow amount
    we_solid <- we_solid + we_newsnow_gadj # water equivalent of total ice portion of the snow cover [mm]
    # adjust heat deficit from any new snow and the evolving profile gradient from ground air temp & last new snow's temp history
    heatdef <- max(heatdef + heatdefincreasefromnewsnow + heatdefchangefromprofilegradient, 0) # [mm]
    # but limit a deep snowpack's ability to keep its surface from melting
    if (heatdef >= (1/3 * we_solid)) {
      # set heat deficit limit
      heatdef <- 1/3 * we_solid #not in 2006 documentation.., check whether in more recent & whether in ops version
    }
    if (melt_satmos < we_solid) { # if solar+atmos melt is less than the ice's water equivalent
      we_solid <- we_solid - melt_satmos # reduce ice water equivalent by the solar+atmos melt amount
      snowsurfavailliq <- melt_satmos + rain # surface liquid content is sum of solar/atmospheric melt and any rain
      liquidstorcap <- plwhc * we_solid # but the pack can retain up this much, the plwhc % of the solid water equivalent
      if ((snowsurfavailliq + we_liquid) > (heatdef + (heatdef * plwhc) + liquidstorcap)) {
        # if the solar+atmos melt + rain + existing liquid > heat deficit + deficit's liq stor cap + solid's liq stor cap
        # ie if there's sufficient available liquid water to overcome the total deficit & liquid storage capacity,
        # the snow is ripe:
        # excess liquid water is solar+atmos melt + rain + existing liquid - total deficit - liquid held by the pack
        excessliquid <- snowsurfavailliq + we_liquid - heatdef - (heatdef * plwhc) - liquidstorcap
        # increase the just-reduced we_solid, as water 'refreezes' returning pack up to 0C
        we_solid <- we_solid + heatdef #written in manual but seems wrong
        # liquid water in the pack is equal to the % maximum
        we_liquid <- liquidstorcap
        heatdef <- 0
      }
      else if ((snowsurfavailliq + we_liquid) >= heatdef) {
        # if the solar+atmos melt + rain + existing liquid [mm] > heat deficit, but not its and liquidstorcap's extra capacity
        # the snow's not ripe
        excessliquid <- 0 # there's no excess liquid (aka no melt and rain)
        # still increase the just-reduced we_solid, as water 'refreezes' returning pack up to 0C
        we_solid <- we_solid + heatdef
        # the new amount of liquid water is adjusted by the difference of solar+atmos melt + any rain and the heat deficit
        we_liquid <- we_liquid + snowsurfavailliq - heatdef
        # and the pack is at equilibrium - not cold enough for a deficit or warm enough to melt and run off
        heatdef <- 0
      } else if ((snowsurfavailliq + we_liquid) < heatdef) {
        # if solar+atmos melt + rain is less than the heat deficit
        # the snow's again not ripe
        excessliquid <- 0 # there's no excess liquid (aka no melt and rain)
        # the solid equivalent is adjusted by and increase of the new solar+atmos melt and rain input and existing liquid (cold pack freezes everything)
        we_solid <- we_solid + snowsurfavailliq + we_liquid # we_solid increases because water refreezes as heat deficit is decreased
        # the heat deficit
        heatdef <- heatdef - snowsurfavailliq - we_liquid
      }
    }
    else { #if solar+atmos melt > we_solid
      melt_satmos <- we_solid + we_liquid #solar+atmos melt is simply the swe
      we_solid <- 0 # any ice is now gone, zero
      we_liquid <- 0 # and interstitial liquid, too
      excessliquid <- melt_satmos + rain # add rain for total excessliquid (aka meltandrain)
    }
    # update antecedent temp index - if there's no deficit there can't be a profile gradient
    if (heatdef == 0) {
      ati = 0
    }
    # for existing snow, reduce swe further slightly, slightly increase excess liquid
    # lithospheric snow melt - constant daily amount of melt that takes place at the snow-ground interface
    if (we_solid > daygm) { # if more swe than the daily ground melt rate assumed
      melt_litho_liqloss <- (daygm/we_solid) * we_liquid * fracarealcover
      melt_litho_solidloss <- daygm * fracarealcover
      melt_litho <- melt_litho_liqloss + melt_litho_solidloss
      we_solid <- we_solid - melt_litho_solidloss
      we_liquid <- we_liquid - melt_litho_liqloss
      excessliquid <- excessliquid + melt_litho
      swe <- we_solid + we_liquid
    }
    else {
      melt_litho <- 0 # if bare or less swe than daily ground melt, no ground melt
      excessliquid <- excessliquid + melt_litho
    }
    # save excess liquid as "meltandrain" output - input to rainfall/runoff model (eg sac-sma)
    meltandrain[i] <- excessliquid
    # update states for next time step
    ini.tstep.state <- c(we_solid, we_liquid, ati, heatdef, swe = we_solid + we_liquid, si_tempcover)
  }
  #return(meltandrain)
  return(aesc)
}
# NOTE(review): 'par' here resolves to the graphics function and 'elev' is
# undefined at this point; the call only works because snow17() overwrites
# both arguments before ever evaluating them.
arealextent <- snow17(par, prcp, tavg, elev, doy)
df <- data.frame(df, arealextent)
ggplot(df, aes(date, arealextent)) + geom_line()
#ggplotly(p)
}
#df2 <- data.frame(meanarealwe, arealindex, meanarealwe_to_ai)
#liqlag <- 5.33 * (1 - exp ((-0.03*(ts_p/6)* we_solid )/excessliquid ) )
|
5b9de5aa5b6ffbea8d92e0c900e64f66b8867ac8
|
aad7c7b66c10940ab3cb23cb24192b2417e74fef
|
/man/isCellLine.Rd
|
cc16410540ec5b9d6b85d22f429827ef5e686325
|
[] |
no_license
|
TransmissibleCancerGroup/dftdLowCov
|
7b029a3a2b62e359b60343d6579c3a8be9136099
|
2f884d69654b4289ef322932ba5077940c260ebe
|
refs/heads/master
| 2021-01-02T04:49:03.263038
| 2020-05-28T16:15:21
| 2020-05-28T16:15:21
| 239,495,322
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 289
|
rd
|
isCellLine.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cnv_categorisation.R
\name{isCellLine}
\alias{isCellLine}
\title{Return a vector of CNV IDs for cell line samples}
\usage{
isCellLine(cnvtable)
}
\description{
Return a vector of CNV IDs for cell line samples
}
|
4e2c4a875e25387f2393f1719f0f2ff83d022758
|
0951487545bb680b6e68a005eab55c783cb4e19d
|
/R/tp73_boxplots_rna.R
|
64b42e480915327c90469f530fa3786d88a42822
|
[] |
no_license
|
UCSF-Costello-Lab/lgg_epi_mut_evolution
|
750956096040b20a2429de9da24901f46f26d527
|
1a8c03ea1a8c3ffff5c02f68fc668845c22855fe
|
refs/heads/master
| 2020-04-29T07:35:02.148351
| 2015-12-07T20:30:40
| 2015-12-07T20:30:40
| 44,123,593
| 1
| 0
| null | 2015-10-28T00:46:15
| 2015-10-12T17:41:59
|
R
|
UTF-8
|
R
| false
| false
| 2,116
|
r
|
tp73_boxplots_rna.R
|
# Compare TP73 isoform expression (cuffnorm FPKM, log2 with pseudocount)
# between primary and recurrent tumors, split by recurrence grade, and
# write boxplots for short (truncated) vs long (full) transcripts.
cuffnorm_loc <- 'data/cuffnorm_gencode_all'
trans <- function(x) log(x, 2)  # log2 transform applied to FPKM values
pseudocount <- .2               # added before log to avoid log2(0)
samps <- read.table("data/rna_samples2.txt", header=T, sep = "\t", as.is = T)
# Highest grade observed per patient = grade at recurrence.
rec_grade <- aggregate(Grade ~ Patient, data = samps , FUN = max)
names(rec_grade)[2] <- "Grade_rec"
samps_full <- merge(samps, rec_grade)
# Transcript ID lists for full-length and truncated TP73 isoforms.
tp73_full <- scan(file = 'data/tp73_full.txt', what = '')
tp73_trunc <- scan(file = 'data/tp73_trun.txt', what = '')
dat <- read.table(file.path(cuffnorm_loc, "isoforms.fpkm_table"), header = T, sep = '\t', as.is=T)
tp73_full_ind <- which(dat[,1] %in% tp73_full)
tp73_trunc_ind <- which(dat[,1] %in% tp73_trunc)
dat <- dat[c(tp73_full_ind, tp73_trunc_ind),]
# Sample groups: primary/recurrent crossed with recurrence grade 4 vs 2-3.
p_4 <- with(samps_full, RNA.seq[Tumor == "Primary" & Grade_rec == 4])
r_4 <- with(samps_full, RNA.seq[Tumor != "Primary" & Grade_rec == 4])
p_23 <- with(samps_full, RNA.seq[Tumor == "Primary" & Grade_rec != 4])
r_23 <- with(samps_full, RNA.seq[Tumor != "Primary" & Grade_rec != 4])
# NOTE(review): columns are selected by regex-matching sample IDs against
# the cuffnorm column names -- assumes IDs never match each other as
# substrings; confirm against the fpkm_table header.
g4_p <- trans(dat[, grep(paste0(p_4, collapse = '|'), names(dat), value = T)]+pseudocount)
g4_r <- trans(dat[, grep(paste0(r_4, collapse = '|'), names(dat), value = T)]+pseudocount)
g23_p <- trans(dat[, grep(paste0(p_23, collapse = '|'), names(dat), value = T)]+pseudocount)
g23_r <- trans(dat[, grep(paste0(r_23, collapse = '|'), names(dat), value = T)]+pseudocount)
save(g4_p,g4_r, g23_p,g23_r, file = "data/tp73_isoforms.RData")
# Rows = samples, columns = transcripts after the transposes below.
full_dat <- rbind(t(g23_p), t(g23_r), t(g4_p), t(g4_r))
# NOTE(review): relies on the subset rows of 'dat' keeping their original
# numeric rownames, which become column names after t() and are compared
# (with implicit coercion) against the integer row indices -- verify.
full_small <- full_dat[,colnames(full_dat) %in% tp73_trunc_ind]
full_big <- full_dat[,colnames(full_dat) %in% tp73_full_ind]
small_mean <- rowMeans(full_small)
big_mean <- rowMeans(full_big)
gbm_groups <- rep( c("g23_p", "g23_r", "g4_p", "g4_r"), c(ncol(g23_p), ncol(g23_r), ncol(g4_p), ncol(g4_r)))
pdf('results/tp73_trunc.pdf')
boxplot(small_mean ~ gbm_groups, pch = 17, col = rep(c('green4','darkorange1'), each = 2), main = "short transcripts", ylim = c(log(.2,2), 0))
dev.off()
pdf('results/tp73_full.pdf')
boxplot(big_mean ~ gbm_groups, pch = 17, col = rep(c('green4','darkorange1'), each = 2), main = "long transcripts", ylim = c(log(.2,2), 0))
dev.off()
|
e2de1bf0bdfcb4b7838132096bb55445553332c3
|
81c0158c28d8bf0de94ef8676083dbbfbce06e73
|
/man/aefaResults.Rd
|
4627aa7defe61742f48b52d14c45d5c00a1f4120
|
[] |
no_license
|
seonghobae/kaefa
|
3daf1c294292b30b2589ee0d23bfb5788f20a217
|
266b7cfbc3d117cd8e85095970b4184a1cb315be
|
refs/heads/master
| 2022-05-16T09:50:53.236346
| 2022-05-03T05:12:20
| 2022-05-03T05:12:20
| 102,938,712
| 3
| 0
| null | 2018-03-06T14:37:56
| 2017-09-09T08:18:23
|
R
|
UTF-8
|
R
| false
| true
| 1,119
|
rd
|
aefaResults.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/kaefa.R
\name{aefaResults}
\alias{aefaResults}
\title{summarise AEFA results}
\usage{
aefaResults(mirtModel, rotate = NULL, suppress = 0,
which.inspect = NULL, printRawCoefs = F, simplifyRawCoefs = T,
returnModel = F)
}
\arguments{
\item{mirtModel}{estimated aefa model}
\item{rotate}{rotation method. Default is NULL, kaefa will be automatically select the rotation criteria using aefa calibrated model.}
\item{suppress}{cutoff of rotated coefs. Generally .30 is appropriate, but .10 can work well in practice.}
\item{which.inspect}{which calibration trial to inspect. If NULL, the last model is returned.}
\item{printRawCoefs}{print the raw IRT coefs.}
\item{simplifyRawCoefs}{print the simplified raw IRT coefs if available when printRawCoefs = TRUE.}
\item{returnModel}{return converted MIRT model. default is FALSE}
}
\value{
summary of aefa results
}
\description{
summarise AEFA results
}
\examples{
\dontrun{
testMod1 <- aefa(mirt::Science, minExtraction = 1, maxExtraction = 2)
aefaResults(testMod1)
}
}
|
a633d3c153bd9acb85e21520343ed482f557a528
|
1cb7c773f534d184ac189c6b7afbc7728bd39d91
|
/R/model.R
|
e63895ff35d136483f34131d8ad38dca44a77fcd
|
[] |
no_license
|
gaborcsardi/finmix
|
bc743cfa939aae0918d4471cccf2872a98c2e41a
|
a68ebf10f0348663338731bca1a9ab93598641eb
|
refs/heads/main
| 2023-08-28T04:09:45.447626
| 2021-09-01T07:22:54
| 2021-09-01T07:22:54
| 422,958,511
| 0
| 0
| null | 2021-10-30T18:17:37
| 2021-10-30T18:17:36
| null |
UTF-8
|
R
| false
| false
| 130,495
|
r
|
model.R
|
## Copyright (C) 2013 Lars Simon Zehnder
#
# This file is part of finmix.
#
# finmix is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# finmix is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with finmix. If not, see <http://www.gnu.org/licenses/>.
## S4 class 'model': specification of a finite mixture model.
## Slots:
##   dist     distribution family (e.g. "poisson", "binomial", "normult")
##   r        dimension of a single observation
##   K        number of mixture components
##   weight   1 x K matrix of component weights
##   par      list of component parameters (layout is family-specific)
##   indicmod model for the indicators (currently "multinomial")
##   indicfix TRUE if the indicators are treated as fixed
##   T        repetitions (used by the binomial family)
.model <- setClass( "model",
                    representation( dist = "character",
                                    r = "integer",
                                    K = "integer",
                                    weight = "matrix",
                                    par = "list",
                                    indicmod = "character",
                                    indicfix = "logical",
                                    T = "matrix"),
                    validity = function( object ) {
                        ## Slot validation is delegated to a package-internal
                        ## helper; presumably it stops on invalid input --
                        ## TODO confirm, since TRUE is returned regardless.
                        .init.valid.Model( object )
                        ## else: OK ##
                        TRUE
                    },
                    prototype( dist = character(),
                               r = integer(),
                               K = integer(),
                               weight = matrix(),
                               par = list(),
                               indicmod = character(),
                               indicfix = logical(),
                               T = matrix()
                             )
)
## Constructor for class 'model' ##
## Builds a fully specified 'model' object, filling in sensible defaults
## for any missing arguments: K is inferred from the weights, r from the
## distribution family, weights default to 1/K each, and binomial models
## get a single repetition (T = 1).
"model" <- function( dist = "poisson", r, K,
                     weight = matrix(), par = list(),
                     indicmod = "multinomial",
                     indicfix = FALSE, T = matrix() )
{
    if ( missing( K ) ) {
        # Infer K from the number of weight columns (1 if no weights given).
        K <- .check.K.Model( weight )
    } else {
        K <- as.integer( K )
        # A one-component conditional Poisson collapses to plain Poisson.
        if ( K == 1 && dist == "cond.poisson" ) {
            dist <- "poisson"
        }
    }
    if ( missing( r ) ) {
        # Default dimension depends on the distribution family.
        r <- .check.r.Model( dist )
    } else {
        r <- as.integer( r )
    }
    if ( missing( weight ) && K > 1 ) {
        # Equal weights 1/K as default for a true mixture.
        weight <- .check.weight.Model( K )
    } else {
        weight <- as.matrix( weight )
    }
    if ( !missing( T ) ) {
        T <- .check.T.Model( T )
    } else {
        if ( dist == "binomial" ) {
            # Binomial models need repetitions; default to a single trial.
            T <- matrix( as.integer( 1 ) )
        }
    }
    .model( dist = dist, r = r, K = K, weight = weight,
            par = par, indicmod = indicmod,
            indicfix = indicfix, T = T )
}
## hasWeight: TRUE if slot 'weight' is set and has one column per
## component (1 x K). With verbose = TRUE an informative error is thrown
## instead of returning FALSE.
setMethod( "hasWeight", "model",
           function( object, verbose = FALSE)
           {
               if ( !all( is.na(object@weight ) ) ) {
                   if ( ncol( object@weight ) == object@K ) {
                       return( TRUE )
                   } else {
                       if ( verbose ) {
                           # NOTE(review): paste(..., sep = "") joins these
                           # fragments without spaces, so the message reads
                           # "...object.Weights..." -- probably unintended.
                           stop( paste("Wrong dimension of ",
                                       "slot 'weight' of ",
                                       "'model' object." ,
                                       "Weights must be of ",
                                       "dimension 1 x K.",
                                       sep = "" ) )
                       } else {
                           return( FALSE )
                       }
                   }
               } else {
                   if ( verbose ) {
                       stop( paste( "Slot 'weight' of 'model' ",
                                    "object is empty.",
                                    sep = "" ) )
                   } else {
                       return( FALSE )
                   }
               }
           }
)
## hasT: TRUE when slot 'T' holds any repetition values. With
## verbose = TRUE an error is thrown for an empty slot instead of
## returning FALSE.
setMethod( "hasT", "model",
           function( object, verbose = FALSE )
           {
               # Any non-NA entry means repetitions are available.
               if ( !all( is.na( object@T ) ) ) {
                   return( TRUE )
               }
               # Slot is empty: complain loudly or just report FALSE.
               if ( verbose ) {
                   stop( "Slot 'T' of 'model' object is empty." )
               }
               FALSE
           }
)
## hasPar: TRUE if the component parameters in slot 'par' are specified;
## the family-specific check is delegated to the package-internal
## .haspar.Model() helper.
setMethod( "hasPar", "model",
           function( object, verbose = FALSE )
           {
               .haspar.Model( object, verbose )
           }
)
### ----------------------------------------------------------------------
### Simulate method
### @description  Simulates values for a specified model in an 'model'
###               object.
### @par  model     an S4 'model' object; with specified parameters
### @par  N         an R 'integer' value specifying the number of
###                 values to be simulated
### @par  varargin  an S4 'fdata' object; with specified variable
###                 dimension @r and repetitions @T
### @return   an S4 object of class 'fdata' holding the simulated data
### @see ?simulate
### @author Lars Simon Zehnder
### ----------------------------------------------------------------------
setMethod( "simulate", "model",
           function( model, N = 100, varargin, seed = 0 )
           {
               ## TODO: Check model for parameters. Check varargin for dimension. Check
               ## model and varargin for consistency.
               if ( !missing( seed ) ) {
                   set.seed( seed )
               } ## Implemented maybe finmixOptions with a state variable seed
               # Default to equal weights when none are specified.
               if ( !hasWeight( model ) ) {
                   model@weight <- matrix( 1 / model@K, nrow = 1, ncol = model@K )
               }
               ## Start simulating the allocations
               S <- .simulate.indicators.Model( model, N )
               # Without a user-supplied 'fdata' object, build one with unit
               # repetitions and exposures.
               if ( missing( varargin ) ) {
                   varargin <- fdata( r = model@r, T = matrix( 1, nrow = N ),
                                      exp = matrix( 1, nrow = N ), S = S )
               } else {
                   varargin@S <- S
               }
               # hasPar() with verbose = TRUE stops if parameters are absent.
               if ( hasPar( model, verbose = TRUE ) ) {
                   .simulate.data.Model( model, N, varargin )
               }
           }
)
## plot ##
## Dispatches to a distribution-specific plotting helper. 'dev' is passed
## through so callers can plot into an already opened graphics device.
setMethod("plot", "model",
          function(x, y, dev = TRUE, ...)
          {
              dist <- x@dist
              if(dist == "normal") {
                  .plot.Normal.Model(x, dev, ...)
              } else if (dist == "normult") {
                  .plot.Normult.Model(x, dev, ...)
              } else if (dist == "exponential") {
                  .plot.Exponential.Model(x, dev, ...)
              } else if (dist == "student") {
                  .plot.Student.Model(x, dev, ...)
              } else if (dist == "studmult") {
                  .plot.Studmult.Model(x, dev, ...)
              } else if (dist %in% c("poisson", "cond.poisson")) {
                  .plot.Poisson.Model(x, dev, ...)
              } else if (dist == "binomial") {
                  # Varying repetitions cannot be represented in a single
                  # binomial plot.
                  if( abs( max( x@T ) - min( x@T ) ) > 1e-6 ) {
                      stop("Plotting a binomial distribution with varying
                           repetitions in slot 'T' is not possible.")
                  }
                  .plot.Binomial.Model(x, dev, ...)
              }
          }
)
## plotPointProc: point-process representation of the mixture; requires
## fully specified parameters and weights (checked with verbose errors).
setMethod("plotPointProc", signature(x = "model",
                                     dev = "ANY"),
          function(x, dev = TRUE, ...)
          {
              hasPar(x, verbose = TRUE)
              hasWeight(x, verbose = TRUE)
              # Currently only implemented for Poisson mixtures; other
              # families return invisibly without plotting.
              if (x@dist == "poisson") {
                  .plotpointproc.Poisson(x, dev)
              }
          }
)
## Marginal Mixture ##
## Returns the marginal mixture over the variables indexed by J; the work
## is delegated to the internal dispatcher .mixturemar.Model().
setMethod("mixturemar", "model",
          function(object, J)
          {
              .mixturemar.Model(object, J)
          }
)
## Show ##
## Compact console summary of the model specification; matrix-valued
## slots are shown by their dimensions only, not their contents.
setMethod("show", "model",
          function(object)
          {
              cat("Object 'model'\n")
              cat("     class       :", class(object), "\n")
              cat("     dist        :", object@dist, "\n")
              cat("     r           :", object@r, "\n")
              cat("     K           :", object@K, "\n")
              # Parameters are only summarised when actually specified.
              if (hasPar(object)) {
                  cat("     par         : List of",
                      length(object@par), "\n")
              }
              # Weights are irrelevant when indicators are fixed.
              if (!object@indicfix) {
                  cat("     weight      :",
                      paste(dim(object@weight), collapse = "x"),
                      "\n")
              }
              cat("     indicmod    :", object@indicmod, "\n")
              cat("     indicfix    :", object@indicfix, "\n")
              # Repetitions only apply to the binomial family.
              if (object@dist == "binomial" && !all(is.na(object@T))) {
                  cat("     T           :",
                      paste(dim(object@T), collapse = "x"), "\n")
              }
          }
)
## Getters ##
## Plain slot accessors: each generic simply exposes the matching slot.
setMethod( "getDist", "model",
           function( object ) object@dist
)
setMethod( "getR", "model",
           function( object ) object@r
)
setMethod( "getK", "model",
           function( object ) object@K
)
setMethod( "getWeight", "model",
           function( object ) object@weight
)
setMethod( "getPar", "model",
           function( object ) object@par
)
setMethod( "getIndicmod", "model",
           function( object ) object@indicmod
)
setMethod( "getIndicfix", "model",
           function( object ) object@indicfix
)
setMethod( "getT", "model",
           function( object ) object@T
)
## Setters ##
## Each setter replaces a slot and immediately re-validates so that an
## inconsistent 'model' object is never handed back to the caller.
setReplaceMethod( "setDist", "model",
                  function( object, value )
                  {
                      object@dist <- value
                      .valid.dist.Model( object )
                      return( object )
                  }
)
setReplaceMethod( "setR", "model",
                  function( object, value )
                  {
                      # Implicit coercion allows numeric input like 2.
                      object@r <- as.integer( value )
                      validObject( object )
                      return( object )
                  }
)
setReplaceMethod( "setK", "model",
                  function( object, value )
                  {
                      object@K <- as.integer( value )
                      .valid.K.Model( object )
                      # Changing K invalidates the old weights: reset to
                      # equal weights (K > 1) or an empty matrix (K == 1).
                      if ( object@K > 1 ) {
                          object@weight <- .check.weight.Model( object@K )
                      } else {
                          weight <- matrix()
                          storage.mode( weight ) <- "numeric"
                          object@weight <- weight
                      }
                      return( object )
                  }
)
setReplaceMethod( "setWeight", "model",
                  function( object, value )
                  {
                      object@weight <- as.matrix( value )
                      # K is always kept consistent with the weight columns.
                      object@K <- ncol( object@weight )
                      .valid.weight.Model( object )
                      return( object )
                  }
)
setReplaceMethod( "setPar", "model",
                  function( object, value )
                  {
                      object@par <- value
                      .valid.par.Model( object )
                      return( object )
                  }
)
setReplaceMethod( "setIndicmod", "model",
                  function( object, value )
                  {
                      # No validation helper exists for this slot (yet).
                      object@indicmod <- value
                      return( object )
                  }
)
setReplaceMethod( "setIndicfix", "model",
                  function( object, value )
                  {
                      object@indicfix <- value
                      return( object )
                  }
)
setReplaceMethod( "setT", "model",
                  function( object, value )
                  {
                      object@T <- matrix( value )
                      .valid.T.Model( object )
                      return( object )
                  }
)
### Private functions
### These functions are not exported
### Checking.
### Checking is used in the constructor: arguments for the slots are
### checked for validity and, when missing, replaced by default values so
### that the constructor always yields a fully specified, consistent
### 'model' object.
### Check K: the number of components is inferred from the user-supplied
### weights (one component per column); without weights a single-component
### model is assumed.
".check.K.Model" <- function( weight )
{
    if ( all( is.na( weight ) ) ) {
        return( as.integer( 1 ) )
    }
    NCOL( weight )
}
### Check r: the model dimension depends on the distribution family given
### in 'dist' (default "poisson" if missing): univariate families get
### r = 1, multivariate families default to r = 2; anything else is
### rejected with an error.
".check.r.Model" <- function( dist )
{
    if ( dist %in% .get.univ.Model() ) {
        return( as.integer( 1 ) )
    }
    if ( dist %in% .get.multiv.Model() ) {
        return( as.integer( 2 ) )
    }
    stop( "Unknown distribution in slot 'dist' of 'model' object." )
}
### Check weight: when no weights are supplied by the caller, default to
### an equal weight of 1/K for each of the K components (as a 1 x K row).
".check.weight.Model" <- function( K )
{
    matrix( rep( 1 / K, K ), nrow = 1 )
}
### Check T: supplied repetitions are validated; non-numeric input raises
### an error and 'numeric' input is implicitly coerced to 'integer'.
### An unspecified (all-NA) T is returned unchanged.
".check.T.Model" <- function( T )
{
    # Bug fix: the previous version fell through without a return for an
    # all-NA T, silently yielding NULL, which cannot be assigned to the
    # typed 'matrix' slot 'T'. Return the input unchanged instead.
    if ( all( is.na( T ) ) ) {
        return( T )
    }
    if ( !is.numeric( T ) ) {
        stop (paste( "Wrong specification of slot 'T' in ",
                     "'model' object. Repetitions must be of ",
                     "type 'integer'.", sep = "" ) )
    }
    # Implicit conversion of 'numeric' repetitions to 'integer'.
    storage.mode( T ) <- "integer"
    return( T )
}
### Marginal model
### Dispatcher: marginal distributions are only defined for the
### multivariate families 'normult' and 'studmult'.
".mixturemar.Model" <- function( obj, J )
{
    # Bug fix: the conditions previously tested 'object@dist' although the
    # argument is named 'obj', so every call failed with an
    # "object not found" error.
    if ( obj@dist == "normult" ) {
        .mixturemar.normult.Model( obj, J )
    } else if ( obj@dist == "studmult" ) {
        .mixturemar.studmult.Model( obj, J )
    } else {
        stop( "A marginal distribution can only be obtained from
              multivariate distributions." )
    }
}
## Marginal of a multivariate normal mixture over the variables in J: the
## result is again a normal ('normal' for one variable, otherwise
## 'normult') mixture with the same weights and the corresponding
## sub-vectors/sub-matrices of mu and sigma.
".mixturemar.normult.Model" <- function( obj, J )
{
    dist <- ifelse( length( J ) == 1, "normal", "normult" )
    r <- length( J )
    K <- obj@K
    weight <- obj@weight
    # NOTE(review): for length(J) == 1 these index operations drop to
    # vectors; confirm downstream code accepts that shape.
    mu <- obj@par$mu[J, ]
    sigma <- obj@par$sigma[J, J, ]
    par <- list( mu = mu, sigma = sigma )
    indicmod <- "multinomial"
    # Marginal models are treated as having fixed indicators.
    indicfix <- TRUE
    margin.model <- .model( dist = dist, r = r, K = K,
                            weight = weight, par = par,
                            indicmod = indicmod,
                            indicfix = indicfix )
    validObject( margin.model )
    return( margin.model )
}
## Marginal of a multivariate Student-t mixture over the variables in J:
## analogous to the normal case, but the degrees of freedom are carried
## over unchanged.
".mixturemar.studmult.Model" <- function( obj, J )
{
    dist <- ifelse( length( J ) == 1, "student", "studmult" )
    r <- length( J )
    K <- obj@K
    weight <- obj@weight
    # NOTE(review): for length(J) == 1 these index operations drop to
    # vectors; confirm downstream code accepts that shape.
    mu <- obj@par$mu[J, ]
    sigma <- obj@par$sigma[J, J, ]
    df <- obj@par$df
    par <- list( mu = mu, sigma = sigma, df = df )
    indicmod <- "multinomial"
    # Marginal models are treated as having fixed indicators.
    indicfix <- TRUE
    margin.model <- .model( dist = dist, r = r, K = K,
                            weight = weight, par = par,
                            indicmod = indicmod,
                            indicfix = indicfix )
    validObject( margin.model )
    return( margin.model )
}
### ==============================================================
### Simulate
### --------------------------------------------------------------
### --------------------------------------------------------------
### .simulate.indicators.Model
### @description  Simulates the indicators.
### @par obj    an S4 object of class 'model'
### @par N      an R 'integer' object
### @return     an R 'matrix' object with N simulated indi-
###             cators.
### @details    indicators are simulated via the slot @weight
###             the 'model' object
### @see ?simulate
### @author Lars Simon Zehnder
### --------------------------------------------------------------
### TODO: Implement C++ function.
".simulate.indicators.Model" <- function( obj, N )
{
    K <- obj@K
    if ( K == 1 ) {
        # Single component: every observation is allocated to it.
        S <- matrix(as.integer( 1 ), nrow = N, ncol = K )
    } else {
        ## if (model@indicmod = "") -> "Multinomial"
        ## if Markov else
        # NOTE(review): only "multinomial" is implemented; any other
        # indicmod leaves S undefined and return(S) fails.
        if ( obj@indicmod == "multinomial" ) {
            # Inverse-CDF sampling: count how many cumulative weights each
            # uniform draw exceeds; +1 gives the component index in 1..K.
            rnd <- runif( N )
            rnd <- matrix( rnd, nrow = N, ncol = K )
            weightm <- matrix( obj@weight, nrow = N, ncol = K,
                               byrow = TRUE )
            S <- apply( ( t( apply( weightm, 1, cumsum ) ) < rnd ), 1, sum ) + 1
            S <- matrix( S, nrow = N )
            storage.mode( S ) <- "integer"
        }
    }
    return( S )
}
### --------------------------------------------------------------------
### .simulate.data.Model
### @description  Dispatches to the distribution-specific simulation
###               routine for the family stored in slot 'dist'.
### @par obj        an S4 'model' object
### @par N          an R 'integer' object; number of simulated values
### @par fdata.obj  an S4 'fdata' object
### @return     an S4 object of class 'fdata' with simulated values
### @see ?fdata, ?simulate
### @author Lars Simon Zehnder
### ---------------------------------------------------------------------
".simulate.data.Model" <- function( obj, N, fdata.obj )
{
    # Note: as in the original if/else chain there is no branch for
    # "studmult"; such models fall through and yield NULL.
    switch( obj@dist,
            "poisson"      = .simulate.data.poisson.Model( obj, N, fdata.obj ),
            "cond.poisson" = .simulate.data.poisson.Model( obj, N, fdata.obj ),
            "binomial"     = .simulate.data.binomial.Model( obj, N, fdata.obj ),
            "exponential"  = .simulate.data.exponential.Model( obj, N, fdata.obj ),
            "normal"       = .simulate.data.normal.Model( obj, N, fdata.obj ),
            "student"      = .simulate.data.student.Model( obj, N, fdata.obj ),
            "normult"      = .simulate.data.normult.Model( obj, N, fdata.obj ) )
}
### ---------------------------------------------------------------------
### .simulate.data.poisson.Model
### @description Simulates values from a Poisson mixture using pre-
### specified model and indicators
### @par obj an S4 object of class 'model'
### @par N an R 'integer' object; number of simulated values
### @par fdata.obj an S4 object of class 'fdata'
### @return an S4 object of class 'fdata' with simulated values
### @see ?simulate, model:::.simulate.data.Model, ?rpois
### @author Lars Simon Zehnder
### ---------------------------------------------------------------------
".simulate.data.poisson.Model" <- function( obj, N, fdata.obj )
{
fdata.obj@type <- "discrete"
fdata.obj@sim <- TRUE
fdata.obj@y <- matrix( rpois( N, fdata.obj@exp * obj@par$lambda[fdata.obj@S] ) )
return( fdata.obj )
}
### ---------------------------------------------------------------------
### .simulate.data.binomial.Model
### @description Simulates values from a Binomial mixture using pre-
### specified model and indicators
### @par obj an S4 object of class 'model'
### @par N an R 'integer' object; number of simulated values
### @par fdata.obj an S4 object of class 'fdata'
### @return an S4 object of class 'fdata' with simulated values
### @see ?simulate, model:::.simulate.data.Model, ?rbinom
### @author Lars Simon Zehnder
### ---------------------------------------------------------------------
".simulate.data.binomial.Model" <- function( obj, N, fdata.obj )
{
if ( !hasT(fdata.obj ) ) {
fdata.obj@T <- as.matrix( 1 )
}
fdata.obj@type <- "discrete"
fdata.obj@sim <- TRUE
fdata.obj@y <- matrix( rbinom( N, fdata.obj@T, obj@par$p[fdata.obj@S] ) )
return( fdata.obj )
}
### ---------------------------------------------------------------------
### .simulate.data.exponential.Model
### @description Simulates values from an Exponential mixture using
### specified model and indicators.
### @param obj an S4 object of class 'model'
### @param N an R 'integer' object; number of simulated values
### @param fdata.obj an S4 object of class 'fdata'
### @return an S4 object of class 'fdata' with simulated values
### @see ?simulate, model:::.simulate.data.Model, ?rexp
### @author Lars Simon Zehnder
### ---------------------------------------------------------------------
".simulate.data.exponential.Model" <- function( obj, N, fdata.obj )
{
fdata.obj@type <- "continuous"
fdata.obj@sim <- TRUE
fdata.obj@y <- matrix( rexp( N, obj@par$lambda[fdata.obj@S] ) )
return( fdata.obj )
}
### ---------------------------------------------------------------------
### .simulate.data.normal.Model
### @description Simulates values from a Normal mixture using
### specified model and indicators.
### @param obj an S4 object of class 'model'
### @param N an R 'integer' object; number of simulated values
### @param fdata.obj an S4 object of class 'fdata'
### @return an S4 object of class 'fdata' with simulated values
### @see ?simulate, model:::.simulate.data.Model, ?rnorm
### @author Lars Simon Zehnder
### ---------------------------------------------------------------------
".simulate.data.normal.Model" <- function( obj, N, fdata.obj )
{
fdata.obj@type <- "continuous"
fdata.obj@sim <- TRUE
fdata.obj@y <- matrix( rnorm( N, obj@par$mu[fdata.obj@S],
obj@par$sigma[fdata.obj@S] ) )
return( fdata.obj )
}
".simulate.data.student.Model" <- function( obj, N, fdata.obj )
{
fdata.obj@type <- "continuous"
fdata.obj@sim <- TRUE
omega <- rgamma( N, obj@par$df[fdata.obj@S] / 2,
rate = 2 / obj@par$df[fdata.obj@S] )
fdata.obj@y <- as.matrix( obj@par$mu[fdata.obj@S] +
sqrt( obj@par$sigma[fdata.obj@S] / omega ) *
rnorm( N, 0.0, 1.0 ) )
return( fdata.obj )
}
".simulate.data.normult.Model" <- function( obj, N, fdata.obj )
{
fdata.obj@type <- "continuous"
fdata.obj@sim <- TRUE
fdata.obj@y <- matrix( numeric(), nrow = N, ncol = obj@r )
fdata.obj@r <- obj@r
for ( i in 1:N ) {
fdata.obj@y[i, ] <- rmvnorm( 1, mean = obj@par$mu[, fdata.obj@S[i]],
sigma = obj@par$sigma[,, fdata.obj@S[i]],
method = "chol" )
}
return( fdata.obj )
}
### Plotting
### Plot Poisson models: Poisson models are discrete
### models and a barplot is used.
### The range for the x-axis is determined via the
### quantiles of the largest and smallest Poisson model
### in the mixture.
".plot.Poisson.Model" <- function(model.obj, dev, ...)
{
if (.check.grDevice() && dev) {
dev.new(title = "Model plot")
}
lambda <- model.obj@par$lambda
weight <- model.obj@weight
xlim.up <- qpois(.9999, lambda = max(lambda))
xlim.low <- qpois(.0001, lambda = min(lambda))
x.grid <- seq(xlim.low, xlim.up, by = 1)
y.grid <- sapply(x.grid, dpois, lambda = lambda)
y.grid <- weight %*% y.grid
main.title <- paste("Poisson Mixture K = ",
model.obj@K, sep="")
label.grid <- axisTicks(c(xlim.low, xlim.up), log = FALSE,
nint = 10)
bp <- barplot(y.grid, main = main.title, axes = F,
col = "gray65", border = "gray65", ...)
axis(side = 2, cex = .7, cex.axis = .7)
axis(side = 1, tick = FALSE, at = bp[which(x.grid %in% label.grid)],
labels = label.grid, cex.axis = .7)
mtext(side = 1, "x", cex = .7, cex.axis = .7, line = 3)
mtext(side = 2, "P(x)", cex = .7, cex.axis = .7, line = 3)
}
### Plot Binomial models: Binomial models are discrete
### models and line model is used.
### The grid for the x-axis is determined by taking
### the
".plot.Binomial.Model" <- function(model.obj, dev, ...)
{
if (.check.grDevice() && dev) {
dev.new(title = "Model plot")
}
n <- model.obj@T[1]
p <- model.obj@par$p
weight <- model.obj@weight
xlim <- max(n, na.rm = TRUE)
x.grid <- seq(0, xlim, by = 1)
y.grid <- sapply(x.grid, dbinom, size = n, p = p)
y.grid <- weight %*% y.grid
main.title <- paste("Binomial Mixture K = ",
model.obj@K, sep = "")
plot(x.grid, y.grid, main = main.title, type = "h",
xlab = "x", ylab = "P(x)", ...)
points(x.grid, y.grid, pch = 20)
}
".plot.Exponential.Model" <- function(model.obj, dev, ...)
{
if (.check.grDevice() && dev) {
dev.new(title = "Model plot")
}
lambda <- model.obj@par$lambda
weight <- model.obj@weight
min.lambda <- min(lambda, na.rm = TRUE)
xlim <- qexp(.9999, rate = min.lambda)
x.grid <- seq(0, ceiling(xlim), length =
as.integer(100 * lambda^(-2)))
y.grid <- sapply(x.grid, dexp, rate = lambda)
y.grid <- weight %*% y.grid
main.title <- paste("Exponential Mixture K = ",
model.obj@K, sep = "")
plot(x.grid, y.grid, main = main.title, type = "l",
xlab = "x", ylab = "P(x)", ...)
}
".plot.Student.Model" <- function(model.obj, dev, ...)
{
if (.check.grDevice() && dev) {
dev.new(title = "Model plot")
}
mu <- model.obj@par$mu
sigma <- model.obj@par$sigma
df <- model.obj@par$df
weight <- model.obj@weight
max.mu <- max(mu, na.rm = TRUE)
max.sigma <- max(sigma, na.rm = TRUE)
min.df <- min(df, na.rm = TRUE)
xlim <- max.mu + max.sigma * qt(.9999, min.df)
x.grid <- seq(-xlim, xlim, length = 1000) + max.mu
y.grid <- sapply(x.grid, "-", mu)
y.grid <- apply(y.grid, 2, "/", sigma)
y.grid <- apply(y.grid, 2, dt, df = df)
y.grid <- apply(y.grid, 2, "/", sqrt(sigma))
y.grid <- t(weight %*% y.grid)
main.title <- paste("Student-t Mixture K = ",
model.obj@K, sep="")
plot(x.grid, y.grid, main = main.title, type = "l",
xlab = "x", ylab = "P(x)", ...)
}
".plot.Normal.Model" <- function(model.obj, dev, ...)
{
if (.check.grDevice() && dev) {
dev.new(title = "Model Plot")
}
mu <- model.obj@par$mu
sigma <- model.obj@par$sigma
weight <- model.obj@weight
max.mu <- max(mu, na.rm = TRUE)
max.sigma <- max(mu, na.rm = TRUE)
xlim <- qnorm(.9999, mean = max.mu,
sd = max.sigma)
x.grid <- seq(-xlim, xlim, length = 1000) + max.mu
y.grid <- sapply(x.grid, dnorm, mean = mu,
sd = sigma)
y.grid <- weight %*% y.grid
main.title <- paste("Normal Mixture K = ",
model.obj@K, sep = "")
plot(x.grid, y.grid, main = main.title, type = "l",
xlab = "x", ylab = "P(x)", ...)
}
".plot.Normult.Model" <- function(model.obj, dev, ...)
{
K <- model.obj@K
r <- model.obj@r
if (r == 2) {
if (.check.gr.Device() && dev) {
dev.new(title = "Model: Perspective plot")
}
xyz.grid <- .generate.Grid.Normal(model.obj)
main.title = paste("Multivariate Normal Mixture K = ",
K, sep = "")
persp(xyz.grid$x, xyz.grid$y, xyz.grid$z, col = "gray65",
border = "gray47", theta = 55, phi = 30, expand = 0.5,
lphi = 180, ltheta = 90, r = 40, d = 0.1,
ticktype = "detailed", zlab = "P(x)", xlab = "r = 1",
ylab = "r = 2", cex = 0.7, cex.lab = 0.7, cex.axis = 0.7)
} else if (r > 2 && r < 6) {
if (.check.grDevice() && dev) {
dev.new(title = "Model: Contour plots")
}
if (r == 3) {
par(mfrow = c(1, r), mar = c(2, 2, 2, 2),
oma = c(4, 5, 1, 5))
} else if (r == 4) {
par(mfrow = c(2, 3), mar = c(2, 2, 2, 2),
oma = c(4, 5, 1, 5))
} else {
par(mfrow = c(2, 5), mar = c(2, 2, 2, 2),
oma = c(4, 5, 1, 5))
}
for (i in seq(1, r - 1)) {
for (j in seq(1, r)) {
marmodel <- mixturemar(model.obj, J = c(i, j))
xyz.grid <- .generate.Grid.Normal(marmodel)
contour(xyz.grid$x, xyz.grid$y, xyz.grid$z,
col = "gray47", cex = 0.7, cex.axis = 0.7,
xlab = paste("r = ", i, sep = ""),
ylab = paste("r = ", j, sep = ""))
}
}
} else {
stop("Method 'plot' for 'model' objects is not implemented for
model dimensions of r > 5.")
}
}
".plot.Normult.Model" <- function(model.obj, dev, ...)
{
K <- model.obj@K
r <- model.obj@r
if (r == 2) {
if (.check.gr.Device() && dev) {
dev.new(title = "Model: Perspective plot")
}
xyz.grid <- .generate.Grid.Student(model.obj)
main.title = paste("Multivariate Student-t Mixture K = ",
K, sep = "")
persp(xyz.grid$x, xyz.grid$y, xyz.grid$z, col = "gray65",
border = "gray47", theta = 55, phi = 30, expand = 0.5,
lphi = 180, ltheta = 90, r = 40, d = 0.1,
ticktype = "detailed", zlab = "P(x)", xlab = "r = 1",
ylab = "r = 2", cex = 0.7, cex.lab = 0.7, cex.axis = 0.7)
} else if (r > 2 && r < 6) {
if (.check.grDevice() && dev) {
dev.new(title = "Model: Contour plots")
}
if (r == 3) {
par(mfrow = c(1, r), mar = c(2, 2, 2, 2),
oma = c(4, 5, 1, 5))
} else if (r == 4) {
par(mfrow = c(2, 3), mar = c(2, 2, 2, 2),
oma = c(4, 5, 1, 5))
} else {
par(mfrow = c(2, 5), mar = c(2, 2, 2, 2),
oma = c(4, 5, 1, 5))
}
for (i in seq(1, r - 1)) {
for (j in seq(1, r)) {
marmodel <- mixturemar(model.obj, J = c(i, j))
xyz.grid <- .generate.Grid.Student(marmodel)
contour(xyz.grid$x, xyz.grid$y, xyz.grid$z,
col = "gray47", cex = 0.7, cex.axis = 0.7,
xlab = paste("r = ", i, sep = ""),
ylab = paste("r = ", j, sep = ""))
}
}
} else {
stop("Method 'plot' for 'model' objects is not implemented for
model dimensions of r > 5.")
}
}
".generate.Grid.Normal" <- function(model.obj)
{
mu <- model.obj@par$mu
sigma <- model.obj@par$sigma
weight <- model.obj@weight
func <- function(s, t)
{
value <- 0
for (k in seq(1, K)) {
value <- value + weight[k] *
dmvnorm(cbind(s, t), mean = mu[, k],
sigma = sigma[,, k])
}
}
mu.norm <- apply(mu, 2, function(x) sqrt(sum(x^2)))
max.mu.index <- tail(sort(mu.norm, index = TRUE)$ix, 1)
max.mu <- mu[, max.mu.index]
sigma.det <- apply(sigma, 3, det)
max.sigma.index <- tail(sort(sigma.det, index = TRUE)$ix, 1)
max.sigma <- sigma[,, max.sigma.index]
xylim <- qmvnorm(.9999, mean = max.mu,
sigma = max.sigma)$quantile
x.grid <- seq(-xylim, xylim, length = 100)
xy.grid <- cbind(x.grid, x.grid)
xy.grid <- t(apply(xy.grid, 1, "+", max.mu))
z.grid <- outer(xy.grid[, 1], xy.grid[, 2], func)
grid.list <- list(x = xy.grid[, 1], y = y.grid[, 2],
z = z.grid)
return(grid.list)
}
".generate.Grid.Student" <- function(model.obj)
{
mu <- model.obj@par$mu
sigma <- model.obj@par$sigma
df <- model.obj@par$df
weight <- model.obj@weight
func <- function(s, t)
{
value <- 0
for (k in seq(1, K)) {
value <- value + weight[k] *
dmvt(cbind(s, t), delta = mu[, k],
sigma = sigma[,, k], df = df[k])
}
}
mu.norm <- apply(mu, 2, function(x) sqrt(sum(x^2)))
max.mu.index <- tail(sort(mu.norm, index = TRUE)$ix, 1)
max.mu <- mu[, max.mu.index]
sigma.det <- apply(sigma, 3, det)
max.sigma.index <- tail(sort(sigma.det, index = TRUE)$ix, 1)
max.sigma <- sigma[,, max.sigma.index]
min.df <- min(df, na.rm = TRUE)
xylim <- qmvt(.9999, delta = max.mu,
sigma = max.sigma, df = min.df)$quantile
x.grid <- seq(-xylim, xylim, length = 100)
xy.grid <- cbind(x.grid, x.grid)
xy.grid <- t(apply(xy.grid, 1, "+", max.mu))
z.grid <- outer(xy.grid[, 1], xy.grid[, 2], func)
grid.list <- list(x = xy.grid[, 1], y = y.grid[, 2],
z = z.grid)
return(grid.list)
}
### plotPointProc
".plotpointproc.Poisson" <- function(x, dev)
{
K <- x@K
if (.check.grDevice() && dev) {
dev.new(title = "Point Process Representation")
}
if (min(x@par$lambda) < 1) {
lambda <- log(x@par$lambda)
} else {
lambda <- x@par$lambda
}
y.grid <- rep(0, K)
size.grid <- as.vector(x@weight * 4)
col.grid <- gray.colors(K, start = 0.2,
end = 0.5)
plot(lambda, y.grid, pch = 20, col = col.grid,
cex = size.grid, cex.lab = .7, cex.axis = .7,
main = "", ylab = "", xlab = "")
mtext(side = 1, bquote(lambda), cex = .7, cex.lab = .7,
line = 3)
legend.names <- list("", K)
for (k in seq(1, K)) {
legend.names[[k]] <- bquote(lambda[.(k)])
}
legend("topright", legend = do.call(expression, legend.names),
col = col.grid, fill = col.grid)
}
### Has
### Checks if a 'model' object has specified parameters.
".haspar.Model" <- function( obj, verbose )
{
if ( length( obj@par ) > 0 ) {
dist <- obj@dist
if ( dist %in% c( "poisson", "cond.poisson" ) ) {
.haspar.poisson.Model( obj, verbose )
} else if ( dist == "binomial" ) {
.haspar.binomial.Model(obj, verbose)
} else if ( dist == "exponential" ) {
.haspar.exponential.Model( obj, verbose )
} else if ( dist == "normal" ) {
.haspar.normal.Model( obj, verbose )
} else if ( dist == "student" ) {
.haspar.student.Model( obj, verbose )
} else if ( dist == "normult" ) {
.haspar.normult.Model( obj, verbose )
} else if ( dist == "studmult" ) {
.haspar.studmult.Model( obj, verbose )
}
} else {
if ( verbose ) {
stop( paste( "Slot 'par' of 'model' object is ",
"empty.", sep = "" ) )
} else {
return( FALSE )
}
}
}
### -----------------------------------------------------------------
### .haspar.poisson.Mode
### @description Checks if a Poisson model has fully specified
### parameters. If verbose is set to TRUE an error
### is thrown.
### @par obj an S4 object of class 'model'
### @par verbose an object of class 'logical'
### @return either TRUE or FALSE if parameters are fully
### specified or not. In case verbose == FALSE an
### error is thrown.
### -----------------------------------------------------------------
".haspar.poisson.Model" <- function(obj, verbose)
{
if ( length( obj@par ) == 0 ) {
if ( verbose ) {
stop( "Slot @par in 'model' object is empty.",
call. = FALSE )
} else {
return( FALSE )
}
} else {
if ( !"lambda" %in% names( obj@par ) ) {
if ( verbose ) {
stop( paste( "Wrong specification of slot @par ",
"in 'model' object. Binomial models ",
"need a parameter vector named 'lambda'.",
sep = "" ), call. = FALSE )
} else {
return( FALSE )
}
} else {
if ( length( obj@par$lambda ) != obj@K ) {
if ( verbose ) {
stop( paste( "Wrong specification of slot @par of ",
"'model' object. Slot @K does not match ",
"dimension of parameters in @par$lambda.",
sep = "" ), call. = FALSE )
} else {
return( FALSE )
}
} else {
return( TRUE )
}
}
}
}
### -------------------------------------------------------------------
### .haspar.binomial.Model
### @description Checks if a Binomial model has fully specified
### parameters. If verbose is set to TRUE an error is
### thrown.
### @par obj an S4 object of class 'model'
### @par verbose an object of class 'logical'
### @return either TRUE or FALSE if parameters are fully
### specified or not. In case verbose == TRUE an
### error is thrown.
### -------------------------------------------------------------------
".haspar.binomial.Model" <- function(obj, verbose)
{
if ( length( obj@par ) == 0 ) {
if ( verbose ) {
stop( "Slot @par in 'model' object is empty.",
call. = FALSE )
} else {
return( FALSE )
}
} else {
if ("p" %in% names(obj@par)) {
if ( verbose ) {
stop( paste( "Wring specification of slot @par ",
"in 'model' object. Binomial models ",
"need a parameter named 'p'.", sep = "" ),
call. = FALSE )
} else {
return( FALSE )
}
} else {
if ( length( obj@par$p ) != obj@K ) {
if ( verbose ) {
stop( paste( "Wrong specification of slot @par of ",
"'model' object. Slot @K does not ",
"match the dimension of parameters ",
"in @par$p.", sep = "" ), call. = FALSE )
} else {
return( FALSE )
}
} else {
return( TRUE )
}
}
}
}
### ------------------------------------------------------------------
### .haspar.exponential.Model
### @description Checks if an Exponential model has fully specified
### parameters. If verbose is set to TRUE an error is
### thrown.
### @param obj an S4 object of class 'model'
### @param verbose an object of class 'logical'
### @return either TRUE or FALSE if parameters are fully specified or
### nor. In case verbose == TRUE an error is thrown .
### ------------------------------------------------------------------
".haspar.exponential.Model" <- function( obj, verbose )
{
if ( length( obj@par ) == 0 ) {
if ( verbose ) {
stop( "Slot @par in 'model' object is empty",
call. = FALSE )
} else {
return( FALSE )
}
} else {
if ( !"lambda" %in% names( obj@par ) ) {
if ( verbose ) {
stop( paste( "Wrong specification of slot @par ",
"in 'model' object. Exponential ",
"models need a parameter named ",
"'lambda'.", sep = "" ),
call. = FALSE )
} else {
return( FALSE )
}
} else {
if ( length( obj@par$lambda ) != obj@K ) {
if ( verbose ) {
stop( paste( "Wrong specification of slot @par in ",
"'model' object. Number of Exponential ",
"parameters in @par$lambda must match ",
"number of components in slot @K.",
sep = "" ), call. = FALSE )
} else {
return( FALSE )
}
} else {
return ( TRUE )
}
}
}
}
### ------------------------------------------------------------------
### .haspar.normal.Model
### @description Checks if a Normal model has fully specified
### parameters. If verbose is set to TRUE an error is
### thrown.
### @param obj an S4 object of class 'model'
### @param verbose an object of class 'logical'
### @return either TRUE or FALSE if parameters are fully specified or
### not. In case verbose == TRUE an error is thrown .
### ------------------------------------------------------------------
".haspar.normal.Model" <- function( obj, verbose )
{
K <- obj@K
if ( length( obj@par ) == 0 ) {
if ( verbose ) {
stop( "Slot @par in 'model' object is empty.",
call. = FALSE )
} else {
return( FALSE )
}
} else {
if ( !( "mu" %in% names( obj@par ) ) ) {
if ( verbose ) {
stop( paste( "Wrong specification of slot @par ",
"in 'model' object. Normal models ",
"need a mean vector named 'mu'.",
sep = "" ), call. = FALSE )
} else {
return( FALSE )
}
} else {
if ( length( obj@par$mu ) != K ) {
if ( verbose ) {
stop( paste( "Wrong specification of slot @par ",
"in 'model' object. Slot @K does ",
"not match dimension of parameter ",
"@par$mu." , sep = "" ), call. = FALSE )
} else {
return( FALSE )
}
} else {
if ( !( "sigma" %in% names( obj@par ) ) ) {
if ( verbose ) {
stop( paste( "Wrong specification of slot @par ",
"in 'model' object. Normal models ",
"need a standard deviation vector ",
"named 'sigma'.", sep = "" ),
call. = FALSE )
} else {
return( FALSE )
}
} else {
if ( length( obj@par$sigma ) != K ) {
if ( verbose ) {
stop( paste( "Wrong specification of slot @par ",
"in 'model' object. Slot @K does ",
"not match dimension of parameter ",
"par@$sigma.", sep = "" ), call. = FALSE )
}
} else {
return( TRUE )
}
}
}
}
}
}
### ------------------------------------------------------------------
### .haspar.normal.Model
### @description Checks if a Normal model has fully specified
### parameters. If verbose is set to TRUE an error is
### thrown.
### @param obj an S4 object of class 'model'
### @param verbose an object of class 'logical'
### @return either TRUE or FALSE if parameters are fully specified or
### not. In case verbose == TRUE an error is thrown .
### ------------------------------------------------------------------
".haspar.normult.Model" <- function( obj, verbose )
{
K <- obj@K
if ( length( obj@par ) == 0 ) {
if ( verbose ) {
stop( "Slot @par in 'model' object is empty.",
call. = FALSE )
} else {
return( FALSE )
}
} else {
if ( !( "mu" %in% names( obj@par ) ) ) {
if ( verbose ) {
stop( paste( "Wrong specification of slot @par ",
"in 'model' object. Normal models ",
"need a mean vector named 'mu'.",
sep = "" ), call. = FALSE )
} else {
return( FALSE )
}
} else {
if ( ncol( obj@par$mu ) != K ) {
if ( verbose ) {
stop( paste( "Wrong specification of slot @par ",
"in 'model' object. Slot @K does ",
"not match dimension of parameter ",
"@par$mu." , sep = "" ), call. = FALSE )
} else {
return( FALSE )
}
} else {
if ( !( "sigma" %in% names( obj@par ) ) ) {
if ( verbose ) {
stop( paste( "Wrong specification of slot @par ",
"in 'model' object. Normal models ",
"need a standard deviation vector ",
"named 'sigma'.", sep = "" ),
call. = FALSE )
} else {
return( FALSE )
}
} else {
if ( dim( obj@par$sigma )[3] != K ) {
if ( verbose ) {
stop( paste( "Wrong specification of slot @par ",
"in 'model' object. Slot @K does ",
"not match dimension of parameter ",
"par@$sigma.", sep = "" ), call. = FALSE )
}
} else {
return( TRUE )
}
}
}
}
}
}
### ------------------------------------------------------------------
### .haspar.student.Model
### @description Checks if a Normal model has fully specified
### parameters. If verbose is set to TRUE an error is
### thrown.
### @param obj an S4 object of class 'model'
### @param verbose an object of class 'logical'
### @return either TRUE or FALSE if parameters are fully specified or
### not. In case verbose == TRUE an error is thrown .
### ------------------------------------------------------------------
".haspar.student.Model" <- function( obj, verbose )
{
K <- obj@K
if ( length( obj@par ) == 0 ) {
if ( verbose ) {
stop( "Slot @par in 'model' object is empty.",
call. = FALSE )
} else {
return( FALSE )
}
} else {
if ( !( "mu" %in% names( obj@par ) ) ) {
if ( verbose ) {
stop( paste( "Wrong specification of slot @par ",
"in 'model' object. Student-t models ",
"need a mean vector named 'mu'.",
sep = "" ), call. = FALSE )
} else {
return( FALSE )
}
} else {
if ( length( obj@par$mu ) != K ) {
if ( verbose ) {
stop( paste( "Wrong specification of slot @par ",
"in 'model' object. Slot @K does ",
"not match dimension of parameter ",
"@par$mu." , sep = "" ), call. = FALSE )
} else {
return( FALSE )
}
} else {
if ( !( "sigma" %in% names( obj@par ) ) ) {
if ( verbose ) {
stop( paste( "Wrong specification of slot @par ",
"in 'model' object. Student-t models ",
"need a standard deviation vector ",
"named 'sigma'.", sep = "" ),
call. = FALSE )
} else {
return( FALSE )
}
} else {
if ( length( obj@par$sigma ) != K ) {
if ( verbose ) {
stop( paste( "Wrong specification of slot @par ",
"in 'model' object. Slot @K does ",
"not match dimension of parameter ",
"par@$sigma.", sep = "" ), call. = FALSE )
}
} else {
if ( !"df" %in% names( obj@par ) ) {
if ( verbose ) {
stop( paste( "Wrong specification of slot @par ",
"in 'model' object. Student-t models ",
"need a vector with degrees of freedom ",
"named 'df'.", sep = "" ), call. = FALSE )
} else {
return( FALSE )
}
} else {
return( TRUE )
}
}
}
}
}
}
}
### ------------------------------------------------------------------
### .haspar.student.Model
### @description Checks if a Normal model has fully specified
### parameters. If verbose is set to TRUE an error is
### thrown.
### @param obj an S4 object of class 'model'
### @param verbose an object of class 'logical'
### @return either TRUE or FALSE if parameters are fully specified or
### not. In case verbose == TRUE an error is thrown .
### ------------------------------------------------------------------
".haspar.studmult.Model" <- function( obj, verbose )
{
K <- obj@K
if ( length( obj@par ) == 0 ) {
if ( verbose ) {
stop( "Slot @par in 'model' object is empty.",
call. = FALSE )
} else {
return( FALSE )
}
} else {
if ( !( "mu" %in% names( obj@par ) ) ) {
if ( verbose ) {
stop( paste( "Wrong specification of slot @par ",
"in 'model' object. Student-t models ",
"need a mean vector named 'mu'.",
sep = "" ), call. = FALSE )
} else {
return( FALSE )
}
} else {
if ( ncol( obj@par$mu ) != K ) {
if ( verbose ) {
stop( paste( "Wrong specification of slot @par ",
"in 'model' object. Slot @K does ",
"not match dimension of parameter ",
"@par$mu." , sep = "" ), call. = FALSE )
} else {
return( FALSE )
}
} else {
if ( !( "sigma" %in% names( obj@par ) ) ) {
if ( verbose ) {
stop( paste( "Wrong specification of slot @par ",
"in 'model' object. Student-t models ",
"need a standard deviation vector ",
"named 'sigma'.", sep = "" ),
call. = FALSE )
} else {
return( FALSE )
}
} else {
if ( dim( obj@par$sigma )[3] != K ) {
if ( verbose ) {
stop( paste( "Wrong specification of slot @par ",
"in 'model' object. Slot @K does ",
"not match dimension of parameter ",
"par@$sigma.", sep = "" ), call. = FALSE )
}
} else {
if ( !"df" %in% names( obj@par ) ) {
if ( verbose ) {
stop( paste( "Wrong specification of slot @par ",
"in 'model' object. Student-t models ",
"need a vector with degrees of freedom ",
"named 'df'.", sep = "" ), call. = FALSE )
} else {
return( FALSE )
}
} else {
return( TRUE )
}
}
}
}
}
}
}
### Validity
### Validity checking of model objects is implemented
### in two versions: an initializing version relying partly
### on warnings and amore restrictive version relying exclusively
### on errors.
### The less restrictive validity check is used in setters and
### and the fully restrictive version in the constructor and later
### usage of model object (e.g. see 'mcmcstart()')
### -----------------------------------------------------------------------------
### .init.valid.Model
### @description Initial validity check for model object
### @par obj a model object
### @return An error in case certain conditions are failed or there are
### inconsistencies.
### @see ?model, ?vignette('finmix'), .init.valid.*, .valid.*
### @author Lars Simon Zehnder
### -----------------------------------------------------------------------------
".init.valid.Model" <- function(obj)
{
.valid.dist.Model(obj)
.init.valid.K.Model(obj)
.init.valid.r.Model(obj)
.init.valid.par.Model(obj)
.init.valid.weight.Model(obj)
.init.valid.T.Model(obj)
}
### -----------------------------------------------------------------------------
### .init.Model
### @description Validity check for model object
### @par obj a model object
### @return An error in case certain conditions are failed or a warning
### if there are inconsistencies.
### @see ?model, ?vignette('finmix'), .init.valid.*, .valid.*
### @author Lars Simon Zehnder
### -----------------------------------------------------------------------------
".valid.Model" <- function(obj)
{
.valid.dist.Model(obj)
.valid.K.Model(obj)
.valid.r.Model(obj)
.valid.par.Model(obj)
.valid.weight.Model(obj)
.valid.T.Model(obj)
}
### ----------------------------------------------------------------------------
### .valid.dist.Model
### @description Initial validity check for the distribution of a finite
### mixture model
### @par obj a model object
### @return An error in case the distribution is unknown.
### @see ?model, ?vignette('finmix')i
### ----------------------------------------------------------------------------
".valid.dist.Model" <- function( obj )
{
dists <- c( "normal", "normult", "exponential",
"student", "studmult", "poisson",
"cond.poisson", "binomial")
indicmod.dists <- c( "multinomial" )
if ( length( obj@dist ) > 0 ) {
if ( !( obj@dist %in% dists ) ) {
stop( paste( "Unknown distribution in slot 'dist' ",
"of 'model' object.", sep = "" ),
call. = FALSE )
} else {
if ( !( obj@indicmod %in% indicmod.dists ) ) {
stop( paste( "Unknown indicator distribution in slot ",
"'indicmod' of 'model' object.", sep = "" ),
call. = FALSE )
}
}
}
}
### ----------------------------------------------------------------------------
### .init.valid.K.Model
### @description Initial validity check for the number of components K of
### a finite mixture model.
### @par obj a model object
### @return An error if the number of components are not a positive
### integer
### @see ?model, ?vignette('finmix')
### @author Lars Simon Zehnder
### ----------------------------------------------------------------------------
".init.valid.K.Model" <- function( obj )
{
if ( obj@K < 1 ) {
stop( paste( "Wrong specification of slot 'K' of ",
"'model' object. Number of components ",
"must be a positive integer.", sep = "" ),
call. = FALSE )
} else {
if ( !all( is.na( obj@weight ) ) ) {
if ( obj@K != ncol( obj@weight ) ) {
stop( paste( "Dimension of slot 'weight' in ",
"'model' object does not match ",
"number of components in slot 'K'.",
sep = "" ),
call. = FALSE )
}
}
.init.valid.par.Model( obj )
}
}
### ----------------------------------------------------------------------------
### .valid.K.Model
### @description Validity check for the number of components K of
### a finite mixture model.
### @par obj a model object
### @return An error if the number of components are not a positive
### integer and a warning if the number of components do not
### match the dimension of the weights.
### @see ?model, ?vignette('finmix')
### @author Lars Simon Zehnder
### ----------------------------------------------------------------------------
".valid.K.Model" <- function( obj )
{
if ( obj@K < 1 ) {
stop( paste( "Wrong specification of slot 'K' of ",
"'model' object. Number of components ",
"must be a positive integer.", sep = "" ),
call. = FALSE )
} else {
if ( !all( is.na( obj@weight ) ) ) {
if ( obj@K != ncol( obj@weight ) ) {
warning( paste( "Dimension of slot 'weight' in ",
"'model' object does not match ",
"number of components in slot 'K'.",
sep = "" ),
call. = FALSE )
}
}
.valid.par.Model( obj )
}
}
### ----------------------------------------------------------------------------
### .init.valid.r.Model
### @description Initial validity check for variable dimension r.
### @par obj a model object
### @return An error in case the variable dimension r is not a positive
### integer or the dimension does not fit the distribution model.
### @see ?model, ?vignette('finmix')
### @author Lars Simon Zehnder
### ----------------------------------------------------------------------------
".init.valid.r.Model" <- function( obj )
{
univ <- .get.univ.Model()
multiv <- .get.multiv.Model()
if ( obj@r < 1 ) {
stop( paste( "Wrong specification of slot 'r' ",
"in 'model' object. Dimension of ",
"variables must be a positive integer.",
sep ="" ),
call. = FALSE )
} else {
if ( ( obj@dist %in% univ ) && obj@r > 1 ) {
stop( paste( "Wrong specification of slot 'r' ",
"in 'model' object. Univariate ",
"distributions can only have one ",
"dimension.", sep = "" ),
call. = FALSE )
} else if ( ( obj@dist %in% multiv ) && obj@r < 2 ) {
stop( paste( "Wrong specification of slot 'r' ",
"in 'model' object. Multivariate ",
"distributions must have dimension ",
"greater one.", sep ="" ),
call. = FALSE )
}
}
}
### ----------------------------------------------------------------------------
### .init.valid.r.Model
### @description Initial validity check for variable dimension r.
### @par obj a model object
### @return An error in case the variable dimension r is not a positive
### integer or a warning if the dimension does not fit the
### distribution model.
### @see ?model, ?vignette('finmix')
### @author Lars Simon Zehnder
### ----------------------------------------------------------------------------
".valid.r.Model" <- function( obj )
{
univ <- .get.univ.Model()
multiv <- .get.multiv.Model()
if ( obj@r < 1 ) {
stop(paste( "Wrong specification of slot 'r' ",
"in 'model' object. Dimension of ",
"variables must be positive.",
sep ="" ),
call. = FALSE )
} else {
if ( ( obj@dist %in% univ ) && obj@r > 1 ) {
stop( paste( "Wrong specification of slot 'r' ",
"in 'model' object. Univariate ",
"distributions can only have one ",
"dimension.", sep = "" ),
call. = FALSE )
} else if ( ( obj@dist %in% multiv ) && obj@r < 2 ) {
stop( paste( "Wrong specification of slot 'r' ",
"in 'model' object. Multivariate ",
"distributions must have dimension ",
"greater one.", sep ="" ),
call. = FALSE )
}
}
}
### ----------------------------------------------------------------------------
### .init.valid.weight.Model
### @description Initial validity check for the weights of a finite mixture
### model.
### @par obj a model object
### @return An error if the dimension of the weight vector does not fit
### the model or if the weights do not sum to 1, are negative or
### larger than one.
### @see ?model, ?vignette('finmix')
### @author Lars Simon Zehnder
### ----------------------------------------------------------------------------
".init.valid.weight.Model" <- function( obj )
{
if ( !all( is.na( obj@weight ) ) ) {
if ( nrow( obj@weight ) > 1 ) {
stop( paste( "Wrong dimension of slot 'weight' in ",
"'model' object. Dimension of slot ",
"'weight' must be 1 x K.", sep = "" ),
call. = FALSE )
} else {
if ( ncol( obj@weight ) != obj@K ) {
stop( paste( "Wrong number of weights in slot 'weight' of ",
"'model' object. Number of weights does not ",
"match number of components in slot 'K'.", sep = "" ),
call. = FALSE )
} else {
if ( is.integer( obj@weight ) ) {
stop( paste( "Wrong specification of slot 'weight' of ",
"'model' object. Weights must be of type ",
"'numeric'.", sep = "" ),
call. = FALSE )
}
if ( !is.numeric( obj@weight ) ) {
stop( paste( "Wrong specification of slot 'weight' of ",
"'model' object. Weights must be of type ",
"'numeric'.", sep = "" ),
call. = FALSE )
}
if ( any( obj@weight <= 0 ) || any( obj@weight >= 1 ) ) {
stop( paste( "Weights in slot 'weight' of 'model' ",
"object must be positive.", sep = "" ),
call. = FALSE )
} else {
if ( round( sum( obj@weight ) ) != 1 ) {
stop( paste( "Weights in slot 'weight' of 'model' ",
"object must sum to one.", sep = "" ),
call. = FALSE )
}
}
}
}
}
}
### ------------------------------------------------------------------------------------
### .valid.weight.Model
### @description Validity check for the weights of a finite mixture model.
### @par obj a model object
### @return An error if the weights are not of type 'numeric' and a warning
### if the weigths do not conform to the number of components K,
### do not sum to one or are not values between 0 and 1.
### @see ?model, ?vignette('finmix')
### @author Lars Simon Zehnder
### -------------------------------------------------------------------------------------
".valid.weight.Model" <- function( obj )
{
if ( !all( is.na( obj@weight ) ) ) {
if ( nrow( obj@weight ) > 1 ) {
warning( paste( "Wrong dimension of slot 'weight' in ",
"'model' object. Dimension of slot ",
"'weight' must be 1 x K.", sep = "" ),
call. = FALSE )
} else {
if ( ncol( obj@weight ) != obj@K ) {
warning( paste( "Wrong number of weights in slot 'weight' of ",
"'model' object. Number of weights does not ",
"match number of components in slot 'K'.", sep = "" ),
call. = FALSE )
} else {
if ( is.integer(obj@weight ) ) {
stop( paste( "Wrong specification of slot 'weight' of ",
"'model' object. Weights must be of type ",
"'numeric'.", sep = "" ),
call. = FALSE )
}
if ( !is.numeric( obj@weight ) ) {
stop( paste( "Wrong specification of slot 'weight' of ",
"'model' object. Weights must be of type ",
"'numeric'.", sep = "" ),
call. = FALSE )
}
if ( any( obj@weight <= 0 ) || any( obj@weight >= 1 ) ) {
warning( paste( "Weights in slot 'weight' of 'model' ",
"object must be positive.", sep = "" ),
call. = FALSE )
} else {
if ( round( sum( obj@weight ) ) != 1 ) {
warning( paste( "Weights in slot 'weight' of 'model' ",
"object must sum to one.", sep = "" ),
call. = FALSE )
}
}
}
}
}
}
### -------------------------------------------------------------------------------------
### .init.valid.T.Model
### @description Initial validity check for the repetitions of a Binomial mixture.
### @par obj a model object
### @return An error in case the reptitions are not of type integer, have
### the wrong dimension, or non-positive values.
### @see ?model, ?vignette('finmix')
### --------------------------------------------------------------------------------------
".init.valid.T.Model" <- function( obj )
{
if ( !all( is.na( obj@T ) ) ) {
if ( !is.integer( obj@T ) ) {
stop( paste( "Wrong type of slot 'T' in 'model' object ",
"Repetitions must be of type 'integer'.",
sep = "" ),
call. = FALSE )
}
if ( nrow( obj@T ) > 1 && ncol( obj@T ) > 1 ) {
stop( paste( "Wrong dimension of slot 'T' in 'model' ",
"object. Repetitions can only be ",
"one-dimensional", sep = "" ),
call. = FALSE )
}
if ( any( obj@T < 1 ) ) {
stop( paste( "Wrong specification of slot 'T' in 'model' ",
"object. Repetitions must be positive integers ",
"or NA.", sep = "" ),
call. = FALSE)
}
}
}
### -------------------------------------------------------------------------------------
### .valid.T.Model
### @description Validity check for the repetitions of a Binomial mixture.
### @par obj a model object
### @return An error in case the reptitions are not of type integer, have
### the wrong dimension, or non-positive values.
### @see ?model, ?vignette('finmix')
### --------------------------------------------------------------------------------------
".valid.T.Model" <- function( obj )
{
if ( !all( is.na( obj@T ) ) ) {
if ( !is.integer( obj@T ) ) {
stop( paste( "Wrong type of slot 'T' in 'model' object ",
"Repetitions must be of type 'integer'.",
sep = "" ),
call. = FALSE )
}
if ( nrow( obj@T ) > 1 && ncol( obj@T ) > 1 ) {
stop( paste( "Wrong dimension of slot 'T' in 'model' ",
"object. Repetitions can only be ",
"one-dimensional", sep = "" ),
call. = FALSE )
}
if ( any( obj@T < 1 ) ) {
stop( paste( "Wrong specification of slot 'T' in 'model' ",
"object. Repetitions must be positive integers ",
"or NA.", sep = "" ),
call. = FALSE)
}
}
}
### -------------------------------------------------------------------------------
### .init.valid.par.Model
### @description Initial validity check of model parameters
### @par obj a model object
### @return An error if parameters fail certain conditions
### @detail This validity check is called in the S4 constructor
### 'model()' and ensures that the user constructs an inherently
### consistent model object.
### @see ?model, ?vignette('finmix')
### @author Lars Simon Zehnder
### --------------------------------------------------------------------------------
".init.valid.par.Model" <- function( obj )
{
dist <- obj@dist
if ( length( obj@par ) > 0) {
if ( dist %in% c( "poisson", "cond.poisson" ) ) {
.init.valid.Poisson.Model( obj )
} else if ( dist == "binomial" ) {
.init.valid.Binomial.Model( obj )
} else if ( dist == "normal" ) {
.init.valid.Normal.Model( obj )
} else if ( dist == "normult" ) {
.init.valid.Normult.Model( obj )
} else if ( dist == "student" ) {
.init.valid.Student.Model( obj )
} else if ( dist == "studmult" ) {
.init.valid.Studmult.Model( obj )
}
}
}
### -------------------------------------------------------------------------------
### .valid.par.Model
### @description Validity check of model parameters
### @par obj a model object
### @return An error if parameters fail certain necessary conditions and
### a warning if parameters fail consistency.
### @detail This validity check is called in the setters to ensure that
### slots can be changed without errors but help the user to
### end up with an inherently consistent model object.
### @see ?model, ?vignette('finmix')
### @author Lars Simon Zehnder
### --------------------------------------------------------------------------------
".valid.par.Model" <- function(obj)
{
dist <- obj@dist
if ( length( obj@par ) > 0 ) {
if ( dist %in% c( "poisson", "cond.poisson" ) ) {
.valid.Poisson.Model( obj )
} else if ( dist == "binomial" ) {
.valid.Binomial.Model( obj )
} else if ( dist == "exponential" ) {
.valid.Exponential.Model( obj )
} else if ( dist == "normal" ) {
.valid.Normal.Model( obj )
} else if ( dist == "normult" ) {
.valid.Normult.Model( obj )
} else if ( dist == "student" ) {
.valid.Student.Model( obj )
} else if ( dist == "studmult" ) {
.valid.Studmult.Model( obj )
}
}
}
### -----------------------------------------------------------------------------
### .init.valid.Poisson.Model
### @description Initial validity check for parameters of a Poisson mixture.
### @par obj a model object
### @return An error if parameters fail certain conditions.
### @detail This initial validity check is called in the S4 constructor
### 'model()' and ensures that the user constructs an inherently
### consistent model object.
### The parameter list must contain an element 'lambda' that is
### an 1 x K array, vector or matrix with numeric or integer values
### all positive.
### @see ?model
### @author Lars Simon Zehnder
### -------------------------------------------------------------------------------
".init.valid.Poisson.Model" <- function( obj )
{
if ( length( obj@par ) > 0 ) {
if ( "lambda" %in% names( obj@par ) ) {
if ( !is.array( obj@par$lambda ) && !is.vector( obj@par$lambda ) &&
!is.matrix( obj@par$lambda ) ) {
stop( paste( "Wrong specification of slot @par: ",
"Poisson parameters must be either an ",
"array, a vector or a matrix of dimension ",
"1 x K.", sep = "" ),
call. = FALSE )
}
obj@par$lambda <- as.vector( obj@par$lambda )
if ( !is.numeric( obj@par$lambda ) && !is.integer( obj@par$lambda ) ) {
stop( paste( "Wrong specification in slot 'par' of 'model' object. ",
"Parameters must be of type 'numeric' or 'integer'.",
sep = "" ),
call. = FALSE )
}
if ( length( obj@par$lambda ) != obj@K ) {
warning( paste( "Wrong specification of slot @par: ",
"lambda must be either an array, a vector ",
"or a matrix of dimension 1 x K.", sep = "" ),
call. = FALSE )
} else {
if ( any( obj@par$lambda <= 0 ) ) {
stop( paste( "Wrong specification of slot @par: ",
"Poisson parameters ",
"must be positive.", sep = "" ),
call. = FALSE )
}
}
} else {
warning( paste( "Wrong specification of slot 'par' in 'model' object. ",
"Poisson parameters must be named 'lambda'.", sep = ""),
call. = FALSE )
}
}
}
### -----------------------------------------------------------------------------
### .valid.Poisson.Model
### @description Validity check for parameters of a Poisson mixture.
### @par obj a model object
### @return An error if parameters do fail certain necessary conditions.
### A warning if parameters do fail consistency.
### @detail This validity check is called in the setters to ensure that
### slots can be changed without errors but help the user to
### get a inherently consistent model object.
### The parameter list must contain an element 'lambda' that is
### an 1 x K array, vector or matrix with numeric or integer values
### all positive.
### @see ?model
### @author Lars Simon Zehnder
### -----------------------------------------------------------------------------
".valid.Poisson.Model" <- function( obj )
{
if ( length( par) > 0 ) {
if ( "lambda" %in% names( obj@par ) ) {
if ( !is.array( obj@par$lambda ) && !is.vector( obj@par$lambda ) &&
!is.matrix( obj@par$lambda ) ) {
stop( paste( "Wrong specification of slot @par: ",
"Poisson parameters must be either an ",
"array, a vector or a matrix of dimension ",
"1 x K.", sep = "" ),
call. = FALSE )
}
obj@par$lambda <- as.vector( obj@par$lambda )
if ( !is.numeric( obj@par$lambda ) && !is.integer( obj@par$lambda ) ) {
stop( paste( "Wrong specification in slot 'par' of 'model' object. ",
"Parameters must be of type 'numeric' or 'integer'.",
sep = "" ),
call. = FALSE )
}
if ( length( obj@par$lambda ) != obj@K ) {
warning( paste( "Wrong specification of slot @par: ",
"lambda must be either an array, a vector ",
"or a matrix of dimension 1 x K.", sep = "" ),
call. = FALSE )
} else {
if ( any( obj@par$lambda <= 0 ) ) {
stop( paste( "Wrong specification of slot @par: ",
"Poisson parameters ",
"must be positive.", sep = "" ),
call. = FALSE )
}
}
} else {
stop( paste( "Wrong specification of slot 'par' in 'model' object. ",
"Poisson parameters must be named 'lambda'.", sep = "" ),
call. = FALSE )
}
}
}
### ------------------------------------------------------------------------------
### .init.valid.Binomial.Model
### @description Initial validity check for parameters of a Binomial mixture.
### @par obj a model object
### @return An error if parameters fail certain conditions
### @detail This initial validity check is called in the S4 constructor
### 'model()' and ensures that the user constructs an inherently
### consistent model object.
### The parameter list must contain an 1 x K array, vector, or
### matrix with probabilities, all between 0 and 1.
### @see ?model, ?vignette('finmix')
### @author Lars Simon Zehnder
### ------------------------------------------------------------------------------
".init.valid.Binomial.Model" <- function(model.obj)
{
if ( length( obj@par ) ) {
if ( !"p" %in% names( obj@par ) ) {
stop( paste( "Wrong specification of slot @par: ",
"Binomial mixtures need a ",
"probability vector named 'p'.", sep = "" ),
call. = FALSE )
} else if ( !is.array( obj@par$p ) && !is.vector( obj@par$p ) &&
!is.matrix( obj@par$p ) ) {
stop( paste( "Wrong specification of slot @par: ",
"p must be either an array, a vector ",
"or a matrix of dimension 1 x K", sep = "" ),
call. = FALSE )
} else if ( !all( is.numeric( obj@par$p ) || is.integer( obj@par$p ) ) ) {
stop( paste( "Wrong specification of slot @par: ",
"parameters must be either of type ,",
"'numeric' or 'integer'.", sep = "" ),
call. = FALSE )
} else if ( length( obj@par$p ) != obj@K ) {
stop( paste( "Wrong specification of slot @par: ",
"p must be an array, a vector ",
"or a matrix of dimension 1 x K", sep = "" ),
call. = FALSE )
} else if ( !all( obj@par$p > 0 && obj@par$p < 1 ) ) {
stop( paste( "Wrong specification of slot @par: ",
"Binomial parameters must be all ",
"between 0 and 1.", sep = "" ),
call. = FALSE )
}
}
if (dim(model.obj@T)[1] > 1 && dim(model.obj@T)[2] > 1) {
stop(paste("Dimensions of repetitions 'T' for binomial mixture",
"model do not match conditions. Only one-dimensional",
"repetitions can be used in a binomial mixture model."), sep ="")
}
}
### ------------------------------------------------------------------------------
### .valid.Binomial.Model
### @description Validity check for parameters of a Binomial mixture.
### @par obj a model object
### @return An error if parameters fail certain necessary conditions and
### a warning if parameters fail consistency
### @detail This validity check is called in the setters to ensure that
###                 slots can be changed without errors but help the user to
### end up with an inherently consistent model object.
### The parameter list must contain an 1 x K array, vector, or
### matrix with probabilities, all between 0 and 1.
### @see ?model, ?vignette('finmix')
### @author Lars Simon Zehnder
### ------------------------------------------------------------------------------
".valid.Binomial.Model" <- function(model.obj)
{
if ( length( obj@par ) ) {
if ( !"p" %in% names( obj@par ) ) {
warning( paste( "Wrong specification of slot @par: ",
"Binomial mixtures need a ",
"probability vector named 'p'.", sep = "" ),
call. = FALSE )
} else if ( !is.array( obj@par$p ) && !is.vector( obj@par$p ) &&
!is.matrix( obj@par$p ) ) {
stop( paste( "Wrong specification of slot @par: ",
"p must be either an array, a vector ",
"or a matrix of dimension 1 x K", sep = "" ),
call. = FALSE )
} else if ( !all( is.numeric( obj@par$p ) || is.integer( obj@par$p ) ) ) {
stop( paste( "Wrong specification of slot @par: ",
"parameters must be either of type ,",
"'numeric' or 'integer'.", sep = "" ),
call. = FALSE )
} else if ( length( obj@par$p ) != obj@K ) {
warning( paste( "Wrong specification of slot @par: ",
"p must be an array, a vector ",
"or a matrix of dimension 1 x K", sep = "" ),
call. = FALSE )
} else if ( !all( obj@par$p > 0 && obj@par$p < 1 ) ) {
stop( paste( "Wrong specification of slot @par: ",
"Binomial parameters must be all ",
"between 0 and 1.", sep = "" ),
call. = FALSE )
}
}
if (dim(model.obj@T)[1] > 1 && dim(model.obj@T)[2] > 1) {
stop(paste("Dimensions of repetitions 'T' for binomial mixture",
"model do not match conditions. Only one-dimensional",
"repetitions can be used in a binomial mixture model."), sep ="")
}
}
### -----------------------------------------------------------------------------
### .init.valid.Exponential.Model
### @description Initial validity check for parameters of a Exponential
### mixture.
### @par obj a model object
### @return An error if parameters fail certain conditions.
### @detail This initial validity check is called in the S4 constructor
### 'model()' and ensures that the user constructs an inherently
### consistent model object.
### The parameter list must contain an element 'lambda' that is
### an 1 x K array, vector or matrix with numeric or integer values
### all positive.
### @see ?model
### @author Lars Simon Zehnder
### -------------------------------------------------------------------------------
".init.valid.Exponential.Model" <- function( obj )
{
if ( length( obj@par ) > 0 ) {
if ( "lambda" %in% names( obj@par ) ) {
if ( !is.array( obj@par$lambda ) && !is.vector( obj@par$lambda ) &&
!is.matrix( obj@par$lambda ) ) {
stop( paste( "Wrong specification of slot @par: ",
"Exponential parameters must be either an ",
"array, a vector or a matrix of dimension ",
"1 x K.", sep = "" ),
call. = FALSE )
}
obj@par$lambda <- as.vector( obj@par$lambda )
if ( !is.numeric( obj@par$lambda ) && !is.integer( obj@par$lambda ) ) {
stop( paste( "Wrong specification in slot 'par' of 'model' object. ",
"Parameters must be of type 'numeric' or 'integer'.",
sep = "" ),
call. = FALSE )
}
if ( length( obj@par$lambda ) != obj@K ) {
warning( paste( "Wrong specification of slot @par: ",
"lambda must be either an array, a vector ",
"or a matrix of dimension 1 x K.", sep = "" ),
call. = FALSE )
} else {
if ( any( obj@par$lambda <= 0 ) ) {
stop( paste( "Wrong specification of slot @par: ",
"Exponential parameters ",
"must be positive.", sep = "" ),
call. = FALSE )
}
}
} else {
warning( paste( "Wrong specification of slot 'par' in 'model' object. ",
"Exponential parameters must be named 'lambda'.", sep = ""),
call. = FALSE )
}
}
}
### -----------------------------------------------------------------------------
### .valid.Exponential.Model
### @description Validity check for parameters of a Exponential mixture.
### @par obj a model object
### @return An error if parameters do fail certain necessary conditions.
### A warning if parameters do fail consistency.
### @detail This validity check is called in the setters to ensure that
### slots can be changed without errors but help the user to
### get a inherently consistent model object.
### The parameter list must contain an element 'lambda' that is
### an 1 x K array, vector or matrix with numeric or integer values
### all positive.
### @see ?model
### @author Lars Simon Zehnder
### -----------------------------------------------------------------------------
".valid.Exponential.Model"<- function( obj )
{
if ( length( par) > 0 ) {
if ( "lambda" %in% names( obj@par ) ) {
if ( !is.array( obj@par$lambda ) && !is.vector( obj@par$lambda ) &&
!is.matrix( obj@par$lambda ) ) {
stop( paste( "Wrong specification of slot @par: ",
"Exponential parameters must be either an ",
"array, a vector or a matrix of dimension ",
"1 x K.", sep = "" ),
call. = FALSE )
}
obj@par$lambda <- as.vector( obj@par$lambda )
if ( !is.numeric( obj@par$lambda ) && !is.integer( obj@par$lambda ) ) {
stop( paste( "Wrong specification in slot 'par' of 'model' object. ",
"parameters must be of type 'numeric' or 'integer'.",
sep = "" ),
call. = FALSE )
}
if ( length( obj@par$lambda ) != obj@K ) {
warning( paste( "Wrong specification of slot @par: ",
"lambda must be either an array, a vector ",
"or a matrix of dimension 1 x K.", sep = "" ),
call. = FALSE )
} else {
if ( any( obj@par$lambda <= 0 ) ) {
stop( paste( "Wrong specification of slot @par: ",
"Exponential parameters ",
"must be positive.", sep = "" ),
call. = FALSE )
}
}
} else {
stop( paste( "Wrong specification of slot 'par' in 'model' object. ",
"Exponential parameters must be named 'lambda'.", sep = "" ),
call. = FALSE )
}
}
}
### ------------------------------------------------------------------------------
### .init.valid.Normal.Model
### @description Initial validity check for parameters of a univariate
### Normal mixture.
### @par obj a model object
### @return An error if parameters fail certain conditions
### @detail This initial validity check is called in the S4 constructor
### 'model()' and ensures that the user constructs an inherently
### consistent model object.
### The parameter list must contain the following elements:
### mu: an 1 x K array, vector or matrix containing
### 'numeric' or 'integer' values
### sigma: an 1 x K array, vector or matrix containing
### 'numeric' or 'integer' values, all positive
### df: an 1 x K array, vector or matrix containing
### 'numeric' or 'integer' values, all positive
### @see ?model, ?vignette('finmix')
### @author Lars Simon Zehnder
### -------------------------------------------------------------------------------
".init.valid.Normal.Model" <- function( obj )
{
if ( length( obj@par ) > 0 ) {
if ( !"mu" %in% names( obj@par ) ) {
stop( paste( "Wrong specification of slot @par: ",
"univariate Normal mixtures need ",
"a mean matrix named 'mu'.", sep = "" ),
call. = FALSE )
} else if ( !is.array( obj@par$mu ) && !is.vector( obj@par$mu ) &&
!is.matrix( obj@par$mu ) ) {
stop( paste( "Wrong specification of slot @par: ",
"mu must be either an array, a vector ",
"or a matrix of dimension 1 x K. ", sep = "" ),
call. = FALSE )
} else if ( !all( is.numeric( as.vector( obj@par$mu ) ) ||
is.integer( as.vector( obj@par$mu ) ) ) ) {
stop( paste( "Wrong specification of slot @par: ",
"Parameters must be of type 'numeric' or ",
"'integer'.", sep = ""),
call. = FALSE )
} else if ( length( obj@par$mu ) != obj@K ) {
stop( paste( "Wrong specification of slot @par: ",
"mu must be a matrix of dimension 1 x K ",
"or a vector of size K.", sep = "" ),
call. = FALSE )
}
if ( !"sigma" %in% names( obj@par ) ) {
stop( paste( "Wrong specification of slot @par: ",
"univariate Normal mictures need ",
"a variance vector named ",
"'sigma'", sep = "" ),
call. = FALSE )
} else if ( !all( is.numeric( as.vector( obj@par$sigma ) ) ||
is.integer( as.vector( obj@par$sigma ) ) ) ) {
stop( paste( "Wrong specification of slot @par: ",
"Parameters must be of type 'numeric' or ",
"'integer'.", sep = ""),
call. = FALSE )
} else if ( any( obj@par$sigma <= 0 ) ) {
stop( paste( "Wrong specification of slot @par: ",
"sigma must contain variances, all ",
"positive.", sep = "" ),
.call = FALSE )
} else if ( !is.array( obj@par$sigma ) && !is.vector( obj@par$sigma ) &&
!is.matrix( obj@par$sigma ) ) {
stop( paste( "Wrong specification of slot @par: ",
"sigma must be either an array, a vector, ",
"or a matrix of dimension 1 x K.", sep = "" ),
call. = FALSE )
} else if ( length( obj@par$sigma ) != obj@K ) {
stop( paste( "Wrong specification of slot @par: ",
"sigma must be either an array, a vector, ",
"or a matrix, ",
"or a matrix of dimension ",
"1 x K.", sep = "" ),
call. = FALSE )
}
}
}
### ------------------------------------------------------------------------------
### .valid.Normal.Model
### @description Validity check for parameters of a univariate Normal
### mixture.
### @par obj a model object
### @return An error if parameters fail certain necessary conditions and
### a warning if parameters fail consistency.
### @detail This validity check is called in the setters to ensure that
### slots can be changed without errors but help the user to
### end up with an inherently consistent model object.
### The parameter list must contain the following elements:
### mu: an 1 x K array, vector or matrix containing
### 'numeric' or 'integer' values
### sigma: an 1 x K array, vector or matrix containing
### 'numeric' or 'integer' values, all positive
### @see ?model, ?vignette('finmix')
### @author Lars Simon Zehnder
### -------------------------------------------------------------------------------
".valid.Normal.Model" <- function( obj )
{
if ( length( obj@par ) > 0 ) {
if ( !"mu" %in% names( obj@par ) ) {
warning( paste( "Wrong specification of slot @par: ",
"univariate Normal mixtures need ",
"a mean matrix named 'mu'.", sep = "" ),
call. = FALSE )
} else if ( !is.array( obj@par$mu ) && !is.vector( obj@par$mu ) &&
!is.matrix( obj@par$mu ) ) {
warning( paste( "Wrong specification of slot @par: ",
"mu must be either an array, a vector ",
"or a matrix of dimension 1 x K. ", sep = "" ),
call. = FALSE )
} else if ( !all( is.numeric( as.vector( obj@par$mu ) ) ||
is.integer( as.vector( obj@par$mu ) ) ) ) {
stop( paste( "Wrong specification of slot @par: ",
"Parameters must be of type 'numeric' or ",
"'integer'.", sep = ""),
call. = FALSE )
} else if ( length( obj@par$mu ) != obj@K ) {
warning( paste( "Wrong specification of slot @par: ",
"mu must be a matrix of dimension 1 x K ",
"or a vector of size K.", sep = "" ),
call. = FALSE )
}
if ( !"sigma" %in% names( obj@par ) ) {
warning( paste( "Wrong specification of slot @par: ",
"univariate Normal mictures need ",
"a variance vector named ",
"'sigma'", sep = "" ),
call. = FALSE )
} else if ( !all( is.numeric( as.vector( obj@par$sigma ) ) ||
is.integer( as.vector( obj@par$sigma ) ) ) ) {
stop( paste( "Wrong specification of slot @par: ",
"Parameters must be of type 'numeric' or ",
"'integer'.", sep = ""),
call. = FALSE )
} else if ( any( obj@par$sigma <= 0 ) ) {
stop( paste( "Wrong specification of slot @par: ",
"sigma must contain variances, all ",
"positive.", sep = "" ),
.call = FALSE )
} else if ( !is.array( obj@par$sigma ) && !is.matrix( obj@par$sigma ) &&
!is.matrix( obj@par$sigma ) ) {
warning( paste( "Wrong specification of slot @par: ",
"sigma must be either an array, a vector, ",
"or a matrix of dimension 1 x K.", sep = "" ),
call. = FALSE )
} else if ( length( obj@par$sigma ) != obj@K ) {
warning( paste( "Wrong specification of slot @par: ",
"sigma must be either an array, a vector, ",
"or a matrix, ",
"or a matrix of dimension ",
"1 x K.", sep = "" ),
call. = FALSE )
}
}
}
### ----------------------------------------------------------------------------
### .init.valid.Normult.Model
### @description Initial validity check for parameters of a multivariate
### Normal mixture.
### @par obj a model object
### @return An error if parameters fail certain conditions
### @detail This initial validity check is called in the S4 constructor
### 'model()' and ensures that the user constructs an inherently
### consistent model object.
### The parameter list must contain the foillowing elements:
### mu: an r x K matrix containing 'numeric' or
### 'integer' values
### sigma: am r x r x K array containing 'numeric' or
### 'integer' matrices, all symmetric/positive
### definite
### @see ?model, ?vignette('finmix')
### @author Lars Simon Zehnder
### ----------------------------------------------------------------------------
".init.valid.Normult.Model" <- function( obj )
{
if ( length( obj@par ) > 0 ) {
if ( !"mu" %in% names( obj@par ) ) {
stop( paste( "Wrong specification of slot @par: ",
"multivariate Normal mixtures need ",
"a mean matrix named 'mu'.", sep = "" ),
call. = FALSE )
} else if ( !is.matrix( obj@par$mu ) ) {
stop( paste( "Wrong specification of slot @par: ",
"mu is not a matrix. ", sep = "" ),
call. = FALSE )
} else if ( !all( is.numeric( obj@par$mu ) || is.numeric( obj@par$mu ) ) ) {
stop( paste( "Wrong specification of slot @par: ",
"parameters must be of type 'numeric ",
"or 'integer'.", sep = "" ),
call. = FALSE )
} else if ( !identical( dim( obj@par$mu ), c( obj@r, obj@K ) ) ) {
stop( paste( "Wrong specification of slot @par: ",
"mu must be a matrix of dimension r x K.", sep = "" ),
call. = FALSE )
}
if ( !"sigma" %in% names( obj@par ) ) {
stop( paste( "Wrong specification of slot @par: ",
"multivariate Normal mixtures need ",
"a variance-covariance array named ",
"'sigma'", sep = "" ),
call. = FALSE )
} else if ( !( is.numeric( obj@par$sigma ) || is.integer( obj@par$mu ) ) ) {
stop( paste( "Wrong specification of slot @par: ",
"parameters must be of type 'numeric' ",
"or 'integer'.", sep = "" ),
call. = FALSE )
} else if ( !is.array( obj@par$sigma ) ) {
stop( paste( "Wrong specification of slot @par: ",
"sigma is not an array.", sep = "" ),
call. = FALSE )
} else if ( !all( apply( obj@par$sigma, 3, isSymmetric ) ) ) {
stop( paste( "Wrong specification of slot @par: ",
"sigma must contain K symmetric ",
"r x r matrices.", sep = "" ),
call. = FALSE )
} else if ( !all( apply( obj@par$sigma, 3, function( x ) { all( eigen( x )$values > 0 ) } ) ) ) {
stop( paste( "Wrong specification of slot @par: ",
"sigma must contain K positive definite ",
"r x r matrices.", sep = "" ),
call. = FALSE )
} else if ( !identical( dim( obj@par$sigma ), c( obj@r, obj@r, obj@K ) ) ) {
stop( paste( "Wrong specification of slot @par: ",
"sigma must be an array of dimension ",
"r x r x K.", sep = "" ),
call. = FALSE )
}
}
}
### ----------------------------------------------------------------------------
### .valid.Normult.Model
### @description   Validity check for parameters of a multivariate
### Normal mixture.
### @par obj a model object
### @return An error if parameters fail necessary conditions and
### a warning if parameters fail consistency
### @detail This validity check is called in the setters to ensure that
### slots can be changed without errors but help the user to
### end up with an inherently consistent model object.
### The parameter list must contain the foillowing elements:
### mu: an r x K matrix containing 'numeric' or
### 'integer' values
### sigma: am r x r x K array containing 'numeric' or
### 'integer' matrices, all symmetric/positive
### definite
### @see ?model, ?vignette('finmix')
### @author Lars Simon Zehnder
### ----------------------------------------------------------------------------
".valid.Normult.Model" <- function( obj )
{
    ## Setter-time consistency check for multivariate Normal mixture
    ## parameters: inconsistencies raise warnings (the half-edited
    ## object can still be repaired), wrong parameter types raise
    ## errors.
    ## Expected in obj@par:
    ##   mu:    r x K matrix of component means ('numeric'/'integer')
    ##   sigma: r x r x K array of symmetric, positive definite
    ##          variance-covariance matrices
    if ( length( obj@par ) > 0 ) {
        ## --- check the mean matrix 'mu' ---
        if ( !"mu" %in% names( obj@par ) ) {
            warning( paste( "Wrong specification of slot @par: ",
                            "multivariate Normal mixtures need ",
                            "a mean matrix named 'mu'.", sep = "" ),
                     call. = FALSE )
        } else if ( !is.matrix( obj@par$mu ) ) {
            warning( paste( "Wrong specification of slot @par: ",
                            "mu is not a matrix. ", sep = "" ),
                     call. = FALSE )
        } else if ( !( is.numeric( obj@par$mu ) || is.integer( obj@par$mu ) ) ) {
            ## BUGFIX: the second disjunct used to re-test is.numeric()
            ## instead of is.integer().
            stop( paste( "Wrong specification of slot @par: ",
                         "parameters must be of type 'numeric ",
                         "or 'integer'.", sep = "" ),
                  call. = FALSE )
        } else if ( !identical( dim( obj@par$mu ), c( obj@r, obj@K ) ) ) {
            warning( paste( "Wrong specification of slot @par: ",
                            "mu must be a matrix of dimension r x K.", sep = "" ),
                     call. = FALSE )
        }
        ## --- check the variance-covariance array 'sigma' ---
        if ( !"sigma" %in% names( obj@par ) ) {
            warning( paste( "Wrong specification of slot @par: ",
                            "multivariate Normal mixtures need ",
                            "a variance-covariance array named ",
                            "'sigma'", sep = "" ),
                     call. = FALSE )
        } else if ( !( is.numeric( obj@par$sigma ) || is.integer( obj@par$sigma ) ) ) {
            ## BUGFIX: the second disjunct used to test obj@par$mu.
            stop( paste( "Wrong specification of slot @par: ",
                         "parameters must be of type 'numeric' ",
                         "or 'integer'.", sep = "" ),
                  call. = FALSE )
        } else if ( !is.array( obj@par$sigma ) ) {
            warning( paste( "Wrong specification of slot @par: ",
                            "sigma is not an array.", sep = "" ),
                     call. = FALSE )
        } else if ( !all( apply( obj@par$sigma, 3, isSymmetric ) ) ) {
            warning( paste( "Wrong specification of slot @par: ",
                            "sigma must contain K symmetric ",
                            "r x r matrices.", sep = "" ),
                     call. = FALSE )
        } else if ( !all( apply( obj@par$sigma, 3, function( x ) { all( eigen( x )$values > 0 ) } ) ) ) {
            ## positive definiteness via the eigenvalues of each slice
            warning( paste( "Wrong specification of slot @par: ",
                            "sigma must contain K positive definite ",
                            "r x r matrices.", sep = "" ),
                     call. = FALSE )
        } else if ( !identical( dim( obj@par$sigma ), c( obj@r, obj@r, obj@K ) ) ) {
            warning( paste( "Wrong specification of slot @par: ",
                            "sigma must be an array of dimension ",
                            "r x r x K.", sep = "" ),
                     call. = FALSE )
        }
    }
}
### ------------------------------------------------------------------------------
### .init.valid.Student.Model
### @description Initial validity check for parameters of a univariate
### Student-t mixture.
### @par obj a model object
### @return An error if parameters fail certain conditions
### @detail This initial validity check is called in the S4 constructor
### 'model()' and ensures that the user constructs an inherently
### consistent model object.
### The parameter list must contain the following elements:
### mu: an 1 x K array, vector or matrix containing
### 'numeric' or 'integer' values
### sigma: an 1 x K array, vector or matrix containing
### 'numeric' or 'integer' values, all positive
### df: an 1 x K array, vector or matrix containing
### 'numeric' or 'integer' values, all positive
### @see ?model, ?vignette('finmix')
### @author Lars Simon Zehnder
### -------------------------------------------------------------------------------
".init.valid.Student.Model" <- function( obj )
{
    ## Construction-time check for univariate Student-t mixture
    ## parameters: every violation stops, so model() never returns an
    ## inconsistent object. (Messages fixed: this validator previously
    ## named "Normal" mixtures and contained the typo "mictures".)
    ## Expected in obj@par (each of length K, as vector/array/matrix):
    ##   mu:    component means ('numeric'/'integer')
    ##   sigma: component variances, all positive
    ##   df:    degrees of freedom, all positive
    if ( length( obj@par ) > 0 ) {
        ## --- check the mean vector 'mu' ---
        if ( !"mu" %in% names( obj@par ) ) {
            stop( paste( "Wrong specification of slot @par: ",
                         "univariate Student-t mixtures need ",
                         "a mean matrix named 'mu'.", sep = "" ),
                  call. = FALSE )
        } else if ( !is.array( obj@par$mu ) && !is.vector( obj@par$mu ) &&
                    !is.matrix( obj@par$mu ) ) {
            stop( paste( "Wrong specification of slot @par: ",
                         "mu must be either an array, a vector ",
                         "or a matrix of dimension 1 x K. ", sep = "" ),
                  call. = FALSE )
        } else if ( !all( is.numeric( as.vector( obj@par$mu ) ) ||
                    is.integer( as.vector( obj@par$mu ) ) ) ) {
            stop( paste( "Wrong specification of slot @par: ",
                         "Parameters must be of type 'numeric' or ",
                         "'integer'.", sep = ""),
                  call. = FALSE )
        } else if ( length( obj@par$mu ) != obj@K ) {
            stop( paste( "Wrong specification of slot @par: ",
                         "mu must be a matrix of dimension 1 x K ",
                         "or a vector of size K.", sep = "" ),
                  call. = FALSE )
        }
        ## --- check the variance vector 'sigma' ---
        if ( !"sigma" %in% names( obj@par ) ) {
            ## BUGFIX: was a warning; all other construction-time
            ## checks stop.
            stop( paste( "Wrong specification of slot @par: ",
                         "univariate Student-t mixtures need ",
                         "a variance vector named ",
                         "'sigma'", sep = "" ),
                  call. = FALSE )
        } else if ( !all( is.numeric( as.vector( obj@par$sigma ) ) ||
                    is.integer( as.vector( obj@par$sigma ) ) ) ) {
            stop( paste( "Wrong specification of slot @par: ",
                         "Parameters must be of type 'numeric' or ",
                         "'integer'.", sep = ""),
                  call. = FALSE )
        } else if ( any( obj@par$sigma <= 0 ) ) {
            ## BUGFIX: the argument was misspelled '.call'.
            stop( paste( "Wrong specification of slot @par: ",
                         "sigma must contain variances, all ",
                         "positive.", sep = "" ),
                  call. = FALSE )
        } else if ( !is.array( obj@par$sigma ) && !is.vector( obj@par$sigma ) &&
                    !is.matrix( obj@par$sigma ) ) {
            ## BUGFIX: two of the three tests lacked negation, so this
            ## shape check could never behave as intended.
            stop( paste( "Wrong specification of slot @par: ",
                         "sigma must be either an array, a vector, ",
                         "or a matrix of dimension 1 x K.", sep = "" ),
                  call. = FALSE )
        } else if ( length( obj@par$sigma ) != obj@K ) {
            stop( paste( "Wrong specification of slot @par: ",
                         "sigma must be either an array, a vector, ",
                         "or a matrix of dimension ",
                         "1 x K.", sep = "" ),
                  call. = FALSE )
        }
        ## --- check the degrees of freedom 'df' ---
        if ( !"df" %in% names( obj@par ) ) {
            stop( paste( "Wrong specification of slot @par: ",
                         "Student-t mixtures need a degree of ",
                         "freedom vector.", sep = "" ),
                  call. = FALSE )
        } else if ( !all( is.numeric( as.vector( obj@par$df ) ) ||
                    is.integer( as.vector( obj@par$df ) ) ) ) {
            stop( paste( "Wrong specification of slot @par: ",
                         "Parameters must be of type 'numeric' or ",
                         "'integer'.", sep = ""),
                  call. = FALSE )
        } else if ( any( obj@par$df <= 0 ) ) {
            stop( paste( "Wrong specification of slot @par: ",
                         "Degrees of freedom must be all positive.", sep = "" ),
                  call. = FALSE )
        } else if ( length( obj@par$df ) != obj@K ) {
            stop( paste( "Wrong specification of slot @par: ",
                         "df must be a vector or matrix of ",
                         "dimension 1 x K", sep = "" ),
                  call. = FALSE )
        }
    }
}
### ------------------------------------------------------------------------------
### .valid.Student.Model
### @description Validity check for parameters of a univariate Student-t
### mixture.
### @par obj a model object
### @return An error if parameters fail certain necessary conditions and
### a warning if parameters fail consistency.
### @detail This validity check is called in the setters to ensure that
### slots can be changed without errors but help the user to
### end up with an inherently consistent model object.
### The parameter list must contain the following elements:
### mu: an 1 x K array, vector or matrix containing
### 'numeric' or 'integer' values
### sigma: an 1 x K array, vector or matrix containing
### 'numeric' or 'integer' values, all positive
### df: an 1 x K array, vector or matrix containing
### 'numeric' or 'integer' values, all positive
### @see ?model, ?vignette('finmix')
### @author Lars Simon Zehnder
### -------------------------------------------------------------------------------
".valid.Student.Model" <- function( obj )
{
    ## Setter-time consistency check for univariate Student-t mixture
    ## parameters: inconsistencies raise warnings, wrong types raise
    ## errors. (Messages fixed: this validator previously named
    ## "Normal" mixtures and contained the typo "mictures".)
    ## Expected in obj@par (each of length K, as vector/array/matrix):
    ##   mu:    component means ('numeric'/'integer')
    ##   sigma: component variances, all positive
    ##   df:    degrees of freedom, all positive
    if ( length( obj@par ) > 0 ) {
        ## --- check the mean vector 'mu' ---
        if ( !"mu" %in% names( obj@par ) ) {
            warning( paste( "Wrong specification of slot @par: ",
                            "univariate Student-t mixtures need ",
                            "a mean matrix named 'mu'.", sep = "" ),
                     call. = FALSE )
        } else if ( !is.array( obj@par$mu ) && !is.vector( obj@par$mu ) &&
                    !is.matrix( obj@par$mu ) ) {
            warning( paste( "Wrong specification of slot @par: ",
                            "mu must be either an array, a vector ",
                            "or a matrix of dimension 1 x K. ", sep = "" ),
                     call. = FALSE )
        } else if ( !all( is.numeric( as.vector( obj@par$mu ) ) ||
                    is.integer( as.vector( obj@par$mu ) ) ) ) {
            stop( paste( "Wrong specification of slot @par: ",
                         "Parameters must be of type 'numeric' or ",
                         "'integer'.", sep = ""),
                  call. = FALSE )
        } else if ( length( obj@par$mu ) != obj@K ) {
            warning( paste( "Wrong specification of slot @par: ",
                            "mu must be a matrix of dimension 1 x K ",
                            "or a vector of size K.", sep = "" ),
                     call. = FALSE )
        }
        ## --- check the variance vector 'sigma' ---
        if ( !"sigma" %in% names( obj@par ) ) {
            warning( paste( "Wrong specification of slot @par: ",
                            "univariate Student-t mixtures need ",
                            "a variance vector named ",
                            "'sigma'", sep = "" ),
                     call. = FALSE )
        } else if ( !all( is.numeric( as.vector( obj@par$sigma ) ) ||
                    is.integer( as.vector( obj@par$sigma ) ) ) ) {
            stop( paste( "Wrong specification of slot @par: ",
                         "Parameters must be of type 'numeric' or ",
                         "'integer'.", sep = ""),
                  call. = FALSE )
        } else if ( any( obj@par$sigma <= 0 ) ) {
            ## BUGFIX: the argument was misspelled '.call'.
            stop( paste( "Wrong specification of slot @par: ",
                         "sigma must contain variances, all ",
                         "positive.", sep = "" ),
                  call. = FALSE )
        } else if ( !is.array( obj@par$sigma ) && !is.vector( obj@par$sigma ) &&
                    !is.matrix( obj@par$sigma ) ) {
            ## BUGFIX: all three tests lacked negation, so this shape
            ## check could never fire.
            warning( paste( "Wrong specification of slot @par: ",
                            "sigma must be either an array, a vector, ",
                            "or a matrix of dimension 1 x K.", sep = "" ),
                     call. = FALSE )
        } else if ( length( obj@par$sigma ) != obj@K ) {
            warning( paste( "Wrong specification of slot @par: ",
                            "sigma must be either an array, a vector, ",
                            "or a matrix of dimension ",
                            "1 x K.", sep = "" ),
                     call. = FALSE )
        }
        ## --- check the degrees of freedom 'df' ---
        if ( !"df" %in% names( obj@par ) ) {
            warning( paste( "Wrong specification of slot @par: ",
                            "Student-t mixtures need a degree of ",
                            "freedom vector.", sep = "" ),
                     call. = FALSE )
        } else if ( !all( is.numeric( as.vector( obj@par$df ) ) ||
                    is.integer( as.vector( obj@par$df ) ) ) ) {
            stop( paste( "Wrong specification of slot @par: ",
                         "Parameters must be of type 'numeric' or ",
                         "'integer'.", sep = ""),
                  call. = FALSE )
        } else if ( any( obj@par$df <= 0 ) ) {
            stop( paste( "Wrong specification of slot @par: ",
                         "Degrees of freedom must be all positive.", sep = "" ),
                  call. = FALSE )
        } else if ( length( obj@par$df ) != obj@K ) {
            warning( paste( "Wrong specification of slot @par: ",
                            "df must be a vector or matrix of ",
                            "dimension 1 x K", sep = "" ),
                     call. = FALSE )
        }
    }
}
### ----------------------------------------------------------------------------
### .init.valid.Studmult.Model
### @description Initial validity check for parameters of a multivariate
### Student-t mixture.
### @par obj a model object
### @return An error if parameters fail certain conditions
### @detail This initial validity check is called in the S4 constructor
### 'model()' and ensures that the user constructs an inherently
### consistent model object.
###              The parameter list must contain the following elements:
### mu: an r x K matrix containing 'numeric' or
### 'integer' values
### sigma: an r x r x K array containing 'numeric' or
### 'integer' matrices, all symmetric/positive
### definite
### df: an 1 x K array, vector or matrix containing
### 'numeric' or 'integer', all positive
### @see ?model, ?vignette('finmix')
### @author Lars Simon Zehnder
### ----------------------------------------------------------------------------
".init.valid.Studmult.Model" <- function( obj )
{
    ## Construction-time check for multivariate Student-t mixture
    ## parameters: every violation stops, so model() never returns an
    ## inconsistent object.
    ## Expected in obj@par:
    ##   mu:    r x K matrix of component means ('numeric'/'integer')
    ##   sigma: r x r x K array of symmetric, positive definite
    ##          variance-covariance matrices
    ##   df:    degrees of freedom, length K, all positive
    if ( length( obj@par) > 0 ) {
        ## --- check the mean matrix 'mu' ---
        if ( !"mu" %in% names( obj@par ) ) {
            stop( paste( "Wrong specification of slot @par: ",
                         "multivariate Student-t mixtures need ",
                         "a mean matrix named 'mu'.", sep = "" ),
                  call. = FALSE )
        } else if ( !is.matrix( obj@par$mu ) ) {
            stop( paste( "Wrong specification of slot @par: ",
                         "mu is not a matrix. ", sep = "" ),
                  call. = FALSE )
        } else if ( !( is.numeric( obj@par$mu ) || is.integer( obj@par$mu ) ) ) {
            ## BUGFIX: the second disjunct used to re-test is.numeric()
            ## instead of is.integer().
            stop( paste( "Wrong specification of slot @par: ",
                         "parameters must be of type 'numeric ",
                         "or 'integer'.", sep = "" ),
                  call. = FALSE )
        } else if ( !identical( dim( obj@par$mu ), c( obj@r, obj@K ) ) ) {
            stop( paste( "Wrong specification of slot @par: ",
                         "mu must be a matrix of dimension r x K.", sep = "" ),
                  call. = FALSE )
        }
        ## --- check the variance-covariance array 'sigma' ---
        if ( !"sigma" %in% names( obj@par ) ) {
            ## message fixed: previously read "mictures"
            stop( paste( "Wrong specification of slot @par: ",
                         "multivariate Student-t mixtures need ",
                         "a variance-covariance array named ",
                         "'sigma'", sep = "" ),
                  call. = FALSE )
        } else if ( !( is.numeric( obj@par$sigma ) || is.integer( obj@par$sigma ) ) ) {
            ## BUGFIX: the second disjunct used to test obj@par$mu.
            stop( paste( "Wrong specification of slot @par: ",
                         "parameters must be of type 'numeric' ",
                         "or 'integer'.", sep = "" ),
                  call. = FALSE )
        } else if ( !is.array( obj@par$sigma ) ) {
            stop( paste( "Wrong specification of slot @par: ",
                         "sigma is not an array.", sep = "" ),
                  call. = FALSE )
        } else if ( !all( apply( obj@par$sigma, 3, isSymmetric ) ) ) {
            stop( paste( "Wrong specification of slot @par: ",
                         "sigma must contain K symmetric ",
                         "r x r matrices.", sep = "" ),
                  call. = FALSE )
        } else if ( !all( apply( obj@par$sigma, 3, function( x ) { all( eigen( x )$values > 0 ) } ) ) ) {
            ## positive definiteness via the eigenvalues of each slice
            stop( paste( "Wrong specification of slot @par: ",
                         "sigma must contain K positive definite ",
                         "r x r matrices.", sep = "" ),
                  call. = FALSE )
        } else if ( !identical( dim( obj@par$sigma ), c( obj@r, obj@r, obj@K ) ) ) {
            stop( paste( "Wrong specification of slot @par: ",
                         "sigma must be an array of dimension ",
                         "r x r x K.", sep = "" ),
                  call. = FALSE )
        }
        ## --- check the degrees of freedom 'df' ---
        if ( !"df" %in% names( obj@par ) ) {
            ## BUGFIX: was a warning; all other construction-time
            ## checks stop.
            stop( paste( "Wrong specification of slot @par: ",
                         "Student-t mixtures need a degree of ",
                         "freedom vector.", sep = "" ),
                  call. = FALSE )
        } else if ( !all( is.numeric( as.vector( obj@par$df ) ) ||
                    is.integer( as.vector( obj@par$df ) ) ) ) {
            stop( paste( "Wrong specification of slot @par: ",
                         "Parameters must be of type 'numeric' or ",
                         "'integer'.", sep = ""),
                  call. = FALSE )
        } else if ( any( obj@par$df <= 0 ) ) {
            stop( paste( "Wrong specification of slot @par: ",
                         "Degrees of freedom must be all positive.", sep = "" ),
                  call. = FALSE )
        } else if ( length( obj@par$df ) != obj@K ) {
            ## BUGFIX: was a warning; see above.
            stop( paste( "Wrong specification of slot @par: ",
                         "df must be a vector or matrix of ",
                         "dimension 1 x K", sep = "" ),
                  call. = FALSE )
        }
    }
}
### ----------------------------------------------------------------------------
### .valid.Studmult.Model
### @description  Validity check for parameters of a multivariate
### Student-t mixture.
### @par obj a model object
### @return An error if parameters fail necessary conditions and
### a warning if parameters fail consistency
### @detail This validity check is called in the setters to ensure that
### slots can be changed without errors but help the user to
### end up with an inherently consistent model object.
###              The parameter list must contain the following elements:
### mu: an r x K matrix containing 'numeric' or
### 'integer' values
###                  sigma:  an r x r x K array containing 'numeric' or
### 'integer' matrices, all symmetric/positive
### definite
### df: an 1 x K array, vector or matrix containing
### 'numeric' or 'integer' values, all positive
### @see ?model, ?vignette('finmix')
### @author Lars Simon Zehnder
### ----------------------------------------------------------------------------
".valid.Studmult.Model" <- function( obj )
{
    ## Setter-time consistency check for multivariate Student-t
    ## mixture parameters: inconsistencies raise warnings, wrong
    ## parameter types raise errors.
    ## Expected in obj@par:
    ##   mu:    r x K matrix of component means ('numeric'/'integer')
    ##   sigma: r x r x K array of symmetric, positive definite
    ##          variance-covariance matrices
    ##   df:    degrees of freedom, length K, all positive
    if ( length( obj@par ) > 0 ) {
        ## --- check the mean matrix 'mu' ---
        if ( !"mu" %in% names( obj@par ) ) {
            warning( paste( "Wrong specification of slot @par: ",
                            "multivariate Student-t mixtures need ",
                            "a mean matrix named 'mu'.", sep = "" ),
                     call. = FALSE )
        } else if ( !is.matrix( obj@par$mu ) ) {
            warning( paste( "Wrong specification of slot @par: ",
                            "mu is not a matrix. ", sep = "" ),
                     call. = FALSE )
        } else if ( !( is.numeric( obj@par$mu ) || is.integer( obj@par$mu ) ) ) {
            ## BUGFIX: the second disjunct used to re-test is.numeric()
            ## instead of is.integer().
            stop( paste( "Wrong specification of slot @par: ",
                         "parameters must be of type 'numeric ",
                         "or 'integer'.", sep = "" ),
                  call. = FALSE )
        } else if ( !identical( dim( obj@par$mu ), c( obj@r, obj@K ) ) ) {
            warning( paste( "Wrong specification of slot @par: ",
                            "mu must be a matrix of dimension r x K.", sep = "" ),
                     call. = FALSE )
        }
        ## --- check the variance-covariance array 'sigma' ---
        if ( !"sigma" %in% names( obj@par ) ) {
            ## message fixed: previously read "mictures"
            warning( paste( "Wrong specification of slot @par: ",
                            "multivariate Student-t mixtures need ",
                            "a variance-covariance array named ",
                            "'sigma'", sep = "" ),
                     call. = FALSE )
        } else if ( !( is.numeric( obj@par$sigma ) || is.integer( obj@par$sigma ) ) ) {
            ## BUGFIX: the second disjunct used to test obj@par$mu.
            stop( paste( "Wrong specification of slot @par: ",
                         "parameters must be of type 'numeric' ",
                         "or 'integer'.", sep = "" ),
                  call. = FALSE )
        } else if ( !is.array( obj@par$sigma ) ) {
            warning( paste( "Wrong specification of slot @par: ",
                            "sigma is not an array.", sep = "" ),
                     call. = FALSE )
        } else if ( !all( apply( obj@par$sigma, 3, isSymmetric ) ) ) {
            warning( paste( "Wrong specification of slot @par: ",
                            "sigma must contain K symmetric ",
                            "r x r matrices.", sep = "" ),
                     call. = FALSE )
        } else if ( !all( apply( obj@par$sigma, 3, function( x ) { all( eigen( x )$values > 0 ) } ) ) ) {
            ## positive definiteness via the eigenvalues of each slice
            warning( paste( "Wrong specification of slot @par: ",
                            "sigma must contain K positive definite ",
                            "r x r matrices.", sep = "" ),
                     call. = FALSE )
        } else if ( !identical( dim( obj@par$sigma ), c( obj@r, obj@r, obj@K ) ) ) {
            warning( paste( "Wrong specification of slot @par: ",
                            "sigma must be an array of dimension ",
                            "r x r x K.", sep = "" ),
                     call. = FALSE )
        }
        ## --- check the degrees of freedom 'df' ---
        if ( !"df" %in% names( obj@par ) ) {
            warning( paste( "Wrong specification of slot @par: ",
                            "Student-t mixtures need a degree of ",
                            "freedom vector.", sep = "" ),
                     call. = FALSE )
        } else if ( !all( is.numeric( as.vector( obj@par$df ) ) ||
                    is.integer( as.vector( obj@par$df ) ) ) ) {
            stop( paste( "Wrong specification of slot @par: ",
                         "Parameters must be of type 'numeric' or ",
                         "'integer'.", sep = ""),
                  call. = FALSE )
        } else if ( any( obj@par$df <= 0 ) ) {
            stop( paste( "Wrong specification of slot @par: ",
                         "Degrees of freedom must be all positive.", sep = "" ),
                  call. = FALSE )
        } else if ( length( obj@par$df ) != obj@K ) {
            warning( paste( "Wrong specification of slot @par: ",
                            "df must be a vector or matrix of ",
                            "dimension 1 x K", sep = "" ),
                     call. = FALSE )
        }
    }
}
### Additional functions
".get.univ.Model" <- function()
{
    ## Identifiers of all supported univariate mixture distributions.
    c( "poisson", "cond.poisson", "binomial",
       "exponential", "normal", "student" )
}
".get.multiv.Model" <- function()
{
    ## Identifiers of all supported multivariate mixture distributions.
    c( "normult", "studmult" )
}
|
47db035ce8fb9d1fcb15ba321e8de216b9bd6461
|
d08e69198fbd60086aa35d765c7675006d06cf3f
|
/R/vec.R
|
f2f7faf30cee2d644b4472e692a55451e0aa3220
|
[] |
no_license
|
villardon/MultBiplotR
|
7d2e1b3b25fb5a1971b52fa2674df714f14176ca
|
9ac841d0402e0fb4ac93dbff078170188b25b291
|
refs/heads/master
| 2023-01-22T12:37:03.318282
| 2021-05-31T09:18:20
| 2021-05-31T09:18:20
| 97,450,677
| 3
| 2
| null | 2023-01-13T13:34:51
| 2017-07-17T08:02:54
|
R
|
UTF-8
|
R
| false
| false
| 195
|
r
|
vec.R
|
vec <- function(X, ByColumns = TRUE) {
  # Vectorisation (vec) operator: stacks the entries of the matrix X
  # into a single vector.
  #
  # Args:
  #   X:         a matrix (anything with a 2-element dim()).
  #   ByColumns: TRUE stacks column by column (the classical vec
  #              operator); FALSE stacks row by row, i.e. vec(t(X)).
  #
  # Returns a list with the stacked vector v, the ByColumns flag and
  # the original dimensions n (rows) and p (columns).
  n <- dim(X)[1]
  p <- dim(X)[2]
  if (ByColumns) {
    # column-major stacking; same order the old column-append loop gave
    v <- as.vector(X)
  } else {
    # BUGFIX: the original never assigned 'v' for ByColumns = FALSE,
    # so the final list() errored with "object 'v' not found".
    v <- as.vector(t(X))
  }
  return(list(v = v, ByColumns = ByColumns, n = n, p = p))
}
|
af00408d71ab8dcff6ea2ca7ad0c38e7fc59ddff
|
76a9d91abb86811e7a020db0c7b5f74a27a24c13
|
/R/GreenestCityOfTheMonth.R
|
2024f44d380fc1ba0a5b262c5cc6b34b9e95ff69
|
[] |
no_license
|
Oskartblcrrzzka/GreenCity
|
02a7784981ffdecbf83ae63d95e5569c05876f38
|
fa02c807123b08003cc8161f5de5a110b56dd8d5
|
refs/heads/master
| 2021-01-17T22:46:42.128273
| 2016-01-13T18:18:21
| 2016-01-13T18:18:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,783
|
r
|
GreenestCityOfTheMonth.R
|
# Student: William Schuch & Rik van Berkum
# Team: Geodetic Engineers of Utrecht
# Institute: Wageningen University and Research
# Course: Geo-scripting (GRS-33806)
# Date: 2016-01-12
# Week 2, Lesson 7: Vector - Raster
GreenestCityOfTheMonth <- function(monthnumber,plot_or_not){
  # Finds the city with the highest mean "green" value for one month
  # and optionally builds an spplot of it.
  #   monthnumber: month index 1..12 selecting a layer
  #   plot_or_not: TRUE returns list(plot, city name); FALSE returns
  #                only the city name
  # NOTE(review): relies on two global objects that must exist in the
  # calling environment: 'stack' (a raster stack; its @layers slot is
  # indexed by month) and 'nlCitySinu' (city polygons with a NAME_2
  # column) -- confirm they are defined before calling.
  # bricking the right raster
  mon <- stack@layers[monthnumber]
  monbrick <- brick(mon)
  # create a mask
  monMask <- mask(monbrick, nlCitySinu)
  # extract: mean raster value per city polygon, attached to the
  # polygons (sp=TRUE)
  monMean <- extract(monMask, nlCitySinu, sp=TRUE, df=TRUE, fun=mean)
  # highest greenvalue
  # NOTE(review): column 16 is a magic index into the attribute table;
  # presumably the extracted mean column -- verify if the input changes
  maximum <- max(monMean@data[16], na.rm = TRUE)
  # find rownumber
  maxrownr <- which(monMean@data[16] == maximum[1])
  # lookup city name
  greenestcity <- monMean$NAME_2[maxrownr]
  # create a subset of the greenest city
  SS_greenestcity <- subset(monMean, monMean$NAME_2==greenestcity, drop = FALSE)
  if (plot_or_not == TRUE) {
    # center of the winning city's bounding box, used to place its label
    GetCenterX <- (bbox(SS_greenestcity)[1,1]+bbox(SS_greenestcity)[1,2])/2
    GetCenterY <- (bbox(SS_greenestcity)[2,1]+bbox(SS_greenestcity)[2,2])/2
    # translate the month index into the column name used by spplot's zcol
    MonthnumberToMonth <- c("January", "February", "March", "April", "May", "June", "July", "August",
                            "September", "October", "November", "December")
    MonthChar <- MonthnumberToMonth[monthnumber]
    CenterText = list("sp.text", c(GetCenterX,GetCenterY), greenestcity)
    # zoom the plot window to the city, padded by 10 km on each side
    # (units are those of the sinusoidal projection, presumably meters)
    p.plot <- spplot(monMean, zcol = MonthChar,
                     col.regions=colorRampPalette(c('darkred', 'red', 'orange', 'yellow','green'))(20),
                     xlim = bbox(SS_greenestcity)[1, ]+c(-10000,10000),
                     ylim = bbox(SS_greenestcity)[2, ]+c(-10000,10000),
                     scales= list(draw = TRUE),
                     sp.layout = CenterText,
                     main = paste(as.character("Greenest city of the month"),as.character(MonthChar)) )
    return(list(p.plot, greenestcity))
  } else {
    return(greenestcity)
  }
}
|
8bf79819d932685b28167607c9129cbde0048b7b
|
a00a0912c2ad60908a8f49ea0c18da534270f1c7
|
/temp/fpca.R
|
8443feabe198da85e495cc5f21851797fdb38b60
|
[] |
no_license
|
wenrurumon/mysrc
|
1cda876324096e2f54ac2dea98e9c5e173bd03a0
|
39fa7e9fecf4f1288703f6be3ad71fbf48266ef4
|
refs/heads/master
| 2021-01-19T21:24:55.444035
| 2020-09-19T17:28:38
| 2020-09-19T17:28:38
| 88,656,630
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,804
|
r
|
fpca.R
|
##################
# FPCA
##################
library(fda)
library(MASS)
library(GenABEL)
library(flare)
library(corpcor)
#FPCA
scale_ivn <- function(x) {
  # Column-wise inverse-normal (rank) transformation of a matrix,
  # delegating each column to GenABEL's rntransform().
  apply(x, 2, rntransform)
}
myinv <- function(A) {
  # Pseudo-inverse of A via the reduced SVD (corpcor::fast.svd drops
  # near-zero singular values): A^+ = V diag(1/d) U'.
  dec <- fast.svd(A)
  inv_d <- 1 / dec$d
  # diag() on a length-1 numeric would build an identity-like matrix of
  # that size, so the single-singular-value case is handled separately.
  middle <- if (length(inv_d) == 1) as.matrix(inv_d) else diag(inv_d)
  dec$v %*% middle %*% t(dec$u)
}
fourier.expansion <- function(x, pos, nbasis, lambda) {
  # Expands each row of x (curves sampled at positions 'pos') in a
  # Fourier basis. The design matrix is phi + lambda * phi'' (second
  # derivative of the basis), i.e. a roughness-adjusted basis.
  # Returns a list with the basis object (fbasis), the evaluated design
  # matrix (phi) and the least-squares coefficients (coef), one column
  # per curve.
  frange <- c(pos[1], pos[length(pos)])
  fbasis <- create.fourier.basis(frange, nbasis = nbasis)
  phi <- eval.basis(pos, fbasis) + eval.basis(pos, fbasis, 2) * lambda
  # normal equations solved through the pseudo-inverse helper myinv()
  coef <- myinv(t(phi) %*% phi) %*% t(phi) %*% t(x)
  list(fbasis = fbasis, phi = phi, coef = coef)
}
fpca <- function(x,pos,nbasis,lambda){
  # Functional PCA of the rows of x (curves observed at positions pos).
  #   x:      matrix, one curve per row
  #   pos:    observation positions (any order; sorted internally)
  #   nbasis: requested number of Fourier basis functions (capped below)
  #   lambda: roughness weight forwarded to fourier.expansion()
  # Returns a list of three scorings, each a list(score, prop) where
  # prop is the cumulative variance proportion:
  #   fpca:   PC scores of the standardized Fourier coefficients,
  #           truncated at 99.99% of variance
  #   qfpca:  scores after soft-thresholding singular values at rank r
  #           (r = smallest rank reaching 80% of variance)
  #   q2fpca: qpca() re-scoring of the fpca scores (the fpca scores
  #           themselves when only one component survives)
  nbasis <- min(nrow(x),nbasis)
  #process data: sort columns by position, rescale positions to [0,1]
  x <- x[,order(pos),drop=F]
  pos <- pos[order(pos)]
  pos <- (pos-min(pos))/(max(pos)-min(pos))
  #fourier expansion
  x.expanded <- fourier.expansion(x,pos,nbasis,lambda)
  # center and standardize the coefficient matrix (curves in rows)
  fcoef<-scale(t(x.expanded$coef-rowMeans(x.expanded$coef))/sqrt(ncol(x)))
  #PCA
  # svd() may fail to converge; retry with two fewer basis functions
  # until it succeeds (try() returns a non-list on failure)
  A.svd <- try(svd(fcoef))
  while(!is.list(A.svd)){
    nbasis <- nbasis - 2
    x.expanded <- fourier.expansion(x,pos,nbasis,lambda)
    fcoef<-scale(t(x.expanded$coef-rowMeans(x.expanded$coef))/sqrt(ncol(x)))
    A.svd <- try(svd(fcoef))
  }
  # cumulative variance proportions of the plain SVD
  prop1 <- (A.svd$d)^2; prop1 <- cumsum(prop1)/sum(prop1)
  r <- which(prop1>0.8)[1]
  # soft-threshold: shift singular values down by the (r+1)-th one
  d <- A.svd$d-A.svd$d[min(r+1,dim(fcoef))]
  d <- d[d>1e-10]
  prop2 <- d^2; prop2 <- cumsum(prop2)/sum(prop2)
  d <- diag(d,length(d),length(d))
  # fpca scores: projection onto right singular vectors, kept to 99.99%
  score1 <- fcoef %*% A.svd$v
  score1 <- score1[,1:which(prop1>0.9999)[1],drop=F]
  prop1 <- prop1[1:which(prop1>0.9999)[1]]
  # qfpca scores: left singular vectors scaled by thresholded values
  score2 <- A.svd$u[,1:ncol(d),drop=F] %*% sqrt(d)
  if(ncol(score1)==1){
    score3 <- score1
    prop3 <- prop1
  } else {
    score3 <- qpca(score1,rank=r)
    prop3 <- score3$prop
    score3 <- score3$X
  }
  list(fpca=list(score=score1,prop=prop1),
       qfpca=list(score=score2,prop=prop2),
       q2fpca=list(score=score3,prop=prop3))
}
qpca <- function(A, rank = 0) {
  # PCA of the inverse-normal-transformed matrix A via the SVD, with an
  # optional soft threshold: when rank > 0 every singular value is
  # shifted down by the (rank+1)-th one before truncation.
  # Returns list(rank, X, Y, Z = X %*% Y, prop) where prop holds the
  # cumulative variance proportions of the retained components.
  A <- scale_ivn(A)
  dec <- svd(A)
  sv <- dec$d
  if (rank != 0) {
    sv <- sv - sv[min(rank + 1, nrow(A), ncol(A))]
  }
  sv <- sv[sv > 1e-10]
  keep <- length(sv)
  cum_prop <- cumsum(sv^2 / sum(sv^2))
  D <- diag(sv, keep, keep)
  X <- dec$u[, seq_len(keep), drop = FALSE] %*% sqrt(D)
  Y <- sqrt(D) %*% t(dec$v[, seq_len(keep), drop = FALSE])
  list(rank = keep, X = X, Y = Y, Z = X %*% Y, prop = cum_prop)
}
# Genetic-data helpers (Hardy-Weinberg test, common-variant filter)
hw_test <- function(X) {
  # Pearson chi-square goodness-of-fit test for Hardy-Weinberg
  # equilibrium on genotype codes 0/1/2 (copies of the minor allele).
  # Returns the upper-tail p-value on 1 degree of freedom.
  n_obs <- length(X)
  obs <- c(sum(X == 0), sum(X == 1), sum(X == 2))
  # allele frequencies implied by the genotype counts
  p_freq <- (2 * obs[1] + obs[2]) / (2 * n_obs)
  q_freq <- 1 - p_freq
  # expected genotype counts under HWE: n * (p^2, 2pq, q^2)
  expct <- n_obs * c(p_freq^2, 2 * p_freq * q_freq, q_freq^2)
  chi_sq <- sum((obs - expct)^2 / expct)
  pchisq(chi_sq, df = 1, lower.tail = FALSE)
}
common <- function(x) {
  # TRUE when the allele frequency implied by genotype codes 0/1/2 lies
  # strictly between 0.05 and 0.95, i.e. the variant is "common".
  allele_freq <- mean(x == 2) + 0.5 * mean(x == 1)
  allele_freq > 0.05 && allele_freq < 0.95
}
|
372a0256cf013465c532912a1e8c04506fdd2267
|
7271ef850abc9ffe10fba66096dbdc81d64eaa77
|
/R/GraphCleaning.r
|
1cf3a45129f5ede8ee39c54fab05c263a1d202b6
|
[] |
no_license
|
symanzik/R-micromap-package-development
|
5b38660146a780339c65c187947b9189fd302206
|
773a28997c04475a9fbb5ef802989459942f926a
|
refs/heads/master
| 2020-12-26T21:45:32.927443
| 2014-10-22T22:02:47
| 2014-10-22T22:02:47
| 27,741,455
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,457
|
r
|
GraphCleaning.r
|
##########################
##### theme settings #####
##########################
# creating theme functions to be called as needed from the panel building done elsewhere
# each function takes a ggplot object (e.g. pl <- plots[[1]]) and adds attributes to it
# as specified in the above function arguments then returns the object
### set panel background color, overall title and margins
plot_opts <- function(i, pl, a){
  # Applies panel-level options (background color, header title,
  # margins) from the attribute list to a ggplot object.
  #   i:  index of the current panel within the attribute list
  #   pl: ggplot object under construction
  #   a:  attribute list; a[[i]] holds this panel's settings and
  #       all_atts() (project helper) collects one attribute across
  #       all panels
  # Returns the modified ggplot object.
  # sets background color if specified
  if(!is.na(a[[i]]$panel.bgcolor)) pl <- pl + theme(plot.background = element_rect(colour=a[[i]]$panel.bgcolor,
                                                    fill=a[[i]]$panel.bgcolor))
  # sets a header title if specified
  # if any of the panels have a header this inserts a blank title on those that do not in order
  # to keep a similar layout among panels
  if(!all(is.na(all_atts(a, 'panel.header')))){
    tmp.size <- max(as.numeric(all_atts(a, 'panel.header.size')))*10	# All headers must be same size. Default size is 10
    tmp.lineheight <- as.numeric(all_atts(a, 'panel.header.lineheight'))
    tmp.lineheight <- tmp.lineheight[which.max(abs(tmp.lineheight-1))]	# All line heights must be equal so we use the height
    								# most drastically different from 1
    # If all headers aren't NA then we must change the titles to be blank in order to leave space
    # for them at the top. If any header is multiple lines (ie contains '\n') then we must add in
    # the correct number of character returns to the other headers in order to make the plot uniform
    # (empty/NA headers are normalized to a single blank so every panel reserves title space)
    tmp.headers <- lapply(all_atts(a, 'panel.header'), function(t) if(is.na(t)|t=='') t=' ' else t)
    tmp.title <- tmp.headers[[i]]
    # pad this title with blank lines until it matches the tallest header
    ns <- max(unlist(lapply(tmp.headers, function(x) length(strsplit(x, '\n')[[1]])))) - length(strsplit(tmp.title,'\n')[[1]])
    if(ns>0) tmp.title <- paste(tmp.title, rep(' \n ',ns), sep='')
    pl <- pl + ggtitle(tmp.title) +
        theme(plot.title=element_text(family=a[[i]]$panel.header.font, face=a[[i]]$panel.header.face,
              colour=a[[i]]$panel.header.color, size=tmp.size, lineheight=tmp.lineheight))
  }
  # sets panel margins and removes ggplot's default side strips
  pl <- pl + theme(strip.background = element_blank(),
    strip.text.x = element_blank(),
    strip.text.y = element_blank(),
    plot.margin = unit(a[[i]]$panel.margins, "lines"))
  pl
}
### set graph background color and whether grid lines show up
graph_opts <- function(i, pl, a){
  # Applies per-panel graph background and grid-line settings from the
  # attribute list to a ggplot object.
  #   i:  index of the current panel in the attribute list
  #   pl: ggplot object under construction
  #   a:  attribute list; a[[i]] holds this panel's settings
  # Returns the modified ggplot object.
  #
  # Background resolution: panel.bgcolor if set, else 'white'; then
  # graph.bgcolor overrides when set. Scalar if/else replaces the
  # original ifelse() calls: ifelse() is for vectors and these
  # attributes are scalars.
  bgcolor <- if (!is.na(a[[i]]$panel.bgcolor)) a[[i]]$panel.bgcolor else 'white'
  if (!is.na(a[[i]]$graph.bgcolor)) bgcolor <- a[[i]]$graph.bgcolor
  # sets background color of graphs
  # note: what were referring to as "graphs" are what ggplot refers to as "panels" (ie. "panel.background")
  pl <- pl + theme(panel.background = element_rect(colour=bgcolor, fill=bgcolor))
  # draws major grid lines in the specified color if desired -- defaults to darkgray in attribute list
  if(a[[i]]$graph.grid.major){
    pl <- pl + theme(panel.grid.major = element_line(colour=a[[i]]$graph.grid.color))
  } else {
    pl <- pl + theme(panel.grid.major = element_blank())
  }
  # minor grid lines likewise
  if(a[[i]]$graph.grid.minor){
    pl <- pl + theme(panel.grid.minor = element_line(colour=a[[i]]$graph.grid.color))
  } else {
    pl <- pl + theme(panel.grid.minor = element_blank())
  }
  pl
}
### sets graph boundaries, ticks, labels, borders
axis_opts <- function(i, pl, a, limsx=NA, limsy=NA, border=TRUE, expx=FALSE){
  # Configures the x and y axes of one panel: titles, tick labels,
  # data limits, expansion and the surrounding border rectangle.
  #   i:      index of the current panel in the attribute list
  #   pl:     ggplot object under construction
  #   a:      attribute list; a[[i]] is this panel, all_atts()/all_attsb()
  #           (project helpers) collect one attribute across all panels
  #   limsx:  x data limits c(min, max)
  #   limsy:  y data limits c(min, max), or c(min, max, median.min,
  #           median.max) when a median row is present
  #   border: whether to draw the panel border rectangle
  #   expx:   whether to keep ggplot's default x-axis expansion
  # Returns the modified ggplot object.
  # i=p; a=att; limsx=tmp.limsx; limsy=c(tmp.limsy,tmp.median.limsy); border=FALSE; expx=FALSE
  # many features are "hidden" by simply coloring the same color as the background so
  # if panel background is NA we assume "white" will effectively do the hiding
  bgcolor <- ifelse(!is.na(a[[i]]$panel.bgcolor), a[[i]]$panel.bgcolor, 'white')
  # specify label size as maximum of all requested label sizes
  label.size <- as.numeric(max(all_atts(a, 'xaxis.labels.size')))*10
  # limsy will sometimes be in the form (c(lower bound, upper bound, lower bound for the median , upper bound for the median))
  # if thats the case, we split it into the two seperate limits here
  median.limsy <- NULL
  if(length(limsy)==4) {median.limsy <- limsy[3:4]; limsy <- limsy[1:2]}
  ##############
  ### X axis ###
  ##############
  # with ggplot2, most axis specifications need to be made through the "scale_x_continuous()" function. there
  # are 5 arguements: title, breaks (tick locations), labels (tick labels), limits (data limits), and expand (the
  # extent to which the axis is expanded beyond the limits of the data). these specifications must be made
  # all at once so we build this statement as a string and then execute it through an "eval" statement at the end
  # NOTE(review): the string-built eval(parse(...)) pattern is fragile but is
  # kept as-is; downstream code depends on its exact behavior.
  # the following boolean variables state whether to include these specifications in the scale_x_continuous statement
  # we start by assuming none except title will be needed
  x.breaks <- x.labels <- x.limits <- x.expand <- FALSE
  ### axis title ###
  xstr.title <- "''"
  # If all axis titles aren't NA then we must change the other titles to be blank in order to leave space
  # for them at the bottom. If any title is multiple lines (ie contains '\n') then we must add in
  # the correct number of character returns to the other titles in order to make the plot uniform
  if(!all(is.na(all_atts(a, 'xaxis.title')))){
    tmp.size <- max(as.numeric(all_atts(a, 'xaxis.title.size')))*8	# All titles must be same size. Default size is 8
    tmp.lineheight <- as.numeric(all_atts(a, 'xaxis.title.lineheight'))
    tmp.lineheight <- tmp.lineheight[which.max(abs(tmp.lineheight-1))]	# All line heights must be equal so we use the height
    								# most drastically different from 1
    tmp.titles <- lapply(all_atts(a, 'xaxis.title'), function(t) if(is.na(t)|t=='') t=' ' else t)
    tmp.title <- tmp.titles[[i]]
    # pad this title with blank lines until it matches the tallest title
    ns <- max(unlist(lapply(tmp.titles, function(x) length(strsplit(x, '\n')[[1]])))) - length(strsplit(tmp.title,'\n')[[1]])
    if(ns>0) tmp.title <- paste(tmp.title, rep(' \n ',ns), sep='')
    xstr.title <- paste("'",tmp.title,"'",sep='')
    pl <- pl + theme(axis.title.x = element_text(family=a[[i]]$xaxis.title.font, face=a[[i]]$xaxis.title.face,
                    colour=a[[i]]$xaxis.title.color, size=tmp.size, lineheight=tmp.lineheight))
  }
  ### axis limits and expansion ###
  if (!any(is.na(limsx))) x.limits <- TRUE
  # if there is a border to be added, we must manually deal with expansion
  if(!expx){
    x.expand <- TRUE
    xstr.expand <- as.character(", expand=c(0,0)")
  }
  xstr.limits <- as.character(paste('c(',min(limsx), ',', max(limsx),')'))
  xstr.limits <- paste(", limits=", xstr.limits)
  ### panel footers (not completed) ###
  # "panel footers" are really just augmented x axis titles
  # if all axis titles are blank then we hide axis titles on the whole plot
  if(all(is.na(all_atts(a, 'xaxis.title')))) pl <- pl + theme(axis.title.x = element_blank())
  ### axis lines ###
  # note: axis lines are always there, if the user doesn't want to
  # see them they are colored to match the background
  if (!a[[i]]$xaxis.line.display & !a[[i]]$yaxis.line.display) {
    pl <- pl + theme(axis.line = element_line(colour=bgcolor))
  # else lines will be plotted
  } else {
    pl <- pl + theme(axis.line = element_line(colour='black'))
  }
  ### axis ticks ###
  # for now we assume ticks are never wanted as they make things pretty cluttered looking
  pl <- pl + theme(axis.ticks = element_blank())
  ### axis text ###
  # trys to hide axis text on the whole plot
  if(!any(all_attsb(a, 'xaxis.text.display'))) {
    pl <- pl + theme(axis.text.x = element_blank())
  # otherwise trys to "hide" axis text on this panel
  } else if (!a[[i]]$xaxis.text.display) {
    pl <- pl + theme(axis.text.x = element_text(colour=bgcolor, size=label.size))
  # axis text will show and we'll add specific labels if requested
  } else if (!is.na(unlist(a[[i]]$xaxis.labels)[1]) &
             !is.na(unlist(a[[i]]$xaxis.ticks)[1])) {
    # build a theme() call as text so only the specified adjustments
    # (angle/hjust/vjust) are included
    tmpTheme <- "theme(axis.text.x = element_text(size=label.size"
    if(!is.null(a[[i]]$xaxis.labels.angle)) tmpTheme <- paste(tmpTheme, ", angle = ", a[[i]]$xaxis.labels.angle)
    if(!is.null(a[[i]]$xaxis.labels.hjust)) tmpTheme <- paste(tmpTheme, ", hjust =", a[[i]]$xaxis.labels.hjust)
    if(!is.null(a[[i]]$xaxis.labels.vjust)) tmpTheme <- paste(tmpTheme, ", vjust =", a[[i]]$xaxis.labels.vjust)
    tmpTheme <- paste(tmpTheme, "))")
    pl <- pl + eval(parse(text=tmpTheme ))
    x.breaks <- x.labels <- TRUE
    xstr.breaks <- paste(', breaks=c(', make.string(a[[i]]$xaxis.ticks),')',sep='')
    xstr.labels <- paste(', labels=c(', make.string(a[[i]]$xaxis.labels),')',sep='')
  # warning if user only specified text but not location or vice versa
  } else if (!is.na(unlist(a[[i]]$xaxis.labels)[1]) |
             !is.na(unlist(a[[i]]$xaxis.ticks)[1])) {
    print('Warning: both axis labels AND tick location must be specified')
  # otherwise text shows up as ggplot defaults
  }
  # put it all together and execute the eval call
  xstr <- paste("scale_x_continuous(", xstr.title)
  if (x.expand) xstr <- paste(xstr, xstr.expand)
  if (x.breaks) xstr <- paste(xstr, xstr.breaks)
  if (x.labels) xstr <- paste(xstr, xstr.labels)
  if (x.limits) xstr <- paste(xstr, xstr.limits)
  xstr <- paste(xstr, ")")
  pl <- pl + eval(parse(text=xstr))
  ##############
  ### Y axis ###
  ##############
  # with ggplot2, most axis specifications need to be made through the "scale_y_continuous()" function. there
  # are 5 arguements: title, breaks (tick locations), labels (tick labels), limits (data limits), and expand (the
  # extent to which the axis is expanded beyond the limits of the data). these specifications must be made
  # all at once so we build this statement as a string and then execute it through an "eval" statement at the end
  ### axis title ###
  ystr.title <- ifelse(!is.na(a[[i]]$yaxis.title), a[[i]]$yaxis.title, "''")
  ### axis text ###
  if(a[[i]]$yaxis.ticks.display | a[[i]]$yaxis.text.display) ystr.breaks <- "" else ystr.breaks <- ", breaks=NULL"
  ### axis limits and expansion ###
  ystr.expand <- ", expand=c(0,0)"
  # widen the y limits by the plot.pGrp.spacing fraction on each side
  limsy <- limsy + c(-1,1) * diff(limsy)*a$plot.pGrp.spacing
  ystr.limits <- as.character(paste('c(',min(limsy), ',', max(limsy),')'))
  ystr.limits <- paste(", limits=", ystr.limits)
  pl <- pl + theme(panel.margin = unit(0, "lines"))
  # if (any(is.na(limsy)) | a$median.row) y.limits <- FALSE else y.limits <- TRUE
  # put it all together and execute the eval call
  ystr <- paste("scale_y_continuous(", ystr.title, ystr.expand, ystr.breaks)
  # if (y.limits) ystr <- paste(ystr, ystr.limits)
  ystr <- paste(ystr, ")")
  pl <- pl + eval(parse(text=ystr))
  ##############
  ### border ###
  ##############
  # shrink the border rectangle fractionally inside the limits
  borderx <- range(limsx) + c(1,-1) * diff(range(limsx))*.001
  bordery <- range(limsy) + c(0, -1) * diff(range(limsy))*.001
  if(!is.null(median.limsy)) median.limsy <- range(median.limsy) - c(0, diff(range(median.limsy))*.001)
  # one border rectangle per perceptual group (pGrp) in the panel data
  tmp.border <- data.frame(pGrp=rep(1:max(pl$data$pGrp),each=2), ymin=bordery[1], ymax=bordery[2],
                           xmin=borderx[1], xmax=borderx[2])
  # the median row (group a$m.pGrp) gets its own y limits when present
  if(a$median.row) tmp.border <- rbind(subset(tmp.border, !pGrp==a$m.pGrp), data.frame(pGrp=a$m.pGrp, ymin=median.limsy[1], ymax=median.limsy[2],
                                       xmin=borderx[1], xmax=borderx[2]))
  if(border) border.color <- a[[i]]$graph.border.color else border.color <- NA
  pl <- pl + geom_rect(aes(xmin = xmin, xmax=xmax, ymin=ymin, ymax=ymax), data=tmp.border,
                       colour= border.color, fill=NA)
  pl <- pl + theme(axis.line = element_blank())
  pl
}
#' Apply panel, graph, and axis display options to a single ggplot panel.
#'
#' Wrapper that runs plot_opts(), graph_opts() and axis_opts() on `pl`,
#' filling in default x/y limits when none were supplied by the caller.
#'
#' @param pl ggplot object for one panel (its $data holds the panel data).
#' @param i Index of this panel within the attribute list `a`.
#' @param a Attribute list; this function reads a[[i]]$panel.data,
#'   a$grouping, a$median.row and a$colors.
#' @param limsx,limsy Optional numeric length-2 data limits; derived from
#'   the panel data / grouping when left as NA.
#' @return The modified ggplot object.
assimilatePlot <- function(pl, i, a, limsx=NA, limsy=NA){
  pl <- plot_opts(i, pl, a)
  pl <- graph_opts(i, pl, a)
  # The length()==1 guard fixes a latent bug: plain is.na() on a caller-
  # supplied length-2 limit vector is an error in R >= 4.2
  # ("the condition has length > 1").
  if (length(limsx) == 1 && is.na(limsx)) {
    # default x limits: data range padded by 5% on each side
    limsx <- range(pl$data[, unlist(a[[i]]$panel.data)])
    limsx <- limsx + c(-1, 1) * diff(limsx) * .05
  }
  if (length(limsy) == 1 && is.na(limsy)) {
    # y axis is negated so groups are drawn top-to-bottom, with a
    # half-unit margin on each side
    limsy <- -c(.5, max(a$grouping) + .5)
  }
  if (a$median.row) {
    # median row gets an extra grey colour and its own y range
    pl <- pl + scale_colour_manual(values = c(a$colors, gray(.5)), guide = 'none')
    median.limsy <- c(-.5, -1.5)
    limsy <- c(limsy, median.limsy)
  }
  pl <- axis_opts(i, pl, a, limsx = limsx, limsy = limsy, border = TRUE)
  pl
}
|
863d16be0e35a2fb207f8408de1e1d2045ac8abb
|
3f00f7c81c6ed9bb50db182fa6652e26a062a5f1
|
/man/write.mct.Rd
|
45953c8df925d60bf40112f37ae5005732c34965
|
[] |
no_license
|
cran/polymapR
|
c2c2130a476b2e1da85b1ac0d96cd330b4eb2b1a
|
ae247e7e8fb238f9fd8933d12e2d46ae04606b1e
|
refs/heads/master
| 2023-03-19T23:30:05.960131
| 2023-03-13T16:20:02
| 2023-03-13T16:20:02
| 113,219,924
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,293
|
rd
|
write.mct.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/exported_functions.R
\name{write.mct}
\alias{write.mct}
\title{Write MapChart file}
\usage{
write.mct(
maplist,
mapdir = "mapping_files_MDSMap",
file_info = paste("; MapChart file created on", Sys.Date()),
filename = "MapFile",
precision = 2,
showMarkerNames = FALSE
)
}
\arguments{
\item{maplist}{A list of maps. In the first column marker names and in the second their position. All map data are
compiled into a single MapChart file.}
\item{mapdir}{Directory to which .mct files are written, by default the same directory
as for \code{\link{MDSMap_from_list}}}
\item{file_info}{A character string added to the first lines of the .mct file, by default a datestamp is recorded.}
\item{filename}{Character string of filename to write the .mct file to, by default "MapFile"}
\item{precision}{To how many decimal places should marker positions be specified (default = 2)?}
\item{showMarkerNames}{Logical, by default \code{FALSE}, if \code{TRUE}, the marker names will be diplayed in the
MapChart output as well.}
}
\description{
Write a .mct file of a maplist for external plotting with MapChart software (Voorrips, 2002).
}
\examples{
\dontrun{
data("integrated.maplist")
write.mct(integrated.maplist)}
}
|
8a28f008f2390a57c6a860348de6cd27e7705e1a
|
6076b19163a44bbfeb360fc691282ce3f4c99635
|
/plot4.R
|
8f20116fbadcb6498fa66a27a15d6e501336c368
|
[] |
no_license
|
dwallc/ExData_Plotting1
|
ae335ec73ce383c20fa75c4c5e3c5c257557db1c
|
e5efb6ad390ae7ce04a6f4de90a7778c0b0a44e3
|
refs/heads/master
| 2020-11-30T12:32:50.799694
| 2014-07-12T00:06:54
| 2014-07-12T00:06:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,756
|
r
|
plot4.R
|
### Exploratory Data Analysis
## Peer Assessment I - Plot 4
# Builds a 2x2 grid of household power-consumption plots for
# 2007-02-01 / 2007-02-02 and saves it to plot4.png.
# Expects household_power_consumption.txt in the working directory.
# Clear Workspace
# NOTE(review): rm(list=ls()) also wipes the caller's workspace if this
# script is source()d rather than run standalone.
rm(list=ls())
# Load Original Data File (read everything as character; "?" missing
# markers become NA during the as.numeric() conversions below)
data <- read.table("household_power_consumption.txt", header = TRUE,
sep = ";", colClasses = "character")
# Cleaning and Subsetting Data
# column 1 = Date; columns 3:9 are the numeric measurements
data[,1] <- as.Date(data$Date, "%d/%m/%Y")
data[,3] <- as.numeric(data[,3])
data[,4] <- as.numeric(data[,4])
data[,5] <- as.numeric(data[,5])
data[,6] <- as.numeric(data[,6])
data[,7] <- as.numeric(data[,7])
data[,8] <- as.numeric(data[,8])
data[,9] <- as.numeric(data[,9])
# combine Date and Time into one POSIXct timestamp for the x axis
data$DateTime <- as.POSIXct(paste(data$Date, data$Time),
format="%Y-%m-%d %H:%M:%S")
# keep only the two target days
subdata <- subset(data, data$Date %in%
as.Date(c("2007-02-01", "2007-02-02")))
## Create 4 Plots in a 2x2 Grid
# Set 2x2 Grid, Filling Across Rows
par(mfrow = c(2,2), mar = c(5.1, 4.1, 1.1, 2.1))
# 1st Plot - Global Active Power
plot(subdata$DateTime, subdata[,3], type = "l", xlab = " ",
ylab = "Global Active Power")
# 2nd Plot - Voltage
plot(subdata$DateTime, subdata[,5], type = "l", xlab = "datetime",
ylab = "Voltage")
# 3rd Plot - Energy sub metering (three series overlaid; column 10 is
# the DateTime column created above)
plot(subdata$DateTime, subdata[,7], type = "l", xlab = " ",
ylab = "Energy sub metering")
lines(subdata[,10], subdata[,8], col = "red")
lines(subdata[,10], subdata[,9], col = "blue")
legend(x = "topright",
legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
col = c("black", "red", "blue"), lwd = 1, pt.cex = 0.6, cex = 0.6, bty = "n")
# 4th Plot - Global_reactive_power
plot(subdata$DateTime, subdata[,4], type = "l", xlab = "datetime",
ylab = "Global_reactive_power")
# Copy plot grid to PNG file (original comment said "Histogram", but this
# is the 2x2 line-plot grid). NOTE(review): dev.copy() re-renders at the
# png device's default size, which may differ from the screen layout.
dev.copy(png, file = "plot4.png")
dev.off()
|
2b437a2c12d3e398cab0836a94311284d6b39d2b
|
27bf605ba3f4461078967ff8b839b6cf5454d956
|
/bin/BRF_pred.R
|
3a27521f43707f0e8be5d0eaae8d58d5cb81c55d
|
[] |
no_license
|
wangpanqiao/COME
|
7d8a610bac920aec3e429bd60c2fe19cd2124a70
|
84b3a35a5a9f53c0735a3e42d5c7d801daa85f7b
|
refs/heads/master
| 2020-11-25T07:37:49.256369
| 2019-11-22T03:51:00
| 2019-11-22T03:51:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,491
|
r
|
BRF_pred.R
|
# BRF_pred.R
# Apply an ensemble of pre-trained randomForest models to a (possibly very
# large) tab-delimited feature matrix, streaming it in chunks of Nlines
# rows, and write the averaged class-vote probabilities to output1.
#
# Usage: Rscript BRF_pred.R <model.RData> <matrix.tsv> <out.tsv>
# NOTE(review): load(inputF1) is assumed to define `model_list`, a list of
# randomForest fits -- confirm against the training script.
Args = commandArgs(TRUE);
inputF1 = Args[1];#input the model;
inputF2 = Args[2];#input the predict matrix;
output1 = Args[3];#output the predition probablity.
Nlines = 10000;   # chunk size: rows read per iteration (bounds memory use)
library("randomForest");
load(inputF1);
INid = file(inputF2,"r");
flag = 0;   # 0 = first chunk (has header row), 1 = subsequent chunks
IN = readLines(INid,n=Nlines);
while(length(IN) != 0){
if(flag == 0){
# First chunk: parse header, then keep only the feature columns that the
# first model was trained on (matched by its importance rownames).
Mx = read.table(text=IN,sep="\t",head=T,quote="",check.names=F);
HEADER = colnames(Mx);
tmp = model_list[[1]];
FI = as.vector(rownames(tmp$importance));
ID = which(is.element(HEADER,FI));
Mx = as.matrix(Mx[,ID]);
# Sum the vote matrices of every model in the ensemble.
for(m in 1:length(model_list)){
rf = model_list[[m]];
RF_pred = predict(rf,Mx,type="vote");
if(m==1){
RF_pred_sum = RF_pred
}else{
RF_pred_sum = RF_pred_sum + RF_pred;
}
}
RF_pred_mean = round(RF_pred_sum/length(model_list),4);
# First chunk writes the column header and truncates any existing file.
write.table(RF_pred_mean,file=output1,quote=F,sep="\t",col.name=T,row.name=F,append = FALSE);
flag = 1;
}else{
# Later chunks have no header row; reuse HEADER/ID from the first chunk.
Mx = read.table(text=IN,sep="\t",head=F);
colnames(Mx) = HEADER;
Mx = as.matrix(Mx[,ID]);
for(m in 1:length(model_list)){
rf = model_list[[m]];
RF_pred = predict(rf,Mx,type="vote");
if(m==1){
RF_pred_sum = RF_pred
}else{
RF_pred_sum = RF_pred_sum + RF_pred;
}
}
RF_pred_mean = round(RF_pred_sum/length(model_list),4);
# Append without header so chunks concatenate into one table.
write.table(RF_pred_mean,file=output1,quote=F,sep="\t",col.name=F,row.name=F,append = TRUE);
}
# Free the chunk's objects before reading the next one.
rm(Mx, RF_pred_sum, RF_pred, RF_pred_mean);
IN = readLines(INid,n=Nlines);
}
close(INid);
rm(list=ls());
|
bb19753a8e1a3ac43f87d846a0ffdce190bb3c4a
|
a9dcc6d36e928267e6ac9b3d8de324afd7030a72
|
/ProcessingPipeline/ExamineGenomeCoverage_24-07-17.R
|
9795fb4b68a355163db05cd6668f9887879df163
|
[] |
no_license
|
xulijunji/GeneralTools
|
e5778d2da6e64264a26027a713e577d88391007e
|
758c769ba10cde1c02e74d5dec70d978d9b6675d
|
refs/heads/master
| 2021-08-23T20:14:33.238693
| 2017-12-06T10:47:49
| 2017-12-06T10:47:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,491
|
r
|
ExamineGenomeCoverage_24-07-17.R
|
# ExamineGenomeCoverage: plot per-isolate genome coverage (proportion of
# sites with >=20x depth vs. mean depth) to a PDF, colouring badgers (IDs
# containing "WB") red and cattle blue.
############
# Set path #
############
path <- "C:/Users/Joseph Crisp/Desktop/UbuntuSharedFolder/Cumbria/vcfFiles/"
##########################
# Read in coverage files #
##########################
# # Read in the genome coverage file
# genomeCoverageFile <- paste(path, "genomeCoverageSummary_DP-20_21-07-2017.txt", sep="")
# genomeCoverage <- read.table(genomeCoverageFile, header=TRUE, stringsAsFactors=FALSE)
# Read in the isolate coverage file
isolateCoverageFile <- paste(path, "isolateCoverageSummary_DP-20_10-09-2017.txt", sep="")
isolateCoverage <- read.table(isolateCoverageFile, header=TRUE, stringsAsFactors=FALSE)
# Parse the Isolate column
#isolateCoverage$IsolateID <- parseIsolateColumn(isolateCoverage$IsolateID)
#############################
# Plot the isolate coverage #
#############################
# Open a pdf named after the input file (".txt" -> ".pdf")
# NOTE(review): `file` shadows base::file here; harmless in this script.
file <- paste(substr(isolateCoverageFile, 1, nchar(isolateCoverageFile) - 4), ".pdf", sep="")
pdf(file)
plot(y=isolateCoverage$PercentageCoverage,
x=isolateCoverage$MeanDepth,
las=1, ylab="Proportion", main="Proportion of M. bovis Genome with >19 mapped reads",
xlab="Mean Read Depth", pch=16, cex=3,
col=ifelse(grepl(x=isolateCoverage$IsolateID, pattern="WB"), rgb(1,0,0, 0.5),
rgb(0,0,1, 0.5)))
# Label only the poorly covered isolates (<80%); well-covered points get a
# fully transparent label colour, which hides their text.
text(y=isolateCoverage$PercentageCoverage,
x=isolateCoverage$MeanDepth,
labels = isolateCoverage$IsolateID, cex=1, pos=4,
col=ifelse(isolateCoverage$PercentageCoverage < 0.8, rgb(0,0,0, 1), rgb(0,0,0, 0)))
legend("bottomright", legend=c("BADGER", "COW"), text.col=c("red", "blue"), bty="n")
dev.off()
############################
# Plot the genome coverage #
############################
# # Note reference genome size
# MbovisSize <- 4349904
#
# # Open a pdf
# file <- paste(substr(genomeCoverageFile, 1, nchar(genomeCoverageFile) - 4), ".pdf", sep="")
# pdf(file, height=7, width=7)
#
# plot(y=genomeCoverage$MeanDepth, x=genomeCoverage$POS, type="o",
#      xlim=c(1, MbovisSize), ylim=c(0, max(genomeCoverage$MeanDepth)),
#      bty="n", ylab="Average Read Depth", xlab="Genome Position", las=1,
#      main="Genome Coverage Across Isolates")
#
# dev.off()
#############
# FUNCTIONS #
#############
#' Reduce isolate IDs to their first two underscore-delimited parts.
#'
#' e.g. "WB_12_extra_info" -> "WB_12". An ID with no underscore yields an
#' "NA" second part (e.g. "solo" -> "solo_NA"), matching the original
#' behaviour of paste() on a missing element.
#'
#' @param column Character vector of isolate IDs.
#' @return Character vector of the same length as `column`.
parseIsolateColumn <- function(column){
  # vapply is type-safe and avoids growing a vector element-by-element in
  # a loop; it also handles the zero-length input that the original
  # 1:length(column) loop crashed on.
  vapply(column, function(id) {
    parts <- strsplit(id, split = "_")[[1]]
    paste(parts[1], "_", parts[2], sep = "")
  }, character(1), USE.NAMES = FALSE)
}
|
4bf3cd3a99e44c7bb5232e850f0f6af67096e0b0
|
7c5d573c8ff95422259654a05dcd9c23f79ae7d6
|
/first/8.R
|
8206906a330b75c0d58ee2dac440f137aea7bd7c
|
[] |
no_license
|
projBaseball/projBaseball
|
ca3b3d15b75a1cfb8d93a9aed5f95cd5d1dba343
|
9ce1360acb7a3d8499f920341ab6a3bfc1ff2ed8
|
refs/heads/master
| 2020-06-25T11:34:24.571999
| 2016-11-23T22:10:14
| 2016-11-23T22:10:14
| 74,617,393
| 0
| 0
| null | null | null | null |
WINDOWS-1252
|
R
| false
| false
| 917
|
r
|
8.R
|
# 8.R -- fit a linear regression of baseball players' annual salary on the
# remaining numeric columns and plot observed vs. fitted values.
# NOTE(review): the column name below renders as mojibake ("¿¬ºÀ"); it is
# presumably the Korean word for annual salary read under the wrong
# encoding. The bytes are kept exactly as-is so column lookup still works
# against the CSV -- confirm the file's encoding before renaming.
baseballAnnualIncome <- read.csv("C:/Users/stories2/Documents/RProject/baseball.csv")
head(baseballAnnualIncome)
# drop the first three (character/identifier) columns
baseballAnnualIncomeWithNoCharacter <- baseballAnnualIncome[, -c(1:3)]
head(baseballAnnualIncomeWithNoCharacter)
summary(baseballAnnualIncomeWithNoCharacter)
dim(baseballAnnualIncomeWithNoCharacter)
# random 70/30 train/test split of the row indices
trainDataIndex <- sample(1:dim(baseballAnnualIncomeWithNoCharacter)[1], 0.7 * dim(baseballAnnualIncomeWithNoCharacter)[1])
trainDataIndex
trainDataSet <- baseballAnnualIncomeWithNoCharacter[trainDataIndex, ]
testDataSet <- baseballAnnualIncomeWithNoCharacter[-trainDataIndex, ]
head(trainDataSet)
head(testDataSet)
head(trainDataSet$¿¬ºÀ)
class(trainDataSet$¿¬ºÀ)
str(trainDataSet)
# coerce the response to numeric before fitting
trainDataSet$¿¬ºÀ <- as.numeric(trainDataSet$¿¬ºÀ)
# full model: salary regressed on every other column
fullModel <- lm(¿¬ºÀ ~ ., data = trainDataSet)
fullModel
summary(fullModel)
# observed vs. fitted; the dotted y = x line marks perfect prediction
plot(trainDataSet$¿¬ºÀ, fitted(fullModel))
abline(0, 1, lty = 3)
|
35ed6b0a0a1281a6747b65f6a9bc2c0739d883e7
|
d7374b02d320137517b021ab7f96a34f4b917c3b
|
/src/dhsm.R
|
a8b5ee5297ef372b826ee8e236ecdbb544fede00
|
[
"MIT"
] |
permissive
|
ApexRMS/stsimdmult
|
dd4d751b9fdda67e4ddacd0ec32b967281e9a1f8
|
4b004214698254f11a809fbffaf6bf837086a9e0
|
refs/heads/master
| 2021-06-11T07:10:03.287743
| 2021-04-13T13:54:49
| 2021-04-13T13:54:49
| 166,886,271
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,615
|
r
|
dhsm.R
|
# *******************************************************************
# dhsm.R
#
# Developed by ApexRMS
# Last edited: 2014-12-12
# *******************************************************************
#
# This script should be called by the Syncrosim application, using a Sysem.Diagnostics.Process call ( same as gdal_translate). It can also
# be called from the Command line. The following assumptions are made:
# - The 1st command line argument is the absolute filename of the input State Attribute Raster. Use "'s if the filename contains spaces.
# - The Transistion Group CSV file is located in the same directory as the input file
# - Output files will be created in the same directory as the input file
# Example usage:
# CMD>RScript habSuit.R "D:\ApexRMS\Dynamic Habitat Suitability\Testdata\It0001-Ts0001-sa-446.tif"
library(raster)
# Expecting a command line argument specifying the full path of input raster file.
args <- commandArgs(trailingOnly = TRUE)
if (length(args) == 0) {
stop('Expected command line value specifying full input filename.')
}
inpFilename = args[1]
if (!(file.exists(inpFilename))){
stop(sprintf("Specified input filename '%s' does not exist",inpFilename))
}
#Test File:
#inpFilename = "D:/ApexRMS/Dynamic Habitat Suitability/Testdata/It0001-Ts0001-sa-446.tif"
# Figure out the input files directory. For now we're going to expect all files interations take place there.
baseName = dirname(inpFilename)
# Load the specified State Attribute raster file
sa <- raster(inpFilename)
# Load the Transition Group definition file ( exported by SSIM)
tgFilename<-file.path(baseName,"transgroup.csv")
if(!(file.exists(tgFilename))){
stop('The expected Transition Group file does not exist in the same directory as the input file')
}
tg <- read.csv( tgFilename)
# Loop through the transition groups and create a raster for each
for(id in tg$ID){
# Clone the input State Attribute raster, to get basic raster structure
transMult <- sa
# For test purposes, set values less that mean to False, True otherwise
#TEST CODE START
vals = sa[]
tmMean = mean(vals,na.rm = TRUE)
transMult[] = vals > tmMean
#TEST CODE END
# INSERT UNR Code here, replacing TEST CODE above
# Save to new raster.
#DEVNOTE: For some reason, the creation time is a little off in the resultant tif files.
outputFname = sprintf("dhsm-tg-%d.tif", id)
fullName = file.path(baseName,outputFname)
rf<-writeRaster(transMult, fullName, format="GTiff", overwrite=TRUE)
}
print(sprintf("Successfully completed processing State Attribute file '%s' !",inpFilename))
|
46f4401e2b5a1bec3b7170f1cfc9634f578ed112
|
5692c079b2deb1adc3631879db048fc5e5b1203f
|
/bedgraph2dmr_master.R
|
b6b3062a7aa99ba09848e45000b9dc6059dd124b
|
[] |
no_license
|
rcquan/bedgraph2dmr
|
83c40bf964e75b0e56939fa47bb8790661c9247d
|
674f2570ccb0a3c77834a16c6e78b856564f7634
|
refs/heads/master
| 2021-01-10T20:58:09.080273
| 2015-05-17T07:15:14
| 2015-05-17T07:15:14
| 26,092,790
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,578
|
r
|
bedgraph2dmr_master.R
|
# source("http://bioconductor.org/biocLite.R")
# biocLite("bsseq")
library(bsseq)
library(stringr)
source("R/bedgraph2dmr_helper.R")
## creates "results" directory if it does not exist
dir.create("results")
dir.create("results/tables")
dir.create("results/plots")
## read bedgraph files
BSmooth_obj <- read_bismark()
## apply smoothing algorithm
BSmooth_obj <- BSmooth(BSmooth_obj,
ns = 20,
h = 80,
maxGap = 100,
mc.cores = 2)
## assign labels
sampleNames <- BSmooth_obj@colData@rownames
normal <- sampleNames[seq(1, length(sampleNames) - 1, 2)]
tumor <- sampleNames[seq(2, length(sampleNames), 2)]
## fisher tests
fTests <- fisherTests(BSmooth_obj,
group1 = normal,
group2 = tumor)
fTests <- fTests$lookup
write.csv(fTests, "results/fTests.csv")
# define groups and set cutoff for coverage
BSmooth_subset <- subset_by_type(BSmooth_obj,
min_cov = 300)
BSmooth_tstat_obj <- get_tstat(BSmooth_subset,
est_var = "group2") # other options: "same", "paired"
export_tstat_data(BSmooth_tstat_obj)
export_dmr_data(BSmooth_subset,
BSmooth_tstat_obj,
# settings for dmrFinder
FDR = 0.05,
max_gap = 100,
# subsetting DMR results
cg_num = 1,
mean_diff = 0.1,
# settings for plots
batch_num = c(1,2),
min_cov = 300)
|
eab7b408bead73225e274056c0e5b5eca56ddf07
|
8ef521bc632f63d4e0de0b16edbe38d1211869f0
|
/code/Figure3/corshrink_toeplitz.R
|
c1a69efca8c9850df7de9c4a482ca02ceb7de719
|
[] |
no_license
|
kkdey/CorShrink-pages
|
d8990ce4f85d69b005f288895619459199c4a8ec
|
3fa5ad101c1f70075870ec070a47bcb87bb1e8e6
|
refs/heads/master
| 2021-07-14T19:58:50.122329
| 2018-09-16T01:27:44
| 2018-09-16T01:27:44
| 111,842,928
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,511
|
r
|
corshrink_toeplitz.R
|
############# CorShrink on Toeplitz matrices ######################
library(CorShrink)
library(glasso)
library(corpcor)
library(psych)
library(Matrix)
n <- 50
P <- 100
NUM_SIM <- 5
#' Simulate Gaussian data with a banded Toeplitz correlation structure.
#'
#' Sigma[i, j] = max(1 - 0.1 * |i - j|, 0): correlations decay linearly
#' with lag and vanish beyond |i - j| >= 10. An n x P multivariate normal
#' sample is drawn and split at random into equal train/test halves.
#'
#' @param n Number of observations (assumed even; n/2 rows per half).
#' @param P Number of variables.
#' @return list(Xtrain, Xtest, Sigma) with (n/2) x P matrices and the
#'   P x P population covariance/correlation matrix.
DM_toeplitz <- function(n, P){
  library("MASS")  # for mvrnorm
  # random half/half train-test split of the row indices
  # (seq_len replaces the redundant seq(1:n) of the original)
  index1 <- sort(sample(seq_len(n), n / 2))
  index2 <- seq_len(n)[-index1]
  # Vectorized Toeplitz construction; replaces the original O(P^2)
  # element-by-element double loop with identical values.
  Sigma <- pmax(1 - 0.1 * abs(outer(seq_len(P), seq_len(P), "-")), 0)
  data <- mvrnorm(n, rep(0, P), Sigma)
  Xtrain <- data[index1, ]
  Xtest <- data[index2, ]
  # (the original computed Omega = solve(Sigma) but never returned or used
  # it; that dead work is dropped here)
  list(Xtrain = Xtrain, Xtest = Xtest, Sigma = Sigma)
}
# Simulation loop: for each replicate, draw Toeplitz data, estimate the
# correlation matrix with several shrinkage methods (sample, corpcor,
# CorShrink, PDSCE, glasso at six penalties), and record 1 minus the
# normalized-trace similarity to the true correlation matrix (smaller is
# better; 0 means proportional to the truth).
frob_vals <- matrix(0, NUM_SIM, 10)
for(m in 1:NUM_SIM){
ll <- DM_toeplitz(n=n, P=P)
data <- rbind(ll$Xtrain, ll$Xtest)
Sigma <- ll$Sigma
corSigma <- cov2cor(Sigma)
### GLASSO
# NOTE(review): `nr` is never used, and rho[1] == rho[2] (1e-02 and 0.01
# are the same value) -- probably one of them was meant to differ.
nr <- 100
rho <- c(1e-02, 0.01, 1, 5, 10, 50)
a_list <- list()
# NOTE(review): `S` is not defined anywhere in this script; it is
# presumably the sample covariance/correlation of `data`
# (e.g. cov(data)) -- confirm before running, this errors as written.
for(i in 1:length(rho)){
a_list[[i]] <- glasso::glasso(S,rho[i])
}
## Strimmer Shafer
strimmer_sample <- corpcor::cor.shrink(data)
## CorShrink
cov_sample_ML <- CorShrinkData(data, sd_boot = FALSE,
ash.control = list())
pdsoft_sample <- PDSCE::pdsoft.cv(data, tolin = 1e-04, tolout = 1e-04)
# frob_S <- mean(as.matrix(cov2cor(S)) - as.matrix(cov2cor(corSigma)))^2
frob_S <- 1 - (tr(as.matrix(cov2cor(S)%*%cov2cor(corSigma))))/(norm(cov2cor(S), type = "F")* norm(corSigma, type = "F"))
# frob_glasso <- mean(as.matrix(cov2cor(a$w)) -as.matrix(cov2cor(corSigma)))^2
# frob_strimmer <- mean(as.matrix(cov2cor(strimmer_sample)) - as.matrix(cov2cor(corSigma)))^2
frob_strimmer <- 1 - (tr(as.matrix(cov2cor(strimmer_sample[1:P,1:P])%*%cov2cor(corSigma))))/(norm(cov2cor(strimmer_sample[1:P,1:P]), type = "F")* norm(corSigma, type = "F"))
# frob_corshrink <- mean(as.matrix(cov2cor(cov_sample_ML$ash_cor_PD)) - as.matrix(cov2cor(corSigma)))^2
frob_corshrink <- 1 - (tr(as.matrix(cov2cor(cov_sample_ML$cor)%*%cov2cor(corSigma))))/(norm(cov2cor(cov_sample_ML$cor), type = "F")* norm(corSigma, type = "F"))
frob_pdsoft <- 1 - (tr(as.matrix(cov2cor(pdsoft_sample$sigma)%*%cov2cor(corSigma))))/(norm(cov2cor(pdsoft_sample$sigma), type = "F")* norm(corSigma, type = "F"))
frob_glasso_1 <- 1 - (tr(as.matrix(cov2cor(a_list[[1]]$w)%*%cov2cor(corSigma))))/(norm(cov2cor(a_list[[1]]$w), type = "F")* norm(corSigma, type = "F"))
frob_glasso_2 <- 1 - (tr(as.matrix(cov2cor(a_list[[2]]$w)%*%cov2cor(corSigma))))/(norm(cov2cor(a_list[[2]]$w), type = "F")* norm(corSigma, type = "F"))
frob_glasso_3 <- 1 - (tr(as.matrix(cov2cor(a_list[[3]]$w)%*%cov2cor(corSigma))))/(norm(cov2cor(a_list[[3]]$w), type = "F")* norm(corSigma, type = "F"))
frob_glasso_4 <- 1 - (tr(as.matrix(cov2cor(a_list[[4]]$w)%*%cov2cor(corSigma))))/(norm(cov2cor(a_list[[4]]$w), type = "F")* norm(corSigma, type = "F"))
frob_glasso_5 <- 1 - (tr(as.matrix(cov2cor(a_list[[5]]$w)%*%cov2cor(corSigma))))/(norm(cov2cor(a_list[[5]]$w), type = "F")* norm(corSigma, type = "F"))
# NOTE(review): the denominator below uses a_list[[5]]$w while the
# numerator uses a_list[[6]]$w -- this looks like a copy-paste slip;
# the denominator was presumably meant to be a_list[[6]]$w.
frob_glasso_6 <- 1 - (tr(as.matrix(cov2cor(a_list[[6]]$w)%*%cov2cor(corSigma))))/(norm(cov2cor(a_list[[5]]$w), type = "F")* norm(corSigma, type = "F"))
frob_vals[m, ] <- c(frob_S, frob_strimmer, frob_corshrink, frob_pdsoft,
frob_glasso_1, frob_glasso_2, frob_glasso_3, frob_glasso_4,
frob_glasso_5, frob_glasso_6)
cat("We are at simulation", m, "\n")
}
save(frob_vals, file = paste0("toeplitz_cmd_n_", n, "_P_", P, "_results.rda"))
|
36eec0fbcca552c25876e0e5ff650c6a54eb9f4b
|
a7ec8ebd0a459a2eb96ef457dce697ee69a7ed6c
|
/ui.R
|
4031b1b44c814a52c01673238a01a283a8fb18d6
|
[] |
no_license
|
ccdatatraits/DataScience_StockPredictor
|
8cc8170f0b055e18318e380480c8114b65450568
|
9d055d8f2c210fba6bc5648d3075501ee4c6ce4f
|
refs/heads/master
| 2020-12-18T11:55:51.489777
| 2016-08-16T06:26:27
| 2016-08-16T06:26:27
| 65,793,425
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,198
|
r
|
ui.R
|
# This is the user-interface definition of a Shiny web application
# for Share Price Predictor Application.
# The output ids referenced here (inputValue, distLineChartPlot,
# prediction) are rendered by the matching server.R.
shinyUI(fluidPage(
# Application title
titlePanel("Share Price Predictor Application"),
# Sidebar with a text input for name of share (Examples provided)
sidebarLayout(
sidebarPanel(
# 'share' is read by the server as input$share
textInput('share', 'Share Symbol', ''),
helpText('Refer http://www.nasdaq.com/markets/most-active.aspx',
'to find any some most active shares'),
# submitButton defers all reactivity until the user presses Submit
submitButton('Submit')
),
# Main Panel consisting of all outputs
mainPanel(
helpText('Step (1): Enter Share Symbol'),
helpText('Step (2): Press Submit'),
helpText('Step (3): Check Historical results as well as future (year from today) predicted value'),
helpText('Note: If you enter an invalid input, then a straight share price line (default) as well as an error message will be shown'),
h4('Input'),
verbatimTextOutput("inputValue"),
# Show a line chart plot of the share price
plotOutput("distLineChartPlot"),
helpText('Plot shows close (closing price) value for that date'),
h4(verbatimTextOutput("prediction"))
)
)
))
|
fbb1f0f7ff262139708e5b1e0aba9eb98c78d195
|
ee0fe0515a9acb3098ceb82c5615cb2a4e92c3df
|
/CacheMatrix/cachematrix.R
|
644d7dfcd1234363baa38a634ce03dd31cf8fe41
|
[] |
no_license
|
ChrisMuir/coursera_r_programming
|
be0559c20459247fba8bdab8d41be8c07ba2e87c
|
e73cd414c399d01ddcde0426ec37daedf62955d6
|
refs/heads/master
| 2021-05-30T12:21:33.892644
| 2016-01-12T19:45:33
| 2016-01-12T19:45:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,965
|
r
|
cachematrix.R
|
## Build a matrix wrapper whose inverse can be cached.
##
## Returns a list of four closures sharing the enclosed matrix `x` and its
## cached inverse:
##   set(y)        - replace the stored matrix and drop any cached inverse
##   get()         - retrieve the stored matrix
##   setinverse(s) - store a computed inverse in the cache
##   getinverse()  - retrieve the cached inverse (NULL until computed)
##
## Example:
##   a <- makeCacheMatrix(); a$set(matrix(1:4, 2, 2))
makeCacheMatrix <- function(x = matrix()) {
  cached <- NULL
  get <- function() {
    x
  }
  set <- function(y) {
    # a new matrix invalidates whatever inverse was cached before
    cached <<- NULL
    x <<- y
  }
  getinverse <- function() {
    cached
  }
  setinverse <- function(solve) {
    cached <<- solve
  }
  list(set = set, get = get, setinverse = setinverse, getinverse = getinverse)
}
## Return the inverse of the matrix held in a makeCacheMatrix object `x`.
##
## The inverse is computed with solve() only on the first call and stored
## back into the object via x$setinverse(); every later call serves the
## cached copy (announced with a "getting cached data" message) instead of
## recomputing. Any extra arguments are forwarded to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getinverse()
  if (is.null(cached)) {
    # cache miss: compute, then remember for next time
    cached <- solve(x$get(), ...)
    x$setinverse(cached)
  } else {
    message("getting cached data")
  }
  cached
}
## Going back to the example from above, the command for cacheSolve would look like this:
## cacheSolve(a)
##
## Which would result in the inverse of our example matrix being returned:
## [,1] [,2]
## [1,] -2 1.5
## [2,] 1 -0.5
##
|
4d7b2cdaa91dc465f21ffa18c97a209e60de2721
|
ea94437e329e7dcf55549559e384f4cccdecc76c
|
/Self_ISLR/Chap4.R
|
e4a351046a21f0daea3bfc3b9020c2e08fb0a273
|
[] |
no_license
|
nahida47/student_projects
|
c8654c37ac3b275b2e6ca88b7cad9963365f219f
|
e7b77b3e8cce2e81d656b25ea58c0e6175baf407
|
refs/heads/master
| 2022-11-24T19:43:42.101847
| 2018-02-16T18:33:12
| 2018-02-16T18:33:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,455
|
r
|
Chap4.R
|
# Attach every package (and source the local plotting helpers) used in
# this chapter, then announce completion -- similar to a notebook
# "%load"-style convenience magic.
LoadLibraries <- function() {
  library(ISLR)
  library(MASS)
  library(class)
  source('E:/OneDrive/Github/R/Libs/plot_libs.R')
  print(" The libraries and functions have been loaded .")
}
# Chapter 4 lab and applied exercises (ISLR): logistic regression, LDA,
# QDA and KNN on the Smarket, Caravan, Weekly and Auto data sets.
# NOTE(review): attach() is used for each data set in turn; columns from
# an earlier attach (e.g. Direction) can be masked by a later one.
# Load library
LoadLibraries()
# Practice
names(Smarket)
pairs(Smarket) # not clear
plot_heatmap(cor(Smarket[, -9])) # Better
attach(Smarket)
plot(Volume)
glm.fit=glm(Direction~Lag1+Lag2+Lag3+Lag4+Lag5+Volume,
data=Smarket ,family =binomial)
summary(glm.fit) # All values are large
glm.probs = predict(glm.fit, type="response")
contrasts(Direction)
# NOTE(review): the labels "Down " / " Up" carry stray spaces (kept as-is;
# they only affect the printed table headings).
glm.pred = rep("Down ", 1250)
glm.pred[glm.probs > .5]=" Up"
table(glm.pred, Direction)
# Add hold out: train on pre-2005, evaluate on 2005
train = (Year < 2005)
Smarket.2005 = Smarket[!train, ]
Direction.2005 = Direction[!train]
glm.fit = glm(Direction ~ Lag1 + Lag2 + Lag3 + Lag4 + Lag5 + Volume,
data=Smarket, family=binomial, subset=train)
glm.probs = predict(glm.fit, Smarket.2005, type='response')
glm.pred = rep('Down', 252)
glm.pred[glm.probs > 0.5] = "Up"
table(glm.pred, Direction.2005)
mean(glm.pred!=Direction.2005)
# Use less predictors
glm.fit=glm(Direction ~ Lag1 + Lag2,
data=Smarket,
family =binomial,
subset =train)
# LDA Part
lda.fit = lda(Direction~Lag1+Lag2, data=Smarket, subset=train)
# QDA
qda.fit = qda(Direction~Lag1+Lag2, data=Smarket, subset=train)
# KNN
library(class)
train.X = cbind(Lag1 ,Lag2)[train ,]
test.X = cbind (Lag1 ,Lag2)[!train ,]
train.Direction = Direction [train]
set.seed(1)
knn.pred = knn(train.X, test.X, train.Direction, k=3)
# On Caravan: standardize predictors so KNN distances are comparable
attach(Caravan)
standardized.X = scale(Caravan[, -86])
test = 1:1000
train.X = standardized.X[-test, ]
test.X = standardized.X[test, ]
train.Y = Purchase[-test]
test.Y = Purchase[test]
set.seed(1)
knn.pred = knn(train.X, test.X, train.Y, k=5)
mean(test.Y != knn.pred)
table(knn.pred, test.Y)
glm.fit = glm(Purchase~., data= Caravan, family=binomial, subset=-test)
glm.probs = predict(glm.fit ,Caravan[test ,], type="response")
glm.pred = rep("No", 1000)
glm.pred[glm.probs > 0.5] = "Yes"
table(glm.pred, test.Y)
# lower the threshold to catch more buyers
glm.pred[glm.probs > 0.25] = "Yes"
table(glm.pred, test.Y)
# Exercise (Applied)
# 10
# (a)
# Only year and volume have some relationship.
attach(Weekly)
summary(Weekly)
plot_heatmap(cor(Weekly[, -9]))
# (b)
# Only Lag2
glm.fit = glm(Direction~Lag1+Lag2+Lag3+Lag4+Lag5+Volume, data=Weekly, family=binomial)
summary(glm.fit)
# (c)
# generate an overwhelming number of ups
glm.probs = predict(glm.fit, type="response")
glm.pred = rep("Down", length(glm.probs))
glm.pred[glm.probs > 0.5] = "Up"
table(glm.pred, Direction)
# (d)
mask = (Year < 2009)
Weekly.test = Weekly[!mask, ]
glm.fit = glm(Direction~Lag2, data=Weekly, family=binomial, subset=mask)
glm.probs = predict(glm.fit, Weekly.test, type="response")
glm.pred = rep("Down", length(glm.probs))
glm.pred[glm.probs > 0.5] = "Up"
Direction.test = Direction[!mask]
table(glm.pred, Direction.test)
# (e)
# NOTE(review): lda() has no 'family' argument; it is silently swallowed
# by '...' here.
lda.fit = lda(Direction~Lag2, data=Weekly, family=binomial, subset=mask)
lda.pred = predict(lda.fit, Weekly.test)
table(lda.pred$class, Direction.test)
# (f)
qda.fit = qda(Direction~Lag2, data=Weekly, subset=mask)
qda.class = predict(qda.fit, Weekly.test)$class
table(qda.class, Direction.test)
# (g)
train.X = as.matrix(Lag2[mask])
test.X = as.matrix(Lag2[!mask])
train.Direction = Direction[mask]
set.seed(1)
knn.pred = knn(train.X, test.X, train.Direction, k = 1)
table(knn.pred, Direction.test)
# (h)
# Logistic and LDA.
# (i)
# Nah.
# 11.
# (a)
# binary response: 1 when mpg is above the median, else 0
attach(Auto)
mpg01 = rep(0, length(mpg))
mpg01[mpg > median(mpg)] = 1
auto = data.frame(Auto, mpg01)
# (b)
# all of them
cormat = cor(auto[, -9])
plot_heatmap(cormat)
# (c)
# split: even model years train, odd years test
train = (year %% 2 == 0)
test = !train
auto.train = auto[train,]
auto.test = auto[test,]
mpg01.test = mpg01[test]
# (d)
# 12.64%
lda.fit = lda(mpg01~cylinders+weight+displacement+horsepower,
data=Auto, subset=train)
lda.pred = predict(lda.fit, auto.test)
table(lda.pred$class, mpg01.test)
mean(lda.pred$class != mpg01.test)
# (e)
# 13.19%
qda.fit = qda(mpg01~cylinders+weight+displacement+horsepower,
data=Auto, subset=train)
qda.pred = predict(qda.fit, auto.test)
mean(qda.pred$class != mpg01.test)
# (f)
# 12.09%
glm.fit = glm(mpg01~cylinders+weight+displacement+horsepower,
data=Auto,
family=binomial,
subset=train)
glm.probs = predict(glm.fit, auto.test, type="response")
glm.pred = rep(0, length(glm.probs))
glm.pred[glm.probs > 0.5] = 1
mean(glm.pred != mpg01.test)
# (g)
library(class)
train.X = cbind(cylinders, weight, displacement, horsepower)[train,]
test.X = cbind(cylinders, weight, displacement, horsepower)[test,]
train.mpg01 = mpg01[train]
set.seed(1)
# KNN(k=1)
# 15.38%
knn.pred = knn(train.X, test.X, train.mpg01, k=1)
mean(knn.pred != mpg01.test)
# KNN(k=3)
# 13.74%
knn.pred = knn(train.X, test.X, train.mpg01, k=3)
mean(knn.pred != mpg01.test)
# KNN(k=10)
# 16.48%
knn.pred = knn(train.X, test.X, train.mpg01, k=10)
mean(knn.pred != mpg01.test)
# 12.
# (a)
# Print 2 raised to the 3rd power (exercise 12a).
power <- function() {
  cube_of_two <- 2^3
  print(cube_of_two)
}
# (b)
# Print x raised to the power a (exercise 12b).
power2 <- function(x, a) {
  raised <- x^a
  print(raised)
}
# (c)
# 1000, 2.25e15, 2248091
# (d)
# Return (rather than print) x raised to the power a (exercise 12d).
# Vectorized in x, so it also works for plotting whole sequences.
power3 <- function(x, a) {
  x^a
}
# (e)
# Log-log plot of x^2 versus x: a power law appears as a straight line.
x = 1:10
plot(x, power3(x, 2), log = "xy", ylab = "Log of y = x^2", xlab = "Log of x",
     main = "Log of x^2 versus Log of x")
# (f)
# Plot x against x^a (exercise 12f).
# power3(x, a) is deliberately kept inline in the plot() call so the
# default y-axis label reads "power3(x, a)".
plotpower <- function(x, a) {
plot(x, power3(x, a))
}
# 13.
# Repetitive.
|
77739aee2fafc26151843a1cb746d3bd1219eb0a
|
aa435daf8de56291db265de4184cbb86af2d26ab
|
/man/area.Rd
|
3675d6ab11e0b240d692b273a73e8484c7fc2d30
|
[] |
no_license
|
fabriziomaturo/BioFTF
|
a87ad186283e99853d4443f7e1dc7fd3d7b20025
|
ddfc94136187689a3c98dd5e38744739e57f2da7
|
refs/heads/master
| 2021-08-14T19:03:17.164997
| 2017-11-16T14:39:44
| 2017-11-16T14:39:44
| 110,982,087
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,765
|
rd
|
area.Rd
|
\name{area}
\alias{area}
\title{Compute the area under the beta profile for each community.}
\usage{
area(x,n)
}
\arguments{
\item{x}{A data matrix with the abundance of the species (the rows are the communities and the columns indicate the species living in a community). The user can freely choose to use the absolute or relative frequencies.}
\item{n}{The number of points of the domain [-1,1] that the user wants to consider in computing the functional tools. It is suggested to choose a number in the interval [3,10000] because a value of less than 3 has little sense; indeed, the major indices of biodiversity are the richness index, the index of Shannon and the index of Simpson (that we get for beta equal to -1, 0, +1, respectively). On the other hand, a value greater than 10000 is not very interesting because with a value of approximately 100 we already have a good approximation. The examples provided in this package always divide the domain into 20 intervals. The default value is n=20.}
}
\description{
This function provides the area under the beta profile for each community. In an ecological framework, the area under the profiles can be used to assess biodiversity because it can rank communities, even if they have different numbers of species. Indeed, in a case of maximum dominance, the area is extremely low; while in a case of evenness, it increases. The main advantage of using the area is that the ordering among communities can be investigated without the analysis of a graph. It provides a scalar measure of biodiversity considering the whole domain. This is not possible with the classical indices.
}
\examples{
x=matrix(c(0.3,0.5,0.1,0.05,0.05,0.25,0.25,0.25,0.25,0,0.35,0.3,0.35,0,0),3,5)
area(x,20)
}
|
250039d43440b68ec2eb8b7d3bd09856fb26aba9
|
87e0e27810347db6c2d27846fcaf266eb0c5d8c3
|
/man/build_county_crosswalk.Rd
|
d315889064426cef30091bbe9c6b417c86071c4a
|
[] |
no_license
|
ucsf-bhhi/coc-data
|
bc245f1d44fef05fdba7976b3ebf570b788a7c99
|
b215a8731c1c0ec2e9cfce1732eec99f058d086e
|
refs/heads/main
| 2023-09-01T17:14:11.053770
| 2021-10-13T22:38:49
| 2021-10-13T22:38:49
| 379,758,960
| 1
| 0
| null | 2021-10-13T19:21:22
| 2021-06-24T00:11:47
|
R
|
UTF-8
|
R
| false
| true
| 1,608
|
rd
|
build_county_crosswalk.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/coc_county_tract_crosswalk.R
\name{build_county_crosswalk}
\alias{build_county_crosswalk}
\title{Creates the county to CoC crosswalk}
\usage{
build_county_crosswalk(tract_crosswalk)
}
\arguments{
\item{tract_crosswalk}{A tract to CoC crosswalk}
}
\value{
A data frame:
\itemize{
\item \code{county_fips}: County FIPS code
\item \code{coc_number}: CoC number
\item \code{coc_name}: CoC name
\item \code{year}: Year
\item \code{coc_pop}: CoC population
\item \code{coc_renting_hh}: Number of renter households in the CoC
\item \code{county_pop_in_coc}: Number of county residents in the CoC
\item \code{county_renting_hh_in_coc}: Number of county renting households in the CoC
\item \code{county_pop}: County population
\item \code{county_renting_hh}: Number of renter households in the county
\item \code{pct_coc_pop_from_county}: Share of the CoC population from the county
\item \code{pct_coc_renting_hh_from_county}: Share of the CoC renter households from the county
\item \code{pct_county_pop_in_coc}: Share of the county population in the CoC
\item \code{pct_county_renting_hh_in_coc}: Share of the county renter households in the CoC
}
}
\description{
Calculates the CoC population, share of the CoC coming from each tract, and share of each CoC in each county.
Both for the total population and the population under the poverty line.
}
\seealso{
\code{\link[=fetch_tract_data]{fetch_tract_data()}}, \code{\link[=match_tracts_to_cocs]{match_tracts_to_cocs()}}, \code{\link[=build_tract_crosswalk]{build_tract_crosswalk()}}
}
|
0bb04075ef0eb0943bb9b93f218cba5fa5d80a44
|
22a92f4df409056f130b3c12ce10de50cf07f131
|
/complete.R
|
417f87d9638d4fafb00e8f7afbce769d7ea9f82b
|
[] |
no_license
|
Crixo24/R-coursera
|
d58350960b2158b0771c016c17c1b001fecf4507
|
4479c59030d7e51f8bc0b15bd0315a52c13c98ae
|
refs/heads/master
| 2020-12-24T17:07:55.812917
| 2015-07-25T18:36:20
| 2015-07-25T18:36:20
| 39,697,730
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 999
|
r
|
complete.R
|
# Count the fully observed (non-NA sulfate AND nitrate) rows in each
# monitor file.
#
# directory: path prefix where the monitor CSV files live. File names are
#            built by direct string concatenation, so it must end with "/"
#            (this matches the original calling convention).
# ids:       vector of monitor ids; id N maps to file sprintf("%03d.csv", N),
#            i.e. zero-padded to three digits.
#
# Returns a data.frame with one row per id:
#   id   - monitor id as read from column 4 of the file
#   nobs - number of rows where both sulfate and nitrate are observed
complete <- function(directory, ids) {
  # Preallocate instead of growing vectors inside the loop; `id` is kept as
  # a list so unlist() preserves whatever type column 4 was read as.
  id <- vector("list", length(ids))
  nobs <- integer(length(ids))
  for (f in seq_along(ids)) {
    # sprintf("%03d", ...) replaces the original hand-rolled zero padding.
    monitor_path <- paste(directory, sprintf("%03d.csv", ids[f]), sep = "")
    # `header=` spelled out; the original relied on the partial match `head=`.
    monitor <- read.csv(file = monitor_path, header = TRUE, sep = ",")
    # A row is complete when neither pollutant measurement is missing;
    # a vectorized sum replaces the original element-by-element loop.
    ok <- !is.na(monitor[["sulfate"]]) & !is.na(monitor[["nitrate"]])
    id[[f]] <- monitor[1, 4]  # column 4 holds the monitor ID in these files
    nobs[f] <- sum(ok)
  }
  data.frame(id = unlist(id), nobs)
}
|
40fdf9366e0af314594ec70c500e6eacea0c8c21
|
ddebce5c535d9e6fc8b0718d5929a52ecf347e63
|
/src/r_code/classify_motions.r
|
1d8f4dec912322af47d26f1beb06c9875c6669e1
|
[
"MIT"
] |
permissive
|
AcidLeroy/OpticalFlow
|
f3c8ded1c96a1dc078a44b0e46f4a396186f70f4
|
382530f4c50045b2430a55ea5b092b1e5bd24392
|
refs/heads/master
| 2021-01-17T07:14:11.467764
| 2016-10-11T02:34:45
| 2016-10-11T02:34:45
| 51,722,318
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,472
|
r
|
classify_motions.r
|
# Clean up all variables and functions:
# NOTE(review): rm(list=ls()) wipes the caller's entire workspace. It is
# tolerable in a standalone analysis script, but avoid it if this file is
# ever source()'d from other code.
rm(list=ls())
library(class)
library(e1071)
# Function definitions:
# Plots a particular column in the vector with a given label name
# dataframe - This is the data frame that contains a list for each respective CDF
# column_name - This is the column for which to plot the box and whiskers for all the values in this class.
# x_label - This is the label for the type of data being plotted in the CDF
# Draw a box-and-whisker summary of one CDF column for every video in
# `dataframe`. Each list element of the column is one per-video CDF vector;
# they are stacked row-wise so boxplot() summarises the spread at each
# point of the CDF.
#
# dataframe   - list of columns; `column_name` must index a list of vectors
# column_name - which CDF column to plot (also used as the plot title)
# x_label     - label describing the quantity the CDF is over
PlotClass <- function(dataframe, column_name, x_label) {
  cdf_list <- dataframe[[column_name]]
  cdf_matrix <- do.call(rbind, cdf_list)
  boxplot(cdf_matrix, use.cols = TRUE, main = column_name,
          xlab = x_label, ylab = "CDF")
}
# Returns all rows that match a particular classification type. I.e. if you want to get
# all values in the data frame that were classified as a 1, you would simply call
# GetClassification(df 1).
# Subset every column of `dataframe` to the rows whose Classification label
# equals `classification`. Works on list-of-lists "data frames": the same
# logical mask is applied to each column, so the list structure and column
# names are preserved.
GetClassification <- function(dataframe, classification) {
  keep <- unlist(lapply(dataframe$Classification,
                        function(label) label == classification))
  lapply(dataframe, function(column) column[keep])
}
# Plot CDFs found in the dataframe.
# Plot, in a 6x2 grid, the per-column CDF box plots of class 1 (left) and
# class 2 (right) so the two motion classes can be compared visually.
# The non-feature columns (Filename, Classification) are skipped.
PlotHistsClasses <- function(VideoHists) {
  class1 <- GetClassification(VideoHists, 1)
  class2 <- GetClassification(VideoHists, 2)
  library(ggplot2)
  par(mfrow = c(6, 2))
  par(mar = c(1, 1, 1, 1))
  skip_cols <- c("Filename", "Classification")
  # Axis labels for the successive feature columns, in order.
  x_labels <- c("x", "y", "angle", "angle", "motion magnitude", "angle")
  feature_cols <- names(VideoHists[!names(VideoHists) %in% skip_cols])
  for (k in seq_along(feature_cols)) {
    PlotClass(class1, feature_cols[k], x_labels[k])
    PlotClass(class2, feature_cols[k], x_labels[k])
  }
}
# Get all columns except for the ones specified in excluded_columns. Returns the modified
# data frame
# Return `df` without the columns named in `excluded_columns`; the order
# and names of the remaining columns are preserved.
GetAllExcept <- function(df, excluded_columns) {
  keep_names <- names(df)[!names(df) %in% excluded_columns]
  df[keep_names]
}
# Combine features of that dataframe into a matrix so that the features
# can easily processed by svm and knn.
# Bind the selected feature columns of `df` into a single numeric matrix so
# the observations can be fed to knn()/svm(). Each row of the intermediate
# cbind() result is flattened with unlist() (the columns may themselves be
# lists of vectors); apply() then returns one column per observation.
CombineFeatures <- function(df, features_to_combine) {
  bound <- do.call(cbind, df[features_to_combine])
  apply(bound, 1, unlist)
}
# Univariate feature selection: keep only the feature columns whose values
# differ significantly between class 1 and class 2.
# For each candidate column, the two classes are compared with a two-sided
# Wilcoxon rank-sum test and the column is kept when p < 0.05; columns whose
# class means are numerically identical are skipped without testing.
# Filename and Classification are always carried through so downstream code
# (plotting, classification) still finds them.
FeatureSelection <- function(VideoHists){
# Selects features that produce different variables:
VideoHists1 = GetClassification(VideoHists, 1)
VideoHists2 = GetClassification(VideoHists, 2)
# Go through and construct only the variables that are different:
ValidVars = c()
# Don't use these variables
no_use = c("Filename", "Classification")
# Loop through all variables except for the ones specified in no_use
for( i in names(VideoHists[!names(VideoHists) %in% no_use])) {
# Compare every variable (flattened across all videos of the class):
x = unlist(VideoHists1[[i]]);
y = unlist(VideoHists2[[i]]);
# Nothing to do if same mean (within 1e-6 tolerance):
if (abs(mean(x)-mean(y)) > 1.0e-6) {
# Compare the two populations:
w <- wilcox.test(x, y, alternative = "two.sided");
if (w$p.value < 0.05) { # 0.02
# They are different.
# Use this variable.
ValidVars = c(ValidVars, i)
}
}
}
# Attach the pass-through (non-feature) variables as well.
ValidVars = c(ValidVars, no_use)
return (VideoHists[ValidVars])
}
# Visualize the selected feature values of the two classes side by side.
# NOTE(review): unlike the other helpers in this file, this function indexes
# VideoHists as a matrix/data frame (dim(), [row, col]) and assumes the LAST
# column holds the class label — confirm the caller passes that layout.
PlotFeatureClasses <- function(VideoHists){
# Visualize the different classes
d<-dim(VideoHists);
NumOfVars <- d[2];
# Split rows by class label (1 vs 2) and keep only the feature columns;
# note seq(1:(NumOfVars-1)) is just 1:(NumOfVars-1).
VideoHists1 <- VideoHists[which(VideoHists[, NumOfVars] == 1), seq(1:(NumOfVars-1))];
VideoHists2 <- VideoHists[which(VideoHists[, NumOfVars] == 2), seq(1:(NumOfVars-1))];
library(ggplot2);
# Two panels side by side, with wide top/bottom margins for the titles.
par(mfrow=c(1,2), mar=c(10, 4, 10, 2) + 0.1);
plot(melt(VideoHists1),
main="1: Selected Features",
xlab="x",
ylab="Values");
plot(melt(VideoHists2),
main="2: Selected Features",
xlab="x",
ylab="Values");
}
# Leave-one-out cross-validated classification of the motion features with
# KNN (k = 3) and a cost-tuned linear SVM. Prints per-iteration progress
# and, at the end, the true labels, the predictions, and a confusion matrix
# for each classifier.
# NOTE(review): knn() returns a factor; assigning it into the numeric
# knnResult vector presumably stores the factor's integer level code
# (1 or 2) — confirm against the confusion-matrix output.
ClassifyFeatures <- function(VideoHists){
NoOfSamples <- length(VideoHists$Classification)
# Flatten the true class labels into a plain vector.
All_cl <- unlist(VideoHists$Classification);
# Preallocate one prediction slot per sample (initialized to 1).
knnResult <- rep(1, times=NoOfSamples);
svmResult <- rep(1, times=NoOfSamples);
# Drop the non-feature columns before building the design matrices.
no_use = c("Filename", "Classification")
features = GetAllExcept(VideoHists, no_use)
# Leave-one-out loop: sample i is held out; the rest train both models.
# NOTE(review): 1:NoOfSamples misbehaves when NoOfSamples == 0.
for(i in 1:NoOfSamples)
{
# Set up training and testing data:
trainData = lapply(features, function(x) x[-i]) # Remove i.
testData = lapply(features, function(x) x[i]) # One left out.
# Flatten the per-sample feature lists into observation-by-feature matrices.
trainData = t(CombineFeatures(trainData, names(trainData)))
testData = t(CombineFeatures(testData, names(testData)))
# Prepare the labels for the training set:
# Optimal: k=1
knnResult[i] <- knn (trainData, testData, All_cl[-i], k=3); # 3
#**** With tuning ****#
# NOTE(review): the empty argument (", ,") appears to skip tune()'s
# optional `data` slot positionally — confirm; naming the arguments
# would be clearer.
tune.out=tune(svm, trainData, All_cl[-i], , kernel="linear", ranges=list(cost=c(0.0001, 0.001, 0.01, 0.1, 1, 5, 10, 100, 10000)) , scale=FALSE)
svmResult[i] <- predict(tune.out$best.model, testData);
#***** Without tuning *****#
# model <- svm(All_cl[-i] ~ ., data=trainData, scale=FALSE);
# svmResult[i] <- predict(model, testData);
cat("SVM result = ", round(svmResult), "\n");
cat("KNN result = ", knnResult, "\n")
}
print(All_cl);
print(knnResult)
print(All_cl);
print(round(svmResult));
cat("Confusion Matrix: knn");
print(table(All_cl, knnResult));
cat("Confusion Matrix: SVM");
print(table(All_cl, round(svmResult)));
}
# Read the table
#VideoHists = read.table("/Users/cody/Repos/OpticalFlow/src/matlab/VideoHistos_Typing_cells.csv", header=F);
# Load the feature-loading helper and install any missing dependencies.
source("load_video_features.r")
list.of.packages <- c("ggplot2", "reshape2", "e1071", "class")
new.packages <- list.of.packages[!(list.of.packages %in% installed.packages()[,"Package"])]
if(length(new.packages)) install.packages(new.packages)
# NOTE(review): absolute, machine-specific path — parameterize before reuse.
#VideoHists = LoadVideoFeatures("/Users/cody/Repos/OpticalFlow/src/matlab/VideoHistos_Typing_cells.csv")
VideoHists = LoadVideoFeatures("/Users/cody/Repos/OpticalFlow/src/aws_scripts/output.csv")
# We want to visualize the CDF plots.
library(reshape2);
# Plot the classes:
PlotHistsClasses(VideoHists)
# Select the features:
ExtractedFeatures <- FeatureSelection(VideoHists)
#ExtractedFeatures <- VideoHists[c("Motion_mag_CDF", "Classification")]
# Plot extracted features:
#PlotFeatureClasses(ExtractedFeatures)
# Classification
# Run leave-one-out KNN and SVM classification on the selected features.
ClassifyFeatures(ExtractedFeatures)
|
023e9acbad872ccde7c99ff25417af480f7ba41e
|
9a4c7b3220f7304f63393cdeb73ed5c680d8c6f3
|
/man/daily2climatol.Rd
|
5dee7f0fe3b467b6d98b4590d7c0b8ce98e6b558
|
[] |
no_license
|
cran/climatol
|
95e7fc1bd1fee03da85676ccdae77b3ffc4d7477
|
0805bcf92e6195a79d4ff60ebf4737c2ee2f5f2f
|
refs/heads/master
| 2023-04-29T06:37:49.658766
| 2023-04-20T15:42:35
| 2023-04-20T15:42:35
| 17,718,557
| 1
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,842
|
rd
|
daily2climatol.Rd
|
\name{daily2climatol}
\alias{daily2climatol}
\title{Convert daily data files to \code{climatol} input format}
\description{
This function can be useful to prepare the \code{climatol} input files when the users have their daily data in per station individual files.
}
\usage{
daily2climatol(stfile, stcol=1:6, datcol=1:4, varcli='VRB', anyi=NA, anyf=NA,
mindat=365, sep=',', dec='.', na.strings='NA', header=TRUE)
}
\arguments{
\item{stfile}{File with file names and station coordinates, codes and names.}
\item{stcol}{Columns in \code{stfile} holding data file names, longitudes,
latitudes, elevations and station codes and names. (Defaults to 1:6. Use 0
for codes and/or names columns if they are missing, and numeric values will
be assigned.)}
\item{datcol}{Columns in data files holding year, month, day, value.}
\item{varcli}{(Short) name of the studied climatic variable.}
\item{anyi}{First year to study (defaults to the first year of available data).}
\item{anyf}{Last year to study (defaults to the last year of available data).}
\item{mindat}{Minimum required number of data per station. (Defaults to 365
daily data.)}
\item{sep}{Field separator in all files, whether data or stations. (Defaults
to white space.)}
\item{dec}{Decimal point. ('.' by default.)}
\item{na.strings}{Strings coding missing data (\code{'NA'} by default).}
\item{header}{Logical value indicating whether input files have a header line
or not. (\code{TRUE} by default.)}
}
\details{
Many users have their daily series in separate files (one per station). This
function can be used to read these daily data files and write the input files
needed by the \code{homogen} function of this \code{climatol} package.
When either station codes or names are missing in the stations file, its
corresponding column must be set to 0. In this case, codes and/or names will be
assigned with numeric values.
Field separator, decimal point and the presence of a header line must be consistent in all files (data files and stations file).
If your files follow the RClimDex convention, you can use the \code{rclimdex2climatol} function instead.
}
\seealso{\code{\link{rclimdex2climatol}}, \code{\link{homogen}}}
\examples{
## Set a temporal working directory and write example input files:
wd <- tempdir()
wd0 <- setwd(wd)
data(climatol_data)
df=cbind(File=c('p064.csv','p084.csv','p082.csv'),SIstations)
write.csv(df,'stations.csv',row.names=FALSE,quote=FALSE)
write.csv(p064.df,'p064.csv',row.names=FALSE,quote=FALSE)
write.csv(p084.df,'p084.csv',row.names=FALSE,quote=FALSE)
write.csv(p082.df,'p082.csv',row.names=FALSE,quote=FALSE)
## Now run the example:
daily2climatol('stations.csv',varcli='RR')
## Return to user's working directory:
setwd(wd0)
## Input and output files can be found in directory:
print(wd)
}
\keyword{manip}
|
5c9018d5b20165f7a1ebcf7e885e6a4ac23e0b44
|
94e5844e76f79c8fb7375401581e33233aafab38
|
/B2_code.R
|
e69c8b7f0c56f416ebdb89efedd5f982e348eba0
|
[] |
no_license
|
jake4599/FINC780_B2
|
5eb9100e785f1fdb743d9bfb42074a602614c4e4
|
2a4e1f5bd52e56b2a2976573d2fa9eab6ba6d0d5
|
refs/heads/master
| 2021-07-01T06:45:27.417733
| 2017-09-21T00:42:13
| 2017-09-21T00:42:13
| 104,282,395
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 18
|
r
|
B2_code.R
|
library("dplyr")
|
d581e662c75c3bcfa8e0b8fd2eb624f037aeb48b
|
74addb8d2690d232427b4915344122ae9d8bc366
|
/man/calibrate_deviations.Rd
|
fca41c8a5bdbb5502850065071e09419f867aba8
|
[] |
no_license
|
hydromethkust/Momocs
|
ddead4ca2fbeeef086a9894825bd1298a96c56c0
|
e365307d4ec11c67667c03f7a217492494b873b9
|
refs/heads/master
| 2021-06-10T19:33:52.251459
| 2016-12-06T23:11:01
| 2016-12-06T23:11:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 4,058
|
rd
|
calibrate_deviations.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/core-outopn-calibrate.R
\name{calibrate_deviations}
\alias{calibrate_deviations}
\title{Quantitative calibration, through deviations, for Out and Opn objects}
\usage{
calibrate_deviations(x, method, id, range, norm.centsize, dist.method,
interpolate.factor, dist.nbpts, plot)
}
\arguments{
\item{x}{and \code{Out} or \code{Opn} object on which to calibrate_deviations}
\item{method}{any method from \code{c('efourier', 'rfourier', 'tfourier')} and
\code{'dfourier'}.}
\item{id}{the shape on which to perform calibrate_deviations}
\item{range}{vector of harmonics (or degree for opoly and npoly on Opn) on which to perform calibrate_deviations.
If not provided, the harmonics corresponding to 0.9, 0.95 and 0.99% of harmonic power
are used.}
\item{norm.centsize}{logical whether to normalize deviation by the centroid size}
\item{dist.method}{a method such as \link{edm_nearest} to calculate deviations}
\item{interpolate.factor}{a numeric to increase the number of points on the original shape (1 by default)}
\item{dist.nbpts}{numeric the number of points to use for deviations calculations}
\item{plot}{logical whether to print the graph (FALSE is you just want the calculations)}
}
\value{
a ggplot object and the full list of intermediate results. See examples.
}
\description{
Calculate deviations from original and reconstructed shapes using a
range of harmonic number.
}
\details{
Note that from version 1.1, the calculation changed and fixed a problem. Before,
the 'best' possible shape was calculated using the highest possible number of harmonics.
This worked well for efourier but not for others (eg rfourier, tfourier) as they
are known to be unstable with high number of harmonics. From now on, Momocs uses
the 'real' shape, as it is (so it must be centered) and uses \link{coo_interpolate}
to produce \code{interpolate.factor} times more coordinates as the shape
has and using the default \code{dist.method}, eg \link{edm_nearest},
the latter finds the euclidean distance, for each point on the reconstructed shape,
the closest point on this interpolated shape. \code{interpolate.factor} being set
to 1 by default, no interpolation will be made in you do not ask for it. Note,
that interpolation to decrease artefactual errors may also be done outside
\code{calibrate_deviations} and will be probably be removed from it
in further versions.
For *poly methods on Opn objects, the deviations are calculated from a degree 12 polynomial.
}
\examples{
data(bot)
calibrate_deviations(bot)
\dontrun{
# on Opn
data(olea)
calibrate_deviations(olea)
# lets customize the ggplot
library(ggplot2)
gg <- calibrate_deviations(bot, id=1:20)$gg
gg + geom_hline(yintercept=c(0.001, 0.005), linetype=3)
gg + labs(col="Number of harmonics", fill="Number of harmonics",
title="Harmonic power") + theme_bw()
gg + coord_polar()
### intermediate results can be accessed eg with:
shp <- hearts[1] \%>\% coo_interpolate(360) \%>\% coo_samplerr(60) \%>\% Out()
calibrate_deviations(shp, id=1, range=1:24, method="efourier") \%$\%
res \%>\% apply(1, mean) \%>\% plot(type="b")
calibrate_deviations(shp, id=1, range=1:24, method="rfourier") \%$\%
res \%>\% apply(1, mean) \%>\% plot(type="b")
calibrate_deviations(shp, id=1, range=1:24, method="tfourier") \%$\%
res \%>\% apply(1, mean) \%>\% plot(type="b")
# ... providing an illustration of the e vs r/t fourier approaches developed in the help page.
### illustration of interpolate.factor
interp <- c(1, 5, 25)
res <- list()
for (i in seq_along(interp))
calibrate_deviations(shp, id=1, range=1:24,
method="tfourier", interpolate.factor=interp[i], plot=FALSE) \%$\%
res \%>\% apply(1, mean) -> res[[i]]
### int_5 is more accurate than no inteprolation
sign(res[[2]] - res[[1]])
### int 25 is more accurate than int_5, etc.
sign(res[[3]] - res[[2]])
}
}
\seealso{
Other calibration: \code{\link{calibrate_harmonicpower}},
\code{\link{calibrate_r2}},
\code{\link{calibrate_reconstructions}}
}
|
86c9ea96c539d0efb26b76a9388a1015e5ae04ed
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/dlm/examples/residuals.dlmFiltered.Rd.R
|
0c659bdab5d45e439d6154aa1ad2edaadadfdb96
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 330
|
r
|
residuals.dlmFiltered.Rd.R
|
# Extracted example from the dlm package: one-step forecast errors.
library(dlm)
### Name: residuals.dlmFiltered
### Title: One-step forecast errors
### Aliases: residuals.dlmFiltered
### Keywords: misc
### ** Examples
## diagnostic plots
# Local level (order-1 polynomial) model for the Nile series with fixed
# observation (dV) and system (dW) variances.
nileMod <- dlmModPoly(1, dV = 15100, dW = 1468)
nileFilt <- dlmFilter(Nile, nileMod)
# One-step forecast errors; sd=FALSE presumably suppresses the standard
# deviations and returns just the residual series.
res <- residuals(nileFilt, sd=FALSE)
qqnorm(res)       # normality check of the forecast errors
tsdiag(nileFilt)  # standard time-series diagnostics
|
c6aabe471f507d4b6500a6e627f580b0146e955b
|
8a190762a44d4ef33fbb8397c7798fabb48433fa
|
/man/get_catalogue.Rd
|
afe78680422633d3537d05157ff5094d3207ad43
|
[
"MIT"
] |
permissive
|
petrbouchal/czso
|
570787c692cc018c52f6da4a0a286ca896dcf3b5
|
e9a9cb4a60f8e760e050dd722d87d255feafb122
|
refs/heads/master
| 2023-09-02T11:43:52.998289
| 2023-08-21T19:20:36
| 2023-08-21T19:20:36
| 235,595,701
| 13
| 2
|
NOASSERTION
| 2021-09-22T07:35:00
| 2020-01-22T14:53:07
|
R
|
UTF-8
|
R
| false
| true
| 333
|
rd
|
get_catalogue.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/core.R
\name{get_catalogue}
\alias{get_catalogue}
\title{Deprecated: use \code{czso_get_catalogue()} instead}
\usage{
get_catalogue()
}
\value{
a tibble
}
\description{
\lifecycle{deprecated}
}
\examples{
# see `czso_get_catalogue()`
}
\keyword{internal}
|
68c8ce00dc7773cf241630a039de41eeec31cee5
|
6b6a836c3316ee2ea29ab70d67e6feb76321893c
|
/04_practice.r
|
38422636f0005476266858392c976ac614b10ead
|
[] |
no_license
|
TAKENOKO129/wang
|
a39648bac1e70d00e0017193e4d1578be9e67ed7
|
67925c100fd633e1cbc9e4ab9fd797e27f6e3c47
|
refs/heads/main
| 2023-01-05T02:08:52.364558
| 2020-11-03T10:06:49
| 2020-11-03T10:06:49
| 307,356,200
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,525
|
r
|
04_practice.r
|
# Vectors ----
8.1:10.9
8.5:1.5
c(1:6,8:0)
# vector(mode, length) creates an "empty" vector of the given mode.
vector("numeric",3)
vector("complex",4)
vector("logical",5)
vector("character",5)
vector("list",5)
# Shorthand constructors for the same thing:
numeric(5)
complex(5)
logical(5)
character(5)
list(5)
# Sequences ----
seq.int(3,12)
seq.int(2.2,3.7)
seq.int(3,12,2)
seq.int(3.2,1.1,-0.1)
# seq_len(0) gives an empty sequence, while 1:0 counts DOWN to c(1, 0).
n <- 0
1:n
seq_len(n)
m <- 5
1:5
seq_len(m)
seq_len(3)
pp <- c("peter","a","you","ok","how")
seq_along(pp)
for(i in seq_along(pp))print(pp[i]) # why do these two seq_along uses look different? (the loop prints one element per iteration)
# Length ----
length(1:120)
length(c(TRUE,FALSE,NULL,NA))
length(TRUE)
sn <- c("you","and","me","beautiful","needs")
length(sn)
nchar(sn)
ppp <- c(1,2,3,4,4,5,6,6,7)
# Assigning to length() truncates the vector ...
length(ppp) <- 3
ppp
# ... or pads it with NA.
length(ppp) <- 11
ppp
# Naming ----
c(apple = 1,banan = 2, yes = 3,4)
c(apple = 1,banan = 2, "kiwi fruit" = 3,4)
x <- 1:4
names(x) <- c("apple","banana","kiwi fruit","")
x
names(x)
names(1:4)
# Indexing vectors ----
x <- (1:5)^2
x
x[c(1,2,3)]
x[c(-2,-4)]
x[c(TRUE,TRUE,FALSE,FALSE)]
names(x) <- c("one","four","nine","sixteen","twentyfive")
x[c("one","nine","twentyfive")]
x[c(1,NA,5)]
x[c(TRUE,FALSE,TRUE,NA,TRUE)]
x[3.9]
x[10]
x[]
which(x>2)
which.max(x)
which.min(x)
1:5 + 1
1:5 + 1:15 # recycling works because the longer length is a multiple of the shorter
# rep(): build vectors by repeating elements ----
rep(1:5,3)
rep(1:5,each=3)
rep(1:5,times=3)
rep(1:5,length.out=9)
rep.int(1:5,3)
rep_len(1:5,13)
# Creating arrays ----
(three_d_array <- array(
1:24,
dim = c(4,3,2),
dimnames = list(
c("one","two","three","four"),
c("ein","zwei","drei"),
c("un","deux")
)
))
class(three_d_array)
# Creating matrices ----
(a_matrix <- matrix(
1:12,
nrow = 4,
dimnames = list(
c("one","two","three","four"),
c("ein","zwei","drei")
)
))
class(a_matrix)
# Creating a matrix with the array() function ----
(two_d_array <- array(
1:12,
dim = c(4,3),
dimnames = list(
c("one","two","three","four"),
c("ein","zwei","drei")
)
))
class(two_d_array)
# byrow = TRUE fills the matrix row by row ----
(a_matrix <- matrix(
1:12,
nrow = 4,
byrow = TRUE,
dimnames = list(
c("one","two","three","four"),
c("ein","zwei","drei")
)
))
class(a_matrix)
# dim() returns an integer vector of the dimensions
dim(three_d_array)
dim(a_matrix)
nrow(a_matrix) # number of rows
ncol(a_matrix) # number of columns
length(three_d_array)
length(a_matrix) # product of all dimensions
# nrow, ncol and dim return NULL when applied to a plain vector.
# NROW and NCOL treat a vector as a one-column matrix (a column vector).
identical(nrow(a_matrix),NROW(a_matrix))
identical(ncol(a_matrix),NCOL(a_matrix))
recaman <- c(0,1,3,6,2,7,13,20)
nrow(recaman)
NROW(recaman)
ncol(recaman)
NCOL(recaman)
# Matrix rows and columns: rownames() and colnames() ----
rownames(a_matrix)
colnames(a_matrix)
dimnames(a_matrix)
rownames(three_d_array)
colnames(three_d_array)
dimnames(three_d_array)
# Indexing arrays ----
a_matrix[1,c("zwei","drei")] # the two elements in row 1, columns "zwei"/"drei"
a_matrix[1,] # all elements of row 1
a_matrix[,c("zwei","drei")] # all elements of columns 2 and 3
# Combining matrices ----
(another_matrix <- matrix(
seq.int(2,24,2),
nrow = 4,
dimnames = list(
c("five","six","seven","eight"),
c("vier","funf","sechs")
)
))
c(a_matrix,another_matrix)
# cbind/rbind bind two matrices by columns/rows, a more natural merge.
cbind(a_matrix,another_matrix)
rbind(a_matrix,another_matrix)
# Array arithmetic (elementwise) ----
a_matrix + another_matrix
a_matrix*another_matrix
t(a_matrix) # t() transposes
# %*% is matrix multiplication; %o% / outer() form the outer product.
a_matrix%*%t(a_matrix)
1:3%o%4:6
outer(1:3,4:6)
(m <- matrix(c(1,0,1,5,-3,1,2,4,7),nrow = 3))
# m^-1 is the ELEMENTWISE reciprocal, not the matrix inverse; use solve().
m^-1
(inverse_of_m <- solve(m))
m %*% inverse_of_m
|
7344435fa3156c062f035952947865a1ea506256
|
b0b9770e1cb4b96e6af3c311ef3b4a1205b4466b
|
/activity_1/Starter_script.R
|
4cbb35c885aa61cc7db216b9c6765160a91e72d9
|
[
"MIT"
] |
permissive
|
jessicaguo/ESA-Bayesian-short-course
|
4e09f1e6a516020bd66ab446309fa765287e3726
|
43b337b1a5d6fc19d2436b42265895a16c614c21
|
refs/heads/main
| 2023-06-30T09:48:05.206473
| 2021-08-06T20:34:17
| 2021-08-06T20:34:17
| 370,826,197
| 4
| 4
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,747
|
r
|
Starter_script.R
|
#This is a starter script
# there are blanks to fill in.
# I describe these in the R markdown pdf and markdown file.
#setwd(...)
#load your data
# (style note: prefer header=TRUE over the reassignable shorthand T)
Weights <- read.csv("GiantRats.csv",header=T)
head(Weights)
#create JAGS data object for the "jags.model()" function
# x = measurement ages, xbar = their center, N rats x T time points.
data <- list(x = c(8.0, 15.0, 22.0, 29.0, 36.0), xbar = 22, N = 30, T = 5,
Y = Weights)
#select initial values (Fill in the blanks!)
# and save into inits object for "jags.mode()" function
# One inits list per chain (3 chains); the xx placeholders are the blanks
# the student must fill in.
inits = list(
list(mu.alpha = 1, mu.beta = 0, tau=1,tau.alpha=1,tau.beta=1),
list(mu.alpha = xx, mu.beta = xx, tau=0.5,tau.alpha=2,tau.beta=0.5),
list(mu.alpha = xx, mu.beta = xx, tau=2,tau.alpha=0.5,tau.beta=2))
#load rjags package to interface between R and JAGS
library(rjags)
#Initialize the model
Rats_model <- jags.model("RATS_Hmodel.R", data=data, inits=inits, n.chains = 3, n.adapt=500)
#Sample some parameters
full_coda <- coda.samples(Rats_model, variable.names = c("deviance", "mu.alpha",
"mu.beta","alpha","beta","sig","sig.alpha","sig.beta"), n.iter=1000)
# NOTE(review): `converged_samples` is never defined above; presumably this
# should plot `full_coda` — confirm.
plot(converged_samples) #base R method to plot output. Flexible but less user-friendly.
library(mcmcplots) #another way to plot output, more user-friendly, but inflexible. Annoying for large models.
mcmcplot(full_coda)
#convergence diagnostic (<1.2 is converged)
gelman.diag(full_coda)
#Summarize output in terms of means, sd's, 95% CrI's
table1=summary(full_coda)$stat
table2=summary(full_coda)$quantiles
# NOTE(review): the first column is table2[,1] (the 2.5% quantile) but is
# labeled "mean" below; table1[,1] (the posterior mean) was likely
# intended — confirm.
outstats<-cbind(table2[,1],table1[,2],table2[,1],table2[,5])
colnames(outstats)<-c("mean","sd","val2.5pc","val97.5pc")
outstats
#compare parameter values
library(mcmcplots)
caterplot(full_coda, parms="alpha", quantiles=list(outer=c(0.025,0.975),inner=c(.25,.75)), greek=T)
# The End #
|
274ceb57d76cda3d3bc87c388f0a208577ad6563
|
6b61e50a88f336bf22f47893d5bcb373c8d1f30c
|
/code/CVgeneric.R
|
d4debbfa7f7eeb1d5f3834b0d1bb0d8131bef7a7
|
[] |
no_license
|
zhanyuanucb/stat-154-project
|
7158f4dd6d0b862422466ca598e5b353886b8acc
|
8260c92b6a12f0e44d951a3e625de97b1c20aa5e
|
refs/heads/master
| 2020-05-19T03:18:30.592768
| 2019-05-04T05:58:01
| 2019-05-04T05:58:01
| 184,796,579
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,593
|
r
|
CVgeneric.R
|
library(glmnet)
library(ggplot2)
library(dplyr)
library(HDCI)
library(caret)
library(MASS)
library(e1071)
library(glmnet)
library(tidyr)
library(ROCR)
library(randomForest)
library(purrr)
library(tidyr)
library(ggpubr)
CVgeneric <- function(train_x, train_y, k, loss_fn, model, thresh=0.5, mtry = 7, nodesize=50, ntree=10) {
flds = createFolds(1:nrow(train_x), k = k)
avg_acc = c()
for (i in 1:k) {
cv_train = data.frame()
for (j in 1:(k-1)) {
cv_train = rbind.data.frame(cv_train, train_x[flds[[j]], ])
}
cv_val = train_x[flds[[i]], ]
cv_val_label = train_y[flds[[i]]]
if (model == 'lda') {
clf = lda(label ~ ., data = cv_train)
} else if (model == 'qda') {
clf = qda(label ~ ., data = cv_train)
} else if (model == 'logit') {
clf = glm(label ~ ., data = cv_train, family = binomial(link = "logit"))
y_hat = get_logit_pred(clf, cv_val, thresh = thresh)
temp = loss_fn(cv_val_label, y_hat)
avg_acc = c(avg_acc, temp)
} else if (model == "rf") { #randomForest(size_train[, -1], factor(size_train[, 1]), mtry = 7, nodesize = 50 , ntree = 10)
clf = randomForest(train_x[, -1], factor(train_x[, 1]), mtry = mtry, nodesize = nodesize, ntree = ntree)
temp = loss_fn(cv_val_label, predict(clf, cv_val))
avg_acc = c(avg_acc, temp)
}
if (model != "logit" && model != "rf") {
y_hat = predict(clf, cv_val)$class
temp = loss_fn(cv_val_label, y_hat)
avg_acc = c(avg_acc, temp)
}
}
return(list(fold_acc = avg_acc, error_rate = 1 - avg_acc))
}
|
825ae7cfc19847f17aea45daf3d1866c1f306d5f
|
a2ba0b6390c2a8682dc0bf0df29e00c99cc19de7
|
/man/source_decoratees.Rd
|
7a145db3392ba0774ce5e7a69d563b86103716e0
|
[] |
no_license
|
nteetor/tinsel
|
609cb8c6cfb31da894bf2147508e9b705369a0a6
|
24fa5f6ef5dc3ac30637b5f5d136ba07c3f80849
|
refs/heads/master
| 2021-05-03T08:45:45.966066
| 2016-11-21T17:15:56
| 2016-11-21T17:15:56
| 72,219,492
| 21
| 0
| null | 2016-11-15T21:29:36
| 2016-10-28T15:30:33
|
R
|
UTF-8
|
R
| false
| true
| 1,490
|
rd
|
source_decoratees.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/source-decoratees.R
\name{source_decoratees}
\alias{source_decoratees}
\title{Read and Parse Decoratees from a File}
\usage{
source_decoratees(file)
}
\arguments{
\item{file}{A character string specifying a file path.}
}
\description{
Given a file, \code{source_decoratees} reads and parses decorated functions
(decoratees) into the calling environment.
}
\details{
Malformed decoratees are ignored and a message will alert the user a function
has been skipped. However, an error is raised if a decorator is undefined.
If you are working within RStudio the "Source Active File Decoratees" addin
effectively allows you to bind \code{source_decoratees} to a keyboard
shorcut. The addin is found under \bold{Tools} > \bold{Addins}.
}
\examples{
# source example files
source_decoratees(tinsel_example('attributes.R'))
source_decoratees(tinsel_example('tags.R'))
# the important thing is to look at the contents
# of the example files, note the use of the special
# "#." comment
writeLines(readLines(tinsel_example('attributes.R')))
writeLines(readLines(tinsel_example('tags.R')))
# the decorator functions are not sourced,
exists('attribute') # FALSE
exists('html_wrap') # FALSE
# only decorated functions are sourced
print(selector1)
selector1(mtcars, 'mpg')
# format with bold tags
html_bold('make this bold')
# format with paragraph tags
html_paragraph("I'll make my report as if I told a story...")
}
|
70ad65d3029b9c7eca546628541e995fb3823b1c
|
33e9809a950d135fbf4d8157f608b0da6698dd2d
|
/man/rcox.Rd
|
66a5547f5639797765027da46087379aa661e78e
|
[] |
no_license
|
cran/gRcox
|
3c5c302e1627f855f3d083421845f77a9cf6c7d0
|
1156162787adde6c779cab3db692162ab11452c3
|
refs/heads/master
| 2021-01-22T06:45:15.403393
| 2006-09-20T00:00:00
| 2006-09-20T00:00:00
| null | 0
| 0
| null | null | null | null |
ISO-8859-15
|
R
| false
| false
| 1,907
|
rd
|
rcox.Rd
|
\name{rcox}
\alias{rcox}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{Main function for specifying RCON/RCOR models}
\description{
This is the main function for specifying and fitting RCON/RCOR models in the
package.
}
\usage{
rcox(gm = NULL, vcc = NULL, ecc = NULL, type = c("rcon", "rcor"),
method = c("scoring", "ipm", "hyd", "user"),
fit = TRUE,
data = NULL, S = NULL, n = NULL, Kstart,
control = rcox.control())
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{gm}{Generating class for a grapical Gaussian model, see
'Examples' for an illustration}
\item{vcc}{List of vertex colour classes for the model}
\item{ecc}{List of edge colour classes for the model}
\item{type}{Type of model. Default is RCON}
\item{method}{Estimation method. Default is 'scoring' which is
stabilised Fisher scoring. An alternative is 'ipm' which is
iterative partial maximisation. The methods 'hyd' and 'user' are for
internal use and should not be called directly}
\item{fit}{Should the model be fitted}
\item{data}{A dataframe}
\item{S}{An empirical covariance matrix (as alternative to giving data
as a dataframe)}
\item{n}{The number of observations (which is needed if data is
specified as an empirical covariance matrix)}
\item{Kstart}{An initial value for K}
\item{control}{Controlling the fitting algorithms}
}
\details{
~~ If necessary, more details than the description above ~~
}
\value{
A model object of type 'rcox'.
}
%\references{ ~put references to the literature/web site here ~ }
\author{Søren Højsgaard, sorenh@agrsci.dk}
%\note{ ~~further notes~~
%% ~Make other sections like Warning with \section{Warning }{....} ~
%}
%\seealso{ ~~objects to See Also as \code{\link{help}}, ~~~ }
\examples{
}
\keyword{models}% at least one, from doc/KEYWORDS
|
ed62e1cd06e186dee22d59cb0113c32987934a43
|
bbd803cd4fe2623ae8f41f46586684691a2e7f92
|
/R/AllClasses.R
|
ff424f70a61f24b3dd5f0db5644127ee95d7afa3
|
[] |
no_license
|
12379Monty/clusterExperiment
|
96d3359aefe60a65bfdfd3eb4f05a647347c020d
|
a26d494a9a23d467269d85c69348c4904a08bb56
|
refs/heads/master
| 2021-01-21T15:21:53.986787
| 2017-06-14T23:30:20
| 2017-06-14T23:30:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 17,615
|
r
|
AllClasses.R
|
setOldClass("dendrogram")
setClassUnion("dendrogramOrNULL",members=c("dendrogram", "NULL"))
setClassUnion("matrixOrNULL",members=c("matrix", "NULL"))
setClassUnion("matrixOrMissing",members=c("matrix", "missing"))
#' @title Class ClusterExperiment
#'
#' @description \code{ClusterExperiment} is a class that extends
#' \code{SummarizedExperiment} and is used to store the data
#' and clustering information.
#'
#' @docType class
#' @aliases ClusterExperiment ClusterExperiment-class clusterExperiment
#'
#' @description In addition to the slots of the \code{SummarizedExperiment}
#' class, the \code{ClusterExperiment} object has the additional slots described
#' in the Slots section.
#'
#' @description There are several methods implemented for this class. The most
#' important methods (e.g., \code{\link{clusterMany}}, \code{\link{combineMany}},
#' ...) have their own help page. Simple helper methods are described in the
#' Methods section. For a comprehensive list of methods specific to this class
#' see the Reference Manual.
#'
#' @slot transformation function. Function to transform the data by when methods
#' that assume normal-like data (e.g. log)
#' @slot clusterMatrix matrix. A matrix giving the integer-valued cluster ids
#' for each sample. The rows of the matrix correspond to clusterings and columns
#' to samples. The integer values are assigned in the order that the clusters
#' were found, if found by setting sequential=TRUE in clusterSingle. "-1" indicates
#' the sample was not clustered.
#' @slot primaryIndex numeric. An index that specifies the primary set of
#' labels.
#' @slot clusterInfo list. A list with info about the clustering.
#' If created from \code{\link{clusterSingle}}, clusterInfo will include the
#' parameter used for the call, and the call itself. If \code{sequential = TRUE}
#' it will also include the following components.
#' \itemize{
#' \item{\code{clusterInfo}}{if sequential=TRUE and clusters were successfully
#' found, a matrix of information regarding the algorithm behavior for each
#' cluster (the starting and stopping K for each cluster, and the number of
#' iterations for each cluster).}
#' \item{\code{whyStop}}{if sequential=TRUE and clusters were successfully
#' found, a character string explaining what triggered the algorithm to stop.}
#' }
#' @slot clusterTypes character vector with the origin of each column of
#' clusterMatrix.
#' @slot dendro_samples dendrogram. A dendrogram containing the cluster
#' relationship (leaves are samples; see \code{\link{makeDendrogram}} for
#' details).
#' @slot dendro_clusters dendrogram. A dendrogram containing the cluster
#' relationship (leaves are clusters; see \code{\link{makeDendrogram}} for
#' details).
#' @slot dendro_index numeric. An integer giving the cluster that was used to
#' make the dendrograms. NA_real_ value if no dendrograms are saved.
#' @slot dendro_outbranch logical. Whether the dendro_samples dendrogram put
#' missing/non-clustered samples in an outbranch, or intermixed in the dendrogram.
#' @slot coClustering matrix. A matrix with the cluster co-occurrence
#' information; this can either be based on subsampling or on co-clustering
#' across parameter sets (see \code{clusterMany}). The matrix is a square matrix
#' with number of rows/columns equal to the number of samples.
#' @slot clusterLegend a list, one per cluster in \code{clusterMatrix}. Each
#' element of the list is a matrix with nrows equal to the number of different
#' clusters in the clustering, and consisting of at least two columns with the
#' following column names: "clusterId" and "color".
#' @slot orderSamples a numeric vector (of integers) defining the order of
#' samples to be used for plotting of samples. Usually set internally by other
#' functions.
#'
#' @name ClusterExperiment-class
#' @aliases ClusterExperiment
#' @rdname ClusterExperiment-class
#' @import SummarizedExperiment
#' @import methods
#' @importClassesFrom SummarizedExperiment SummarizedExperiment
#' @importFrom dendextend as.phylo.dendrogram
#' @export
#'
setClass(
Class = "ClusterExperiment",
contains = "SummarizedExperiment",
slots = list(
transformation="function",
clusterMatrix = "matrix",
primaryIndex = "numeric",
clusterInfo = "list",
clusterTypes = "character",
dendro_samples = "dendrogramOrNULL",
dendro_clusters = "dendrogramOrNULL",
dendro_index = "numeric",
dendro_outbranch = "logical",
coClustering = "matrixOrNULL",
clusterLegend="list",
orderSamples="numeric"
)
)
## One question is how to extend the "[" method, i.e., how do we subset the co-occurance matrix and the dendrogram?
## For now, if subsetting, these are lost, but perhaps we can do something smarter?
setValidity("ClusterExperiment", function(object) {
#browser()
if(length(assays(object)) < 1) {
return("There must be at least one assay slot.")
}
if(!is.numeric(assay(object))) {
return("The data must be numeric.")
}
if(any(is.na(assay(object)))) {
return("NA values are not allowed.")
}
tX <- try(transform(object),silent=TRUE)
if(inherits(tX, "try-error")){
stop(paste("User-supplied `transformation` produces error on the input data
matrix:\n",x))
}
if(any(is.na(tX))) {
return("NA values after transforming data matrix are not allowed.")
}
if(!all(is.na((object@clusterMatrix))) &
!(NROW(object@clusterMatrix) == NCOL(object))) {
return("If present, `clusterMatrix` must have as many row as cells.")
}
if(!is.numeric(object@clusterMatrix)) {
return("`clusterMatrix` must be a numeric matrix.")
}
if(NCOL(object@clusterMatrix)!= length(object@clusterTypes)) {
return("length of clusterTypes must be same as NCOL of the clusterMatrix")
}
if(NCOL(object@clusterMatrix)!= length(object@clusterInfo)) {
return("length of clusterInfo must be same as NCOL of the clusterMatrix")
}
############
##Check dendrogram slotNames
############
#browser()
if(!is.null(object@dendro_samples)){
if(nobs(object@dendro_samples) != NCOL(object)) {
return("dendro_samples must have the same number of leaves as the number of samples")
}
if(is.na(object@dendro_outbranch)) return("if dendro_samples is defined, must also define dendro_outbranch")
}
else{
if(!is.null(object@dendro_clusters)) return("dendro_samples should not be null if dendro_clusters is non-null")
if(!is.na(object@dendro_outbranch)) return("dendro_samples should not be null if dendro_outbranch is not NA")
}
if(!is.null(object@dendro_clusters)){
if(is.na(dendroClusterIndex(object))) return("if dendrogram slots are filled, must have corresponding dendro_index defined.")
dcluster<-clusterMatrix(object)[,dendroClusterIndex(object)]
if(nobs(object@dendro_clusters) != max(dcluster)) {
return("dendro_clusters must have the same number of leaves as the number of (non-negative) clusters")
}
}
else{
if(!is.null(object@dendro_samples)) return("dendro_clusters should not be null if dendro_samples is non-null")
}
## Check co-clustering
if(!is.null(object@coClustering) &&
(NROW(object@coClustering) != NCOL(object@coClustering)
| NCOL(object@coClustering) != NCOL(object))) {
return("`coClustering` must be a sample by sample matrix.")
}
## If have a cluster matrix
if(!all(is.na(object@clusterMatrix))){ #what does this mean, how can they be all NA?
#check primary index
if(length(object@primaryIndex) != 1) {
if(length(object@primaryIndex) == 0) return("If more than one set of clusterings, a primary cluster must
be specified.")
if(length(object@primaryIndex) > 0) return("Only a single primary index may be specified")
}
if(object@primaryIndex > NCOL(object@clusterMatrix) |
object@primaryIndex < 1) {
return("`primaryIndex` out of bounds.")
}
#check clusterTypes
if(NCOL(object@clusterMatrix) != length(object@clusterTypes)) {
return("`clusterTypes` must be the same length as NCOL of
`clusterMatrix`.")
}
#check internally stored as integers
testConsecIntegers<-apply(object@clusterMatrix,2,function(x){
whCl<-which(!x %in% c(-1,-2))
uniqVals<-unique(x[whCl])
return(all(sort(uniqVals)==1:length(uniqVals)))
})
#browser()
if(!all(testConsecIntegers)) return("the cluster ids in clusterMatrix must be stored internally as consecutive integer values")
####
#test that colnames of clusterMatrix appropriately aligns with everything else
####
if(is.null(colnames(object@clusterMatrix))) return("clusterMatrix must have column names")
if(any(duplicated(colnames(object@clusterMatrix)))) return("clusterMatrix must have unique column names")
if(!is.null(names(object@clusterTypes))) return("clusterTypes should not have names")
if(!is.null(names(object@clusterInfo))) return("clusterInfo should not have names")
if(!is.null(names(object@clusterLegend))) return("clusterLegend should not have names")
####
#test that @clusterLegend is proper form
####
if(length(object@clusterLegend) != NCOL(object@clusterMatrix)) {
return("`clusterLegend` must be list of same length as NCOL of
`clusterMatrix`")
}
testIsMatrix <- sapply(object@clusterLegend,
function(x) {!is.null(dim(x))})
if(!all(testIsMatrix)) {
return("Each element of `clusterLegend` list must be a matrix")
}
testColorRows <- sapply(object@clusterLegend, function(x){nrow(x)})
testClusterMat <- apply(object@clusterMatrix, 2, function(x) {
length(unique(x))})
if(!all(testColorRows == testClusterMat)) {
return("each element of `clusterLegend` must be matrix with number of
rows equal to the number of clusters (including -1 or -2 values)
in `clusterMatrix`")
}
testColorCols1 <- sapply(object@clusterLegend, function(x) {
"color" %in% colnames(x)})
testColorCols2 <- sapply(object@clusterLegend, function(x) {
"clusterIds" %in% colnames(x)})
testColorCols3 <- sapply(object@clusterLegend, function(x) {
"name" %in% colnames(x)})
if(!all(testColorCols1) || !all(testColorCols2) || !all(testColorCols3)) {
return("each element of `clusterLegend` must be matrix with at least 3
columns, and at least 3 columns have names `clusterIds`,
`color` and `name`")
}
# testUniqueName <- sapply(object@clusterLegend, function(x) {
# any(duplicated(x[,"name"]))})
# if(any(testUniqueName)) return("the column")
testColorCols1 <- sapply(object@clusterLegend, function(x){is.character(x)})
if(!all(testColorCols1)) {
return("each element of `clusterLegend` must be matrix of character
values")
}
testColorCols1 <- sapply(1:length(object@clusterLegend), function(ii){
col<-object@clusterLegend[[ii]]
x<-object@clusterMatrix[,ii]
y<-as.numeric(col[,"clusterIds"])
all(y %in% x)
})
if(!all(testColorCols1)) {
return("each element of `clusterLegend` must be matrix with column
`clusterIds` matching the corresponding integer valued
clusterMatrix values")
}
}
if(length(object@orderSamples)!=NCOL(assay(object))) {
return("`orderSamples` must be of same length as number of samples
(NCOL(assay(object)))")
}
if(any(!object@orderSamples %in% 1:NCOL(assay(object)))) {
return("`orderSamples` must be values between 1 and the number of samples.")
}
return(TRUE)
})
#' @description The constructor \code{clusterExperiment} creates an object of
#' the class \code{ClusterExperiment}. However, the typical way of creating
#' these objects is the result of a call to \code{\link{clusterMany}} or
#' \code{\link{clusterSingle}}.
#'
#' @description Note that when subsetting the data, the co-clustering and
#' dendrogram information are lost.
#'
#'@param se a matrix or \code{SummarizedExperiment} containing the data to be
#'clustered.
#'@param clusters can be either a numeric or character vector, a factor, or a
#'numeric matrix, containing the cluster labels.
#'@param transformation function. A function to transform the data before
#'performing steps that assume normal-like data (i.e. constant variance), such
#'as the log.
#'@param ... The arguments \code{transformation}, \code{clusterTypes} and
#' \code{clusterInfo} to be passed to the constructor for signature
#' \code{SummarizedExperiment,matrix}.
#'
#'@return A \code{ClusterExperiment} object.
#'
#'@examples
#'
#'se <- matrix(data=rnorm(200), ncol=10)
#'labels <- gl(5, 2)
#'
#'cc <- clusterExperiment(se, as.numeric(labels), transformation =
#'function(x){x})
#'
#' @rdname ClusterExperiment-class
#' @export
setGeneric(
name = "clusterExperiment",
def = function(se, clusters,...) {
standardGeneric("clusterExperiment")
}
)
#' @rdname ClusterExperiment-class
#' @export
setMethod(
f = "clusterExperiment",
signature = signature("matrix","ANY"),
definition = function(se, clusters, ...){
clusterExperiment(SummarizedExperiment(se), clusters, ...)
})
#' @rdname ClusterExperiment-class
setMethod(
f = "clusterExperiment",
signature = signature("SummarizedExperiment", "numeric"),
definition = function(se, clusters, ...){
if(NCOL(se) != length(clusters)) {
stop("`clusters` must be a vector of length equal to the number of samples.")
}
clusterExperiment(se,matrix(clusters, ncol=1),...)
})
#' @rdname ClusterExperiment-class
setMethod(
f = "clusterExperiment",
signature = signature("SummarizedExperiment","character"),
definition = function(se, clusters,...){
clusterExperiment(se,matrix(clusters,ncol=1),...)
})
#' @rdname ClusterExperiment-class
setMethod(
f = "clusterExperiment",
signature = signature("SummarizedExperiment","factor"),
definition = function(se, clusters,...){
clusters <- as.character(clusters)
clusterExperiment(se,clusters,...)
})
#'@rdname ClusterExperiment-class
#'@param clusterTypes a string describing the nature of the clustering. The
#' values `clusterSingle`, `clusterMany`, `mergeClusters`, `combineMany` are
#' reserved for the clustering coming from the package workflow and should not
#' be used when creating a new object with the constructor.
#'@param clusterInfo a list with information on the clustering (see Slots).
#'@param primaryIndex integer. Sets the `primaryIndex` slot (see Slots).
#'@param orderSamples a vector of integers. Sets the `orderSamples` slot (see
#' Slots).
#'@param dendro_samples dendrogram. Sets the `dendro_samples` slot (see Slots).
#'@param dendro_clusters dendrogram. Sets the `dendro_clusters` slot (see
#' Slots).
#' @param dendro_outbranch logical. Sets the `dendro_outbranch` slot (see Slots)
#'@param dendro_index numeric. Sets the dendro_index slot (see Slots).
#'@param coClustering matrix. Sets the `coClustering` slot (see Slots).
#'@details The \code{clusterExperiment} constructor function gives clusterLabels
#' based on the column names of the input matrix/SummarizedExperiment. If
#' missing, will assign labels "cluster1","cluster2", etc.
setMethod(
f = "clusterExperiment",
signature = signature("SummarizedExperiment","matrix"),
definition = function(se, clusters,
transformation,
primaryIndex=1,
clusterTypes="User",
clusterInfo=NULL,
orderSamples=1:ncol(se),
dendro_samples=NULL,
dendro_index=NA_real_,
dendro_clusters=NULL,
dendro_outbranch=NA,
coClustering=NULL
){
if(NCOL(se) != nrow(clusters)) {
stop("`clusters` must be a matrix of rows equal to the number of
samples.")
}
if(length(clusterTypes)==1) {
clusterTypes <- rep(clusterTypes, length=NCOL(clusters))
}
if(is.null(clusterInfo)) {
clusterInfo<-rep(list(NULL),length=NCOL(clusters))
}
if(length(clusterTypes)!=NCOL(clusters)) {
stop("clusterTypes must be of length equal to number of clusters in
`clusters`")
}
#fix up names of clusters and match
#browser()
if(is.null(colnames(clusters))){
colnames(clusters)<-paste("cluster",1:NCOL(clusters),sep="")
}
if(any(duplicated(colnames(clusters)))){#probably not possible
colnames(clusters)<-make.names(colnames(clusters),unique=TRUE)
}
if(length(clusterTypes) == 1) {
clusterTypes <- rep(clusterTypes, length=NCOL(clusters))
}
if(is.null(clusterInfo)) {
clusterInfo <- rep(list(NULL), length=NCOL(clusters))
}
#make clusters consecutive integer valued:
tmp<-.makeColors(clusters, colors=bigPalette)
clusterLegend<-tmp$colorList
clustersNum<-tmp$numClusters
colnames(clustersNum)<-colnames(clusters)
#can just give se in constructor, and then don't loose any information!
out <- new("ClusterExperiment",
se,
transformation=transformation,
clusterMatrix = clustersNum,
primaryIndex = primaryIndex,
clusterTypes = unname(clusterTypes),
clusterInfo=unname(clusterInfo),
clusterLegend=unname(clusterLegend),
orderSamples=1:ncol(se),
dendro_samples=dendro_samples,
dendro_clusters=dendro_clusters,
dendro_index=dendro_index,
dendro_outbranch=dendro_outbranch,
coClustering=coClustering
)
validObject(out)
return(out)
})
|
c81dd7080ddf160304a41a059e84f9480dd5efa2
|
0eac16b6617ccfae728818585558bcc23e064b50
|
/R/RejectionABC.R
|
0da7aa69c1cbccfcab22760fd2a441f619630728
|
[
"MIT"
] |
permissive
|
tom-jin/LazyABC
|
2cf711bc9fd22d7f6d7f1beb4fff54cf5de3ee1a
|
a24d0f5cf243d4820ae7562293030669d500eeb3
|
refs/heads/master
| 2021-01-01T17:00:52.009640
| 2015-01-29T12:17:10
| 2015-01-29T12:17:10
| 30,016,146
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,269
|
r
|
RejectionABC.R
|
#' Rejection Approximate Bayesian Computation
#'
#' Perform rejection ABC sampling on user specified distributions.
#'
#' This is a generic function: methods can be defined for it directly
#' or via the \code{\link{Summary}} group generic. For this to work properly,
#' the arguments \code{...} should be unnamed, and dispatch is on the
#' first argument.
#'
#' @param data an observation
#' @param prior a function taking theta and returning its density from the prior
#' @param simulator a function taking theta performing simulation
#' @param tolerance the epsilon tolerance to accept simulations
#' @param n number of iterations to run
#' @return n weighted samples of the parameter from the posterior distribution.
#' @examples
#' posterior <-
#' RejectionABC(data = 2, prior = function(){runif(1, -10, 10)},
#' simulator = function(theta){rnorm(1, 2*(theta+2)*theta*(theta-2), 0.1+theta^2)},
#' n = 3000, tolerance = 1)
RejectionABC <- function(data, prior, simulator, tolerance = 0.1, n = 10000) {
posterior <- rep(NA, n)
for(i in 1:n) {
theta <- NA
repeat {
theta <- prior()
X <- simulator(theta)
if(abs(X - data) < tolerance)
break
}
posterior[i] <- theta
}
return(posterior)
}
|
b99d54dd54c8cc4f62142f9282c1452ff6c6c905
|
c1f4e7b9135bb3c4bc557d083e48ef558f89a691
|
/Pollutantmean.R
|
12982e21cc945a399fac59e2501c41dcd1fc96e9
|
[] |
no_license
|
briansun92/Rworkspace
|
1339b94233d0b0cc1750f080b2b7b6788edc75b3
|
7f1e82f005333da67f8d4151c0fec8387efc4d60
|
refs/heads/master
| 2020-05-26T12:09:15.688967
| 2017-02-26T15:31:33
| 2017-02-26T15:31:33
| 82,476,636
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 472
|
r
|
Pollutantmean.R
|
pollutantmean<-function(directory,pollutant,id = 1:332){
file<- dir(directory)
setwd(directory)
data1<-data.frame (NaN)
names(data1)= pollutant
for(i in id){
tempcsv<-read.csv(file[i])
tempsub<-subset(tempcsv,select = pollutant)
data1<-rbind(data1,tempsub)
}
result<-colMeans(data1,na.rm = TRUE)
setwd("~/Desktop/Coursera/R workspace")
print(result)
}
|
f066cb680a1ce05d8617a4e147c16dee6eda4c47
|
4b9955701ca424c19bec17f0bc4b36f72cfcbcc4
|
/R/FormatCheck.R
|
479c7847c8fa41ffa49f765ac8459be7b7c5855a
|
[
"BSD-2-Clause"
] |
permissive
|
mlr-org/mlrCPO
|
c238c4ddd72ece8549f8b48a79f02f543dac60e5
|
e6fc62a4aeb2001a3760c9d1126f6f2ddd98cc54
|
refs/heads/master
| 2022-11-21T17:30:54.108189
| 2022-11-16T16:08:10
| 2022-11-16T16:08:10
| 100,395,368
| 39
| 4
|
NOASSERTION
| 2022-10-18T23:46:13
| 2017-08-15T16:08:30
|
R
|
UTF-8
|
R
| false
| false
| 70,131
|
r
|
FormatCheck.R
|
# FormatCheck.R is central within the CPO framework. It checks that
# incoming & outgoing data conforms with the properties declared by
# a CPO and with requirements implicit within the CPO framework
# (Task column not changed in Feature Operating CPOs, number of rows
# not changed unless Training Only CPO, data for retrafo is the same
# as data for trafo).
# In the same stride, data format is changed to match the required
# format of the CPO functions -- see `dataformat` argument of
# makeCPO.
##################################
### Externals ###
##################################
# do the preparation before calling trafo:
# - check the data is in an acceptable format (task or df)
# - check the properties are fulfilled
# - split the data
# - get a shape info object
# @param indata [Task | data.frame] incoming data to be fed to the CPO trafo function
# @param dataformat [character(1)] one of 'task', 'df.all', 'df.features', 'split', 'factor', 'numeric', 'ordered'
# @param strict.factors [logical(1)] whether to consider 'ordered' as separate from 'factor' types
# @param allowed.properties [character] allowed properties of `indata`
# @param subset.selector [list] information about 'affect.*' parameters that determine which subset of 'indata' is affected
# @param capture.factors [logical(1)] whether to save factor levels of input data in shapeinfo data structure. This is only used if the CPO has 'fix.factors' set to TRUE
# @param operating.type [character(1)] one of 'target', 'feature', 'retrafoless': whether target data, feature data, or both (but only during trafo) may be changed
# @param name [character(1)] name of the cpo, for message printing
# @return [list] the data to feed to the CPO trafo function, as well as meta-information:
# list(indata = list(data, target, data.reduced, target.reduced), shapeinfo, properties, private)
# 'private' is a list containing some fields used by `handleTrafoOutput`.
prepareTrafoInput = function(indata, dataformat, strict.factors, allowed.properties, subset.selector, capture.factors, operating.type, name) {
assert(checkClass(indata, "data.frame"), checkClass(indata, "Task"))
subset.info = subsetIndata(indata, subset.selector, allowed.properties, "trafo", name)
indata = subset.info$indata
shapeinfo = makeInputShapeInfo(indata, capture.factors)
subset.selector$data = NULL
shapeinfo$subset.selector = subset.selector
split.data = splitIndata(indata, dataformat, strict.factors, TRUE)
list(indata = split.data$indata,
shapeinfo = shapeinfo, properties = subset.info$properties,
private = list(tempdata = split.data$tempdata, subset.index = subset.info$subset.index,
dataformat = dataformat, strict.factors = strict.factors,
origdata = subset.info$origdata, name = name, operating.type = operating.type))
}
# do the preparation before calling retrafo:
# - check data is in an acceptable format (task or df)
# - check the properties are fulfilled
# - check the shape is the same as during trafo
# - split the data
# --> return
# how does mlr predict handle this stuff? they just drop target columns by name
# @param indata [Task | data.frame] incoming data to be fed to the CPO retrafo function
# @param dataformat [character(1)] one of 'task', 'df.all', 'df.features', 'split', 'factor', 'numeric', 'ordered'
# @param strict.factors [logical(1)] whether to consider 'ordered' as separate from 'factor' types
# @param allowed.properties [character] allowed properties of `indata`
# @param shapeinfo.input [InputShapeInfo] information about the data shape used to train the CPO
# @param operating.type [character(1)] one of 'target', 'feature', 'retrafoless': whether target data, feature data, or both (but only during trafo) may be changed
# @param name [character(1)] name of the cpo, for message printing
# @return [list] the data to feed to the CPO retrafo function, as well as meta-information:
# list(indata = data in a shape fit to be fed into retrafo, properties, task, private)
# 'task' is the reconstructed task, if any
# 'private' is a list containing some fields used by `handleTrafoOutput`.
prepareRetrafoInput = function(indata, dataformat, strict.factors, allowed.properties, shapeinfo.input, operating.type, name) {
origdata = indata
targetop = operating.type == "target"
# check that input column names and general types match (num / fac, or num/fac/ordered if strict.factors
if ("factor.levels" %in% names(shapeinfo.input)) {
indata = fixFactors(indata, shapeinfo.input$factor.levels)
}
task = NULL
if ("Task" %in% class(indata)) {
origdatatype = "Task"
if (length(shapeinfo.input$target) && length(getTaskTargetNames(indata))) {
# NOTE: this might be too strict: maybe the user wants to retrafo a Task with the target having a different name?
# HOWEVER, then either the training indata's task didnt matter (and he should have trained with a data.set?), or it
# DID matter, in which case it is probably important to have the same data type <==> target name
assertSetEqual(getTaskTargetNames(indata), shapeinfo.input$target, .var.name = sprintf("Target names of Task %s", getTaskId(indata)))
}
traintype = shapeinfo.input$type
curtype = getTaskDesc(indata)$type
if (traintype != curtype && traintype != "cluster" && curtype != "cluster") {
# if either the training or the current task is of type "cluster" (could also mean
# the training was done with a data.frame) we forgive this here.
stopf("CPO trained with task of type %s cannot operate on task of type %s.",
traintype, curtype)
}
if (curtype != "cluster" || traintype == "cluster") {
# if the current task is a cluster, we only do target
# op stuff if the training type was also a cluster.
task = indata
}
target = getTaskData(indata, features = character(0))
indata = getTaskData(indata, target.extra = TRUE)$data
} else if (is.data.frame(indata)) {
origdatatype = "data.frame"
if (any(shapeinfo.input$target %in% names(indata)) || shapeinfo.input$type == "cluster") {
if (!all(shapeinfo.input$target %in% names(indata))) {
badcols = intersect(shapeinfo.input$target, names(indata))
stopf("Some, but not all target columns of training data found in new data. This is probably an error.\n%s%s: %s",
"Offending column", ifelse(length(badcols) > 1, "s", ""), collapse(badcols, sep = ", "))
}
if (targetop) {
task = constructTask(indata, shapeinfo.input$target, shapeinfo.input$type, "[CPO CONSTRUCTED]")
}
target = indata[shapeinfo.input$target]
indata = dropNamed(indata, shapeinfo.input$target)
} else {
target = indata[character(0)]
shapeinfo.input$target = NULL
}
} else {
stopf("Data fed into CPO %s retrafo is not a Task or data.frame.", name)
}
if (!is.null(task)) {
subset.info = subsetIndata(task, shapeinfo.input$subset.selector,
allowed.properties, "retrafo", name)
origdata = subset.info$origdata
indata = subset.info$indata
assertShapeConform(getTaskData(indata, target.extra = TRUE)$data, shapeinfo.input, strict.factors, name)
} else {
# every kind of input looks like 'cluster' here
allowed.properties %c=% "cluster"
subset.info = subsetIndata(indata, shapeinfo.input$subset.selector,
allowed.properties, "retrafo", name)
indata = subset.info$indata
assertShapeConform(indata, shapeinfo.input, strict.factors, name)
}
reducing = dataformat %in% c("task", "df.all")
if (targetop && !is.null(task)) {
split.data = splitIndata(indata, dataformat, strict.factors, TRUE)
indata = split.data$indata
if (reducing) {
indata = list(data = indata$data.reduced,
target = indata$data)
} else {
indata = indata[c("data", "target")]
}
} else {
split.data = splitIndata(indata, if (reducing) "df.features" else dataformat, strict.factors, FALSE)
indata = split.data$indata
indata["target"] = list(NULL) # we want a $target slot with value NULL
}
list(indata = indata, properties = subset.info$properties, task = task,
private = list(tempdata = split.data$tempdata, subset.index = subset.info$subset.index,
origdata = origdata, dataformat = dataformat, strict.factors = strict.factors,
name = name, operating.type = operating.type, origdatatype = origdatatype,
targetnames = names(target) %??% character(0)))
}
# Do the check of the trafo's return value
# - check the data is in an acceptable format (task, df, split dfs)
# - recombine into a task / df
# - check properties are allowed
# - get a shape info object
# @param outdata [Task | data.frame | matrix] data returned by CPO trafo function
# @param prepared.input [list] return object of `prepareTrafoInput`
# @param properties.needed [character] which properties are 'needed' by the subsequent data handler.
# Therefore, these properties may be present in `outdata` even though there were not in the input data.
# @param properties.adding [character] which properties are supposed to be 'added'
# to the subsequent data handler. Therefore, these properties must be *absent* from `outdata`.
# @param convertto [character(1)] only if the operating.type is 'target': type of the new task
# @param simpleresult [character(1)] whethre even for df.all / task dataformat the return value will be formatted
# according to df.features
# @return [list] the data resulting from the CPO operation, as well as meta-information: list(outdata, shapeinfo)
handleTrafoOutput = function(outdata, prepared.input, properties.needed, properties.adding, convertto, simpleresult) {
  # unpack the bookkeeping information collected earlier by prepareTrafoInput
  ppr = prepared.input$private
  olddata = ppr$origdata # incoming data that was already given to prepareTrafoInput as 'indata'
  dataformat = ppr$dataformat
  strict.factors = ppr$strict.factors
  operating.type = ppr$operating.type
  name = ppr$name
  subset.index = ppr$subset.index # index into olddata columns: the columns actually selected by 'affect.*' parameters
  # when the trafo function was supposed to return a full Task, validate its internal consistency
  if (dataformat == "task" && !simpleresult) {
    assertTask(outdata, "trafo", name)
  }
  if (operating.type != "target") {
    # for dataformat 'factor', 'numeric' etc, combine the return object of the function with
    # all the columns that were not originally given to the function
    outdata = rebuildOutdata(outdata, ppr$tempdata, dataformat)
  }
  # from here on, work with the simplified ("low level") dataformat
  dataformat = getLLDataformat(dataformat)
  if (dataformat %in% c("df.all", "task") && simpleresult) {
    # 'simpleresult' means the return value is formatted as df.features even for df.all / task
    assert(operating.type == "feature")
    dataformat = "df.features"
  }
  # put the trafo result back together with the untouched columns / target of the
  # original data; dispatch on operating type and on whether the input was a Task
  recombined = if (operating.type == "target") {
    recombinetask(olddata, outdata, dataformat, strict.factors, subset.index, TRUE, convertto, name)
  } else if (operating.type == "retrafoless") {
    recombineRetrafolessResult(olddata, outdata, prepared.input$shapeinfo, dataformat, strict.factors, subset.index, name)
  } else if (is.data.frame(olddata)) {
    # implied operating.type == "feature"
    recombinedf(olddata, outdata, dataformat, strict.factors, subset.index, character(0), name)
  } else {
    # implied operating.type == "feature"
    recombinetask(olddata, outdata, dataformat, strict.factors, subset.index, FALSE, name = name)
  }
  # verify the trafo only added properties declared in .properties.needed and
  # removed the ones declared in .properties.adding
  checkOutputProperties(outdata, recombined, prepared.input$shapeinfo$target, prepared.input$properties, properties.needed, properties.adding, operating.type, "trafo", name)
  # record the output shape so the retrafo output can later be checked against it
  shapeinfo = makeOutputShapeInfo(outdata)
  if ("Task" %in% class(recombined)) {
    shapeinfo$target = getTaskTargetNames(recombined)
  }
  list(outdata = recombined, shapeinfo = shapeinfo)
}
# do the check of the retrafo's return value
# - check data is in an acceptable format (task, df, split dfs)
# - recombine into a task / df
# - check the properties are fulfilled
# - check the shape is the same as during trafo
# @param outdata [Task | data.frame | matrix] data returned by CPO retrafo function
# @param prepared.input [list] return object of `prepareTrafoInput`
# @param properties.needed [character] which properties are 'needed' by the subsequent data handler.
# Therefore, these properties may be present in `outdata` even though there were not in the input data.
# @param properties.adding [character] which properties are supposed to be 'added'
# to the subsequent data handler. Therefore, these properties must be *absent* from `outdata`.
# @param convertto [character(1)] type of task to convert to, for target operation cpo
# @param shapeinfo.output [OutputShapeInfo] ShapeInfo describing the shape of the data returned by the CPO `trafo` function when it was called.
# This imposes the same structure on the retrafo return value.
# @return [list] the data resulting from the CPO retrafo operation
handleRetrafoOutput = function(outdata, prepared.input, properties.needed, properties.adding, convertto, shapeinfo.output) {
  # unpack the bookkeeping information collected earlier by prepareRetrafoInput
  ppr = prepared.input$private
  olddata = ppr$origdata # incoming data that was already given to prepareRetrafoInput as 'indata'
  dataformat = ppr$dataformat
  strict.factors = ppr$strict.factors
  subset.index = ppr$subset.index # index into olddata columns: the columns actually selected by 'affect.*' parameters
  operating.type = ppr$operating.type
  targetnames = ppr$targetnames
  name = ppr$name
  # target operating CPOs with dataformat "task" return a full Task: validate it
  if (operating.type == "target" && dataformat == "task") {
    assertTask(outdata, "retrafo", name)
  }
  # whether to ignore target columns of shapeinfo.output
  # (needed when dataformat differs between trafo & retrafo)
  drop.shapeinfo.target = dataformat %in% c("df.all", "task") && operating.type != "target"
  if (operating.type != "target") {
    # tempdata: incoming data that was given to prepareRetrafoInput as 'indata', after subsetting according to 'affect.*' parameters
    outdata = rebuildOutdata(outdata, ppr$tempdata, dataformat)
  }
  # from here on, work with the simplified ("low level") dataformat
  dataformat = getLLDataformat(dataformat)
  if (operating.type == "target") {
    # this won't get called at all if operating.type is target and there were not target
    # columns to rebuild. Therefore `olddata` will always be a Task here.
    recombined = recombinetask(olddata, outdata, dataformat, strict.factors, subset.index, TRUE, convertto, name)
  } else {
    if (dataformat %in% c("df.all", "task")) {
      # target is always split off during retrafo
      dataformat = "df.features"
    }
    if (ppr$origdatatype == "data.frame") {
      if (any(targetnames %in% names(olddata))) {
        assert(all(targetnames %in% names(olddata))) # we also check this in prepareRetrafoInput
      }
      recombined = recombinedf(olddata, outdata, dataformat, strict.factors, subset.index, targetnames, name)
    } else {
      recombined = recombinetask(olddata, outdata, dataformat, strict.factors, subset.index, FALSE, name = name)
    }
  }
  # verify the retrafo only added / removed the properties it declared
  checkOutputProperties(outdata, recombined, targetnames, prepared.input$properties, properties.needed, properties.adding, operating.type, "retrafo", name)
  # check the shape of outdata is as expected (must match the shape produced during trafo)
  if (dataformat == "split" && operating.type != "target") {
    # split data: compare each type-slot (numeric / factor / ...) individually
    assertSetEqual(names(outdata), setdiff(names(shapeinfo.output), "target"))
    for (n in names(outdata)) {
      assertShapeConform(outdata[[n]], shapeinfo.output[[n]], strict.factors, name)
    }
  } else {
    if (operating.type == "target" && dataformat == "task") {
      outdata = getTaskData(outdata, target.extra = TRUE)$data
    }
    assertShapeConform(outdata, shapeinfo.output, strict.factors, name, ignore.target = drop.shapeinfo.target)
  }
  assertTargetShapeConform(recombined, shapeinfo.output, operating.type, name)
  if (operating.type == "target" && ppr$origdatatype == "data.frame") {
    # input was a data.frame (with target columns), so we return a data.frame (with target columns)
    recombined = getTaskData(recombined)
  }
  recombined
}
##################################
### Shape & Properties ###
##################################
# make sure that the factor levels of data.frame 'data' are as described by 'levels'.
# @param data [data.frame | Task] data / task to check / modify
# @param levels [list of character] levels of `data` columns, indexed by `data` column names
# @return [data.frame | Task] the modified `data`
# S3 generic: dispatches on whether 'data' is a Task or a plain data.frame.
fixFactors = function(data, levels) {
  UseMethod("fixFactors")
}
# default method: reset factor levels column-by-column on a data.frame.
# @param data [data.frame] the data to adjust
# @param levels [list of character] target levels, indexed by column name
# @return [data.frame] `data` with the listed columns re-leveled
fixFactors.default = function(data, levels) {
  assertSubset(names(levels), names(data))
  for (colname in names(levels)) {
    data[[colname]] = factor(data[[colname]], levels = levels[[colname]])
  }
  data
}
# Task method: fix factor levels of the underlying data, then rebuild the task.
# @param data [Task] the task whose factor columns are to be adjusted
# @param levels [list of character] target levels, indexed by column name
# @return [Task] the task with re-leveled factor columns
fixFactors.Task = function(data, levels) {
  fixed = fixFactors(getTaskData(data), levels)
  changeData(data, fixed)
}
# calculate the properties of the data (only feature types & missings)
# data can be a task or data.frame
# @param data [data.frame | Task] the data to check
# @param targetnames [character] only if `data` is a data.frame: the target columns, which will be ignored
# @return [character] a subset of c("numerics", "factors", "ordered", "missings")
# calculate the properties of the data (only feature types & missings)
# @param data [data.frame | Task] the data to check
# @param targetnames [character] only used for data.frames: target columns to ignore
# @return [character] subset of c("numerics", "factors", "ordered", "missings")
getDataProperties = function(data, targetnames) {
  if (!is.data.frame(data)) {
    assertClass(data, "Task")
    desc = getTaskDesc(data)
  } else {
    # build a throwaway task description to reuse mlr's feature counting
    desc = makeTaskDescInternal(NULL, NULL, data, targetnames, NULL, NULL, FALSE)
  }
  feat.count = desc$n.feat
  props = names(feat.count)[feat.count > 0]
  if (desc$has.missings) {
    props = c(props, "missings")
  }
  props
}
# calculate the properties of the data, as if it were a task.
# If data is a data.frame, we give it the property 'cluster'
# otherwise, we give it the propertye of the task type. If
# applicable, we also set oneclass, multiclass, etc (any from
# the variable 'cpo.targetproperties')
# @param data [data.frame | Task] the data to check
# @return [character] a subset of c("numerics", "factors", "ordered", "missings", "cluster", "classif", "multilabel", "regr", "surv", "oneclass", "twoclass", "multiclass")
# calculate the properties of the data, as if it were a task:
# data.frames get the "cluster" property, tasks get their type and
# (for classif) one of oneclass / twoclass / multiclass.
# @param data [data.frame | Task] the data to check
# @return [character] subset of the feature, task-type and class-count properties
getTaskProperties = function(data) {
  base.props = getDataProperties(data, character(0))
  if (is.data.frame(data)) {
    return(c(base.props, "cluster"))
  }
  td = getTaskDesc(data)
  extra = td$type
  if (td$type == "classif") {
    n.classes = length(td$class.levels)
    class.prop = if (n.classes == 1) {
      "oneclass"
    } else if (n.classes == 2) {
      "twoclass"
    } else {
      "multiclass"
    }
    extra = c(extra, class.prop)
  }
  c(base.props, extra)
}
# calculate properties of a general data object.
#
# This may be a Task, data.frame, or list of data.frames
# (as used with dataformat "split").
# @param data [list | data.frame | Task] The data to get properties of
# @param ignore.cols [character] names of columns to ignore, only for data.frame
# @return [character] same as of getTaskProperties
# calculate properties of a general data object: a Task, a data.frame,
# or a list of data.frames (dataformat "split").
# @param data [list | data.frame | Task] the data to get properties of
# @param ignore.cols [character] column names to ignore, only for data.frames
# @return [character] same possible values as getTaskProperties
getGeneralDataProperties = function(data, ignore.cols = character(0)) {
  if (is.data.frame(data)) {
    return(getTaskProperties(dropNamed(data, ignore.cols)))
  }
  if ("Task" %in% class(data)) {
    return(getTaskProperties(data))
  }
  # list of data.frames: union of each element's properties
  unique(unlist(lapply(data, getTaskProperties)))
}
# give error when shape is different than dictated by shapeinfo.
#
# @param df [data.frame] the data to check
# @param shapeinfo [ShapeInfo] a the shape which `df` must conform to
# @param strict.factors [logical(1)] whether to check for 'ordered' as a type differing from 'factor'
# @param name [character(1)] name of the CPO currently being run, for error and debug printing
# @param retrafoless [logical(1)] whether this is the trafo result of a retrafoless CPO. Default FALSE.
# @param ignore.target [logical(1)] whether to ignore columns that have the same name as the target
# column(s) declared in the $target slot. Default FALSE.
# @return [invisible(NULL)]
# give error when shape is different than dictated by shapeinfo.
#
# @param df [data.frame] the data to check
# @param shapeinfo [ShapeInfo] the shape which `df` must conform to
# @param strict.factors [logical(1)] whether 'ordered' counts as a distinct type from 'factor'
# @param name [character(1)] name of the CPO currently being run, for error printing
# @param retrafoless [logical(1)] whether this is the trafo result of a retrafoless CPO. Default FALSE.
# @param ignore.target [logical(1)] whether to ignore columns named in the $target slot. Default FALSE.
# @return [invisible(NULL)]
assertShapeConform = function(df, shapeinfo, strict.factors, name, retrafoless = FALSE,
  ignore.target = FALSE) {
  if (ignore.target && !is.null(shapeinfo$target)) {
    # drop the target columns from the expected shape, e.g. when trafo saw a Task
    # (with targets) but retrafo only sees feature columns
    shapeinfo$colnames = setdiff(shapeinfo$colnames, shapeinfo$target)
    shapeinfo$coltypes = dropNamed(shapeinfo$coltypes, shapeinfo$target)
  }
  if (!identical(names2(df), shapeinfo$colnames)) {
    errmsg = if (retrafoless) {
      "Error in CPO %s: columns may not be changed by cpo.trafo.\nInput was %s, output is %s."
    } else {
      "Error in CPO %s: column name mismatch between training and test data.\nWas %s during training, is %s now."
    }
    stopf(errmsg, name, collapse(shapeinfo$colnames, sep = ", "), collapse(names(df), sep = ", "))
  }
  indata = df[shapeinfo$colnames]
  # 'integer' and 'numeric' always count as the same type; 'factor' and 'ordered'
  # only count as the same type when strict.factors is FALSE
  if (strict.factors) {
    typesmatch = list(
      c("integer", "numeric"),
      "factor", "ordered")
  } else {
    typesmatch = list(
      c("integer", "numeric"),
      c("factor", "ordered"))
  }
  newcoltypes = vcapply(indata, function(x) class(x)[1])
  for (t in typesmatch) {
    # a column mismatches if it is in type group 't' on one side but not the other
    typemismatch = (newcoltypes %in% t) != (shapeinfo$coltypes %in% t)
    if (any(typemismatch)) {
      # scalar condition: plain if/else instead of ifelse()
      if (sum(typemismatch) > 1) {
        plurs = "s"
        singes = ""
      } else {
        plurs = ""
        singes = "es"
      }
      stopf("Error in CPO %s: Type%s of column%s %s mismatch%s between training and test data.", name,
        plurs, plurs, collapse(names(indata)[typemismatch], sep = ", "), singes)
    }
  }
}
# give error when shape recorded 'target' differs from task target
#
# only target column names are compared. This is needed for target
# operation CPOs changing target names. If data is not a Task
# or the operating.type is 'target', this does nothing.
#
# @param data [Task | data.frame] the Task.
# @param shapeinfo [ShapeInfo] a ShapeInfo
# @param operating.type [character(1)] operating type: "target", "feature", or "retrafoless"
# @return [invisible(NULL)]
# give error when the recorded target names differ from the task's target names.
#
# Only target column names are compared. This is needed for target operation
# CPOs changing target names. If data is not a Task or the operating.type is
# not 'target', this does nothing.
#
# @param data [Task | data.frame] the Task.
# @param shapeinfo [ShapeInfo] ShapeInfo recorded during trafo
# @param operating.type [character(1)] operating type: "target", "feature", or "retrafoless"
# @param name [character(1)] name of the CPO, for error printing
# @return [invisible(NULL)]
assertTargetShapeConform = function(data, shapeinfo, operating.type, name) {
  if ("Task" %nin% class(data) || operating.type != "target") {
    return(invisible(NULL))
  }
  newtarget = getTaskTargetNames(data)
  if (!identical(newtarget, shapeinfo$target)) {
    # BUGFIX: the trafo-time names (shapeinfo$target) are what "was"; the current
    # retrafo names (newtarget) are what "is now" -- arguments were swapped before.
    stopf("Error in CPO %s: Target name(s) after retrafo differ(s) from target name(s) after trafo. Was '%s', is now '%s'",
      name, collapse(shapeinfo$target, "', '"), collapse(newtarget, "', '"))
  }
  invisible(NULL)
}
# prepare some information about the data shape, so retrafo can check that
# it gets the kind of data it expects
# this needs to be checked both for input and for output
# @param data [data.frame] the data for which the shape is to be created
# @return [ShapeInfo] a simple datastructure that contains information about data column names and types
# prepare some information about the data shape, so retrafo can check that
# it gets the kind of data it expects.
# @param data [data.frame] the data for which the shape is to be created
# @return [ShapeInfo] simple structure holding column names and first-class types
makeShapeInfo = function(data) {
  cn = colnames(data)
  if (is.null(cn)) {
    cn = character(0)
  }
  structure(
    list(
      colnames = cn,
      coltypes = vapply(data, function(col) class(col)[1], character(1))),
    class = "ShapeInfo")
}
# like makeShapeInfo, but additionally get the target names and possibly factor levels
# @param indata [data.frame | Task] data for which the shape is to be created
# @param capture.factors [logical(1)] whether to capture factor levels
# @return [InputShapeInfo] a datastructure extending `ShapeInfo` containing information about the data shape
# like makeShapeInfo, but additionally records target names, task type and
# (optionally) factor levels of the input data.
# @param indata [data.frame | Task] data for which the shape is to be created
# @param capture.factors [logical(1)] whether to record factor levels
# @return [InputShapeInfo] ShapeInfo extended with $target, $type, possibly $positive and $factor.levels
makeInputShapeInfo = function(indata, capture.factors) {
  is.task = "Task" %in% class(indata)
  feats = if (is.task) {
    getTaskData(indata, target.extra = TRUE)$data
  } else {
    indata
  }
  info = makeShapeInfo(feats)
  if (is.task) {
    info$target = getTaskTargetNames(indata)
    info$type = getTaskDesc(indata)$type
    if (info$type == "classif") {
      # remember the positive class so retrafo can detect level flips
      info$positive = getTaskDesc(indata)$positive
    }
  } else {
    # plain data.frames look like cluster data: no target
    info$target = character(0)
    info$type = "cluster"
  }
  if (capture.factors) {
    info$factor.levels = Filter(Negate(is.null), lapply(feats, levels))
  }
  addClasses(info, "InputShapeInfo")
}
# creates shape info for data coming out of trafo, so retrafo can check that the data generated
# by it conforms to the data returned by trafo earlier.
# This does not do certain tests about the form of `outdata`, so it is recommended to call this
# after `recombinetask` was called (but with the *original* data, not the recombined data).
# @param outdata [data.frame | Task | list of data.frame] data returned by `trafo` function of which the shape is to be covered
# @return [OutputShapeInfo] This either extends `ShapeInfo` (if outdata is `data.frame` or `Task`) or is a list of `ShapeInfo` objects.
# creates shape info for data coming out of trafo, so retrafo can check that
# the data it generates conforms to the data returned by trafo earlier.
# @param outdata [data.frame | Task | list of data.frame] data returned by `trafo`
# @return [OutputShapeInfo] either an extended ShapeInfo (data.frame / Task)
#   or a list of ShapeInfo objects (split data)
makeOutputShapeInfo = function(outdata) {
  if ("Task" %in% class(outdata)) {
    info = makeShapeInfo(getTaskData(outdata, target.extra = TRUE)$data)
    info$target = getTaskTargetNames(outdata)
    info$type = getTaskDesc(outdata)$type
  } else if (is.data.frame(outdata)) {
    info = makeShapeInfo(outdata)
  } else {
    # data is split by type, so record the shape of each constituent
    info = lapply(outdata, makeShapeInfo)
  }
  addClasses(info, "OutputShapeInfo")
}
# check properties of data returned by trafo or retrafo function
# @param outdata [data.frame | Task | list] data returned by (re)trafo function (after rebuildOutdata)
# @param recombined [data.frame | Task] recombined data as will be returned to the user
# @param target.names [character] names of target columns
# @param input.properties [character] input properties as determined by prepare***Input
# @param properties.needed [character] which properties are 'needed' by the subsequent data handler.
# Therefore, these properties may be present in `outdata` even though there were not in the input data.
# @param properties.adding [character] which properties are supposed to be 'added'
# to the subsequent data handler. Therefore, these properties must be *absent* from `outdata`.
# @param operating.type [character(1)] operating type of cpo, one of 'target', 'feature', 'retrafoless'
# @param whichfun [character(1)] name of the CPO stage
# @param name [character(1)] name of the CPO
# @return [invisible(NULL)]
# check properties of data returned by trafo or retrafo function.
# @param outdata [data.frame | Task | list] data returned by the (re)trafo function
# @param recombined [data.frame | Task] recombined data as returned to the user
# @param target.names [character] names of target columns
# @param input.properties [character] input properties determined by prepare***Input
# @param properties.needed [character] properties the output may add
# @param properties.adding [character] properties the output must not have
# @param operating.type [character(1)] one of 'target', 'feature', 'retrafoless'
# @param whichfun [character(1)] name of the CPO stage (for error messages)
# @param name [character(1)] name of the CPO
# @return [invisible(NULL)]
checkOutputProperties = function(outdata, recombined, target.names, input.properties, properties.needed, properties.adding, operating.type, whichfun, name) {
  # the output may carry anything the input already had, plus whatever
  # the CPO declared in .properties.needed
  allowed.properties = union(input.properties, properties.needed)
  present.properties = if (operating.type == "feature") {
    getGeneralDataProperties(outdata, target.names)
  } else {
    getTaskProperties(recombined)
  }
  present.properties = switch(operating.type,
    # target operating CPOs can not change feature properties, but affect.*-subsetting
    # may have hidden some of them from 'input.properties', so ignore them here
    target = setdiff(present.properties, cpo.dataproperties),
    # feature operating CPOs: drop target properties that getGeneralDataProperties
    # picks up but which are not relevant here
    feature = setdiff(present.properties, cpo.all.target.properties),
    present.properties)
  assertPropertiesOk(present.properties, allowed.properties, whichfun, "out", name)
  assertPropertiesOk(present.properties, setdiff(allowed.properties, properties.adding), whichfun, "adding", name)
}
# give userfriendly error message when data does have the properties it is allowed to have.
# @param present.properties [character] properties that were found in a given data object
# @param allowed.properties [character] the properties that the data object is allowed to have
# @param whichfun [character(1)] name of the CPO stage
# @param direction [character(1)] either "in" (data is being sent into CPO), "out" (data was returned by CPO function, some
# properties are present that were *not* present in the input data, but the given properties were not declared as
# 'properties.needed'), or "adding" (data was returned by CPO function, but the given properties *were* declared as
# 'properties.adding' and hence must not be present)
# @return [invisible(NULL)]
# give userfriendly error message when data has properties it is not allowed to have.
# @param present.properties [character] properties found in a given data object
# @param allowed.properties [character] properties the data object may have
# @param whichfun [character(1)] name of the CPO stage
# @param direction [character(1)] "in" (data entering the CPO), "out" (data returned
#   with properties not declared in .properties.needed), or "adding" (data returned
#   with properties declared in .properties.adding, which must be absent)
# @param name [character(1)] name of the CPO
# @return [invisible(NULL)]
assertPropertiesOk = function(present.properties, allowed.properties, whichfun, direction, name) {
  # property checking can be switched off globally
  if (!isPropertyStrict()) {
    return(invisible(NULL))
  }
  badprops = setdiff(present.properties, allowed.properties)
  if (length(badprops) == 0) {
    return(invisible(NULL))
  }
  # scalar condition: plain if/else instead of ifelse(); computed once instead of
  # three times, as is the collapsed property list
  suffix = if (length(badprops) > 1) "ies" else "y"
  badprops.str = collapse(badprops, sep = ", ")
  if (direction == "in") {
    stopf("Data going into CPO %s has propert%s %s that %s can not handle.",
      whichfun, suffix, badprops.str, name)
  } else if (direction == "out") {
    stopf("Data returned by CPO %s has propert%s %s that %s did not declare in .properties.needed.",
      whichfun, suffix, badprops.str, name)
  } else {
    # 'adding' properties may not be present during output, but the error message
    # would be confusing if we used the 'out' message for this.
    assert(direction == "adding")
    stopf("Data returned by CPO %s has propert%s %s that %s declared in .properties.adding.\n%s",
      whichfun, suffix, badprops.str, name,
      paste("properties in .properties.adding may not be present in", whichfun, "output."))
  }
}
# Check that the given task does not lie about itself.
#
# This is used on user-returned tasks. Check that task.desc$size equals the row number, that
# the target names occur in the task, etc.
# @param task [Task] the task.
# @param whichfun [character(1)] which function returned the task: trafo, retrafo
# @param name [character(1)] name of the cpo
# @return [invisible(NULL)]
# Check that the given task does not lie about itself.
#
# Used on user-returned tasks. Checks that task.desc$size equals the row number,
# that target names occur in the task, class structure is consistent, etc.
# @param task [Task] the task.
# @param whichfun [character(1)] which function returned the task: trafo, retrafo
# @param name [character(1)] name of the cpo
# @return [invisible(NULL)]
assertTask = function(task, whichfun, name) {
  if (!is.list(task) || !is.list(task$task.desc) || "id" %nin% names(task$task.desc)) {
    stopf("Object returned by %s %s was not a task.", whichfun, name)
  }
  # lazily built prefix for all following error messages
  taskdesignator = function() sprintf("Task %s returned by %s %s", task$task.desc$id, whichfun, name)
  if (!is.environment(task$env)) {
    stopf("%s had no environment in its '$env' slot.", taskdesignator())
  }
  task.desc = task$task.desc
  target = task.desc$target
  # expected S3 class hierarchy for each task type
  required.classes = switch(task.desc$type,
    classif = c("ClassifTask", "SupervisedTask"),
    regr = c("RegrTask", "SupervisedTask"),
    cluster = c("ClusterTask", "UnsupervisedTask"),
    surv = c("SurvTask", "SupervisedTask"),
    multilabel = c("MultilabelTask", "SupervisedTask"),
    stopf("%s task type must be one of classif, regr, cluster, multilabel, surv", taskdesignator()))
  if (!identical(task$type, task.desc$type)) {
    stopf("%s task type and task.desc type must be the same", taskdesignator())
  }
  required.classes = c(required.classes, "Task")
  if (!identical(required.classes, class(task))) {
    stopf("%s must have classes %s", taskdesignator(), collapse(required.classes, ", "))
  }
  required.classes = paste0(required.classes, "Desc")
  if (!identical(required.classes, class(task.desc))) {
    stopf("%s task.desc must have classes %s", taskdesignator(), collapse(required.classes, ", "))
  }
  # basic slot type checks; the names double as error messages
  checks = c(
    `id must be a character(1)` = testString(task.desc$id),
    `data must be a data.frame with unique column names` = testDataFrame(task$env$data, col.names = "unique"),
    `target must be a character` = testCharacter(target),
    `task.desc must have numeric 'n.feat' slot` = testNumeric(task.desc$n.feat))
  if (!all(checks)) {
    stopf("%s had problems: %s", taskdesignator(), collapse(names(checks)[!checks], "; "))
  }
  # compare counts as numbers, ignoring integer vs double storage
  identIntLikeNum = function(x, y) identical(as.numeric(x), as.numeric(y))
  # count feature columns (excluding the target) by their first class
  cl = table(dropNamed(vcapply(task$env$data, function(x) class(x)[1]), target))
  checks = c(
    `target must be a subset of task columns` = testSubset(target, colnames(task$env$data)),
    `number of 'numerics' features listed in task.desc is wrong` =
      identIntLikeNum(sum(cl[c("integer", "numeric")], na.rm = TRUE), task.desc$n.feat["numerics"]),
    `number of 'factors' features listed in task.desc is wrong` =
      identIntLikeNum(sum(cl["factor"], na.rm = TRUE), task.desc$n.feat["factors"]),
    `number of 'ordered' features listed in task.desc is wrong` =
      identIntLikeNum(sum(cl["ordered"], na.rm = TRUE), task.desc$n.feat["ordered"]),
    `'has.missings' slot in task.desc is wrong` =
      identical(anyMissing(task$env$data), task.desc$has.missings),
    `'size' slot in task.desc is wrong` =
      identIntLikeNum(nrow(task$env$data), task.desc$size),
    `'has.weights' slot in task.desc is wrong` =
      identical(!is.null(task$weights), task.desc$has.weights),
    # BUGFIX: check name had a stray doubled quote (''has.blocking') which showed
    # up verbatim in the error message
    `'has.blocking' slot in task.desc is wrong` =
      identical(!is.null(task$blocking), task.desc$has.blocking))
  if (!all(checks)) {
    stopf("%s had problems: %s", taskdesignator(), collapse(names(checks)[!checks], "; "))
  }
  # target cardinality depends on the task type
  if (task.desc$type %in% c("classif", "regr") && length(target) != 1) {
    stopf("%s is of type %s but has %s targets.", taskdesignator(), task.desc$type,
      length(target))
  } else if (task.desc$type == "surv" && length(target) != 2) {
    stopf("%s is of type surv and must have exactly two targets.", taskdesignator())
  } else if (task.desc$type == "multilabel" && length(target) < 2) {
    stopf("%s is of type multilabel and must have more than one target.", taskdesignator())
  }
  # type-specific consistency checks of the target column(s)
  checks = switch(task.desc$type,
    classif = c(
      `class levels in task.desc are not the factor levels of the target column` =
        testSetEqual(levels(task$env$data[[target]]), task.desc$class.levels),
      `task.desc 'positive' and 'negative' slots must be NA for multiclass tasks` =
        length(task.desc$class.levels) <= 2 || (is.na(task.desc$positive) && is.na(task.desc$negative)),
      `task.desc 'positive' and 'negative' slots must be both class levels of the target` =
        length(task.desc$class.levels) != 2 || (
          testString(task.desc$positive) &&
          testString(task.desc$negative) &&
          testSetEqual(c(task.desc$positive, task.desc$negative), task.desc$class.levels)),
      `task.desc 'positive' slot must be the class level, 'negative' slot must be not_<positive>` =
        length(task.desc$class.levels) != 1 || (
          testString(task.desc$positive) &&
          testString(task.desc$negative) &&
          identical(task.desc$positive, task.desc$class.levels) &&
          identical(task.desc$negative, paste0("not_", task.desc$class.levels)))),
    regr = TRUE,
    cluster = TRUE,
    surv = c(
      `time column must be numeric` = testNumeric(task$env$data[[target[1]]]),
      `event column must be logical` = testLogical(task$env$data[[target[2]]])),
    multilabel = c(
      `class.levels in task.desc must equal target names.` =
        testSetEqual(task.desc$class.levels, target)),
    stop("Unexpected error: task.desc$type was bad."))
  if (!all(checks)) {
    stopf("%s had problems: %s", taskdesignator(), collapse(names(checks)[!checks], "; "))
  }
}
##################################
### Task Splitting ###
##################################
# Get the *indices* of columns of 'data' that are referenced by affect.* params.
# E.g. if 'affect.type == "numeric"', the indices of all numeric columns are returned.
#
# All of the parameters are just the relevant 'affect.*' parameters as given to the CPO constructor, with the
# exception of 'data'. `getColIndices` can therefore be called using `do.call(getColIndices, insert(affect.param.list, list(data = DATA)))`
# @param data [data.frame] The data to get indices from
# @param type [character] subset of `c("numeric", "factor", "ordered", "other")`: all columns of the given type are included
# @param index [integer] index into data columns to include. Order is preserved, and they are ordered before all other matches
# @param names [character] names of data columns to include. Order is preserved, and they are ordered before other matches, except `index`
# @param pattern [character(1)] `grepl` pattern. Data columns that match the pattern are included
# @param invert [logical(1)] If TRUE, all matches are inverted, i.e. only columns that do not match any of the criteria are returned
# @param pattern.ignore.case [logical(1)] the `ignore.case` parameter of `grepl`: ignore case of `pattern`.
# @param pattern.perl [logical(1)] the `perl` parameter of `grepl`: use perl regex syntax
# @param pattern.fixed [logical(1)] the `fixed` parameter of `grepl`: don't interpret pattern as regex, but as fixed pattern.
# @return [integer]: index into `data` columns for selected columns.
# Get the *indices* of columns of 'data' referenced by the 'affect.*' parameters.
# Matches by column type, explicit index, explicit name and name pattern; an
# explicit index comes first, then named columns, then the remaining matches.
# @param data [data.frame] the data to select columns from
# @param type [character] types ("numeric", "factor", "ordered", "other") to include
# @param index [integer] explicit column indices, kept in given order and first
# @param names [character] explicit column names, kept in given order after `index`
# @param pattern [character(1)] grepl pattern matched against column names
# @param invert [logical(1)] if TRUE, return the complement of all matches
# @param pattern.ignore.case [logical(1)] grepl `ignore.case`
# @param pattern.perl [logical(1)] grepl `perl`
# @param pattern.fixed [logical(1)] grepl `fixed`
# @return [integer] index into `data` columns for selected columns
getColIndices = function(data, type, index, names, pattern, invert, pattern.ignore.case, pattern.perl, pattern.fixed) {
  # normalize every column to one of the four recognized type labels
  col.class = vcapply(data, function(col) class(col)[1])
  col.class[col.class == "integer"] = "numeric"
  col.class[!col.class %in% c("numeric", "factor", "ordered")] = "other"
  selected = col.class %in% type
  if (!is.null(pattern)) {
    selected = selected | grepl(pattern, colnames(data),
      ignore.case = pattern.ignore.case, perl = pattern.perl, fixed = pattern.fixed)
  }
  badnames = names[!names %in% names(data)]
  if (length(badnames)) {
    plural = if (length(badnames) > 1) "s" else ""
    stopf("Column%s not found: %s", plural, collapse(badnames, sep = ", "))
  }
  # priority order: explicit index, then explicit names, then everything else
  index = c(index, setdiff(match(names, names(data)), index))
  index = c(index, setdiff(which(selected), index))
  if (invert) {
    index = setdiff(seq_along(data), index)
  }
  index
}
# Translate the 'dataformat' option for internal use ("low level" dataformat) to a simplified version.
#
# most of CPOFormatCheck's split / recombine logic doesn't care about "factor", "onlyfactor", "ordered" or "numeric"
# and just treats it as "most" or "all" dataformat, subsetting the resulting data. This significantly
# simplifies the "splitting" and "recombining" of input / output data.
# E.g. if dataformat is "factor":
# (1) the data is split according to "most" -- this is translated by 'getLLDataformat'
# (2) the data that is handed to the cpo.trafo function is gotten by 'getIndata' which takes the '$factor' slot of the split data, in this case
# (3) cpo.trafo returns its output. This output is put back together with the other data using 'rebuildOutdata'
# (4) all checks are then done as if cpo.trafo had had used the "most" dataformat and not touched any but the '$factor' slots.
# @param dataformat [character(1)] the dataformat to translate
# @return [character(1)] a simplified dataformat option
# Translate the 'dataformat' option to its simplified ("low level") form:
# the type-slice formats "factor", "numeric" and "ordered" are all handled
# internally as "split"; every other format is passed through unchanged.
# @param dataformat [character(1)] the dataformat to translate
# @return [character(1)] the simplified dataformat option
getLLDataformat = function(dataformat) {
  switch(dataformat,
    factor = ,
    numeric = ,
    ordered = "split",
    dataformat)
}
# Get element of data according to dataformat.
#
# This is the complementary operation (category theoretically the quotient object) of `getLLDataformat`.
# With 'indata' being split according to dataformat "factor", "onlyfactor", "ordered", or "numeric", get the relevant subitem
# from the indata after it was split according to "most" or "all".
# If dataformat is none of these, this is a noop.
# @param indata [list of data.frame | data.frame | Task] the result of splitting incoming data according to `getLLDataformat(dataformat)`.
# @param dataformat [character(1)] one of the possible dataformat options
# @return [data.frame | Task] data formatted according to dataformat, to be fed into a trafo / retrafo function
# Get the element of split data corresponding to a type-slice dataformat.
# For "factor", "ordered" or "numeric", return that slot of the split data;
# for any other dataformat this is a no-op.
# @param indata [list of data.frame | data.frame | Task] data split per getLLDataformat(dataformat)
# @param dataformat [character(1)] one of the possible dataformat options
# @return [data.frame | Task] data to feed into a trafo / retrafo function
getIndata = function(indata, dataformat) {
  switch(dataformat,
    factor = ,
    ordered = ,
    numeric = indata[[dataformat]],
    indata)
}
# Reassemble data that was split according to some of the `dataformat` options.
#
# If dataformat is one of "factor", "onlyfactor", "ordered", or "numeric", then
# the data returned by trafo / retrafo (only the modified factors / etc) needs
# to be integrated with the remaining unmodified columns. With 'outdata' being a
# data slice according to dataformat, this function puts the returned data back
# into the "tempdata" block from which the input was taken.
#
# If dataformat is none of these, this is a noop.
# @param outdata [data.frame | list of data.frame | Task] the data returned by a trafo / retrafo function
# @param tempdata [data.frame | list of data.frame | Task] the original data, split according to `getLLDataformat(dataformat)`.
# @param dataformat [character(1)] the dataformat option of the current CPO
# @return [data.frame | list of data.frame | Task] `outdata`, possibly embedded into `tempdata`.
# Reassemble data that was split according to a type-slice dataformat.
# For "factor", "ordered" or "numeric", the (re)trafo result replaces that
# slot of `tempdata`; otherwise this is a no-op. A numeric slot returned as
# a matrix is additionally coerced back to a data.frame.
# @param outdata [data.frame | list of data.frame | Task] data returned by a trafo / retrafo function
# @param tempdata [data.frame | list of data.frame | Task] original data, split per getLLDataformat(dataformat)
# @param dataformat [character(1)] the dataformat option of the current CPO
# @return [data.frame | list of data.frame | Task] `outdata`, possibly embedded into `tempdata`
rebuildOutdata = function(outdata, tempdata, dataformat) {
  slice.formats = c("factor", "ordered", "numeric")
  if (dataformat %in% slice.formats) {
    tempdata[[dataformat]] = outdata
    outdata = tempdata
  }
  numeric.may.be.matrix = dataformat %in% c("numeric", "split")
  if (numeric.may.be.matrix && is.matrix(outdata$numeric)) {
    outdata$numeric = as.data.frame(outdata$numeric)
  }
  outdata
}
# split 'outdata' into subsets given by 'which'. If 'which' does not contain "ordered", then
# 'ordered' columns are put together with 'factor' columns.
# @param which [character] subset of `c("numeric", "factor", "ordered", "other")`: by which types to split
# @param data [data.frame | any] data to split. This can also be any other list or vector if `types` is given
# @param types [character | NULL] types of columns / elements of `data`. If this is not provided, it is
# determined from `data`. This is useful if `data` is not a data.frame but e.g. only a vector of column names.
# @return [list of data.frame | list of any] a list of subsets of `data`, named according to `which`.
# split 'data' into subsets given by 'which'. If 'which' does not contain
# "ordered", then 'ordered' columns are grouped together with 'factor' columns.
# @param which [character] subset of c("numeric", "factor", "ordered", "other"): types to split by
# @param data [data.frame | any] data to split; can be any list/vector if `types` is given
# @param types [character | NULL] types of columns / elements of `data`; determined
#   from `data` when NULL (useful if `data` is e.g. just a vector of column names)
# @return [list] subsets of `data`, named according to `which`
splitColsByType = function(which = c("numeric", "factor", "ordered", "other"), data, types = NULL) {
  if (is.null(types)) {
    types = vcapply(data, function(x) class(x)[1])
  }
  # validate 'which' without reassigning (several values allowed)
  match.arg(which, several.ok = TRUE)
  # when "ordered" is not requested separately, ordered columns count as factors
  factor.types = if ("ordered" %in% which) "factor" else c("factor", "ordered")
  known.types = c("integer", "numeric", "factor", "ordered")
  pick = function(kind) {
    keep = switch(kind,
      numeric = types %in% c("integer", "numeric"),
      factor = types %in% factor.types,
      ordered = types %in% "ordered",
      other = !types %in% known.types)
    data[keep]
  }
  setNames(lapply(which, pick), which)
}
# Convenience function for 'dataformat' splitting.
#
# calls `splitdf` or `splittask`, depending on datatype of `data`.
#
# This performs no checks. possibly need to check that properties are adhered to
# in retrafo, must also check if the format is the same as during training
# 'possibly' here means: if not attached to a learner
#
# @param data [Task | data.frame] the data to split up
# @param dataformat [character(1)] subset of `c("df.features", "split", "df.all", "task")`. Should be
# a result of `getLLDataformat` applied to the dataformat used for the CPO.
# @param strict.factors [logical(1)] whether to split ordered from factor columns
# @return [Task | data.frame | list of data.frame] the data split / formatted according to `dataformat`.
splitX = function(data, dataformat = c("df.features", "split", "df.all", "task"), strict.factors) {
  dataformat = match.arg(dataformat)
  # dispatch on the input container: plain data.frame vs. mlr Task
  splitter = if (is.data.frame(data)) splitdf else splittask
  splitter(data, dataformat, strict.factors)
}
# check whether the first level of a classif target is not the positive level
#
# @param task [Task] the task to check
# @return [logical(1)] TRUE when the first level of a classif target is not the positive level, FALSE otherwise, and
# for non-classif task.
isLevelFlipped = function(task) {
  if (getTaskType(task) != "classif") {
    return(FALSE)
  }
  positive = getTaskDesc(task)$positive
  assert(!is.null(positive))
  if (is.na(positive)) {
    # no positive class defined (e.g. multiclass task): nothing can be flipped
    return(FALSE)
  }
  target.levels = levels(getTaskData(task, target.extra = TRUE)$target)
  assert(length(target.levels) <= 2)
  if (identical(target.levels[1], positive)) {
    return(FALSE)
  }
  # positive class is not first, so it must be the second of the two levels
  assert(identical(target.levels[2], positive))
  TRUE
}
# reorder the levels of a classif target to make the positive level the first one
#
# @param data [data.frame] the data frame containing the target
# @param target [character(1) | numeric(1)] the name or index of the target column. Is assumed to be a factor with two levels.
# @return [data.frame] the input data with the two levels of the target column flipped.
flipTaskTarget = function(data, target) {
  # reversing the level order makes the former second level the first one;
  # the underlying values are untouched, only the level ordering changes
  old.levels = levels(data[[target]])
  data[[target]] = factor(data[[target]], levels = rev(old.levels))
  data
}
# reorder levels of classif target if the original task was level flipped
#
# @param data [data.frame] the data frame containing the target to be flipped
# @param task [Task] the original task. The target column of this task must be the one of the data.frame
# @return [data.frame] the input data, potentially with the two levels of the target column flipped.
unflipTarget = function(data, task) {
  # a non-flipped task needs no change; otherwise restore the level order
  if (!isLevelFlipped(task)) {
    return(data)
  }
  flipTaskTarget(data, getTaskTargetNames(task))
}
# This does the 'dataformat' splitting up of Tasks.
#
# This is the sister of `splitdf` which gets applied to `data.frame`.
# @param task [Task] the task to split up
# @param dataformat [character(1)] subset of `c("df.features", "split", "df.all", "task")`. Should be
# a result of `getLLDataformat` applied to the dataformat used for the CPO.
# @param strict.factors [logical(1)] whether to split ordered from factor columns
# @return [Task | data.frame | list of data.frame] the data split / formatted according to `dataformat`.
splittask = function(task, dataformat, strict.factors) {
  if (dataformat %in% c("split", "df.features")) {
    # feature columns only, target removed
    splt = getTaskData(task, target.extra = TRUE)$data
    colsplit = c("numeric", "factor", if (strict.factors) "ordered", "other")
    # the target as a (possibly multi-column) data.frame
    trg = getTaskData(task, features = character(0))
    if (isLevelFlipped(task)) {
      # for binary classif with the positive class not in level 1,
      # reverse the level order before handing the target to the CPO
      trg = flipTaskTarget(trg, 1)
    }
  }
  if (dataformat == "df.all") {
    data = getTaskData(task)
    target = getTaskTargetNames(task)
    # same level reordering as above, applied to the full data.frame
    data = unflipTarget(data, task)
    return(list(data = data, target = target))
  }
  switch(dataformat,
    task = list(data = task, target = getTaskTargetNames(task)),
    df.features = list(data = splt,
      target = trg), # want the target to always be a data.frame
    split = list(data = splitColsByType(colsplit, splt),
      target = trg)) # want the target to always be a data.frame
}
# This does the 'dataformat' splitting up of data.frames.
#
# When creating a `Task` from a `data.frame` for `dataformat == "task"`, a `ClusterTask` is generated.
# @param df [data.frame] the data to split up
# @param dataformat [character(1)] subset of `c("df.features", "split", "df.all", "task")`. Should be
# a result of `getLLDataformat` applied to the dataformat used for the CPO.
# @param strict.factors [logical(1)] whether to split ordered from factor columns
# @return [Task | data.frame | list of data.frame] the data split / formatted according to `dataformat`.
splitdf = function(df, dataformat, strict.factors) {
  # zero-column data.frame with the right number of rows serves as "no target"
  empty.target = df[, character(0), drop = FALSE]
  if (dataformat == "task") {
    # a plain data.frame has no target, so the only constructible Task is a ClusterTask
    task = makeClusterTask("[CPO CONSTRUCTED]", data = df, fixup.data = "no", check.data = FALSE)
    list(data = task, target = character(0))
  } else if (dataformat == "df.all") {
    list(data = df, target = character(0))
  } else if (dataformat == "df.features") {
    list(data = df, target = empty.target)
  } else if (dataformat == "split") {
    type.groups = c("numeric", "factor", if (strict.factors) "ordered", "other")
    list(data = splitColsByType(type.groups, df), target = empty.target)
  }
}
# Take subset of data according to 'affect.*' parameters
#
# @param indata [Task | data.frame]
# @param subset.selector [list] information about 'affect.*' parameters that determine which subset of 'indata' is affected
# @param allowed.properties [character] allowed properties of `indata`
# @param whichfun [character(1)] name of the CPO stage
# @param cpo.name [character(1)] name of the CPO
# @return [list] list(origdata, indata, subset.index, properties)
subsetIndata = function(indata, subset.selector, allowed.properties, whichfun, cpo.name) {
  # keep the unmodified input around; callers need it for later recombination
  origdata = indata
  if ("Task" %in% class(indata)) {
    # resolve the 'affect.*' selection against the feature columns only
    # (targets are never subset away)
    subset.selector$data = getTaskData(indata, target.extra = TRUE)$data
    subset.index = do.call(getColIndices, subset.selector)
    # subsetTask, but keep everything in order
    new.subset.index = featIndexToTaskIndex(subset.index, indata)
    indata.data = getTaskData(indata)
    # only rebuild the task if the selection actually changes anything
    if (!identical(as.integer(new.subset.index), seq_along(indata.data))) {
      indata = changeData(indata, indata.data[new.subset.index])
    }
  } else {
    subset.selector$data = indata
    subset.index = do.call(getColIndices, subset.selector)
    indata = indata[subset.index]
  }
  # the subset data must only exhibit properties the CPO declares it can handle
  present.properties = getTaskProperties(indata)
  assertPropertiesOk(present.properties, allowed.properties, whichfun, "in", cpo.name)
  list(origdata = origdata, indata = indata, subset.index = subset.index,
    properties = present.properties)
}
# Split cpo input data according to dataformat
#
# Creates also 'tempdata', the data after the split but before
# subsetting (useful for dataformat 'numeric', 'factors' etc)
# and possibly 'reduced.indata', which reduces df.all and task into df.features.
# @param data [data.frame | Task] the input data
# @param dataformat [character(1)] one of 'task', 'df.all', 'df.features', 'split', 'factor', 'numeric', 'ordered'
# @param strict.factors [logical(1)] whether to consider 'ordered' as separate from 'factor' types
# @param create.reduced [logical(1)] whether to create 'reduced' indata
# @return [list] list(indata, tempdata). indata is the proper input for the CPO function,
# a list(data, target [, data.reduced, target.reduced]). tempdata the data after split before subsetting.
splitIndata = function(data, dataformat, strict.factors, create.reduced) {
  indata = splitX(data, getLLDataformat(dataformat), strict.factors)
  # remember the split-but-not-yet-reduced data; recombination needs it
  tempdata = indata$data
  indata$data = getIndata(indata$data, dataformat)
  if (create.reduced) {
    # create separate "reduced" data that, besides containing the full task / df, also
    # contains the data and target alone.
    reduced.indata = if (dataformat %in% c("task", "df.all")) {
      splitX(data, "df.features", strict.factors)
    } else {
      # for the other formats, the indata already is the reduced form
      indata
    }
    names(reduced.indata) = paste0(names(reduced.indata), ".reduced")
    # `%c=%` appends the reduced entries to indata (project-defined operator)
    indata %c=% reduced.indata
  }
  list(indata = indata, tempdata = tempdata)
}
##################################
### Task Recombination ###
##################################
# Task / Data recombination entails checking that data / target was only modified if allowed by the CPO type,
# checking that the number of rows didn't change, and relevant properties didn't change.
# Recombine the data previously split up by `splitdf` / `splittask` with `dataformat` being "most" or "all",
# and after the CPO trafo / retrafo function performed its operations on it.
#
# recombineLL is called by both recombinetask and recombinedf, and does the checking (e.g. number or rows did not change)
# that is common to both.
#
# 'LL' meaning 'low level'
# @param olddata [list of data.frame] data as fed to the CPO, for reference of correct row number etc.
# @param newdata [list of data.frame] data as returned by trafo / retrafo
# @param targetnames [character] names of target columns within `olddata` that must be carried over unchanged
# @param strict.factors [logical(1)] whether 'ordered' columns are treated separately from 'factor' columns
# @param subset.index [integer] subset of 'task' features that were selected by 'affect.*' parameters
# @param name [character(1)] CPO name for pretty debug printing
# @return [data.frame] the data in `newdata` combined into a single data.frame.
recombineLL = function(olddata, newdata, targetnames, strict.factors, subset.index, name) {
  allnames = names(olddata)
  needednames = c("numeric", "factor", "other", if (strict.factors) "ordered")
  if (!isTRUE(checkSetEqual(names(newdata), needednames))) {
    stopf('CPO %s gave bad return. The returned value must be a list with names {"%s"}.',
      name, collapse(needednames, sep = '", "'))
  }
  # separate the target columns and the columns that were not selected by the
  # 'affect.*' parameters from the columns that were actually handed to the CPO
  targetdata = olddata[targetnames]
  olddata = dropNamed(olddata, targetnames)
  # FIX: guard against empty subset.index -- `olddata[-integer(0)]` would
  # select *no* columns, silently dropping all unaffected columns
  unsubsetdata = if (length(subset.index)) olddata[-subset.index] else olddata
  olddata = if (length(subset.index)) olddata[subset.index] else olddata[integer(0)]
  dfs = vlapply(newdata, is.data.frame)
  if (any(!dfs)) {
    is.plur = sum(!dfs) > 1
    stopf("Return of %s element%s %s %s not a data.frame.", name, ifelse(is.plur, "s", ""),
      collapse(names(dfs)[!dfs], sep = ", "), ifelse(is.plur, "are", "is"))
  }
  # check no new names clash with other names
  # this kind of sucks when a CPO just happens to change the names to something thats already there
  # but we also don't want to surprise the user about us unilaterally changing names, so he needs to
  # take care of that.
  jointargetnames = c(targetnames, names(unsubsetdata), unlist(lapply(newdata, names)))
  if (any(duplicated(jointargetnames))) {
    # FIX: `sep` was previously passed to unique() (where it fell into `...`
    # and was ignored) instead of collapse(), so duplicates were joined by ","
    stopf("CPO %s gave bad result\nduplicate column names %s", name,
      collapse(unique(jointargetnames[duplicated(jointargetnames)]), sep = ", "))
  }
  types = vcapply(olddata, function(x) class(x)[1])
  splitargetnames = splitColsByType(names(newdata), names(olddata), types) # list(numeric = [colnames], factor = [colnames]...
  numrows = nrow(olddata)
  namesorder = allnames
  for (splittype in names(splitargetnames)) {
    if (numrows != nrow(newdata[[splittype]])) {
      stopf("Number of rows of %s data returned by %s did not match input\nCPO must not change row number.",
        splittype, name)
    }
    if (!identical(splitargetnames[[splittype]], names(newdata[[splittype]]))) {
      # column names of this type group changed: move them to the end of the ordering
      namesorder = setdiff(namesorder, splitargetnames[[splittype]])
      namesorder %c=% names(newdata[[splittype]])
    }
  }
  # reassemble: unaffected columns, CPO output, target columns -- then restore order
  newdata = cbind(unsubsetdata, do.call(cbind, unname(newdata)), targetdata)
  assertSetEqual(names(newdata), namesorder)
  newdata[namesorder]
}
# Recombine a task that was previously (potentially) split up according to `dataformat` and then changed by trafo / retrafo.
#
# This is used when the split up data was created from a task, and if (therefore) the result of the
# CPO is again expected to be a task.
#
# this checks that the result of trafo / retrafo has the proper type, that target and type didn't change,
# (if dataformat == "task"), and that the number of rows is the same. It then reconstructs the complete task that
# will be output by the CPO.
# @param task [Task] old task, used for input, for comparison
# @param newdata [Task | data.frame | list of data.frame] output of cpo.trafo / cpo.retrafo. This has the same format
# as `splittask(task, dataformat)`
# @param dataformat [character(1)] the dataformat used, this is `getLLDataformat` applied to the CPO's dataformat parameter.
# @param strict.factors [logical(1)] whether to consider 'ordered' as separate from 'factor' types
# @param subset.index [integer] subset of 'task' features that were selected by 'affect.*' parameters
# @param targetbound [logical(1)] TRUE for target operating CPO, FALSE for feature operating CPO.
# @param newtasktype [character(1)] only if `targetbound`, type of new task. Give even if no task conversion happens.
# @param name [character(1)] CPO name for pretty debug printing
# @return [Task] the task incorporating the changes done by the CPO to `newdata`.
recombinetask = function(task, newdata, dataformat = c("df.all", "task", "df.features", "split"),
  strict.factors, subset.index, targetbound, newtasktype, name) {
  dataformat = match.arg(dataformat)
  if (is.data.frame(task)) {
    # only if 'targetbound'
    task = makeClusterTask(id = "[CPO CONSTRUCTED]", data = task, fixup.data = "no", check.data = FALSE)
  }
  if (dataformat %in% c("df.features", "split")) {
    if (targetbound) {
      # return is just 'target' in a df.
      if (!is.data.frame(newdata)) {
        stopf("CPO %s gave bad result\nmust return a data.frame containing the target.",
          name)
      }
      olddata = getTaskData(task)
      oldtnames = getTaskTargetNames(task)
      newtnames = names(newdata)
      if (setequal(newtnames, oldtnames)) {
        # target names unchanged: overwrite target columns in place
        olddata[newtnames] = newdata
        newdata = olddata
      } else if (length(oldtnames) == 1 && length(newdata) == 1) {
        assert(length(oldtnames) == 1)
        # single target renamed: replace values AND the column name
        # note that this can NOT be combined with
        # the olddata[newtnames] block above!
        # also note the double brackets [[ ]].
        olddata[[oldtnames]] = newdata[[1]]
        names(olddata)[names(olddata) == oldtnames] = names(newdata)
        newdata = olddata
      } else {
        # different number of target columns: drop old targets, append new ones
        newdata = cbind(dropNamed(olddata, oldtnames), newdata)
      }
      if (anyDuplicated(colnames(newdata))) {
        stopf("CPO %s introduced duplicate column names", name)
      }
      if (newtasktype == "classif") {
        # restore the original positive-class level ordering before rebuilding
        newdata = unflipTarget(newdata, task)
      }
      return(constructTask(newdata, newtnames, newtasktype, getTaskId(task), isLevelFlipped(task)))
    } else {
      # feature-operating CPO: delegate the data.frame recombination and checks
      return(changeData(task, recombinedf(getTaskData(task), newdata, dataformat, strict.factors, subset.index, getTaskTargetNames(task), name)))
    }
  }
  if (dataformat == "df.all") {
    checkDFBasics(task, newdata, targetbound, name)
    if (!targetbound) {
      newdata = unflipTarget(newdata, task)
      newdata = changeData(task, newdata)
    } else {
      if (newtasktype == "classif") {
        newdata = unflipTarget(newdata, task)
      }
      # targetbound may change the task type, so a new task must be built
      newdata = constructTask(newdata, getTaskTargetNames(task), newtasktype, getTaskId(task), isLevelFlipped(task))
    }
  }
  if (nrow(getTaskData(task)) != nrow(getTaskData(newdata))) {
    stopf("CPO %s must not change number of rows", name)
  }
  new.subset.index = featIndexToTaskIndex(subset.index, task)
  if (targetbound) {
    # everything may change except size, n.feat and missings
    fulldata = recombinedf(getTaskData(task), getTaskData(newdata), "df.all", strict.factors, new.subset.index, character(0), name)
    fulltask = constructTask(fulldata, getTaskTargetNames(newdata), newtasktype, getTaskId(newdata), isLevelFlipped(newdata))
    # a targetbound CPO must leave the non-target feature columns untouched
    checkColumnsEqual(getTaskData(task, target.extra = TRUE)$data[subset.index],
      getTaskData(newdata, target.extra = TRUE)$data, "non-target column", name)
    checkTaskBasics(task, fulltask, setdiff(names(getTaskDesc(task)), c("n.feat", "has.missings", "has.blocking", "has.weights")), name)
    return(fulltask)
  }
  #check type didn't change
  assert(getTaskType(task) == getTaskType(newdata))
  assertSetEqual(names(getTaskDesc(task)), names(getTaskDesc(newdata)))
  # check target didn't change
  checkColumnsEqual(getTaskData(task, features = character(0)),
    getTaskData(newdata, features = character(0)), "target column", name)
  checkTaskBasics(subsetTask(task, features = subset.index), newdata, c("id", "n.feat", "has.missings"), name)
  changeData(task, recombinedf(getTaskData(task), getTaskData(newdata), "df.all", strict.factors, new.subset.index, character(0), name))
}
# convert an index of feature columns to an index w.r.t. the whole task
#
# A column index that references columns with respect the data
# columns only is converted to the column index with respect the
# whole task data.frame (including target columns).
#
# Target columns are included in this index. If feat.index
# is a sorted numeric, the target columns just get sorted into
# the feat.index; otherwise they are put at the beginning.
# @param feat.index [numeric] index of columns with respect to feature cols only
# @param task [Task] the task
# @return [numeric] index w.r.t. the whole task df. Includes target cols.
featIndexToTaskIndex = function(feat.index, task) {
  task.data = getTaskData(task)
  is.target = names(task.data) %in% getTaskTargetNames(task)
  # positions of the feature columns within the full task data.frame
  feature.positions = which(!is.target)
  task.index = feature.positions[feat.index]
  if (all(task.index == sort(task.index))) {
    # index is monotonic: merge the target columns in at their original positions
    sort(c(which(is.target), task.index))
  } else {
    # index reorders columns: keep the target columns at the front instead
    c(which(is.target), task.index)
  }
}
# Recombine a data.frame that was previously (potentially) split up according to `dataformat` and then changed by trafo / retrafo.
#
# recombine data.frame after checking for match of rows etc., see 'recombinetask'.
# @param df [data.frame] old data.frame, used for input, for comparison
# @param newdata [Task | data.frame | list of data.frame] output of cpo.trafo / cpo.retrafo. This has the same format
# as `splitdf(df, dataformat)`
# @param dataformat [character(1)] the dataformat used, this is `getLLDataformat` applied to the CPO's dataformat parameter.
# @param strict.factors [logical(1)] whether to consider 'ordered' as separate from 'factor' types
# @param subset.index [integer] subset of 'df' features that were selected by 'affect.*' parameters
# @param targetcols [character] names of target columns; this is relevant for retrafo when cpo.trafo was trained with a Task that
# contains target columns, and cpo.retrafo is fed with a data.frame that contains columns with the same name.
# @param name [character(1)] CPO name for pretty debug printing
# @return [data.frame] the data.frame incorporating the changes done by the CPO to `newdata`
recombinedf = function(df, newdata, dataformat = c("df.features", "split", "df.all", "task"), strict.factors, subset.index, targetcols, name) {
  # 'targetcols' is empty for plain data.frame input;
  # otherwise it contains the columns removed from the DF because they were target columns.
  dataformat = match.arg(dataformat)
  if (dataformat == "split") {
    # split format has its own recombination logic
    return(recombineLL(df, newdata, targetcols, strict.factors, subset.index, name))
  } else if (dataformat == "task") {
    assertClass(newdata, "Task")
    newdata = getTaskData(newdata)
  }
  if (!is.data.frame(newdata)) {
    stopf("CPO %s gave bad result\nmust return a data.frame.", name)
  }
  if (nrow(df) != nrow(newdata)) {
    stopf("CPO %s must not change number of rows.", name)
  }
  # columns that were not handed to the CPO: neither targets nor 'affect.*'-selected
  outsetcols = dropNamed(df, targetcols)
  if (length(subset.index)) {
    outsetcols = outsetcols[-subset.index]
  }
  fullnames = c(names(newdata), names(outsetcols), targetcols)
  dubs = duplicated(fullnames)
  if (any(dubs)) {
    # FIX: `sep` was previously passed to unique() (where it fell into `...`
    # and was ignored) instead of collapse(), so names were joined by ","
    stopf("CPO %s gave bad result\ncolumn names %s duplicated (possibly with target)", name,
      collapse(unique(fullnames[dubs]), sep = ", "))
  }
  datanames = names(newdata)
  newdata = cbind(outsetcols, newdata, df[targetcols])
  if (identical(datanames, setdiff(names(df), targetcols)[subset.index])) {
    # names didn't change, so we preserve column order
    newdata = newdata[names(df)]
    names(newdata) = names(df)
  }
  row.names(newdata) = attr(df, "row.names")
  newdata
}
# Check that columns in `old.relevants` and `new.relevants` are identical.
#
# This is mostly a helper function for pretty error messages. Depending on what
# a CPO operates on, it must not change target OR data columns. These are the "relevant"
# columns. If this rule is violated, an error message tells the user that a CPO must not
# change target / data columns.
# @param old.relevants [data.frame] subset of the old data that must stay constant
# @param new.relevants [data.frame] subset of modified data, which is checked for equality with old.relevants
# @param relevant.name [character(1)] should be something like 'targets' or 'non-target features'
# @param name [character(1)] name of the CPO for debug purposes
# @return [invisible(NULL)]
checkColumnsEqual = function(old.relevants, new.relevants, relevant.name, name) {
  # column names must agree as a set first ...
  if (!isTRUE(checkSetEqual(names(old.relevants), names(new.relevants)))) {
    stopf("CPO %s must not change %s names.", name, relevant.name)
  }
  # ... then every column's content must be byte-identical
  mismatched = Filter(
    function(colname) !identical(old.relevants[[colname]], new.relevants[[colname]]),
    names(old.relevants))
  if (length(mismatched) > 0) {
    stopf("CPO %s must not change %ss, but changed %s.", name, relevant.name, mismatched[1])
  }
}
# general function that builds a task of type 'type' and with id 'id', using
# the given data.
#
# @param data [data.frame] the data to be used in the new task
# @param target [character] name of target columns inside `data`
# @param type [character(1)] type of the task to be created
# @param id [character(1)] id of the newly created task
# @param flip [logical(1)] whether, for binary classif task, to put the 2nd level on 'positive'
# @return [Task] a new task of type `type`, with id `id`, data `data`, and other meta information from `oldtask`.
constructTask = function(data, target, type, id, flip = FALSE) {
  if (type == "cluster") {
    if (length(target)) {
      stop("Cluster task cannot have target columns")
    }
    return(makeClusterTask(id = id, data = data, fixup.data = "no", check.data = FALSE))
  }
  if (type == "classif") {
    # classif implies exactly one target column
    assertString(target)
    targetcol = data[[target]]
    if (!is.factor(targetcol)) {
      stop("ClassifTask target must be a factor column!")
    }
    # 'flip' puts the second factor level on 'positive' for binary tasks;
    # the length(target) check is redundant after assertString() but kept defensively
    if (flip && length(target) == 1) {
      if (length(levels(targetcol)) == 2) {
        positive = levels(targetcol)[2]
        return(makeClassifTask(id = id, data = data, target = target,
          positive = positive, fixup.data = "no", check.data = FALSE))
      }
    }
  }
  # all remaining types (and non-flipped / non-binary classif) use the default constructor
  constructor = switch(type,
    classif = makeClassifTask,
    multilabel = makeMultilabelTask,
    regr = makeRegrTask,
    surv = makeSurvTask)
  constructor(id = id, data = data, target = target, fixup.data = "no", check.data = FALSE)
}
# check that newdata is a task, and that it agrees with the
# old 'task' on everything except 'allowed.td.changes'
#
# @param task [Task] the task to compare newdata to
# @param newdata [Task] the task to check
# @param allowed.td.changes [character] slots of 'task.desc' that the tasks may disagree on
# @param name [character(1)] name of the CPO to use in the error message
# @return [invisible(NULL)]
checkTaskBasics = function(task, newdata, allowed.td.changes, name) {
  if (!"Task" %in% class(newdata)) {
    stopf("CPO %s must return a Task", name)
  }
  # dedicated row-count check first, for a clearer error message
  if ("size" %nin% allowed.td.changes && getTaskDesc(task)$size != getTaskDesc(newdata)$size) {
    stopf("CPO %s must not change number of rows", name)
  }
  old.td = getTaskDesc(task)
  new.td = getTaskDesc(newdata)
  # check most of task description didn't change
  for (n in setdiff(names(old.td), allowed.td.changes)) {
    # all.equal returns TRUE or a character description of the difference;
    # the in-condition assignment captures that description for the error message
    if (!isTRUE({complaint = all.equal(old.td[[n]], new.td[[n]])})) {
      stopf("CPO %s changed task description item %s:\n%s", name, n, complaint)
    }
  }
}
# check that newdata is a data.frame that fits 'task's format (size, no overlap in target column names)
# @param task [Task] the task to compare newdata to
# @param newdata [data.frame] the data.frame to check
# @param targetbound [logical(1)] whether the CPO is allowed to operate on target columns
# @param name [character(1)] name of the CPO to use in the error message
# @return [invisible(NULL)]
checkDFBasics = function(task, newdata, targetbound, name) {
  if (!is.data.frame(newdata)) {
    stopf("CPO %s cpo.trafo gave bad result\ncpo.trafo must return a data.frame.", name)
  }
  assertClass(newdata, "data.frame")
  expected.targets = getTaskTargetNames(task)
  missing.targets = expected.targets[!expected.targets %in% names(newdata)]
  if (length(missing.targets) > 0) {
    # targetbound CPOs get a hint about how to legitimately rename targets
    addendum = if (targetbound) {
      paste("\nIf you want to change names or number of target columns in targetbound CPOs",
        'you must use other dataformat values, e.g. "df.features".', sep = "\n")
    } else {
      ""
    }
    plural.s = if (length(missing.targets) > 1) "s" else ""
    stopf("CPO %s cpo.trafo gave bad result\ndata.frame did not contain target column%s %s.%s",
      name, plural.s, collapse(missing.targets, ", "), addendum)
  }
}
# Checks and recombines data returned by a 'retrafoless' CPO, which is allowed to operate on both
# data and target columns.
#
# perform basic checks that a retrafoless cpo returned the kind of task / data.frame that it should;
# then convert, if necessary.
# @param olddata [Task | data.frame] the original input data
# @param newdata [Task | data.frame] the data returned by the CPO trafo function
# @param shapeinfo [ShapeInfo] The input shape which `df` must conform to
# @param dataformat [character(1)] the result of `getLLDataformat` applied to the CPO's dataformat parameter
# @param strict.factors [logical(1)] whether to check for 'ordered' as a type differing from 'factor'
# @param subset.index [integer] index into olddata columns: the columns actually selected by 'affect.*' parameters
# @param name [character(1)] the CPO name used in error messages
# @return [Task | data.frame] the recombined data from newdata
recombineRetrafolessResult = function(olddata, newdata, shapeinfo.input, dataformat, strict.factors, subset.index, name) {
  # retrafoless CPOs always see all columns, so subset.index must be the identity
  assert(identical(subset.index, seq_along(subset.index)))
  assertSubset(dataformat, c("df.all", "task"))
  if (is.data.frame(olddata)) {
    # data.frame input: the return type is fixed by the dataformat
    if (dataformat == "df.all") {
      assertClass(newdata, "data.frame")
    } else { # dataformat == "task"
      # a task built from a plain data.frame is always a ClusterTask (no target)
      assertClass(newdata, "ClusterTask")
      newdata = getTaskData(newdata)
    }
    assertShapeConform(newdata, shapeinfo.input, strict.factors, name, TRUE)
  } else {
    if (dataformat == "df.all") {
      assertClass(newdata, "data.frame")
      # target columns must exist and appear in their original relative order
      if (!all(getTaskTargetNames(olddata) %in% names(newdata)) ||
          !all(names(newdata)[names(newdata) %in% getTaskTargetNames(olddata)] == getTaskTargetNames(olddata))) {
        stopf("retrafoless CPO %s must not change target names.", name)
      }
      if (getTaskType(olddata) == "classif") {
        tname = getTaskTargetNames(olddata)
        if (isLevelFlipped(olddata)) {
          # restore the original positive-class level ordering before comparing
          newdata = flipTaskTarget(newdata, tname)
        }
        if (!identical(levels(getTaskData(olddata, target.extra = TRUE)$target),
            levels(newdata[[tname]]))) {
          stopf("retrafoless CPO %s must not change target class levels.", name)
        }
      }
      newdata = changeData(olddata, newdata)
    } else { # dataformat == "task"
      if (!identical(class(newdata), class(olddata))) {
        stopf("retrafoless CPO %s must not change task type.", name)
      }
      if (!all(getTaskTargetNames(olddata) == getTaskTargetNames(newdata))) {
        stopf("retrafoless CPO %s must not change target names.", name)
      }
      if (getTaskType(olddata) == "classif") {
        if (isLevelFlipped(olddata) != isLevelFlipped(newdata)) {
          stopf("CPO %s changed task target feature order.", name)
        }
        if (!identical(levels(getTaskData(olddata, target.extra = TRUE)$target),
            levels(getTaskData(newdata, target.extra = TRUE)$target))) {
          stopf("retrafoless CPO %s must not change target class levels.", name)
        }
      }
      # retrafoless CPOs may add/remove rows, so size and missings may change
      checkTaskBasics(olddata, newdata, c("has.missings", "size", "class.distribution"), name)
    }
    # feature columns must still conform to the shape seen at training time
    assertShapeConform(getTaskData(newdata, target.extra = TRUE)$data, shapeinfo.input, strict.factors, name, TRUE)
  }
  newdata
}
|
85dcf47f3fdac8c54407adda8d380326570e865f
|
3ba5b8154f416e465af765012bf400644c24529f
|
/script/graph/cilk_by_threads.R
|
31d4c4783c34c1c53b5f641c1d75efea69493c9b
|
[] |
no_license
|
Sommerio/ParComp5
|
99e18a5e52ce3c6ea0a373c5008ae11ac9dfff09
|
768ebbfc92e3aeefa7ac838320771a942812c8c7
|
refs/heads/master
| 2021-12-10T05:54:14.574843
| 2013-01-28T17:22:02
| 2013-01-28T17:22:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,062
|
r
|
cilk_by_threads.R
|
# Plot runtime over thread count for a fixed problem size, one line per chunk size.
# Args: File Name (semicolon-separated csv), Desired Length (problem size), Out File (jpeg)
args <- commandArgs(trailingOnly=TRUE)
print(paste("cilk_by_threads.R, Args:", args, sep=""))

rawdata <- read.csv(args[1], header = FALSE, sep=";")
names(rawdata) <- c('length', 'threads', 'chunk', 'time')

# FIX: convert the size argument to numeric once; comparing the character
# args[2] against numbers performed lexicographic string comparisons
size <- as.numeric(args[2])
full_data <- subset(rawdata, length == size, select=c(length, threads, chunk, time))
# FIX: use nrow() -- length() of a data.frame counts columns (always 4 here),
# so the empty-result guard never fired
if (nrow(full_data) < 1) {
  q()
}

jpeg(args[3], width=1024, height=1024)

# chunk sizes to plot and y-axis limit, chosen by problem size
if (size > 3000) {
  vals <- c(1, 20, 100)
  yreistn <- 0.0023
} else {
  vals <- c(1, 500, 3000)
  yreistn <- 0.025
}

plot(full_data$threads, full_data$time, type="n", main=paste(args[1], ",\nSize:",args[2], sep=""), xlab="Threads", ylab="Time", ylim=c(0,yreistn))
for (i in seq_along(vals)) {
  data <- subset(full_data, chunk == vals[i])
  data <- data[with(data, order(threads)), ]
  # FIX: nrow() instead of length() here as well
  if (nrow(data) > 0) {
    lines(data$threads, data$time, col=i)
  }
}
legend(1, yreistn, cex=1.2, c(vals), col=seq_along(vals), pch=1, lty=1)
dev.off()
|
6e4e21645de67f90d86b29115933f2ea2298dba3
|
41fab1ef7f98ddf4c00c380fc8eb87eb1f17105d
|
/ShinyCompact/ui.R
|
4ff02d8159800e36684d6e8495523abd64195edc
|
[] |
no_license
|
TesterHH/DataProductsShinyCourseProject
|
bd0a8ff00c45655b9411acaddd8014fcd35959b4
|
54cb5b6c757dd4b51153ad474e85087de3945518
|
refs/heads/master
| 2020-05-04T15:48:35.319315
| 2019-04-03T18:56:34
| 2019-04-03T18:56:34
| 179,257,207
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 643
|
r
|
ui.R
|
# Shiny UI: sidebar with a radio-button choice of an additional regression
# predictor; main panel renders the model plot and the p-value of that predictor.
shinyUI(pageWithSidebar(
  headerPanel(""),
  sidebarPanel(
    # display labels map to mtcars column names consumed by the server as input$secondVar
    radioButtons("secondVar", "Add predictor:",
                 c("Num. cylinders" = "cyl",
                   "Displacement" = "disp",
                   "Horsepower" = "hp",
                   "Rear ax. ratio" = "drat",
                   "Weight" = "wt",
                   "1/4 m. time" = "qsec",
                   "Engine type" = "vs",
                   "Num. gears" = "gear",
                   "Num. carburetors" = "carb"
                 ))),
  mainPanel(
    # rendered by output$myPlot and output$myText in server.R
    plotOutput('myPlot'),
    p('p-value, for additional predictor:'),
    textOutput('myText')
  )
))
|
8a8ef2838563f041d8a5e0cd7b5dd7b9235d9bd7
|
60827d96a5ce073b4531974503d0a2e4e295e648
|
/R/run_expansion.R
|
40bd55fbcbc0cf9c2212a3e6640ebf8121643288
|
[] |
no_license
|
ankitbit/qualmet
|
b6046d0e4bb28ef6b1f953a71c2544623fd886dc
|
c5336aaac5c6fe2c1aa270acbadd943aa3291ba3
|
refs/heads/master
| 2020-03-15T08:59:05.342735
| 2018-05-04T02:25:40
| 2018-05-04T02:25:40
| 112,934,338
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 757
|
r
|
run_expansion.R
|
#' Compute the expansion quality metric for nine community detection algorithms
#'
#' Runs each igraph community detection algorithm on `data` and evaluates the
#' resulting partition with `find_expansion`.
#'
#' @param data an igraph graph object accepted by the igraph community functions
#' @return numeric(9) expansion values, in the fixed algorithm order below so
#'   that callers can keep relying on the positional meaning of the result
#' @export
run_expansion<-function(data){
  # algorithm list replaces nine copy-pasted statements; order is part of the contract
  algorithms <- list(
    edge.betweenness.community,
    fastgreedy.community,
    label.propagation.community,
    leading.eigenvector.community,
    multilevel.community,
    optimal.community,
    spinglass.community,
    walktrap.community,
    infomap.community
  )
  vapply(algorithms, function(algorithm) {
    find_expansion(data, algorithm(data))
  }, numeric(1))
}
|
0627c2c5633f49d8985f0c37d9cc97baaf9f1cc9
|
5b7013e13b7aad9e6ee2604d6d9619519bddb69f
|
/envoy-scaling-experiment/graphMany.R
|
d7dfefe756c4696f0a214e3df4baff25a55aa7cf
|
[] |
no_license
|
isabella232/cf-k8s-networking-scaling
|
4d841595571198c122863e397fe4faabe8b9ec00
|
bed654858024b3e85debcb10371f6ae84a97ab75
|
refs/heads/master
| 2022-12-29T15:08:26.462172
| 2020-10-13T21:14:24
| 2020-10-13T21:14:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,488
|
r
|
graphMany.R
|
library(tidyverse)
library(gridExtra)
# directory prefix for all input csv files (current working directory)
filename <- "./"
## Conversion Functions
mb_from_bytes <- function(x) {
  # bytes -> mebibytes, rounded to one decimal for axis labels
  bytes.per.mb <- 1024 * 1024
  round(x / bytes.per.mb, digits = 1)
}
bytes_from_mb <- function(x) {
  # mebibytes -> bytes (inverse of mb_from_bytes, without the rounding)
  bytes.per.mb <- 1024 * 1024
  x * bytes.per.mb
}
# Read in important times (runID, stamp, event)
times=read_csv(paste(filename, "importanttimes.csv", sep=""))

# Set up x-axis in experimental time (all data has normalized timestamps)
maxSec = max(times$stamp)
# tick every 20 minutes; stamps are in nanoseconds (20 * 60 * 1e9 ns)
breaksFromZero <- seq(from=0, to=maxSec, by=20 * 60 * 1000 * 1000 * 1000)
secondsFromNanoseconds <- function(x) {
  # nanoseconds -> seconds, rounded to one decimal for axis labels
  nanos.per.second <- 1000 * 1000 * 1000
  round(x / nanos.per.second, digits = 1)
}
minutesFromNanoseconds <- function(x) {
  # nanoseconds -> minutes, rounded to one decimal for axis labels
  nanos.per.minute <- 60 * 1000 * 1000 * 1000
  round(x / nanos.per.minute, digits = 1)
}
nanosecondsFromSeconds <- function(x) {
  # seconds -> nanoseconds (inverse of secondsFromNanoseconds, without rounding)
  nanos.per.second <- 1000 * 1000 * 1000
  x * nanos.per.second
}
# Format a ggplot's x axis as experiment time: nanosecond stamps labeled in
# minutes, using the global 20-minute `breaksFromZero` tick positions.
# @param p a ggplot object
# @return the plot with x label and minute-scaled breaks applied
experiment_time_x_axis <- function(p) {
  return(
    p + xlab("Time (minutes)") +
    scale_x_continuous(labels=minutesFromNanoseconds, breaks=breaksFromZero)
  )
}
# Add vertical lines for all important time events in all experiment runs
# NOTE(review): this name shadows graphics::lines for the rest of the script;
# the ggplot code here is unaffected, but renaming would be safer.
lines <- function() {
  return(
    geom_vline(data=times, mapping=aes(xintercept=stamp), color="grey80", alpha=0.5)
  )
}
# Add labels to the vertical lines (add text before the first line)
# earliest occurrence of each event across all runs, used to place the label once
first_times = group_by(times, event) %>% summarize(stamp = min(stamp))
# ggplot layer: rotated text label at each event's first vertical line
lineLabels <- function() {
  return(
    geom_text(data=first_times, mapping=aes(x=stamp, y=0, label=event), size=2, angle=90, vjust=-0.4, hjust=0, color="grey25")
  )
}
# Shared ggplot theme for every figure in this script: linedraw base with
# the legend title suppressed.
our_theme <- function() {
return(
theme_linedraw() %+replace%
theme(legend.title=element_blank())
)
}
quantiles = c(0.68, 0.90, 0.99, 0.999, 1)
mylabels = c("p68", "p90", "p99", "p999", "max")
fiveSecondsInNanoseconds = 5 * 1000 * 1000 * 1000
print("Collect Route Status Data")
# timestamp, runID, status, route
routes = read_csv("./route-status.csv", col_types=cols(runID=col_factor(), status=col_factor(), route=col_integer())) %>% drop_na()
halfRoute = max(routes$route) # the route-status.csv will only include routes created during CP load
print(paste("halfRoute = ", halfRoute))
time_when_route_first_works = routes %>% filter(status == "200") %>%
select(runID, stamp, route) %>%
arrange(stamp) %>%
group_by(runID, route) %>% slice(1L) %>% ungroup()
obs_deltas = routes %>%
filter(route < halfRoute) %>%
arrange(stamp) %>%
group_by(runID, route) %>%
mutate(delta = stamp - lag(stamp, default=stamp[1])) %>%
filter(delta != 0) %>%
summarize(m = mean(delta), n = n()) %>%
group_by(runID) %>%
summarize(m = mean(m)) %>%
mutate(m = m / 1e6)
print(obs_deltas)
# envoy_polls = read_csv('./envoy_requests.csv') %>%
# group_by(runID) %>%
# mutate(delta = stamp - lag(stamp, default=stamp[1])) %>%
# summarize(m = median(delta))
# print(envoy_polls)
# quit()
print("Collect Config Send Data")
xds = read_csv("./jaeger.csv", col_types=cols(runID=col_factor()))
xds = xds %>%
separate_rows(Routes, convert = TRUE) %>% # one row per observation of a route being configured
drop_na() # sometimes route is NA, so drop those
# RouteConfiguration is the first type sent. ClusterLoadAllocation is last.
time_when_route_first_sent = xds %>% filter(Type == "RouteConfiguration") %>%
select(runID, stamp=Timestamp, route=Routes) %>%
arrange(stamp) %>%
group_by(runID, route) %>% slice(1L) %>% ungroup() %>%
filter(route < halfRoute)
print("Collect /clusters Data")
time_when_cluster_appears = read_csv("endpoints_arrival.csv", col_types=cols(runID=col_factor())) %>%
filter(str_detect(route, "service_.*")) %>%
select(runID, stamp, route) %>%
extract("route", "route", regex = "service_([[:alnum:]]+)", convert=TRUE) %>%
filter(route < halfRoute) %>% # only include routes from CP load
print("Calculate Control Plane Latency")
from_config_sent_to_works = left_join(time_when_route_first_sent, time_when_route_first_works, by=c("runID","route")) %>%
mutate(time_diff = stamp.y - stamp.x) # when it works minus when it was sent
from_clusters_to_works = left_join(time_when_cluster_appears, time_when_route_first_works, by=c("runID", "route")) %>%
mutate(time_diff = stamp.y - stamp.x) %>% # route works - cluster exists
arrange(route)
from_config_sent_to_clusters = left_join(time_when_route_first_sent, time_when_cluster_appears, by=c("runID", "route")) %>%
mutate(time_diff = stamp.y - stamp.x) %>% # cluster exists - route sent
arrange(route)
print(filter(from_clusters_to_works, is.na(time_diff)))
print("Calculate Quantiles")
from_config_sent_to_works.q = quantile(from_config_sent_to_works$time_diff, quantiles)
from_clusters_to_works.q = quantile(from_clusters_to_works$time_diff, quantiles)
cptails = tibble(mylabels,
from_config_sent=from_config_sent_to_works.q,
from_clusters=from_clusters_to_works.q) %>%
pivot_longer(c(from_config_sent, from_clusters), names_to="type", values_to="time_diff")
print("Calculate Time Spent Per Step")
latencies_by_route = bind_rows(
"from_config_sent_to_clusters"=from_config_sent_to_clusters,
"from_clusters_to_works"=from_clusters_to_works,
.id="type"
) %>%
filter(route < halfRoute)
# TODO make negative time_diff zero
print(latencies_by_route)
print("Graph Latency to Route Working")
tail_colors <- c("from_config_sent"="black", "from_clusters"="gray85")
tail_latencies = ggplot(cptails, aes(x=mylabels, y=time_diff)) +
labs(title="Control Plane Latency by Percentile") +
ylab("Latency (s)") +
scale_y_continuous(labels=secondsFromNanoseconds) +
xlab("Percentile") +
scale_x_discrete(limits=mylabels) +
geom_line(mapping=aes(color=type, group=type)) +
geom_point() +
geom_text(vjust = -0.5, aes(label = secondsFromNanoseconds(time_diff))) +
scale_colour_manual(values = tail_colors) +
our_theme() %+replace%
theme(legend.position="bottom")
latencies_bars = ggplot(latencies_by_route, aes(x=route, y=time_diff)) +
labs(title="Control Plane Latency by Route") +
ylab("Latency (s)") +
scale_y_continuous(labels=secondsFromNanoseconds) +
xlab("Route") +
# facet_wrap(vars(runID), ncol=1) +
# geom_bar(mapping=aes(fill=type), stat="identity") +
facet_wrap(vars(type), ncol=1) +
geom_point(color="black", alpha=0.25) +
stat_summary_bin(aes(colour="max"), fun.y = "max", bins=100, geom="line") +
stat_summary_bin(aes(colour="median"), fun.y = "median", bins=100, geom="line") +
geom_hline(yintercept = 0, color="grey45") +
scale_colour_brewer(palette = "Set1") +
our_theme() %+replace%
theme(legend.position="bottom")
ggsave(paste(filename, "latency.png", sep=""),
arrangeGrob(tail_latencies, latencies_bars), width=3 * 7, height=3 * 10)
print("Graph Node Usage")
nodemon = read_csv(paste(filename, "nodemon.csv", sep=""), col_types=cols(percent=col_number()))
experiment_time_x_axis(ggplot(nodemon) +
labs(title = "Node Utilization") +
lines() + lineLabels() +
geom_hline(yintercept = 100, color="grey45") +
facet_wrap(vars(type), ncol=1) +
geom_line(mapping=aes(x=timestamp, y=percent, group=interaction(runID, nodename)), color="gray15", alpha=0.15, show.legend=FALSE) +
stat_summary_bin(aes(x=timestamp, y=percent, colour="max"),fun.y="max", bins=100, geom="line") +
stat_summary_bin(aes(x=timestamp, y=percent, colour="median"),fun.y="median", bins=100, geom="line") +
scale_colour_brewer(palette = "Set1") +
our_theme() %+replace%
theme(legend.position="none", axis.title.x=element_blank(), axis.text.x=element_blank()))
ggsave(paste(filename, "nodemon.png", sep=""), width=7, height=3.5)
print("Graph Client VM Usage")
memstats = read_csv(paste(filename, "memstats.csv", sep="")) %>% mutate(memory = (used/total) * 100) %>% select(runID, timestamp=stamp, memory)
cpustats = read_csv(paste(filename, "cpustats.csv", sep="")) %>% filter(cpuid == "all") %>% mutate(cpu = (100 - idle)) %>% select(runID, timestamp=stamp, cpuid, cpu)
clientstats = full_join(memstats, cpustats) %>% gather("metric", "percent", -runID, -cpuid, -timestamp)
cpu = experiment_time_x_axis(ggplot(clientstats, aes(x=timestamp, y=percent)) +
labs(title = "Client Utilization") +
ylab("Utilization %") +
lines() + lineLabels() +
geom_hline(yintercept = 100, color="grey45") +
facet_wrap(vars(metric), ncol=1, scales="free_y") +
geom_line(mapping=aes(group=interaction(runID, cpuid)), color="gray15", alpha=0.15) +
stat_summary_bin(aes(colour="max"),fun.y="max", geom="line", bins=100) +
stat_summary_bin(aes(colour="median"),fun.y="median", bins=100, geom="line") +
scale_colour_brewer(palette = "Set1") +
our_theme() %+replace%
theme(legend.position="none", axis.title.x=element_blank(), axis.text.x=element_blank()))
ggsave(paste(filename, "resources.png", sep=""), width=7, height=3.5)
ifstats = read_csv(paste(filename, "ifstats.csv", sep="")) %>% gather("direction", "rate", -runID, -stamp) %>% mutate(rate = rate / 1024)
experiment_time_x_axis(ggplot(ifstats) +
labs(title = "Client Network Usage") +
ylab("Speed (mb/s)") +
lines() + lineLabels() +
facet_wrap(vars(direction), ncol=1, scales="free_y") +
geom_line(mapping=aes(x=stamp, y=rate, group=runID), color="grey15", alpha=0.15) +
stat_summary_bin(aes(x=stamp, y=rate, colour="max"),fun.y="max", geom="line", bins=100) +
stat_summary_bin(aes(x=stamp, y=rate, colour="median"),fun.y="median", bins=100, geom="line") +
scale_colour_brewer(palette = "Set1") +
our_theme() %+replace%
theme(legend.position="none"))
ggsave(paste(filename, "ifstats.png", sep=""), width=7, height=3.5)
print("All done!")
|
cd3fc62ed488a15da655bed737eace7528920497
|
528b842b3050cd43298e7d816c770ae3e365a05a
|
/man/roct.Rd
|
e8ed633aed99e68472c2a02b2cffd009d575e7ff
|
[] |
no_license
|
cran/onion
|
87745dfd2f5d51a625c854759556c41d78968505
|
fd2df964ee552d8a6036de3ca9f0170ace7c2e9d
|
refs/heads/master
| 2021-07-23T20:19:47.744864
| 2021-02-11T06:00:02
| 2021-02-11T06:00:02
| 17,698,054
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,079
|
rd
|
roct.Rd
|
\name{roct}
\alias{rquat}
\alias{roct}
\alias{romat}
\alias{ronionmat}
\title{Random onionic vector}
\description{
Random quaternion or octonion vectors and matrices
}
\usage{
rquat(n=5)
roct(n=5)
romat(type="quaternion", nrow=5, ncol=6, ...)
}
\arguments{
\item{n}{Length of random vector returned}
\item{nrow,ncol,...}{Further arguments specifying properties of the
returned matrix}
\item{type}{string specifying type of elements}
}
\details{
Function \code{rquat()} returns a quaternionic vector,
\code{roct()} returns an octonionic vector, and \code{romat()} a
quaternionic matrix.
Functions \code{rquat()} and \code{roct()} give a quick \dQuote{get
you going} random onion to play with. Function \code{romat()} gives
a simple onionmat, although arguably \code{matrix(roct(4),2,2)} is
as convenient.
}
\author{Robin K. S. Hankin}
\references{
K. Shoemake 1992. \dQuote{Uniform random rotations}.
In D. Kirk, editor, \emph{Graphics Gems III} pages 129-130. Academic,
New York.
}
\examples{
rquat(3)
roct(3)
plot(roct(30))
romat()
}
|
a7c12b275807bef135bfac3c6157d5a935ab3a4d
|
dcb6122c73a09dad2b0f50992e354558ff21440e
|
/test.R
|
2c75d0fa0aa398fe222ec1f7808ff64f1dddd7c4
|
[] |
no_license
|
zlessner/delaware-state-house
|
fe5c52c95fa20452ce653d728563d57bcfd6d25f
|
2ffeb6d2451930fe4128e43e5d2dbb790a52337d
|
refs/heads/master
| 2022-12-16T15:01:06.759678
| 2020-09-21T17:50:17
| 2020-09-21T17:50:17
| 297,416,423
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,479
|
r
|
test.R
|
# library(sf)
# library(tidyverse)
#
# nepal_shp <- read_sf('https://raw.githubusercontent.com/mesaugat/geoJSON-Nepal/master/nepal-districts.geojson')
# nepal_data <- read_csv('https://raw.githubusercontent.com/opennepal/odp-poverty/master/Human%20Poverty%20Index%20Value%20by%20Districts%20(2011)/data.csv')
#
# # calculate points at which to plot labels
# centroids <- nepal_shp %>%
# st_centroid() %>%
# bind_cols(as_data_frame(st_coordinates(.))) # unpack points to lat/lon columns
#
# nepal_data %>%
# filter(`Sub Group` == "HPI") %>%
# mutate(District = toupper(District)) %>%
# left_join(nepal_shp, ., by = c('DISTRICT' = 'District')) %>%
# ggplot() +
# geom_sf(aes(fill = Value)) +
# geom_text(aes(X, Y, label = DISTRICT), data = centroids, size = 1, color = 'white')
#
#
#
#
# delaware_shp <- read_sf("https://opendata.arcgis.com/datasets/85e5da1b74c949e58bb9d64f7498b076_2.geojson")
#
# Delaware_State_Rep
#
# # calculate points at which to plot labels
# centroids <- delaware_shp %>%
# st_centroid() %>%
# bind_cols(as_data_frame(st_coordinates(.))) # unpack points to lat/lon columns
#
#
# Delaware_State_Rep %>%
# filter(`year` == 2018) %>%
# mutate(district = toupper(district)) %>%
# left_join(delaware_shp, ., by = c('DISTRICT' = 'district')) %>%
# ggplot() +
# geom_sf(aes(fill = totalvotes)) +
# geom_text(aes(X, Y, label = DISTRICT), data = centroids, size = 1, color = 'white')
#
#
#
#
#
# library(plotly)
# library(rjson)
#
# url <- 'https://opendata.arcgis.com/datasets/85e5da1b74c949e58bb9d64f7498b076_2.geojson'
# geojson <- rjson::fromJSON(file=url)
# df <- Delaware_State_Rep
#
# g <- list(
# fitbounds = "locations",
# visible = FALSE
# )
# fig <- plot_ly()
# fig <- fig %>% add_trace(
# type="choropleth",
# geojson=geojson,
# locations=df$district,
# z=df$totalvotes,
# colorscale="Viridis",
# featureidkey="properties.DISTRICT"
# )
# fig <- fig %>% layout(
# geo = g
# )
# fig <- fig %>% colorbar(title = "Total Votes")
# fig <- fig %>% layout(
# title = "2013 Montreal Election"
# )
# fig
library(sf) ; library(tidyverse) ; library(classInt) ; library(viridis)
sf_gb <- st_read("https://opendata.arcgis.com/datasets/07194e4507ae491488471c84b23a90f2_3.geojson", quiet = TRUE)
glimpse(sf_gb)
st_geometry(sf_gb)
plot(st_geometry(sf_gb))
lookup <- read_csv("https://opendata.arcgis.com/datasets/046394602a6b415e9fe4039083ef300e_0.csv") %>%
filter(LAD17NM %in% c("Bolton","Bury","Manchester","Oldham","Rochdale","Salford","Stockport","Tameside","Trafford","Wigan")) %>%
pull(WD17CD)
sf_gm <- sf_gb %>%
filter(wd17cd %in% lookup)
plot(st_geometry(sf_gm))
sf_gm <- sf_gm %>%
select(area_code = wd17cd, area_name = wd17nm)
df <- read_csv("/Users/zacharyl/Downloads/bulk.csv")
df_census <- df %>%
select(area_code = `geography code`,
n = `Qualification: No qualifications; measures: Value`,
total = `Qualification: All usual residents aged 16 and over; measures: Value`) %>%
mutate(percent = (n/total)*100)
sf_gm_census <- left_join(sf_gm, df_census, by = "area_code")
ggplot(sf_gm_census, aes(fill = percent)) +
geom_sf(alpha = 0.8, colour = 'white', size = 0.3) +
scale_fill_viridis(discrete = F,
name = "No qualifications (%)",
direction = -1,
guide = guide_colourbar(
direction = "horizontal",
barheight = unit(2, units = "mm"),
barwidth = unit(50, units = "mm"),
draw.ulim = F,
title.position = 'top',
title.hjust = 0.5,
label.hjust = 0.5)) +
labs(x = NULL, y = NULL,
title = "Residents with no qualifications in Greater Manchester, 2011",
subtitle = "Source: Table QS502EW, Census 2011",
caption = "Contains OS data © Crown copyright and database right (2018)") +
coord_sf(datum = NA) +
theme(line = element_blank(),
axis.text = element_blank(),
axis.title = element_blank(),
panel.background = element_blank(),
legend.position = c(0.2, 0.09),
legend.title = element_text(size = 10),
legend.text = element_text(size = 8))
library(ggplot2)
library(ggiraph)
g <- ggplot(mpg, aes( x = displ, y = cty, color = hwy) )
my_gg <- g + geom_point_interactive(aes(tooltip = model), size = 2)
girafe(code = print(my_gg) )
my_gg <- g + geom_point_interactive(
aes(tooltip = model, data_id = model), size = 2)
x <- girafe(code = print(my_gg))
x
# add interactive labels to a ggplot -------
library(ggplot2)
library(ggiraph)
p <- ggplot(mtcars, aes(wt, mpg, label = rownames(mtcars))) +
geom_label_interactive(aes(tooltip = paste(rownames(mtcars), mpg, sep = "\n")))
x <- girafe(ggobj = p)
if( interactive() ) print(x)
p <- ggplot(mtcars, aes(wt, mpg, label = rownames(mtcars))) +
geom_label_interactive(aes(fill = factor(cyl),
tooltip = paste(rownames(mtcars), mpg, sep = "\n")),
colour = "white",
fontface = "bold")
x <- girafe(ggobj = p)
if( interactive() ) print(x)
# add interactive texts to a ggplot -------
library(ggplot2)
library(ggiraph)
## the data
dataset = mtcars
dataset$label = row.names(mtcars)
dataset$tooltip = paste0( "cyl: ", dataset$cyl, "<br/>",
"gear: ", dataset$gear, "<br/>",
"carb: ", dataset$carb)
## the plot
gg_text = ggplot(dataset,
aes(x = mpg, y = wt, label = label,
color = qsec,
tooltip = tooltip, data_id = label ) ) +
geom_text_interactive() +
coord_cartesian(xlim = c(0,50))
## display the plot
x <- girafe(ggobj = gg_text)
x <- girafe_options(x = x,
opts_hover(css = "fill:#FF4C3B;font-style:italic;") )
if( interactive() ) print(x)
library(plotly)
library(rjson)
url <- 'https://opendata.arcgis.com/datasets/85e5da1b74c949e58bb9d64f7498b076_2.geojson'
geojson <- rjson::fromJSON(file=url)
df <- Party_Break
g <- list(
fitbounds = "locations",
visible = FALSE
)
fig <- plot_ly()
fig <- fig %>% add_trace(
type="choropleth",
geojson=geojson,
locations=df$district,
z=df$voteBreakdown,
colorscale="Viridis",
featureidkey="properties.DISTRICT",
marker=list(line=list(
width=3),
opacity=0.5
)
)
fig <- fig %>% layout(
geo = g
)
fig <- fig %>% colorbar(title = "Bergeron Votes")
fig <- fig %>% layout(
title = "2013 Montreal Election"
)
fig
library(plotly)
library(rjson)
url <- 'https://opendata.arcgis.com/datasets/85e5da1b74c949e58bb9d64f7498b076_2.geojson'
geojson <- rjson::fromJSON(file=url)
df <- Dems
g <- list(
fitbounds = "locations",
visible = FALSE
)
colorscale <- data.frame(z=c(0, 0.3, 0.45, 0.499, 0.5, 0.5, 0.501, 0.55, 0.7, 1),col=c("Red", "#F70000", "#FC6767", "#FDCFCF", "purple", "purple", "#AFD2FF", "#3186F5", "#227DF5", "blue"))
fig <- plot_ly()
fig <- fig %>% add_trace(
type="choroplethmapbox",
geojson=geojson,
locations=df$district,
z=df$voteBreakdown,
colorscale = colorscale,
featureidkey="properties.DISTRICT"
)
fig <- fig %>% colorbar(title = "Vote Percentage")
fig <- fig %>% layout(
mapbox=list(
style="carto-positron",
zoom =7.3,
center=list(lon=-75.5277, lat=39.1)),
title = "2018 Delaware Congressional Election"
)
fig
|
6396e37b2ae833a6f9f678897289ce4d6ae07682
|
c8494552202bb07e46b3c328e9c503db92fda70e
|
/man/umx_reorder.Rd
|
e90b8f584291e3a04d2ce1a4b9923bdf7830a461
|
[] |
no_license
|
hmaes/umx
|
fcc85dc40774552d0f664036404e12bbdd75cc05
|
09b3c0efd4131248e30e67925b7650278ca78ff9
|
refs/heads/master
| 2021-01-24T20:26:15.667956
| 2014-09-05T09:02:47
| 2014-09-05T09:02:47
| 23,714,644
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,745
|
rd
|
umx_reorder.Rd
|
% Generated by roxygen2 (4.0.1): do not edit by hand
\name{umx_reorder}
\alias{umx_reorder}
\title{umx_reorder}
\usage{
umx_reorder(old, newOrder)
}
\arguments{
\item{old}{a square matrix of correlation or covariances to reorder}
\item{newOrder}{The order you'd like the variables to be in}
}
\value{
- the re-ordered (and/or resized) matrix
}
\description{
Reorder the variables in a correlation matrix. Can also remove one or more variables from a matrix using this function
}
\examples{
oldMatrix = cov(mtcars)
umx_reorder(oldMatrix, newOrder = c("mpg", "cyl", "disp")) # first 3
umx_reorder(oldMatrix, newOrder = c("hp", "disp", "cyl")) # subset and reordered
}
\references{
- \url{http://www.github.com/tbates/umx}
}
\seealso{
- \code{\link{umxLabel}}, \code{\link{umxRun}}, \code{\link{umxValues}}
Other umx misc functions: \code{\link{demand}};
\code{\link{umxEval}}; \code{\link{umx_add_variances}};
\code{\link{umx_apply}}; \code{\link{umx_check_model}};
\code{\link{umx_check_names}}; \code{\link{umx_explode}};
\code{\link{umx_get_bracket_addresses}};
\code{\link{umx_get_cores}};
\code{\link{umx_get_optimizer}};
\code{\link{umx_has_CIs}};
\code{\link{umx_has_been_run}};
\code{\link{umx_has_means}};
\code{\link{umx_has_square_brackets}};
\code{\link{umx_is_MxMatrix}};
\code{\link{umx_is_MxModel}}; \code{\link{umx_is_cov}};
\code{\link{umx_is_endogenous}};
\code{\link{umx_is_exogenous}};
\code{\link{umx_is_ordered}}; \code{\link{umx_msg}};
\code{\link{umx_paste_names}}; \code{\link{umx_rename}};
\code{\link{umx_rot}};
\code{\link{umx_set_checkpointing}};
\code{\link{umx_set_cores}};
\code{\link{umx_set_optimizer}};
\code{\link{umx_string_to_algebra}};
\code{\link{umx_trim}}
}
|
55a3d7cc6a6fb2ed1f31a5d9958bc788ee90689a
|
aaa7d40918380b1b2d1f2c0c270d0a9900456a6d
|
/Code.R
|
0a406e0033e3ed877d796c959a181629165444fc
|
[] |
no_license
|
Annapurani93/Raghuram-Rajan
|
90b82aa279f3856e98f9a1c5233f6649d979ca9a
|
cfa0da6750909134c3b0f74f9cd5a7ff5c4ed3ce
|
refs/heads/main
| 2023-08-12T13:53:06.798280
| 2021-10-08T05:13:12
| 2021-10-08T05:13:12
| 412,127,191
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,283
|
r
|
Code.R
|
library(tidytuesdayR)
library(tidyverse)
library(gtExtras)
library(gtable)
library(gt)
library(gtsummary)
tuesdata <- tidytuesdayR::tt_load(2021, week = 40)
papers <- readr::read_csv('https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2021/2021-09-28/papers.csv')
authors <- readr::read_csv('https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2021/2021-09-28/authors.csv')
programs <- readr::read_csv('https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2021/2021-09-28/programs.csv')
paper_authors <- readr::read_csv('https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2021/2021-09-28/paper_authors.csv')
paper_programs <- readr::read_csv('https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2021/2021-09-28/paper_programs.csv')
papers%>%left_join(paper_authors)%>%left_join(authors)%>%
left_join(paper_programs)%>%
left_join(programs)%>%drop_na()->paper1
paper1%>%filter(name=="Raghuram G Rajan")%>%distinct(title,.keep_all = TRUE)%>%
filter(year==2021)
paper1%>%select(paper,year,title,name,program_desc)%>%
filter(str_detect(name,"Raghu"))%>%
distinct(title,.keep_all = TRUE)%>%
group_by(program_desc)%>%
arrange(year, .by_group = TRUE)%>%
select(program_desc,title,year)->tabletitle
tabletitle%>%group_by(program_desc)%>%mutate(numbering = row_number())->tabletitle
tabletitle[c(4,1,2,3)]->tabletitle
colnames(tabletitle)<-c("SNo.","Program","Title","Year")
data.frame(tabletitle)->tabletitle
tabletitle
source_tag <- "Data: <a href='https://www.nber.org/'>NBER</a> via TidyTuesday| Design and Analysis: @annapurani93"
tabletitle%>%gt(groupname_col = "Program")%>%
tab_style(style = list(cell_text(align="center")),
location=cells_row_groups())%>%
tab_style(
style = list(
cell_fill("black"),
cell_text(color = "white", weight = "bold", transform = "uppercase")
),
locations = cells_row_groups()
)%>%
tab_header(
title = md("**Raghuram Rajan's Working Papers at the National Bureau of Economic Research**"),
subtitle = "Indian economist Raghuram Rajan has published 55 papers in total at the NBER, so far, on different subjects - he has published 47 papers in Corporate Finance, 4 in International Finance and Macroeconomics, 2 each in Asset Pricing and Economic Fluctuations and Growth.
His latest paper with fellow economists Douglas Diamond and Yunzhi Hu is titled 'Liquidity, Pledgeability, and the Nature of Lending', which discusses how corporate lending and financial intermediation change based on the fundamentals of the firm and its environment"
)%>%
tab_source_note(md(html(source_tag)))%>%
tab_style(
style = list(
cell_text(
align = "right",
color = "black",
weight = "bold"
)
),
locations = cells_source_notes()
)%>%
cols_align(
align = "left",
columns = c(Title))%>%
cols_align(
align = "center",
columns = c(SNo.)
)%>%
cols_align(
align = "center",
columns = c(Year)
)%>%
tab_style(style = cell_text(align = "center", weight="bold",transform = "uppercase"),
locations = cells_column_labels(everything())
)%>%
opt_table_lines("all")%>%
opt_table_outline()%>%
opt_row_striping()%>%
tab_options(
source_notes.background.color = "#ff8c8c",
heading.background.color = "#ff8c8c",
column_labels.background.color = "#d1dd93",
table_body.hlines.color = "#989898",
table_body.border.top.color = "#989898",
heading.border.bottom.color = "#989898",
row_group.border.top.color = "#989898",
summary_row.border.color = "#989898"
)%>%
tab_style(
style = list(
cell_borders(
sides = c("top", "bottom"),
color = "#989898",
weight = px(1),
style="dashed"
),
cell_borders(
sides = c("left", "right"),
color = "#989898",
weight = px(1),
style="dashed"
)),
locations = cells_body(
columns = everything(),
rows = everything()
)
) %>%
tab_style(
style = list(
cell_fill(color = "#f0edaa"),
cell_text(color = "black")
),
locations = cells_body(
rows = Program=="Asset Pricing")
)%>%
tab_style(
style = list(
cell_fill(color = "#c2d5f4"),
cell_text(color = "black")
),
locations = cells_body(
rows = Program=="Corporate Finance")
)%>%
tab_style(
style = list(
cell_fill(color = "#bdeeed"),
cell_text(color = "black")
),
locations = cells_body(
rows = Program=="Economic Fluctuations and Growth")
)%>%
tab_style(
style = list(
cell_fill(color = "#f0d14f"),
cell_text(color = "black")
),
locations = cells_body(
rows = Program=="International Finance and Macroeconomics")
)%>%
tab_style(
style = list(
cell_text(
color = "black",
transform = "uppercase"
)
),
locations = list(
cells_title(groups = "title")
)
) %>%
# Adjust sub-title font
tab_style(
style = list(
cell_text(
color="black"
)
),
locations = list(
cells_title(groups = "subtitle")
)
)->table
gtsave(table,"table2.png")
|
a2fa1128b55ac72c654f49b53bd9a12faf814e6a
|
3df9b54f81dd4f9d6af2b56e93384c4152a525ab
|
/DATA/data_preparation/find_1023ez_efilers.R
|
5b6c39dc697c4984cfe28aef2b78f0ba61a8c136
|
[] |
no_license
|
Nonprofit-Open-Data-Collective/machine_learning_mission_codes
|
7bca4e5eeca39730d7a297b8034f1a0a860a8614
|
74ce928dbf433bdedccee014e2467875041f6ddc
|
refs/heads/master
| 2023-06-26T16:02:29.422816
| 2023-06-13T20:46:10
| 2023-06-13T20:46:10
| 169,822,293
| 3
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,609
|
r
|
find_1023ez_efilers.R
|
library( dplyr )
library( tidyr )
setwd( "C:/Users/jdlecy/Dropbox/04 - PAPERS/01 - In Progress/18 - 1023 EZ Forms" )
"D:/Dropbox/04 - PAPERS/01 - In Progress/18 - 1023 EZ Forms/Data"
dd1 <- read.csv( "f1023ez_approvals_2014.csv", stringsAsFactors=F )
dd2 <- read.csv( "f1023ez_approvals_2015.csv", stringsAsFactors=F )
dd3 <- read.csv( "f1023ez_approvals_2016.csv", stringsAsFactors=F )
dd4 <- read.csv( "f1023ez_approvals_2017.csv", stringsAsFactors=F )
d2 <- bind_rows( dd1, dd2, dd3, dd4 )
nrow( d2 )
source( "https://raw.githubusercontent.com/Nonprofit-Open-Data-Collective/irs-990-efiler-database/master/BUILD_SCRIPTS/build_efile_database_functions.R" )
d2$EIN <- gsub( "-", "", d2$EIN )
d2$EIN <- as.numeric( d2$EIN )
d$EIN <- as.numeric( d$EIN )
head( EIN )
ein <- unique( d$EIN )
length( intersect( d2$EIN, ein ) )
these <- intersect( d2$EIN, ein )
d3 <- filter( d, EIN %in% these )
table( d3$FormType, d3$TaxYear )
saveRDS( d3, "New1023ezEfilers.rds" )
d4 <- filter( d2, d2$EIN %in% these )
nrow( d4 )
saveRDS( d4, "Form_1023ez_Efilers.rds" )
table( substr( d4$Nteecode, 1, 1 ) )
table( d4$Orgpurposecharitable )
table( d4$Orgpurposereligious )
table( d4$Orgpurposeeducational )
table( d4$Orgpurposescientific )
table( d4$Orgpurposeliterary )
table( d4$Orgpurposepublicsafety )
table( d4$Orgpurposeamateursports )
table( d4$Orgpurposecrueltyprevention )
### ~~~
setwd( "D:/Dropbox/04 - PAPERS/01 - In Progress/25 - USC Mission Taxonomies/USC Mission Paper/Data and Analysis/Sample Framework" )
dat <- readRDS( "Form_1023ez_Efilers.rds" )
length( unique( dat$EIN ) )
source( "https://raw.githubusercontent.com/Nonprofit-Open-Data-Collective/irs-990-efiler-database/master/BUILD_SCRIPTS/build_efile_database_functions.R" )
index <- buildIndex()
index$EIN <- as.numeric( index$EIN )
length( intersect( index$EIN, dat$EIN ) ) # [1] 13983
length( unique( dat$EIN ) ) # [1] 13983
sample <- index[ index$EIN %in% intersect( index$EIN, dat$EIN ) , ]
saveRDS( sample, "EfileSampleIndex.rds" )
### ~~~ CREATE BUILD FUNCTION ~~~ ###
library( dplyr )
part.iii <- read.csv( "PartIII.csv", stringsAsFactors=F )
names( part.iii ) <- toupper( names( part.iii ) )
part.iii.one.to.one <-
filter( part.iii, CARDINALITY == "ONE" ) %>%
filter( VARIABLE_NAME_NEW != "F9_03_PC_PROG_1_GRANTS_FRGN" ) %>%
arrange( RDB_TABLE )
create_code_chunks( part.iii.one.to.one )
dd <- buildFUNCTION( doc=doc1, url=url )
dd <- as.data.frame( dd, stringsAsFactors=F )
names(dd)
head <- select( dd, NAME, EIN, TAXYR, FORMTYPE, OBJECTID, URL )
mission <- select( dd, F9_03_PC_NEW_PROG_CHECKBOX,
F9_03_PC_SIG_CHANGE_CHECKBOX,
F9_03_PC_TOTAL_PROG_EXP,
F9_03_PZ_MISSION,
F9_03_PZ_SCHED_O_CHECKBOX )
prog1 <- select( dd, F9_03_PC_PROG_1_ACTIVITY_CODE,
F9_03_PC_PROG_1_DESCRIPTION,
F9_03_PC_PROG_1_EXPENSE,
F9_03_PC_PROG_1_GRANTS,
F9_03_PC_PROG_1_REVENUE )
prog2 <- select( dd, F9_03_PC_PROG_2_ACTIVITY_CODE,
F9_03_PC_PROG_2_DESCRIPTION,
F9_03_PC_PROG_2_EXPENSE,
F9_03_PC_PROG_2_GRANTS,
F9_03_PC_PROG_2_REVENUE )
prog3 <- select( dd, F9_03_PC_PROG_3_ACTIVITY_CODE,
F9_03_PC_PROG_3_DESCRIPTION,
F9_03_PC_PROG_3_EXPENSE,
F9_03_PC_PROG_3_GRANTS,
F9_03_PC_PROG_3_REVENUE )
prog4 <- select( dd, F9_03_PC_PROG_4_ACTIVITY_CODE,
F9_03_PC_PROG_4_DESCRIPTION,
F9_03_PC_PROG_4_EXPENSE,
F9_03_PC_PROG_4_GRANTS,
F9_03_PC_PROG_4_REVENUE )
d1 <- data.frame( head, PROGRAM="PROG1", prog1, stringsAsFactors=F )
d2 <- data.frame( head, PROGRAM="PROG2", prog2, stringsAsFactors=F )
d3 <- data.frame( head, PROGRAM="PROG3", prog3, stringsAsFactors=F )
d4 <- data.frame( head, PROGRAM="PROG4", prog4, stringsAsFactors=F )
names( d1 ) <- gsub( "F9_03_PC_PROG_1_", "", names(d1) )
names( d2 ) <- gsub( "F9_03_PC_PROG_2_", "", names(d2) )
names( d3 ) <- gsub( "F9_03_PC_PROG_3_", "", names(d3) )
names( d4 ) <- gsub( "F9_03_PC_PROG_4_", "", names(d4) )
d.programs <- bind_rows( d1, d2, d3, d4 )
d.mission <- cbind( head, mission )
### ~~~ TEST DATA
library( xmltools )
library( purrr )
library( xml2 )
library( dplyr )
V_990_2014 <- "https://s3.amazonaws.com/irs-form-990/201543089349301829_public.xml"
V_990_2012 <- "https://s3.amazonaws.com/irs-form-990/201322949349300907_public.xml"
V_990EZ_2014 <- "https://s3.amazonaws.com/irs-form-990/201513089349200226_public.xml"
V_990EZ_2012 <- "https://s3.amazonaws.com/irs-form-990/201313549349200311_public.xml"
### GENERATE ALL XPATHS: V 990 2014
url1 <- V_990_2014
doc1 <- read_xml( url1 )
xml_ns_strip( doc1 )
# doc1 %>% xml_find_all( '//*') %>% xml_path()
### GENERATE ALL XPATHS: V 990 2012
url2 <- V_990_2012
doc2 <- read_xml( url2 )
xml_ns_strip( doc2 )
# doc2 %>% xml_find_all( '//*') %>% xml_path()
### GENERATE ALL XPATHS: V 990EZ 2014
url3 <- V_990EZ_2014
doc3 <- read_xml( url3 )
xml_ns_strip( doc3 )
# doc3 %>% xml_find_all( '//*') %>% xml_path()
### GENERATE ALL XPATHS: V 990EZ 2012
url4 <- V_990EZ_2012
doc4 <- read_xml( url4 )
xml_ns_strip( doc4 )
# doc4 %>% xml_find_all( '//*') %>% xml_path()
test.dat <- sample[ 1:10 , ]
build_part_iii_table_00( doc=doc1, url=url )
build_part_iii_table_00( doc=doc2, url=url2 )
build_part_iii_table_00( doc=doc3, url=url3 )
build_part_iii_table_00( doc=doc4, url=url4 )
buildFUNCTION( doc=doc1, url=url )
buildFUNCTION( doc=doc2, url=url2 )
buildFUNCTION( doc=doc3, url=url3 )
buildFUNCTION( doc=doc4, url=url4 )
part.iii.01.group.names <- find_group_names( part.iii, "F9-P03-TABLE-01-PROG-ACCOMPLISHMENTS" )
part.iii.01.v.map <- get_var_map( part.iii, table.name="F9-P03-TABLE-01-PROG-ACCOMPLISHMENTS" )
build_rdb_table( doc1, url=url, group.names=part.iii.01.group.names, v.map=part.iii.01.v.map )
build_rdb_table( doc2, url=url2, group.names=part.iii.01.group.names, v.map=part.iii.01.v.map )
build_rdb_table( doc3, url=url3, group.names=part.iii.01.group.names, v.map=part.iii.01.v.map )
build_rdb_table( doc4, url=url4, group.names=part.iii.01.group.names, v.map=part.iii.01.v.map )
library( XML )
xmlToList( doc1 )
library(xml2)
library(jsonlite)
x <- xml2::as_list( doc1 )
xl <- lapply( x, attributes )
toJSON( xl, pretty = TRUE, auto_unbox = TRUE )
xml_find_all( doc1, '//Return//ReturnData')
doc1 %>% xml2::xml_find_all( '//*') %>% xml2::xml_path()
xml_name( xml_children( doc1 ) )
xml_name( xml_children( customer1 ) )
customer1 <- xml_find_all( doc1, ".//Return/ReturnData" )
xml_name( xml_children( xml_find_first( doc1, "//ReturnData/IRS990" ) ) )
### GENERATE ALL XPATHS: V 990 2014
url1 <- V_990_2014
doc1 <- read_xml( url )
xml_ns_strip( doc1 )
# doc1 %>% xml_find_all( '//*') %>% xml_path()
these <- xml_children( xml_find_first( doc1, "//ReturnData/IRS990" ) )
x <- xml2::as_list( these )
xl <- lapply( x, attributes )
toJSON( xl, pretty = TRUE, auto_unbox = TRUE )
url <- "https://s3.amazonaws.com/irs-form-990/201123149349301147_public.xml"
doc <- read_xml( url )
xml_ns_strip( doc )
doc <- xmlParse( doc )
a <- xmlToList(doc)
dd <- jsonlite::toJSON(a, pretty=TRUE)
names( dd )
str( dd )
str( a )
library(jsonlite)
library(dplyr)
library(purrr)
library(tidyr)
dd %>%
mutate( json = map(json, ~ fromJSON(.) %>% as.data.frame())) %>%
unnest( json)
|
bbbe2ae9a5b389b98d9df8f45d2bda9526eb42fc
|
417b44d377cc158e86bfd65830da272169672512
|
/Assignment 1/Wenhan-Xiao/complete.R
|
80902594611bf0b6c2d3c2645d199200386310df
|
[] |
no_license
|
hanxu-ust/R-Programming-Assignments
|
a6cdc7a0bdcdffbde324d1c2e613fd31af5955f1
|
4b7db24572ec8fda359ca31e51df30977297e58e
|
refs/heads/master
| 2022-08-11T13:34:41.594744
| 2014-07-14T01:54:37
| 2014-07-14T01:54:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,160
|
r
|
complete.R
|
complete <- function(directory, id = 1:332) {
  ## 'directory' is a character vector of length 1 indicating the location
  ## of the monitor CSV files, named with zero-padded IDs: 001.csv .. 332.csv
  ## 'id' is an integer vector indicating the monitor ID numbers to be used
  ##
  ## Returns a data frame with one row per requested monitor:
  ##   id   - the monitor ID number
  ##   nobs - number of complete cases (rows with no NA in any column)
  ##   cors - correlation between nitrate and sulfate over those rows
  ##          (NA when fewer than 2 complete cases, where cor() would fail)
  n <- length(id)
  # Preallocate the result so the loop only fills in values; filling by row
  # index i (not by matching on id) also stays correct for duplicate IDs.
  ans <- data.frame(id = id,
                    nobs = numeric(n),
                    cors = numeric(n))
  for (i in seq_len(n)) {
    # Zero-pad the ID to three digits, e.g. 7 -> "007.csv".
    file <- file.path(directory, sprintf("%03d.csv", id[i]))
    monitor <- read.csv(file)
    # Keep only rows with no missing values in any column.
    data <- monitor[complete.cases(monitor), , drop = FALSE]
    ans$nobs[i] <- nrow(data)
    # cor() errors/warns on degenerate input; guard explicitly.
    ans$cors[i] <- if (nrow(data) >= 2) {
      cor(data$nitrate, data$sulfate)
    } else {
      NA_real_
    }
  }
  ans
}
|
0adc4f197c97200c1ebbbd326f623ad5f8d4da4e
|
d9aac903c28f21f10a713cb50d170381454321de
|
/NonParameterEstimation/KernelEstimate.R
|
140007d4f6a5cdd93f8a9e6c354acf31a62e1156
|
[] |
no_license
|
syuoni/statistics-R
|
c38fa7c1628135e95ac9fdc61eda4ade07493a84
|
cc790bdcc4b8a93656549e9cc6c3e681c5bad66f
|
refs/heads/master
| 2020-09-18T06:37:20.031529
| 2016-10-13T11:18:28
| 2016-10-13T11:18:28
| 66,198,755
| 0
| 0
| null | 2016-10-13T01:56:34
| 2016-08-21T12:55:21
|
Stata
|
UTF-8
|
R
| false
| false
| 2,626
|
r
|
KernelEstimate.R
|
uniform.kernel <- function(z){
  ## Uniform (box) kernel: density 1/2 on the half-open interval (-1, 1],
  ## zero elsewhere.
  0.5 * as.numeric(z > -1 & z <= 1)
}
normal.kernel <- function(z){
  ## Gaussian kernel: the standard normal density evaluated at z.
  dnorm(z)
}
kernel.density <- function(x, kernel.func, h=NULL, k=100){
  ## Kernel density estimate of the sample 'x', evaluated at 'k'
  ## equally spaced grid midpoints spanning range(x).
  ##
  ## x:           numeric sample.
  ## kernel.func: kernel function K(z), e.g. normal.kernel.
  ## h:           bandwidth; if NULL a plug-in optimal bandwidth is used.
  ## k:           number of grid points.
  ##
  ## Returns data.frame(x = grid point, fx = estimated density).
  minx <- min(x)
  maxx <- max(x)
  n <- length(x)
  # get the optimal bandwidth when none is supplied:
  # h = 1.3643 * delta * n^(-1/5) * sd(x), with delta built from the
  # kernel's roughness (integral of K^2) and second moment.
  if(is.null(h)){
    a <- integrate(function(t){return(kernel.func(t)**2)}, -5, 5)
    b <- integrate(function(t){return(t**2*kernel.func(t))}, -5, 5)
    delta <- (a$value/(b$value)**2)**0.2
    h <- 1.3643 * delta * n**(-0.2) * sd(x)
  }
  dx <- (maxx-minx)/k
  ## Grid midpoints computed in one vectorized step; the original grew
  ## x.vec/fx.vec with c() inside the loop (quadratic copying).
  x.vec <- minx + (seq_len(k) - 0.5) * dx
  fx.vec <- vapply(x.vec,
                   function(xi) sum(kernel.func((x - xi)/h)) / (n*h),
                   numeric(1))
  return(data.frame(x=x.vec, fx=fx.vec))
}
get.non.parameter.func <- function(x.vec, fx.vec){
  ## Build a vectorized function interpolating the pairs (x.vec, fx.vec):
  ## linear interpolation inside the grid, and the first/last value held
  ## constant outside it (x.vec is assumed sorted increasing, as produced
  ## by kernel.density / kernel.regression).
  ##
  ## stats::approxfun(..., rule = 2) implements exactly this contract,
  ## replacing the hand-rolled search loop plus Vectorize() wrapper.
  return(approxfun(x.vec, fx.vec, rule = 2))
}
kernel.regression <- function(y, x, kernel.func, h=NULL, k=100){
  ## Nadaraya-Watson kernel regression of y on x, evaluated at 'k'
  ## equally spaced grid midpoints spanning range(x).
  ##
  ## Returns data.frame(x = grid point, y = locally weighted mean of y).
  minx <- min(x)
  maxx <- max(x)
  n <- length(x)
  # use the optimal bandwidth for kernel density temporarily
  if(is.null(h)){
    a <- integrate(function(t){return(kernel.func(t)**2)}, -5, 5)
    b <- integrate(function(t){return(t**2*kernel.func(t))}, -5, 5)
    delta <- (a$value/(b$value)**2)**0.2
    h <- 1.3643 * delta * n**(-0.2) * sd(x)
  }
  dx <- (maxx-minx)/k
  ## Vectorized grid + preallocated vapply instead of growing x.vec/y.vec
  ## with c() inside the loop (quadratic copying in the original).
  x.vec <- minx + (seq_len(k) - 0.5) * dx
  y.vec <- vapply(x.vec, function(xi){
    w <- kernel.func((x - xi)/h)
    sum(w * y) / sum(w)
  }, numeric(1))
  return(data.frame(x=x.vec, y=y.vec))
}
non.parameter.demo <- function(){
  ## Demo: print kernel estimates next to their analytic targets at -2..2.
  set.seed(1226)
  n <- 5000
  x <- rnorm(n, 0, 1)
  ## Density estimate vs the true standard-normal density.
  dens <- kernel.density(x, normal.kernel)
  dens.fn <- get.non.parameter.func(dens$x, dens$fx)
  print(dnorm(-2:2))
  print(dens.fn(-2:2))
  ## Regression estimate vs the true conditional mean x^2 - x.
  y <- x**2 - x + rnorm(n, 0, 0.5)
  reg <- kernel.regression(y, x, normal.kernel)
  reg.fn <- get.non.parameter.func(reg$x, reg$y)
  print((-2:2)**2 - (-2:2))
  print(reg.fn(-2:2))
}
non.parameter.demo()
|
3cbf37c5e94f7e196cd3c03f34b93dcc9da88928
|
72d902d01c6ec6c1a0ba27db5a69c073135298e1
|
/R/deps.R
|
535fd911201408c3833de6273c9d9483750e1c5e
|
[
"MIT"
] |
permissive
|
yonicd/pkgr.utils
|
c180281a7b4a0ecfbc0cf2caab37bba9eec72a06
|
97d916586b2fd3598e925ea461d9d20e26f4ba77
|
refs/heads/master
| 2022-11-17T18:48:50.150109
| 2020-07-14T02:01:36
| 2020-07-14T02:01:36
| 265,820,883
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,105
|
r
|
deps.R
|
#' @title Extract DESCRIPTION file dependencies
#' @description Extract from a DESCRIPTION file a vector of the listed package
#' dependencies.
#' @param file path to the file to parse (or a URI; see check_path), Default: '.'
#' @param type character, type of file to parse, Default: c('DESCRIPTION','pkgSetup')
#' @param \dots arguments to pass to desc_deps or pkgSetup_deps
#' @return character
#' @details For the vector of base and recommended packages use
#' [base_packages][pkgr.utils::base_packages]
#' @rdname get_deps
#' @export
get_deps <- function(file = '.',type = c('DESCRIPTION','pkgSetup'),...){
  # Dispatch on the (partially matched) file type; each branch forwards
  # 'file' and any extra arguments to the matching parser.
  switch(match.arg(type,c('DESCRIPTION','pkgSetup')),
    'DESCRIPTION' = {
      desc_deps(file,...)
    },
    'pkgSetup' = {
      pkgSetup_deps(file,...)
    })
}
#' @importFrom utils getParseData
# Extract the package vector assigned to `pkgs` inside a pkgSetup.R script.
# NOTE(review): this relies on eval(parse(...)) of text scraped from the
# script; it assumes the script contains a top-level `pkgs <- c(...)`
# assignment -- confirm against the pkgSetup.R files this is meant to read.
pkgSetup_deps <- function(file = 'pkgSetup.R', excl_set = base_packages()){
  # Parse the script; parent == 0 keeps only top-level expressions.
  x <- utils::getParseData(parse(file,keep.source = TRUE),includeText = TRUE)
  # First top-level expression whose text starts with "pkgs".
  y <- grep('^pkgs',x$text[x$parent==0],value = TRUE)[1]
  # Strip the `pkgs <- ` prefix and embedded newlines, then evaluate the
  # remaining right-hand side to obtain the character vector, sorted.
  ret <- sort(eval(parse(text = gsub('^pkgs(.*?)<- |\\n','',y))))
  # Optionally remove base/recommended packages from the result.
  if(!is.null(excl_set))
    ret <- setdiff(ret,excl_set)
  ret
}
#' @importFrom desc desc_get_deps
# Read the dependency table of a DESCRIPTION file (local path or URI,
# resolved via check_path) and return the package names, minus an
# optional exclusion set.
desc_deps <- function(file='.', excl_set = base_packages()){
  deps_tbl <- desc::desc_get_deps(file = check_path(file))
  pkgs <- deps_tbl$package
  if (is.null(excl_set)) {
    pkgs
  } else {
    setdiff(pkgs, excl_set)
  }
}
#' @title Base and recommended packages
#' @description Names of the packages distributed with R itself, i.e. those
#' whose installation priority is "base" or "recommended", prefixed with "R".
#' @return character vector of package names, starting with "R"
#' @details Used as the default exclusion set when listing dependencies.
#' @examples
#' \dontrun{
#' if(interactive()){
#'  base_packages()
#'  }
#' }
#' @seealso
#' \code{\link[utils]{installed.packages}}
#' @rdname base_packages
#' @export
#' @importFrom utils installed.packages
base_packages <- function(){
  inst <- utils::installed.packages()
  shipped <- inst[,"Priority"] %in% c("base","recommended")
  core <- inst[shipped, c("Package","Priority")]
  c("R", rownames(core))
}
# If `x` is a URI that can be fetched, download it to a tempfile and return
# that local path; otherwise return `x` unchanged (assumed local path).
check_path <- function(x){
  probe <- try(httr::GET(x), silent = TRUE)
  if (!inherits(probe, "try-error")) {
    # Remote resource: fetch a local copy for downstream readers.
    remote_uri <- x
    x <- tempfile()
    utils::download.file(remote_uri, destfile = x, quiet = TRUE)
  }
  x
}
|
c77f41d091e5a03e67c5e40c8b1403679e80998e
|
ed74cc0be2c6e52078cf3cc9fbce3727c33b6f5d
|
/plot4.R
|
e5445cf1c938b17e7873fd8329e1df6d5893b959
|
[] |
no_license
|
adaongithub/ExData_Plotting1
|
8bafa01d0732e5ccb121760ec6e290263563f395
|
cc33e19e628e22e73d3b8341e7bac190cbd3a964
|
refs/heads/master
| 2021-01-16T22:00:34.423302
| 2015-03-06T21:40:18
| 2015-03-06T21:40:18
| 31,581,859
| 0
| 0
| null | 2015-03-03T05:52:18
| 2015-03-03T05:52:18
| null |
UTF-8
|
R
| false
| false
| 3,584
|
r
|
plot4.R
|
# Exploratory Data Analysis Project #1
# Program plot4.R
# Writes a four-plot panel to file plot4.png
#
# See the project's problem statement for details.
# Download and read in the electric power data set for this program:
# Locate -- and if necessary download and unzip -- the household power
# consumption data set, then read it into ep_df ("?" marks missing values).
dirname <- "../data"
file1_local_zip_name <- "electric_power.zip"
file1_path <- file.path( dirname, file1_local_zip_name )
# Only take the time to download the .zip file if it's not already downloaded
if ( ! file.exists( file1_path ) )
{ # .zip file not here so download it and unzip() it
  # Guard the directory creation: an unconditional dir.create() warns when
  # the directory already exists (e.g. left over from an earlier run).
  if ( ! dir.exists( dirname ) ) dir.create( dirname, recursive = TRUE )
  file1_URL <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
  # for Mac use method="curl"; for Windows don't
  # mode="wb" keeps the zip intact on Windows (text mode corrupts binaries).
  download.file( file1_URL, destfile=file1_path, mode="wb" )
  unzip( file1_path, exdir=dirname )
}
# We already manually took a look inside the .zip and know the name of the
# single .txt data file that it contains.
file1_local_unzip_name <- "household_power_consumption.txt"
file1_unzip_path <- file.path( dirname, file1_local_unzip_name )
ep_df <- read.csv( file=file1_unzip_path, sep=";", na.strings=c("?") )
# Keep only the observations for 1 Feb 2007 and 2 Feb 2007; Date is stored
# as day/month/year strings.
date_strings <- as.character( ep_df$Date )
keep_rows <- grepl( "^1/2/2007", date_strings ) | grepl( "^2/2/2007", date_strings )
ep_Feb_01_02_df <- ep_df[ keep_rows, ]
# Add a combined POSIXct date-time column, used as the x-axis of every
# panel drawn below.
ep_Feb_01_02_df$DateTime <-
  as.POSIXct( paste( ep_Feb_01_02_df$Date, ep_Feb_01_02_df$Time ),
              format="%d/%m/%Y %H:%M:%S"
            )
# Create our plot (four plots in panels) in the file "plot4.png" in
# PNG graphics format. Size, color, etc. as specified in problem statement.
png( filename="plot4.png", width=480, height=480 )
# mfcol fills column-by-column, so the panels below land in the order:
# upper-left, lower-left, upper-right, lower-right.
par( mfcol=c(2,2) )
# Upper-Left Panel: Plot "Global Active Power" versus Time
# (From plot2.R which writes the file plot2.png)
plot( x=ep_Feb_01_02_df$DateTime, y=ep_Feb_01_02_df$Global_active_power,
      type="l",
      xlab="",
      ylab="Global Active Power"
    )
# Lower-Left Panel: Plot "Energy sub metering" (3 variables) versus Time
# (From plot3.R which writes the file plot3.png)
# First series starts the panel; the other two are overlaid with lines().
plot( x=ep_Feb_01_02_df$DateTime,
      y=ep_Feb_01_02_df$Sub_metering_1,
      type="l", col="black",
      xlab="",
      ylab="Energy sub metering"
    )
lines( x=ep_Feb_01_02_df$DateTime,
       y=ep_Feb_01_02_df$Sub_metering_2,
       type="l", col="red"
     )
lines( x=ep_Feb_01_02_df$DateTime,
       y=ep_Feb_01_02_df$Sub_metering_3,
       type="l", col="blue"
     )
# Instructors seem to be trying to be tricky by not having a border for
# the legend box for this composite plot; hence, the bty="n" option below.
legend( "topright",
        legend = c( "Sub_metering_1", "Sub_metering_2", "Sub_metering_3" ),
        col = c( "black", "red", "blue" ),
        lty = c( 1, 1, 1 ),
        bty = "n"
      )
# Upper-Right Panel: Plot "Voltage" versus Time
plot( x=ep_Feb_01_02_df$DateTime, y=ep_Feb_01_02_df$Voltage,
      type="l",
      xlab="datetime",
      ylab="Voltage"
    )
# Lower-Right Panel: Plot "Global_reactive_power" versus Time
plot( x=ep_Feb_01_02_df$DateTime, y=ep_Feb_01_02_df$Global_reactive_power,
      type="l",
      xlab="datetime",
      ylab="Global_reactive_power"
    )
dev.off() # end the plot session to our PNG file
|
0782b17be6dace46fc4d3c74c7bffcf8b804f1e1
|
f162035df72bdce32a08b39c48ea85b0da1b5b29
|
/R scripts/LIHC_splicing_figures.R
|
ec690b28257b7eeb2e6f84c3f1a6e01fe4735385
|
[] |
no_license
|
QiliShi/RNA-splicing-in-HCC
|
42b67fa2a3d59f445ebd3ab580dabfbaffc32b01
|
e259b7ff85f7e48c4758bc0287be04b94b2970d2
|
refs/heads/master
| 2020-07-06T16:22:40.240222
| 2019-08-19T02:44:14
| 2019-08-19T02:44:14
| 203,078,562
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 26,782
|
r
|
LIHC_splicing_figures.R
|
### Main figures of RNA splicing in HCC - Qili Shi - 2019-03-25
#AS pie: distribution of differential splicing events by splice_type,
# written to a 600-dpi LZW-compressed TIFF with each wedge labelled by
# its raw count.
# NOTE(review): LIHC.PI.DS.data is created elsewhere in the analysis
# workspace -- confirm it is loaded before running this script.
pie.data <- as.data.frame(table(LIHC.PI.DS.data$splice_type))
mycolor<-c('#ff5f58','#ffeb6d','#ffae42','#ff7cb3','#00b2e3','#00fadc','#9fe98c')
library(extrafont)
tiff(filename = 'E:/LIHC_splicing/pie_LIHC.tif', res=600, compression = "lzw", height=8, width=8, units="in",pointsize = 24)
par(family='Times New Roman')
# border = NA suppresses the wedge outlines.
pie(pie.data$Freq,labels=pie.data$Freq,col=mycolor,border = NA)
dev.off()
# module bar plot
modules.splicing <- mapply(function(x,y){tmp <- as.data.frame(table(x));tmp[,2]<-tmp[,2]/sum(tmp[,2]);tmp$type <- y;return(tmp)},splicing.type,names(splicing.type),SIMPLIFY = F)
modules.splicing.plot <- do.call(rbind,modules.splicing)
modules.splicing.plot <-modules.splicing.plot[modules.splicing.plot$type!='grey',]
library(ggplot2)
library(extrafont)
modules.splicing.plot$type <- factor(modules.splicing.plot$type,levels = names(splicing.type)[order(names(splicing.type),decreasing = T)],ordered = T)
tiff("E:/LIHC_splicing/modules_bar.tif", res=600, compression = "lzw", height=9, width=10, units="in")
bp<- ggplot(modules.splicing.plot , aes(x=type, y=Freq*100, fill=x))+
geom_bar(stat = "identity")+scale_fill_manual(values=c('#ff5f58','#ffeb6d','#ffae42','#ff7cb3','#00b2e3','#00fadc','#9fe98c'))
bp+theme_bw()+theme(text=element_text(family="Times New Roman",face = 'bold'),axis.title=element_text(size = 36),axis.text=element_text(size =28,colour = 'black'),
legend.text =element_text(size =28),legend.title=element_blank(),legend.key = element_rect(size = 8),
legend.key.size = unit(3, 'lines'))+labs(x ='',y='Percent(%)')+ coord_flip()
#+geom_text(aes(y=c(41/79*100,100),label=label), vjust=1.2, size=20)
dev.off()
## AT GO
modules.GOs.df.mito <- modules.GOs.df[grep('mitoch',modules.GOs.df$Description),]
modules.GOs.df.mito$module <- sapply(strsplit(row.names(modules.GOs.df.mito),"\\."),'[',1)
modules.GOs.df.mito <- modules.GOs.df.mito[!(modules.GOs.df.mito$module=='red'|modules.GOs.df.mito$module=='blue'|
modules.GOs.df.mito$module=='grey'),]
modules.GOs.df.mito$GeneRatio <- sapply(modules.GOs.df.mito$GeneRatio ,function(x) eval(parse(text =x)))
#fix(modules.GOs.df.mito)
modules.GOs.df.mito <-modules.GOs.df.mito[rev(order(modules.GOs.df.mito$Description)),]
modules.GOs.df.mito$Description <- factor(modules.GOs.df.mito$Description,
levels = unique(modules.GOs.df.mito$Description),ordered = T)
modules.GOs.df.mito$p.adjust <-(-log10(as.numeric(modules.GOs.df.mito$p.adjust )))
library(ggplot2)
library(extrafont)
p <- ggplot(modules.GOs.df.mito, aes_(x = ~module, y = ~Description,
size = ~GeneRatio))+geom_point() + aes_string(color ='p.adjust') + scale_size_continuous(range = c(4,10))+
scale_colour_gradient(low = "#80e1f1", high ="#ff6362" ,name='-log10(p)')+theme_bw()+labs(y = "",x="")
tiff("E:/LIHC_splicing/AT_modules_GO.tif", res=600, compression = "lzw", height=8, width=13, units="in")
p+theme(text=element_text(family="Times New Roman",face = 'bold'),axis.text.y=element_text(size =18,colour = 'black'),axis.text.x = element_text(colour = 'black',angle=45, size=20,hjust = 1),
axis.title.x=element_text(size =20),legend.text =element_text(size =20),legend.title =element_text(size =20))+
theme(panel.border = element_rect(fill=NA, colour = "black", size=2))
dev.off()
# RBP counts
library(ggplot2)
RBP.counts <- apply(LIHC.RBP.p.value,2,function(x) sum(x<0.05))
RBP.counts <- data.frame(counts=RBP.counts,modules=substring(names(RBP.counts),3))
RBP.counts <- RBP.counts[RBP.counts$modules!='grey',]
tiff("E:/LIHC_splicing//RBP_signiticant_new.tif", res=600, compression = "lzw", height=8, width=10, units="in")
p<- ggplot(RBP.counts,aes(x=modules, y=counts))+
geom_bar(stat = "identity",fill='#ff7473',width = 0.75)
p+theme_bw()+theme(text=element_text(family="Times New Roman",face = 'bold'),
axis.title=element_text(size = 28),axis.text=element_text(size =28,colour = 'black'),axis.text.x=element_text(angle=45, size=24,hjust = 1))+
labs(x ='',y='Number of RBPs')
# geom_text(aes(y=Percent*100+3,label =round(Percent*100,1)),size=7)
#+geom_text(aes(y=c(41/79*100,100),label=label), vjust=1.2, size=20)
dev.off()
RBP.GOs.df <- do.call(rbind,RBP.GOs)
RBP.GOs.table <- as.data.frame(table(RBP.GOs.df$Description))
choose.GO <- c('GO:0008380','GO:0050658','GO:0031123','GO:0009451','GO:0043631','GO:0001510','GO:0000966','GO:0000184','GO:0000288','GO:0006368')
RBP.GOs.plot <- RBP.GOs.df[RBP.GOs.df$ID%in%choose.GO,]
RBP.GOs.plot$modules <- gsub('ME(.+)\\.GO:.+','\\1',row.names(RBP.GOs.plot))
RBP.GOs.plot$Description <-gsub('nuclear-transcribed mRNA catabolic process,(.+)','\\1',RBP.GOs.plot$Description)
RBP.GOs.plot$GeneRatio <- sapply(RBP.GOs.plot$GeneRatio ,function(x) eval(parse(text =x)))
RBP.GOs.plot$Description[RBP.GOs.plot$ID=='GO:0006368'] <- 'transcription elongation from\n RNA polymerase II promoter'
RBP.GOs.plot <- RBP.GOs.plot[RBP.GOs.plot$modules!='grey',]
RBP.GOs.plot$p.adjust <- -log10(RBP.GOs.plot$p.adjust)
library(ggplot2)
library(extrafont)
p <- ggplot(RBP.GOs.plot, aes_(x = ~modules, y = ~Description,
size = ~GeneRatio))+geom_point() + aes_string(color ='p.adjust') + scale_size_continuous(range = c(4,10))+
scale_colour_gradient(low = "#80e1f1", high = "#ff6362",name='-log10(p)')+theme_bw()+labs(y = "")
tiff("E:/LIHC_splicing//RBP_modules_GO2.tif", res=600, compression = "lzw", height=8, width=13, units="in")
p+labs(x ='')+theme(text=element_text(family="Times New Roman",face = 'bold'),axis.text.y=element_text(size =18,colour = 'black'),axis.text.x = element_text(colour = 'black',angle=45, size=20,hjust = 1),
axis.title.x=element_text(size =20),legend.text =element_text(size =20),legend.title =element_text(size =20))+
theme(panel.border = element_rect(fill=NA, colour = "black", size=2))
dev.off()
#module figure
library(WGCNA)
tiff("E:/LIHC_splicing/module_cor2.tif", res=600, compression = "lzw", height=8, width=4.5,units="in")
# Will display correlations and their p-values
opar<-par(no.readonly = T)
par(mar=c(6.5, 10.4, 2.7, 1)+0.3)
# Display the correlation values within a heatmap plot
labeledHeatmap(Matrix = t(LIHC.RBP.cor[1:5,]),
colorLabels = FALSE,
xLabels = row.names(LIHC.RBP.cor)[1:5],
yLabels = colnames(LIHC.RBP.cor),
colors = blueWhiteRed(50),
setStdMargins = FALSE,
cex.text = 1.2,
cex.lab = 1.3,
zlim = c(-1,1))
dev.off()
par <- opar
# autoregulation
# box
tiff(filename = 'E:/LIHC_splicing/PI_DEGS_RBPs_new.tif', res=600, compression = "lzw", height=10, width=8, units="in")
# windowsFonts(A = windowsFont("Times New Roman"))
# par(family="A")
par(cex.lab=2,cex.axis=2,font.axis=2,font.lab=2,lwd=3,family='Times New Roman',mar=c(7, 5, 2.7, 1)+0.3)
p <- list('ARSP-RBPs'=rowMeans(log2(LIHCMatrix.RBP[row.names(LIHCMatrix.RBP)%in%all.genes[all.genes%in%row.names(LIHC.DEGs.RBP)],]+1)),
'AR-RBPs'=rowMeans(log2(LIHCMatrix.RBP[row.names(LIHCMatrix.RBP)%in%LIHC.DEGs.RBP.PI,]+1)),
'NAR-RBPs'=rowMeans(log2(LIHCMatrix.RBP[!row.names(LIHCMatrix.RBP)%in%LIHC.DEGs.RBP.PI,]+1)))
boxplot(p,col=c("#ff6362","#80e1f1","#80debb"),outline=F,boxwex=0.5,ylim=c(10.5,30),ylab='mRNA expression')
dev.off()
#bar
library(ggplot2)
library(extrafont)
RBP.bar <- LIHC.DEGs.RBP[row.names(LIHC.DEGs.RBP)%in%LIHC.DEGs.RBP.PI&LIHC.DEGs.RBP$logFC>0,]
RBP.bar <- RBP.bar[order(RBP.bar$logFC,decreasing = T)[1:10],]
RBP.bar$symbol <- factor(row.names(RBP.bar),levels = row.names(RBP.bar),order=T)
RBP.bar$q <- -log10(RBP.bar$P.ajust)
tiff("E:/LIHC_splicing/RBP_overlap.tif", res=600, compression = "lzw", height=8, width=8, units="in")
p<- ggplot(RBP.bar,aes(x=symbol,y=logFC,fill=q))+
geom_bar(stat = "identity",width = 0.75)+scale_fill_gradient(low="#80e1f1",high='#ff6362',name='-log10(p)')
p+theme_minimal()+theme(text=element_text(family="Times New Roman",face = 'bold'),
axis.title=element_text(size = 28),axis.text=element_text(size =28,colour = 'black'),axis.text.x=element_text(angle=45, size=24,hjust = 1),
legend.text =element_text(size =20),legend.title =element_text(size =20))+
labs(x ='',y='log2(FC)')
# geom_text(aes(y=Percent*100+3,label =round(Percent*100,1)),size=7)
#+geom_text(aes(y=c(41/79*100,100),label=label), vjust=1.2, size=20)
dev.off()
#venn
library(VennDiagram)
venn.diagram(list(row.names(LIHC.DEGs.RBP),LIHC.PI.DS.data$symbol[LIHC.PI.DS.data$symbol%in%c(RDBP,RBP$SYMBOL)]),
filename = "E:/LIHC_splicing/RBP_DASEs_DEGs_new.tif",fill =c("#80e1f1",'#ff6362'),cex=0,lwd=2,resolution = 800,category.names = c(1,1))
library(VennDiagram)
venn.diagram(list(RBP.self,LIHC.DEGs.RBP.PI),
filename = "E:/LIHC_splicing/AR-RBP_eclip_new.tif",fill =c("#8484FB", "#CBC9FB"),cex=0,lwd=2,resolution = 800,category = rep("", 2))
#autoregulation survival
tiff(filename = 'E:/LIHC_splicing/PI_sur_RBPs.tif', res=600, compression = "lzw", height=10, width=8, units="in")
# windowsFonts(A = windowsFont("Times New Roman"))
# par(family="A")
par(cex.lab=2,cex.axis=2,font.axis=2,font.lab=2,lwd=3,family='Times New Roman',mar=c(7, 5, 2.7, 1)+0.3)
p <- list('RBPs-Sig-onco'=rowMeans(LIHC.RBP.sig.onco),
'RBPs-Sig'=rowMeans(LIHC.RBP.sig.exp[LIHC.RBP.anti,]),
'RBPs-NS'=rowMeans(LIHC.RBP.sig.exp[!row.names(LIHCMatrix.RBP)%in%LIHC.RBP.anti,]))
boxplot(p,col=c("#ff6362","#80e1f1","#80debb"),outline=F,boxwex=0.5,ylim=c(10,30),ylab='mRNA expression')
dev.off()
stage
venn.diagram(list(eCLIP=RBP.self,'RBP-Sig'=LIHC.RBP.sig,'AS-Sig'=LIHC.PI.RBP.sig),filename = "E:/LIHC_splicing/SURRBPVenn.tif")
# hUb MMS
tiff(filename = 'E:/LIHC_splicing/MMS_sur_RBPs.tif', res=600, compression = "lzw", height=10, width=8, units="in")
par(cex.lab=2,cex.axis=2,font.axis=2,font.lab=2,lwd=3,family='Times New Roman',mar=c(7, 5, 2.7, 1)+0.3)
LIHC.GMM.list <- list('RBPs-AS-Sig'=abs(LIHC.GMM$MMS[LIHC.PI.pvalue < 0.05&LIHC.GMM$module!='grey'&LIHC.PI.DS.data$symbol%in%c(RDBP,RBP$SYMBOL)]),
'AS-Sig'=abs(LIHC.GMM$MMS[LIHC.PI.pvalue < 0.05&LIHC.GMM$module!='grey']),
'AS-NS'=abs(LIHC.GMM$MMS[LIHC.PI.pvalue>=0.05&LIHC.GMM$module!='grey']))
boxplot(LIHC.GMM.list,col=c("#ff6362","#80e1f1","#80debb"),outline=F,boxwex=0.5,ylim=c(0.25,1.15),ylab='Module membership')
dev.off()
#cytoscape
library(WGCNA)
MEs0 <- moduleEigengenes(t(LIHC.PI.DS), mergedColors.LIHC)$eigengenes
MEs <- orderMEs(MEs0)
TOM <- TOMsimilarityFromExpr(t(LIHC.PI.DS), power = 3,networkType = "unsigned", TOMType = "unsigned")
modules <- c('green')
inModule <- is.finite(match(mergedColors.LIHC, modules))
modgenes <- LIHC.PI.DS.data$as_id[inModule]
modTOM <- TOM[inModule, inModule]
dimnames(modTOM) <- list(modgenes, modgenes)
library(WGCNA)
cyt <- exportNetworkToCytoscape(modTOM,
edgeFile = paste('E:/LIHC_splicing/cytoscape_edge.txt', sep=""),
nodeFile = paste('E:/LIHC_splicing/cytoscape_node.txt', sep=""),
weighted = TRUE,
threshold =0.05,
nodeNames = modgenes,
nodeAttr = mergedColors.LIHC[inModule]);
edge <-cyt$edgeData
node <- cyt$nodeData
edge <- edge[order(abs(edge$weight),decreasing = T)[1:(0.05*nrow(edge))],]
node <- node[node$nodeName%in%c(edge$fromNode,edge$toNode),]
node$symbol <-LIHC.PI.DS.data$symbol[LIHC.PI.DS.data$as_id%in%node$nodeName]
library(igraph)
g.green <- graph.data.frame(cyt$edgeData,directed="FALSE")
node$strength <- graph.strength(g.green)[as.character(node$nodeName)]
node$RBP <- ifelse(node$symbol%in%c(RBP$SYMBOL,RDBP),'Y','N')
node$MMS <- abs(LIHC.GMM[LIHC.PI.DS.data$as_id%in%node$nodeName,1])
node$type <- 'NS'
node$type[node$nodeName%in%LIHC.PI.DS.data$as_id[LIHC.PI.pvalue<0.05]] <- 'significant'
node$type[node$nodeName%in%LIHC.PI.DS.data$as_id[LIHC.PI.pvalue<0.05&LIHC.PI.DS.data$symbol%in%c(RDBP,RBP$SYMBOL)]] <-'RBP-Sig'
write.table(edge,'E:/LIHC_splicing/cytoscape_green_edge.txt',quote = F,row.names = F,sep='\t')
write.table(node,'E:/LIHC_splicing/cytoscape_green_node.txt',quote = F,row.names = F,sep='\t')
# RNA splicing modules
modules <- 'blue'
inModule <- mergedColors.LIHC=='blue'& LIHC.PI.DS.data$symbol%in%blue.genes
modgenes <- LIHC.PI.DS.data$as_id[inModule]
modTOM <- TOM[inModule, inModule]
dimnames(modTOM) <- list(modgenes, modgenes)
cyt <- exportNetworkToCytoscape(modTOM,
edgeFile = paste('E:/LIHC_splicing/cytoscape_edge_blue.txt', sep=""),
nodeFile = paste('E:/LIHC_splicing/cytoscape_node_blue.txt', sep=""),
weighted = TRUE,
threshold =0.02,
nodeNames = modgenes,
nodeAttr = mergedColors.LIHC[inModule]);
node <- cyt$nodeData
node$symbol <-LIHC.PI.DS.data$symbol[LIHC.PI.DS.data$as_id%in%node$nodeName]
library(igraph)
g.blue <- graph.data.frame(cyt$edgeData,directed="FALSE")
node$strength <- graph.strength(g.blue)[as.character(node$nodeName)]
node$RBP <- ifelse(node$symbol%in%c(RBP$SYMBOL,RDBP),'Y','N')
write.table(node,'E:/LIHC_splicing/cytoscape_node_blue.txt',quote = F,row.names = F,sep='\t')
#magenta
modules <- 'magenta'
inModule <- mergedColors.LIHC=='magenta'& LIHC.PI.DS.data$symbol%in%magenta.genes
modgenes <- LIHC.PI.DS.data$as_id[inModule]
modTOM <- TOM[inModule, inModule]
dimnames(modTOM) <- list(modgenes, modgenes)
cyt <- exportNetworkToCytoscape(modTOM,
edgeFile = paste('E:/LIHC_splicing/cytoscape_edge_magenta.txt', sep=""),
nodeFile = paste('E:/LIHC_splicing/cytoscape_node_magenta.txt', sep=""),
weighted = TRUE,
threshold =0.02,
nodeNames = modgenes,
nodeAttr = mergedColors.LIHC[inModule]);
node <- cyt$nodeData
node$symbol <-LIHC.PI.DS.data$symbol[LIHC.PI.DS.data$as_id%in%node$nodeName]
library(igraph)
g.magenta <- graph.data.frame(cyt$edgeData,directed="FALSE")
node$strength <- graph.strength(g.magenta)[as.character(node$nodeName)]
node$RBP <- ifelse(node$symbol%in%c(RBP$SYMBOL,RDBP),'Y','N')
write.table(node,'E:/LIHC_splicing/cytoscape_node_magenta.txt',quote = F,row.names = F,sep='\t')
#tan
modules <- 'tan'
inModule <- mergedColors.LIHC=='tan'& LIHC.PI.DS.data$symbol%in%tan.genes
modgenes <- LIHC.PI.DS.data$as_id[inModule]
modTOM <- TOM[inModule, inModule]
dimnames(modTOM) <- list(modgenes, modgenes)
cyt <- exportNetworkToCytoscape(modTOM,
edgeFile = paste('E:/LIHC_splicing/cytoscape_edge_tan.txt', sep=""),
nodeFile = paste('E:/LIHC_splicing/cytoscape_node_tan.txt', sep=""),
weighted = TRUE,
threshold =0.02,
nodeNames = modgenes,
nodeAttr = mergedColors.LIHC[inModule]);
node <- cyt$nodeData
node$symbol <-LIHC.PI.DS.data$symbol[LIHC.PI.DS.data$as_id%in%node$nodeName]
library(igraph)
g.tan <- graph.data.frame(cyt$edgeData,directed="FALSE")
node$strength <- graph.strength(g.tan)[as.character(node$nodeName)]
node$RBP <- ifelse(node$symbol%in%c(RBP$SYMBOL,RDBP),'Y','N')
write.table(node,'E:/LIHC_splicing/cytoscape_node_tan.txt',quote = F,row.names = F,sep='\t')
# green GO
green <- modules.GOs$green
all.genes <- strsplit(green$geneID,'/')
hub.rbp <- unique(node$symbol[node$type=='RBP-Sig'&node$MMS>0.9])
green$'HubRatio' <- sapply(all.genes,function(x) sum(hub.rbp%in%x))
green$'HubRatio' <- green$'HubRatio'/green$Count
go.plot <- green[sapply(all.genes,function(x) sum(hub.rbp%in%x)>5),]
go.plot$Description <-gsub('nuclear-transcribed mRNA catabolic process,(.+)','\\1',go.plot$Description)
go.plot$GeneRatio <- sapply(go.plot$GeneRatio ,function(x) eval(parse(text =x)))
go.plot <- go.plot[c(3,6,7,8,10:13,17),]
go.plot$Description <- factor(go.plot$Description,levels = rev(go.plot$Description),ordered = T)
go.plot$p.adjust <- -log10(go.plot$p.adjust)
library(ggplot2)
library(extrafont)
go.plot$p.adjust <- signif(go.plot$p.adjust,1)
p <- ggplot(go.plot , aes(x = HubRatio, y = Description,
size = GeneRatio,color=p.adjust))+geom_point() + scale_size_continuous(range = c(4,10))+
scale_colour_gradient(low = "#80e1f1", high = "#ff6362",name='-log10(p)')+theme_bw()+labs(y='')
tiff("E:/LIHC_splicing/green_GO.tif", res=600, compression = "lzw", height=10, width=10, units="in")
p+theme(text=element_text(family="Times New Roman",face = 'bold'),axis.text.y=element_text(size =20,colour = 'black'),axis.text.x = element_text(colour = 'black',angle=45, size=20,hjust = 1),
axis.title=element_text(size =24),legend.text =element_text(size =20),legend.title =element_text(size =20))+
theme(panel.border = element_rect(fill=NA, colour = "black", size=2))+scale_x_continuous(limits = c(0.1, 0.18))
dev.off()
# RBP expression survival
library(ggplot2)
library(extrafont)
tmp <- data.frame('Percent'=c(231/302,1-231/302),group=c('Anti RBPs-sig','All RBPs-sig'),label=c(231,302))
tiff("E:/LIHC_splicing/RBPbar.tif", res=600, compression = "lzw", height=8, width=6.5, units="in")
bp<- ggplot(tmp, aes(x="", y=Percent*100, fill=group))+
geom_bar(width = 1, stat = "identity")+scale_fill_manual(values=c("#80e1f1","#ff6362"))
bp+theme_bw()+theme(text=element_text(family="Times New Roman",face = 'bold'),axis.title=element_text(size = 36),axis.text=element_text(size =36,colour = 'black'),
legend.text =element_text(size =28),legend.title=element_blank(),legend.key = element_rect(size = 8),
legend.key.size = unit(3, 'lines'))+labs(x ='',y='Percent(%)')+
geom_text(aes(y=c(231/302*100,100),label=label), vjust=1.2, size=20)
dev.off()
library("survminer")
library(survival)
df <- data.frame(OS=LIHC.clinical$OS,groups=as.character(ifelse(t(LIHCMatrix.RBP['SOX11',])>
median(t(LIHCMatrix.RBP['SOX11',])),'high','low')),
status=LIHC.clinical$vital_status=='dead',stringsAsFactors = F)
SOX11.fit <- survfit(Surv(as.numeric(OS)/30,status)~groups,data =df )
tiff("E:/LIHC_splicing/SOX11_survial.tif", res=600, compression = "lzw", height=10, width=10, units="in")
ggsurvplot(SOX11.fit,data=df ,xlab = "Time in months ",
legend.title = "SOX11",legend.labs = c("high", "low"),pval = T,
conf.int = F,palette = c('#e7220e','#354c9f'),
ggtheme = theme_survminer(base_family = 'Times New Roman',font.x =28,font.y =28,font.legend = 28,font.tickslab=c(24,"plain", "black")))
graph2ppt(file='E:/LIHC_splicing/SOX11_survial.pptx',width=12,height=12)
dev.off()
df <- data.frame(OS=LIHC.clinical$OS,groups=as.character(ifelse(t(LIHCMatrix.RBP['KPNA2',])>
median(t(LIHCMatrix.RBP['KPNA2',])),'high','low')),
status=LIHC.clinical$vital_status=='dead',stringsAsFactors = F)
KPNA2.fit <- survfit(Surv(as.numeric(OS)/30,status)~groups,data =df )
tiff("E:/LIHC_splicing/KPNA2_survial.tif", res=600, compression = "lzw", height=10, width=10, units="in")
ggsurvplot(KPNA2.fit,data=df ,xlab = "Time in months ",
legend.title = "KPNA2",legend.labs = c("high", "low"),
conf.int = F,palette = c('#e7220e','#354c9f'),
ggtheme = theme_survminer(base_family = 'Times New Roman',font.x =28,font.y =28,font.legend = 28,font.tickslab=c(24,"plain", "black")))
graph2ppt(file='E:/LIHC_splicing/KPNA2_survial.pptx',width=12,height=12)
dev.off()
library(VennDiagram)
venn.diagram(list(LIHC.RBP.anti,row.names(LIHC.RBP.DEG)),
filename = "E:/LIHC_splicing/RBP_sig.tif",cex=0,category = c("155", '140'))
#stage box
tiff(filename = 'E:/LIHC_splicing/stages_RBPs.tif', res=600, compression = "lzw", height=10, width=8, units="in")
# windowsFonts(A = windowsFont("Times New Roman"))
# par(family="A")
par(cex.lab=2,cex.axis=2,font.axis=2,font.lab=2,lwd=3,family='Times New Roman',mar=c(7, 5, 2.7, 1)+0.3)
boxplot(results.stage.sig,col=c("#80debb","#80e1f1","#ff6362"),outline=F,boxwex=0.5,ylim=c(4,17),ylab='mRNA expression')
legend("topleft", inset=.05,legend=c("Stage1","Stage2","Stage3"),
fill=c("#ff6362","#80e1f1","#80debb"), cex=2,text.font=2,
box.lty=0)
dev.off()
# survival
library("survminer")
library(extrafont)
clusters.df <- data.frame(OS=LIHC.clinical$OS,stage=LIHC.clinical$tumor_stage,Cluster=clusters,
status=LIHC.clinical$vital_status=='dead',stringsAsFactors = F)
fit <- survfit(Surv(as.numeric(OS)/30,status)~Cluster,data = clusters.df)
tiff("E:/LIHC_splicing/clusters_survial.tif", res=600, compression = "lzw", height=8, width=8, units="in")
ggsurvplot(fit,data=clusters.df,xlab = "Time in months",palette = c("#ff6362","#80e1f1","#80debb",'#ffae00'), pval = TRUE,
ggtheme = theme_survminer(base_family = 'Times New Roman',font.x =20,font.y =20,font.legend = 20,font.tickslab=c(20,"plain", "black")))
dev.off()
#hub
clusters.hub.df <- data.frame(OS=LIHC.clinical$OS,stage=LIHC.clinical$tumor_stage,Cluster=clusters.hub,
status=LIHC.clinical$vital_status=='dead',stringsAsFactors = F)
fit <- survfit(Surv(as.numeric(OS)/30,status)~Cluster,data = clusters.hub.df)
tiff("E:/LIHC_splicing/clusters_survial_hub.tif", res=600, compression = "lzw", height=8, width=8, units="in")
ggsurvplot(fit,data=clusters.hub.df,xlab = "Time in months",palette = c("#ff7cb3","#80e1f1","#80debb",'#ffae00',"#00b2e3","#ff6362"), pval = TRUE,
ggtheme = theme_survminer(base_family = 'Times New Roman',font.x =20,font.y =20,font.legend = 20,font.tickslab=c(20,"plain", "black")))
dev.off()
#box
LIHC.RBP <-log2(LIHCMatrix.RBP+1)
results <- sapply(unique(clusters.hub),function(x) colMeans(LIHC.RBP[,names(clusters.hub)[clusters.hub==x]]),simplify = F)
tiff(filename = 'E:/LIHC_splicing/clusters_RBPs.tif', res=600, compression = "lzw", height=10, width=8, units="in")
# windowsFonts(A = windowsFont("Times New Roman"))
# par(family="A")
par(cex.lab=2,cex.axis=2,font.axis=2,font.lab=2,lwd=3,family='Times New Roman',mar=c(7, 5, 2.7, 1)+0.3)
boxplot(results,col=c("#ff7cb3","#80e1f1","#80debb",'#ffae00',"#00b2e3","#ff6362"),outline=F,boxwex=0.5,ylim=c(16.5,17.8),ylab='RBPs mRNA expression')
graph2ppt(file='E:/LIHC_splicing/clusters_RBPs.pptx',width=8,height=10)
dev.off()
#
LIHC.RBP <-log2(LIHCMatrix.RBP+1)
results <- sapply(unique(clusters.hub),function(x) colMeans(log2(LIHCMatrix.RBP[,names(clusters.hub)[clusters.hub==x]]+1)),simplify = F)
tiff(filename = 'E:/LIHC_splicing/clusters_RBPs-DEG.tif', res=600, compression = "lzw", height=10, width=8, units="in")
# windowsFonts(A = windowsFont("Times New Roman"))
# par(family="A")
par(cex.lab=2,cex.axis=2,font.axis=2,font.lab=2,lwd=3,family='Times New Roman',mar=c(7, 5, 2.7, 1)+0.3)
boxplot(results,col=c("#ff7cb3","#80e1f1","#80debb",'#ffae00',"#00b2e3","#ff6362"),outline=F,boxwex=0.5,ylim=c(16.5,17.5),
ylab='RBPs-DEG mRNA expression')
dev.off()
#
LIHC.RBP <-log2(LIHCMatrix.RBP+1)
results <- sapply(unique(clusters.hub),function(x) colMeans(LIHC.RBP[LIHC.RBP.sig,names(clusters.hub)[clusters.hub==x]]),simplify = F)
tiff(filename = 'E:/LIHC_splicing/clusters_RBPs-sig.tif', res=600, compression = "lzw", height=10, width=8, units="in")
# windowsFonts(A = windowsFont("Times New Roman"))
# par(family="A")
par(cex.lab=2,cex.axis=2,font.axis=2,font.lab=2,lwd=3,family='Times New Roman',mar=c(7, 5, 2.7, 1)+0.3)
boxplot(results,col=c("#ff7cb3","#80e1f1","#80debb",'#ffae00',"#00b2e3","#ff6362"),outline=F,boxwex=0.5,ylim=c(7,11),ylab='RBPs-DEG mRNA expression')
dev.off()
#subtype 6 heatmap
RBP.subtype6.heatmap <- t(apply(LIHCMatrix.RBP[RBP.subtype6,],1,scale))
RBP.subtype6.heatmap <-RBP.subtype6.heatmap[,order(clusters.hub,decreasing = F)]
cbPalette <- c("#ff7cb3","#80e1f1","#80debb",'#ffae00',"#00b2e3","#ff6362")
table(clusters.hub.df$Cluster)
colors=c(rep(cbPalette[1],40),rep(cbPalette[2],66),rep(cbPalette[3],145),rep(cbPalette[4],55),rep(cbPalette[5],22),rep(cbPalette[6],43))
library(gplots)
tiff("E:/LIHC_splicing/heatmap_colorkey.tif", res=600, compression = "lzw", height=10, width=16, units="in")
#pdf("E:/LIHC_splicing/heatmap3.pdf", height=16, width=10)
heatmap.2(RBP.subtype6.heatmap,Colv=F,col=greenred(75),scale=NULL,margins = c(11,13),
ColSideColors=colors,key=TRUE,keysize=1,symkey=FALSE, density.info="none", trace="none", cexRow=2.2,cexCol =0.00001,font=2)
dev.off()
graph2ppt(file='E:/LIHC_splicing/heatmap.pptx',width=10,height=16)
P <- heatmap.2(RBP.subtype6.heatmap,Colv=F,col=greenred(75),scale=NULL,
ColSideColors=colors,key=F,symkey=FALSE, density.info="none", trace="none", cexRow=0.1,cexCol =0.00001)
row.names(RBP.subtype6.heatmap)[rev(P$rowInd)]
# RBPs events
library(ggplot2)
library(extrafont)
RBPs_hub_RBPs <- data.frame(percent=100*rowSums(t(rbp.events.pvalue<0.05))/94,genes=factor(colnames(rbp.events.pvalue),
levels = rev(row.names(RBP.subtype6.heatmap)[rev(P$rowInd)]),ordered = T))
#RBPs_hub_RBPs <- RBPs_hub_RBPs[RBPs_hub_RBPs$percent>50,]
tiff("E:/LIHC_splicing/RBPs_RBPs_new.tif", res=600, compression = "lzw", height=10, width=8, units="in")
p<- ggplot(RBPs_hub_RBPs ,aes(x=genes,y=percent,fill='#ff6362'))+
geom_bar(stat = "identity",width = 0.75)
p+theme_minimal()+theme(text=element_text(family="Times New Roman",face = 'bold'),
axis.title=element_text(size = 16),axis.text=element_text(size=16,colour = 'black'),axis.text.x=element_text(angle=45, size=20,hjust = 1,colour = 'black'),
legend.text =element_text(size =20),legend.title =element_text(size =24))+
labs(x ='',y='Percent(%)')+coord_flip()
# geom_text(aes(y=Percent*100+3,label =round(Percent*100,1)),size=7)
#+geom_text(aes(y=c(41/79*100,100),label=label), vjust=1.2, size=20)
dev.off()
rev(P$rowInd)
# clip track
auto <- intersect(intersect(row.names(LIHC.DEGs.RBP),RBP.self),intersect(RBP.self,LIHC.PI.DS.data$symbol))
ILF3.AS <- LIHC.PI.DS.data[LIHC.PI.DS.data$symbol=='ILF3',]
|
ca0ac4e8ffb84846175870a3e3bdac805e795f5a
|
714dae73788e1237ad98193b5c4714c021da5cdc
|
/R/get_initial_params.R
|
e3b78e47312d9243df472ec18d42094d1bfda6e5
|
[
"MIT"
] |
permissive
|
davidmaciel/covertzbr
|
a69fa9586401e7dc59b6cdb4da89b95d77703167
|
fb0e444e5a8b216f4f5dd6db8348100ae83a4eb7
|
refs/heads/master
| 2023-01-23T17:21:10.187508
| 2020-12-10T11:16:32
| 2020-12-10T11:16:32
| 273,328,986
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,590
|
r
|
get_initial_params.R
|
#' Possible values for the Gompertz curves
#'
#' Given a min and a max value for the three parameters in the Gompertz
#' equation, generates a sequence of possible values and all the combinations
#' of them.
#'
#' @param lambda_min Numeric. Minimum possible value for the lambda parameter in the Gompertz
#' equation.
#' @param lambda_max Numeric. Maximum possible value for the lambda parameter in the Gompertz
#' equation.
#' @param mu_min Numeric. Minimum possible value for the mu parameter in the Gompertz
#' equation.
#' @param mu_max Numeric. Maximum possible value for the mu parameter in the Gompertz
#' equation.
#' @param a_min Numeric. Minimum possible value for the alpha parameter in the Gompertz
#' equation.
#' @param a_max Numeric. Maximum possible value for the alpha parameter in the Gompertz
#' equation.
#' @param int_lambda Numeric. Increment of the sequence for the possible values of the lambda parameter.
#' @param int_mu Numeric. Increment of the sequence for the possible values of the mu parameter.
#' @param int_a Numeric. Increment of the sequence for the possible values of the alpha parameter.
#'
#' @return A data.frame (as produced by \code{\link{expand.grid}}) with every
#' possible combination of the values in the three parameter sequences, in
#' columns \code{lambda}, \code{mu} and \code{a}.
#' @export
get_initial_params <-
  function(lambda_min, lambda_max, mu_min, mu_max, a_min, a_max,
           int_lambda = 1, int_mu = 10, int_a = 100){
    # Regular grid of candidate values for each Gompertz parameter.
    lambda <- seq(lambda_min, lambda_max, by = int_lambda)
    mu <- seq(mu_min, mu_max, by = int_mu)
    a <- seq(a_min, a_max, by = int_a)
    # Cartesian product of the three sequences; the first column (lambda)
    # varies fastest, following the expand.grid() convention.
    expand.grid("lambda" = lambda, "mu" = mu, "a" = a)
  }
|
07a3c7aaaac5a5d87c8f667738eeaa610bebde69
|
4712fa74d2c13ed84eb3ba2eb0c1c24a4680bb56
|
/utilities.R
|
e8efb5c1a11e4f62985a3379f9ff41de3303c1c9
|
[] |
no_license
|
npwinkler/STAT-work
|
a7bfda8b5be48410f9db9bf848bd12baa4636f8d
|
16a2432eb3977e8b1b8f5cd4163364f430a8082b
|
refs/heads/master
| 2021-01-10T05:49:12.055700
| 2016-09-18T17:45:48
| 2016-09-18T17:45:48
| 51,219,298
| 0
| 0
| null | null | null | null |
WINDOWS-1252
|
R
| false
| false
| 1,550
|
r
|
utilities.R
|
# Nolan Winkler
# Exploratory analysis of monthly gas usage (utilities dataset from fastR),
# using lattice graphics (xyplot/bwplot come from lattice, loaded via fastR).
# NOTE(review): require() returns FALSE instead of erroring when fastR is
# missing; library(fastR) would fail fast -- confirm before changing.
require(fastR)
# Utilities
# Load the utilities dataset and take a quick look at it
data(utilities)
summary(utilities)
# raw plot of monthly gas usage (ccf) over fractional-year time
xyplot(ccf ~ (year+month/12), data=utilities, xlab="Time", ylab="Gas Usage (ccf)", main="Gas Usage by Month 1") -> prelimplot1
# group by calendar month & output nicer (one point series/line per month)
xyplot(ccf ~ (year+month/12), data=utilities, xlab="Time", ylab="Gas Usage (ccf)", groups=month, type=c('p', 'l'), main="Gas Usage by Month 2") -> prelimplot2
bwplot(ccf~factor(month), data=utilities, ylab="Gas Usage (ccf)", xlab="Month", main="Gas Usage by Month 3") -> prelimplot3
# Adjust for the different number of billing days per month (ccf per day)
utilities$ccfpday <- utilities$ccf / utilities$billingDays
xyplot(ccfpday ~ (year+month/12), data=utilities, xlab="Time", ylab="Gas Usage (ccf per day)", main="Gas Usage by Month Adjusted for Billing Days 1") -> prelimplot4
xyplot(ccfpday ~ (year+month/12), data=utilities, xlab="Time", ylab="Gas Usage (ccf per day)", groups=month, type=c('p', 'l'), main="Gas Usage by Month Adjusted for Billing Days 2") -> prelimplot5
bwplot(ccfpday~factor(month), data=utilities, ylab="Gas Usage (ccf per day)", xlab="Month", main="Gas Usage by Month Adjusted for Billing Days 3") -> prelimplot6
# Check daily usage vs. temperature
xyplot(ccfpday ~ temp, data=utilities, xlab="Temperature (°F)", ylab="Gas Usage (ccf per day)", main="Gas Usage vs. Temperature Adjusted for Billing Days") -> prelimplot7
# Fit a simple linear regression since the scatterplot looks roughly linear
ols <- lm(ccfpday ~ temp, data=utilities)
summary(ols)
|
2f06dfe9fae7b2cb1c5d17fd268dae8506db6852
|
aa0db6e46b4641d4925ee4fc5e68bdadfb1c585c
|
/Lecture Code/Lecture 11/lecture_11_code.R
|
77044b14d17357262e670c3f52acd573d772516e
|
[] |
no_license
|
ChrisHayduk/Stat-Methods-and-Computation-Code
|
7e3da8029d7b7121387d7a5919004fb99479fd60
|
e956b5b016f7a1eb19fbba5ced8abf68a7f87355
|
refs/heads/master
| 2021-03-14T05:41:39.251877
| 2020-03-12T04:15:45
| 2020-03-12T04:15:45
| 246,741,920
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,726
|
r
|
lecture_11_code.R
|
#############################################
## ##
## SDGB-7844 - Lecture 11 Code ##
## Hypothesis Testing ##
## Confidence Intervals ##
## Prof. Matthew Murphy ##
## ##
############################################
###############################################
# sampling distribution: Poisson data #
###############################################
# Simulate the sampling distribution of the sample mean for Poisson data.
#
# n      : sample size of each simulated dataset
# k      : number of replicates (how many sample means to generate)
# lambda : Poisson rate parameter
#
# Returns a numeric vector of length k holding the k sample means.
samp_dist <- function(n, k, lambda){
  # All three inputs must be strictly positive. Fail loudly instead of
  # returning a sentinel string: the original returned "error!", which a
  # caller could silently propagate as if it were data.
  if(any(c(k, n, lambda) <= 0)){
    stop("n, k and lambda must all be positive", call. = FALSE)
  }
  mean_vector <- numeric(k)               # preallocate the results vector
  for(i in seq_len(k)){
    x <- rpois(n = n, lambda = lambda)    # simulate one Poisson sample
    mean_vector[i] <- mean(x)             # compute and store x-bar
  }
  mean_vector                             # output
}
###################################
#   running & graphing samp_dist  #
###################################
# Draw the Poisson(5) PMF plus three simulated sampling distributions of the
# sample mean (n = 10, 30, 100) in a 2x2 grid, to illustrate the CLT.
# Relies on samp_dist() defined above; output goes to the active graphics
# device unless the pdf() call below is uncommented.
#pdf("poisson_sampdist.pdf", width=12, height=8)
par(mfrow=c(2,2))
#####
## Poisson population distribution
lambda <- 5
k <- 0:15
prob <- dpois(k, lambda=lambda)
barplot(prob, names.arg=k, las=TRUE, col="firebrick", xlab="k (number of calls)", ylim=c(0, 0.3),
ylab="probability P(X=k)", main=expression(paste("PMF for X~Poisson(", lambda==5,")", sep="")))
legend("topright", legend=c(expression(paste(lambda==5, sep="")), "", "",
expression(paste(mu==5, sep="")),
expression(paste(sigma==2.24, sep=""))), col="white", bty="n", cex=1.2)
#######
# sampling distribution simulation: 500 sample means at each sample size
z.10 <- samp_dist(n=10, k=500, lambda=5)
z.30 <- samp_dist(n=30, k=500, lambda=5)
z.100 <- samp_dist(n=100, k=500, lambda=5)
# shared axis limits so the three histograms are directly comparable
y.max <- ceiling(max(c(hist(z.10, plot=FALSE)$density, hist(z.30, plot=FALSE)$density, hist(z.100, plot=FALSE)$density)))
x.min <- min(c(z.10, z.30, z.100))
x.max <- max(c(z.10, z.30, z.100))
#####
## n= 10
# histogram: density not frequency
hist(z.10, freq=FALSE, ylim=c(0,y.max), xlim=c(x.min, x.max),
main=expression(paste("Sampling Distribution of ",bar(x), ": n=10, ", lambda==5,sep="")),
las=TRUE, col="cadet blue", xlab="sample means (from Poisson data)", ylab="density")
# add normal density curve to histogram (CLT approximation: mean 5, var 5/10):
curve(dnorm(x, mean=5, sd=sqrt(5/10)), add=TRUE,
lwd=2, col="firebrick")
# add legend comparing theoretical vs. simulated moments
legend("topright", legend=c(expression(paste("E[",bar(x),"]=5", sep="")),
paste("mean of sample means: ", round(mean(z.10), digits=3), sep=""), "",
expression(paste("SD[",bar(x),"]=0.707")),
paste("sd of sample means: ", round(sd(z.10), digits=3), sep="")), bty="n")
#####
## n=30
# histogram: density not frequency
hist(z.30, freq=FALSE, ylim=c(0,y.max), xlim=c(x.min, x.max),
main=expression(paste("Sampling Distribution of ",bar(x), ": n=30, ", lambda==5,sep="")),
las=TRUE, col="cadet blue", xlab="sample means (from Poisson data)", ylab="density")
# add normal density curve to histogram (CLT approximation: mean 5, var 5/30):
curve(dnorm(x, mean=5, sd=sqrt(5/30)), add=TRUE,
lwd=2, col="firebrick")
# add legend comparing theoretical vs. simulated moments
legend("topright", legend=c(expression(paste("E[",bar(x),"]=5", sep="")),
paste("mean of sample means: ", round(mean(z.30), digits=3), sep=""), "",
expression(paste("SD[",bar(x),"]=0.408")),
paste("sd of sample means: ", round(sd(z.30), digits=3), sep="")), bty="n")
#####
## n=100
# histogram: density not frequency
hist(z.100, freq=FALSE, ylim=c(0,y.max), xlim=c(x.min, x.max),
main=expression(paste("Sampling Distribution of ",bar(x), ": n=100, ", lambda==5,sep="")),
las=TRUE, col="cadet blue", xlab="sample means (from Poisson data)", ylab="density")
# add normal density curve to histogram (CLT approximation: mean 5, var 5/100):
curve(dnorm(x, mean=5, sd=sqrt(5/100)), add=TRUE,
lwd=2, col="firebrick")
# add legend comparing theoretical vs. simulated moments
legend("topright", legend=c(expression(paste("E[",bar(x),"]=5", sep="")),
paste("mean of sample means: ", round(mean(z.100), digits=3), sep=""), "",
expression(paste("SD[",bar(x),"]=0.224")),
paste("sd of sample means: ", round(sd(z.100), digits=3), sep="")), bty="n")
# clean up simulation objects
rm(z.10, z.30, z.100, y.max, x.min, x.max)
#dev.off()
rm(list=ls()) # clear workspace -- NOTE(review): rm(list=ls()) in a script is an anti-pattern; acceptable only for interactive lecture use
###################################################
#     upload and process transaction data         #
###################################################
# upload data (expects transaction_data.csv in the working directory)
x <- read.csv("transaction_data.csv")
dim(x)
head(x)
tail(x)
colnames(x)
################################
# 4. exploratory data analysis #
################################
#pdf("sumplot_rent.pdf", height=4, width=12)
par(mfrow=c(1,3), mar=c(5, 5, 4, 2))
# histogram of data
hist(x$transaction_amount, las=TRUE, xlab="transaction amount", ylab="frequency",
main="Histogram of \nTransaction Amount", cex.axis=1.2, cex.lab=1.3, col = "royalblue")
abline(v=200, col="red", lty=2, lwd=2) # add vertical line at $200 (hypothesized mean)
# boxplot
boxplot(x$transaction_amount, main="Box Plot of \nTransaction Amount", pch=19, las=TRUE, ylab="transaction amount")
abline(h=200, col="red", lty=2, lwd=2) # add horizontal line at $200 (hypothesized mean)
# normal quantile plots
qqnorm(x$transaction_amount, pch=19, las=TRUE, main="Normal Q-Q Plots of \nTransaction Amount", ylab="",
cex.lab=1.3, cex.axis=1.2)
qqline(x$transaction_amount)
dev.off()
# summary statistics:
# amounts are in dollars, so we round to the nearest cent
nrow(x) # sample size
round(mean(x$transaction_amount), digits=2)
round(sd(x$transaction_amount), digits=2)
min(x$transaction_amount)
median(x$transaction_amount)
max(x$transaction_amount)
# percent of transactions with amount higher than 200
100*round(sum(x$transaction_amount > 200)/length(x$transaction_amount), digits=3)
###########################################
#  transaction amounts, hypothesis tests  #
###########################################
####
# calculations by hand:
# test statistic: one-sample t statistic for H0: mu = 200 vs H1: mu > 200
test.stat <- (mean(x$transaction_amount)-200)/(sd(x$transaction_amount)/sqrt(nrow(x)))
# NOTE(review): the next line overwrites the computed statistic with a
# hard-coded rounded value; everything downstream (p-value, plot) uses
# 2.8857 regardless of the data. Presumably intentional for the lecture
# slides -- confirm before reuse.
test.stat <- 2.8857
# p-value: P(T > test statistic)
# p - probability, yields CDF, i.e. probability of returning number smaller than an argument to this function
pt(test.stat, df=nrow(x)-1, lower.tail=FALSE)
# (graphical version of p-value)
dev.off()
#pdf("pvalue.pdf", height=4, width=7)
# NOTE(review): df=119 hard-codes the sample size (n=120); nrow(x)-1 would track the data
curve(dt(x, df=119), from=-3, to=5, las=TRUE, xlab="t", ylab="density",
main=paste("Density of the t(", nrow(x)-1, ") Distribution", sep=""),
col="firebrick", lwd=2, xaxt="n")
axis(side=1, at=test.stat, labels=round(test.stat, digits=3))
w <- seq(test.stat, 5 ,length=200)
# d - density, yields density function value in a given point
y <- dt(w, df=nrow(x)-1)
# shade in the p-value area (region to the right of the test statistic)
polygon(c(test.stat, w, 5) , c(0, y, 0), col="blue", border=NA, density=40)
arrows(2.8857, 0.1, 2.8857, 0.02)
text(2.9, 0.13, labels=paste("p-value=P(T > ",round(test.stat, digits=3), ")", sep=""))
rm(w, y)
#dev.off()
# with t.test(): built-in equivalent of the hand calculation above
t.test(x=x$transaction_amount, alternative="greater", mu=200, conf.level = 0.99)
###########################################
#  one-, two-sided confidence intervals   #
###########################################
# q - quantile, inverse CDF, i.e. what value is at given quantile
qt(p=0.01, df=119, lower.tail=FALSE)
qt(p=0.005, df=119, lower.tail=FALSE)
# confirming the one-sided confidence interval in the t.test() output above
# lower bound:
mean(x$transaction_amount) - (qt(p=0.01, df=119, lower.tail=FALSE)*sd(x$transaction_amount)/sqrt(nrow(x)))
# upper bound: infinity
# confidence interval: ($201.20, infinity)
# this matches the confidence interval computed from t.test() in previous section
|
39c793fa206f246264b25e6445884c956ef81971
|
264424e51a7c4684cea89a266946c60ec419413b
|
/R/set_options.R
|
0513463ec06b93a8415ac8f75cf1ab2d71b77465
|
[] |
no_license
|
cran/mlergm
|
fe5e740abf775ee12d43c5a47084e4e94a3a75ac
|
467a7a70ddbf5587eb6712f03e3f5359d43c8deb
|
refs/heads/master
| 2021-08-31T20:08:22.327934
| 2021-08-23T15:00:02
| 2021-08-23T15:00:02
| 160,185,882
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,602
|
r
|
set_options.R
|
#' Set and adjust options and settings.
#'
#' Function allows for specification of options and settings for simulation and estimation procedures.
#'
#' The main simulation settings are \code{burnin}, \code{interval}, and \code{sample_size}. For estimation of the loglikelihood value, options include \code{bridge_num} which controls the number of bridges to be used for approximating the loglikelihood (see, e.g., Hunter and Handcock (2006) for a discussion). The main estimation settings and options include \code{NR_tol}, \code{NR_max_iter}, \code{MCMLE_max_iter}, \code{adaptive_step_len}, and \code{step_len}. Parameters \code{NR_tol} and \code{NR_max_iter} control the convergence tolerance and maximum number of iterations for the Newton-Raphson, or Fisher scoring, optimization. When the L2 norm of the incremenet in the Newton-Raphson procedure is under the specified tolerance \code{NR_tol} convergence is reached; and, no more than \code{NR_max_iter} iterations are performed. The MCMLE procedure uses the stepping algorithn of Hummel, et al., (2012) to give stabiity to the estimation procedure. Each MCMLE iteration draws samples from an MCMC chain, and \code{MCMLE_max_iter} controls how many iterations are performed before termination. Most functions support parallel computing for efficiency; by default \code{do_parallel} is \code{TRUE}. The number of computing cores can be adjusted by \code{number_cores}, and the default is one less than the number of cores available.
#'
#'
#' @param burnin The burnin length for MCMC chains.
#' @param interval The sampling interval for MCMC chains.
#' @param sample_size The number of points to sample from MCMC chains for the MCMLE procedure.
#' @param NR_tol The convergence tolerance for the Newton-Raphson optimization (implemented as Fisher scoring).
#' @param NR_max_iter The maximum number of Newton-Raphson updates to perform.
#' @param MCMLE_max_iter The maximum number of MCMLE steps to perform.
#' @param do_parallel (logical) Whether or not to use parallel processesing (defaults to TRUE).
#' @param number_cores The number of parallel cores to use for parallel computations.
#' @param adaptive_step_len (logical) If \code{TRUE}, an adaptive steplength procedure is used
#' for the Newton-Raphson procedure. Arguments \code{step_len} and \code{step_len_multiplier}
#' are ignored when \code{adaptive_step_len} is \code{TRUE}.
#' @param step_len_multiplier The step_len adjustment multplier when convergence fails.
#' @param step_len The step length adjustment default to be used for the Newton-Raphson updates.
#' @param bridge_num The number of bridges to use for likelihood computations.
#' @param bridge_burnin The burnin length for the bridge MCMC chain for approximate likelihood computation.
#' @param bridge_interval The sampling interval for the brdige MCMC chain for approximate likelihood computation.
#' @param bridge_sample_size The number of points to sample from the bridge MCMC chain for approximate likelihood computation.
#'
#' @references
#' Hunter, D. R., and Handcock, M. S. (2006).
#' Inference in curved exponential family models for networks.
#' Journal of Computational and Graphical Statistics, 15(3), 565-583.
#'
#' Hummel, R. M., Hunter, D. R., and Handcock, M. S. (2012).
#' Improving simulation-based algorithms for fitting ERGMs.
#' Journal of Computational and Graphical Statistics, 21(4), 920-939.
#'
#' @export
#' @importFrom parallel detectCores
set_options <- function(burnin = 1e+4,
                        interval = 1000,
                        sample_size = 1000,
                        NR_tol = 1e-4,
                        NR_max_iter = 50,
                        MCMLE_max_iter = 10,
                        do_parallel = TRUE,
                        number_cores = detectCores(all.tests = FALSE, logical = TRUE) - 1,
                        adaptive_step_len = TRUE,
                        step_len_multiplier = 0.5,
                        step_len = 1,
                        bridge_num = 10,
                        bridge_burnin = 1e+4,
                        bridge_interval = 500,
                        bridge_sample_size = 5000) {

  # Settings controlling the MCMC simulation and bridge sampling.
  # Note: the user-facing `sample_size` argument is stored internally
  # under the name `num_obs`; `stats` and `cond_stats` are placeholder
  # slots filled in later by the fitting routines.
  simulation_settings <- list(
    burnin             = burnin,
    interval           = interval,
    num_obs            = sample_size,
    stats              = NULL,
    cond_stats         = NULL,
    bridge_burnin      = bridge_burnin,
    bridge_interval    = bridge_interval,
    bridge_sample_size = bridge_sample_size)

  # Settings and bookkeeping slots consumed by the estimation routines.
  # NULL entries and the iteration counters / status flags are initialized
  # here and updated in place as the MCMLE / Newton-Raphson loops progress.
  estimation_settings <- list(
    eta                    = NULL,
    eta_0                  = NULL,
    eta_grad               = NULL,
    eta_fun                = NULL,
    score_val              = NULL,
    NR_tol                 = NR_tol,
    NR_iter                = 1,
    NR_max_iter            = NR_max_iter,
    NR_status              = FALSE,
    step_err               = 0,
    MCMLE_iter             = 1,
    MCMLE_max_iter         = MCMLE_max_iter,
    MCMLE_status           = FALSE,
    info_mat               = NULL,
    bridge_num             = bridge_num,
    adaptive_step_len      = adaptive_step_len,
    NR_step_len            = step_len,
    NR_step_len_multiplier = step_len_multiplier,
    NR_conv_thresh         = NULL,
    MCMLE_conv_thresh      = NULL,
    par_flag               = do_parallel,
    par_n_cores            = number_cores,
    ML_status_fail         = FALSE)

  # Bundle both groups into the two-element structure callers expect.
  list(sim_param = simulation_settings,
       est_param = estimation_settings)
}
|
f56cbdd898fc00e5ff716fd67420c43b3767a289
|
4450235f92ae60899df1749dc2fed83101582318
|
/ThesisRpackage/tests/testthat/Sampler/test_Sampler_TrueSampler.R
|
ff5ff584f3878d390861baf3aafab9c91ce336b3
|
[
"MIT"
] |
permissive
|
cayek/Thesis
|
c2f5048e793d33cc40c8576257d2c9016bc84c96
|
14d7c3fd03aac0ee940e883e37114420aa614b41
|
refs/heads/master
| 2021-03-27T20:35:08.500966
| 2017-11-18T10:50:58
| 2017-11-18T10:50:58
| 84,567,700
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,604
|
r
|
test_Sampler_TrueSampler.R
|
# Unit tests for the TrueSampler / sampl() pair.
library(testthat)
context("TrueDataSet")

test_that("TrueDataSet ind.clumping", {
  # Building a sampler with an explicit clumping index vector and drawing
  # from it should run without error.
  sampler <- TrueSampler(G.file = matrix(1, 3, 3),
                         X.file = matrix(2, 3, 1),
                         outlier.file = NULL,
                         ind.clumping = c(2, 3),
                         n = NULL,
                         L = NULL)
  drawn <- sampl(sampler)
})

test_that("TrueDataSet reference", {
  # With reference = TRUE the sampler must return a TrueDataSet RC object.
  sampler <- TrueSampler(G.file = matrix(1, 3, 3),
                         X.file = matrix(2, 3, 1),
                         outlier.file = NULL,
                         n = NULL,
                         L = NULL,
                         reference = TRUE)
  drawn <- sampl(sampler)
  expect_equal(class(drawn)[1], "TrueDataSet")
  expect_equal(typeof(drawn), "S4") ## this is a RC object !
})

test_that("TrueDataSet Case3", {
  skip("Work only au labo")
  # Subsampled draw from the on-disk Case3 dataset: dimensions must match
  # the requested n individuals and L loci.
  n <- 100
  L <- 3000
  sampler <- TrueSampler(G.file = "./Data/SSMPG2015/Case3/Case3.lfmm",
                         X.file = "../Data/SSMPG2015/Case3/Case3.env",
                         outlier.file = "./Data/SSMPG2015/Case3/Case3.outlier",
                         n = n,
                         L = L)
  drawn <- sampl(sampler)
  expect_equal(dim(drawn$G), c(n, L))
  expect_equal(dim(drawn$X), c(n, 1))
})

test_that("TrueDataSet Case2", {
  skip("Work only au labo")
  # Full draw from the on-disk Case2 dataset: known fixed dimensions and
  # a known number of outlier loci.
  sampler <- TrueSampler(G.file = "../../Data2016_2017/SSMPG2015/Case2/Case2.lfmm",
                         X.file = "../../Data2016_2017/SSMPG2015/Case2/Case2.env",
                         outlier.file = "../../Data2016_2017/SSMPG2015/Case2/Case2.outlier")
  drawn <- sampl(sampler)
  expect_equal(dim(drawn$G), c(517, 4542))
  expect_equal(dim(drawn$X), c(517, 1))
  expect_equal(length(drawn$outlier), 12)
})
|
2d78effe157b71a021f986b143ef07a037073362
|
b3a5c21adf890f0b66790f23332f0082e7f1b40a
|
/man/cli_par.Rd
|
632d7df96c6dc9fdda70cc43fed383df01ea293a
|
[
"MIT",
"Apache-2.0",
"LicenseRef-scancode-public-domain"
] |
permissive
|
r-lib/cli
|
96886f849fe69f8435f2d22fccf5d00dee7a5ce4
|
c36066ca6a208edbeb37ab13467a4dc6f5b5bbe2
|
refs/heads/main
| 2023-08-29T14:19:41.629395
| 2023-08-18T13:18:33
| 2023-08-18T13:18:33
| 89,723,016
| 560
| 69
|
NOASSERTION
| 2023-09-13T11:46:10
| 2017-04-28T16:10:28
|
R
|
UTF-8
|
R
| false
| true
| 2,311
|
rd
|
cli_par.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cli.R
\name{cli_par}
\alias{cli_par}
\title{CLI paragraph}
\usage{
cli_par(id = NULL, class = NULL, .auto_close = TRUE, .envir = parent.frame())
}
\arguments{
\item{id}{Element id, a string. If \code{NULL}, then a new id is generated
and returned.}
\item{class}{Class name, string. Can be used in themes.}
\item{.auto_close}{Whether to close the container, when the calling
function finishes (or \code{.envir} is removed, if specified).}
\item{.envir}{Environment to evaluate the glue expressions in. It is
also used to auto-close the container if \code{.auto_close} is \code{TRUE}.}
}
\value{
The id of the new container element, invisibly.
}
\description{
The builtin theme leaves an empty line between paragraphs.
See also \link{containers}.
}
\details{
\if{html}{\out{<div class="sourceCode r">}}\preformatted{clifun <- function() \{
cli_par()
cli_text(cli:::lorem_ipsum())
\}
clifun()
clifun()
}\if{html}{\out{</div>}}\if{html}{\out{
<div class="asciicast" style="color: #172431;font-family: 'Fira Code',Monaco,Consolas,Menlo,'Bitstream Vera Sans Mono','Powerline Symbols',monospace;line-height: 1.300000"><pre>
#> Sunt anim ullamco Lorem qui mollit anim est in deserunt adipisicing.
#> Enim deserunt laborum ad qui qui. Anim esse non anim magna Lorem
#> consequat dolore labore cupidatat magna et. Esse nulla eiusmod Lorem
#> exercitation cupidatat velit enim exercitation excepteur non officia
#> incididunt. Id laborum dolore commodo Lorem esse ea sint proident.
#>
#> Fugiat mollit in Lorem velit qui exercitation ipsum consectetur ad
#> nisi ut eu do ullamco. Mollit officia reprehenderit culpa Lorem est
#> reprehenderit excepteur enim magna incididunt ea. Irure nisi ad
#> exercitation deserunt enim anim excepteur quis minim laboris veniam
#> nulla pariatur. Enim irure aute nulla irure qui non. Minim velit
#> proident sunt sint. Proident sit occaecat ex aute.
#>
</pre></div>
}}
}
|
44c1fbfe628f0b6851ade0205c968313a2de3efc
|
e9c88f70d6a4cb2027eef741c5b86b13ff07142b
|
/run_models/data_SIM_model13A1_2020-03-11-16-22-51.R
|
ec9a718ac3bad1a29a48ad2247a7378a6125f8ab
|
[] |
no_license
|
zhouyifan233/covid_adjusted_cfr
|
947574828dcbee49cc49081c9d2aa4ca257e79c1
|
93e91366e348c8663e7e1106012f5e6779bb516f
|
refs/heads/master
| 2021-03-15T04:01:43.304869
| 2020-03-19T12:17:29
| 2020-03-19T12:17:29
| 246,822,443
| 0
| 0
| null | 2020-03-12T11:57:18
| 2020-03-12T11:57:17
| null |
UTF-8
|
R
| false
| false
| 4,145
|
r
|
data_SIM_model13A1_2020-03-11-16-22-51.R
|
age_dist <-
c(0.118574863476131, 0.115752003134772, 0.12863483070627, 0.158984676786142, 0.150148122961526, 0.154368236677233, 0.105371638917985, 0.0496721490268388, 0.0184934783131012)
agedistr_cases <-
c(416, 549, 3619, 7600, 8571, 10008, 8583, 3918, 1408)
agedistr_deaths <-
c(0, 1, 7, 18, 38, 130, 309, 312, 208)
com_dist <-
c(6.53431338642808e-05, 0.00398439059092551, 0.0342311061492189, 0.0540347228006328, 0.0768461219101128, 0.105538968643142, 0.127814406180783, 0.135180007040608, 0.132220911055837)
contact <-
c(0.810810810810811, 0.0908559523547268, 0.372736406439194, 1.27360250772, 0.200569529052988, 0.375083342749019, 0.60252680195839, 0.0934189610338407, 0.0225225225225225, 0.0904095466183592,
2.4392523364486, 0.140093983348316, 0.706545801082683, 0.942937990573664, 0.27920963239528, 0.326366336169345, 0.196893495540358, 0.106045179398683, 0.289504965045504, 0.109348487445688, 1.76086956521739,
0.923069180041088, 0.93772012267962, 0.965186137047983, 0.274120168579709, 0.116564256844925, 0.0773400190233669, 0.91820215964926, 0.511898453884688, 0.85680985412458, 2.70542635658915, 1.41323192857305,
0.993399938008648, 0.719603621821669, 0.146103509716984, 0.07633130138862, 0.13119227828341, 0.619819944222649, 0.789700390093264, 1.28218991206025, 2.17699115044248, 1.1199461877854, 0.514253349451317,
0.496466649026704, 0.101504389707241, 0.259078294801222, 0.193808465356441, 0.858341528544101, 0.951750199084178, 1.18265232149625, 2.31730769230769, 0.977037933291252, 0.606164987575222, 0.4393566902894,
0.552747314447092, 0.300880970126328, 0.323770338070664, 0.915670466885606, 0.721247101248993, 1.29765260904839, 2.76146788990826, 0.959867553314515, 0.340125585278128, 0.121809161946423, 0.25799743320884,
0.1956843612527, 0.264241585561661, 0.989672909331423, 1.14428055461657, 1.36428769674242, 1.96363636363636, 1.0266513139522, 0.0447824174066075, 0.211894911958445, 0.197988778289041, 0.210517772531686,
0.308554588199316, 1.26474943927563, 0.737190168823191, 1.56555579008225, 2.0625)
D <- 42
doprint <- 0
G <- 60
incidence_cases <-
c(20, 7, 27, 13, 40, 27, 40, 76, 67, 138, 125, 177, 230, 250, 414, 461, 616, 661, 844, 1286, 1484, 1893, 2339, 2609, 2484, 2598, 2553, 2454, 2203, 2143, 2009, 2879, 1744, 1483, 1267, 1108, 730,
622, 436, 292, 160, 83)
incidence_deaths <-
c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 2, 3, 7, 7, 14, 13, 21, 23, 23, 33, 38, 40, 40, 50, 56, 58, 64, 64, 76, 78, 85, 95, 85)
inference <- 0
K <- 9
p_beta <- 1
p_chi <- 5
p_epsilon <-
c(1, 1)
p_eta <-
c(1, 1)
p_gamma <-
c(2.09684753737558e-06, 0.000132019123956822, 0.00115731464972174, 0.00421573276068498, 0.00965306291744055, 0.0168194489763876, 0.024629708930221, 0.0320774044389765, 0.038466025643508,
0.0434332294855151, 0.0468820275499212, 0.0488912688352894, 0.0496379101864372, 0.049340402878571, 0.0482221101716813, 0.0464902536577958, 0.0443256228724282, 0.0418791438632311, 0.0392724703278992, 0.036600689023525,
0.033935932400518, 0.0313311809537464, 0.02882386109063, 0.0264390480563363, 0.0241922058447767, 0.0220914653183689, 0.0201394776460742, 0.0183348958144401, 0.016673540897594, 0.0151493073537353, 0.0137548561213622,
0.0124821376447682, 0.0113227802437869, 0.0102683730150654, 0.0093106669586161, 0.00844171333319487, 0.0076539543301437, 0.00694027794364776, 0.00629404631365052, 0.00570910473160993, 0.00517977684070072,
0.00470085025288766, 0.0042675557784493, 0.00387554266292444, 0.00352085160572619, 0.00319988685620045, 0.00290938831624464, 0.00264640429948453, 0.00240826538606826, 0.00219255965408253, 0.00199710945140043,
0.00181994978596636, 0.00165930835072867, 0.00151358715591376, 0.0013813457115976, 0.00126128568415969, 0.00115223693849587, 0.00105314487181949, 0.000963058942894227, 0.000881122301460636)
p_incubation <- 5.95
p_infectious <- 2.4
p_nu <- 0.2
p_phi <- 0.01
p_pi <-
c(1, 999)
p_psi <-
c(71, 18)
p_rho <-
c(1, 1)
p_xi <- 1
pop_t <- 59020000
S <- 42
t_data <- 1
t0 <- 0
ts <-
c(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42)
tswitch <- 20
|
3d410dcc00cb2646b1f89c27dd4462836ecfddcd
|
a779e45576dd8b6c3cb3667a5ccf6bb7a67eac89
|
/R/mergePalaeoData.R
|
3bfb43f0c4b7f706c062ffe53b66d9a74ce48fed
|
[] |
no_license
|
BlasBenito/memoria
|
f78ee1352dcf108bdf69b91a2df19918a3dd86cc
|
fee69a25eab74cf860eac292d64cc97b8c8463b0
|
refs/heads/master
| 2022-02-22T18:51:11.390613
| 2022-02-18T12:01:21
| 2022-02-18T12:01:21
| 179,102,027
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,233
|
r
|
mergePalaeoData.R
|
#' Merges palaeoecological datasets with different time resolution.
#'
#' @description It merges palaeoecological datasets with different time intervals between consecutive samples into a single dataset with samples separated by regular time intervals defined by the user
#'
#'
#' @usage mergePalaeoData(
#' datasets.list = NULL,
#' time.column = NULL,
#' interpolation.interval = NULL
#' )
#'
#' @param datasets.list list of dataframes, as in \code{datasets.list = list(climate = climate.dataframe, pollen = pollen.dataframe)}. The provided dataframes must have an age/time column with the same column name and the same units of time. Non-numeric columns in these dataframes are ignored.
#' @param time.column character string, name of the time/age column of the datasets provided in \code{datasets.list}.
#' @param interpolation.interval temporal resolution of the output data, in the same units as the age/time columns of the input data
#'
#' @details This function fits a \code{\link{loess}} model of the form \code{y ~ x}, where \code{y} is any column given by \code{columns.to.interpolate} and \code{x} is the column given by the \code{time.column} argument. The model is used to interpolate column \code{y} on a regular time series of intervals equal to \code{interpolation.interval}. All columns in every provided dataset go through this process to generate the final data with samples separated by regular time intervals. Non-numeric columns are ignored, and absent from the output dataframe.
#'
#' @author Blas M. Benito <blasbenito@gmail.com>
#'
#' @return A dataframe with every column of the initial dataset interpolated to a regular time grid of resolution defined by \code{interpolation.interval}. Column names follow the form datasetName.columnName, so the origin of columns can be tracked.
#'
#'
#' @examples
#'
#' \donttest{
#'#loading data
#'data(pollen)
#'data(climate)
#'
#'x <- mergePalaeoData(
#' datasets.list = list(
#' pollen=pollen,
#' climate=climate
#' ),
#' time.column = "age",
#' interpolation.interval = 0.2
#' )
#'
#' }
#'
#'@export
mergePalaeoData <- function(datasets.list = NULL,
                            time.column = NULL,
                            interpolation.interval = NULL){

  # ---- input checking -------------------------------------------------------
  # datasets.list must be a named list with at least two data frames to merge.
  if(inherits(datasets.list, "list") == FALSE){
    stop("The argument dataset.list must be a list. Try something like: datasets.list = list(climate = climate.dataframe, pollen = pollen.dataframe).")
  }
  if(length(datasets.list) < 2){
    stop("The argument dataset.list only has one object, there is nothing to merge here!")
  }

  # every element must be a data frame and must contain the time column
  for(i.list in seq_along(datasets.list)){
    if(inherits(datasets.list[[i.list]], "data.frame") == FALSE){
      stop(paste("Element ", i.list, " in datasets.list is not a dataframe.", sep=""))
    }
    if(!(time.column %in% colnames(datasets.list[[i.list]]))){
      stop(paste("Element ", i.list, " in datasets.list does not have a column named ", time.column, sep=""))
    }
  }

  # ---- reporting temporal resolution ---------------------------------------
  # Informs the user how much each dataset's resolution is being incremented
  # by the requested interpolation.interval.
  message(paste("Argument interpolation.interval is set to ", interpolation.interval, sep=""))
  for(i.list in seq_along(datasets.list)){
    temp.time <- datasets.list[[i.list]][, time.column]
    # mean spacing between consecutive samples (diff() replaces the old manual loop)
    temporal.resolution <- round(mean(diff(temp.time)), 2)
    resolution.increase.factor <- round(temporal.resolution / interpolation.interval, 2)
    message(paste("The average temporal resolution of ", names(datasets.list)[i.list], " is ", temporal.resolution, "; you are incrementing data resolution by a factor of ", resolution.increase.factor, sep=""))
    if(resolution.increase.factor > 10){
      message("The resolution increase factor is higher than 10, please consider incrementing the value of the argument interpolation.interval.")
    }
  }

  # ---- common time frame ----------------------------------------------------
  # Keep only the time span shared by every dataset: from the latest start
  # (max of the minimum times) to the earliest end (min of the maximum times).
  time.ranges <- sapply(datasets.list, FUN = function(x) range(x[, time.column]))
  min.time <- round(max(time.ranges[1, ]), 1)
  max.time <- round(min(time.ranges[2, ]), 1)
  datasets.list <- lapply(datasets.list, function(x) x[x[, time.column] >= min.time & x[, time.column] <= max.time, ])

  # regular time grid every dataset is interpolated onto
  reference.time <- seq(min.time, max.time, by = interpolation.interval)

  # ---- interpolation --------------------------------------------------------
  for (dataset.to.interpolate in names(datasets.list)){

    temp <- datasets.list[[dataset.to.interpolate]]

    # columns to interpolate: everything except the time column
    colnames.temp <- setdiff(colnames(temp), time.column)

    # container for the interpolated columns of this dataset
    temp.interpolated <- data.frame(time = reference.time)

    for (column.to.interpolate in colnames.temp){

      # non-numeric columns are dropped from the output
      if (!is.numeric(temp[, column.to.interpolate])){
        next
      }

      # loess model of the form column ~ time
      interpolation.formula <- as.formula(paste(column.to.interpolate, "~", time.column, sep=" "))

      # decrease span until the fit is very close to the data (correlation of
      # fitted vs. observed >= 0.99; a target of exactly 1 may throw errors).
      # isTRUE() guards against cor() returning NA (e.g. constant columns),
      # which would otherwise crash the if() condition.
      span.values <- seq(50/nrow(temp), 5/nrow(temp), by = -0.0005)
      for(span in span.values){
        interpolation.function <- loess(interpolation.formula, data = temp, span = span, control = loess.control(surface = "direct"))
        if(isTRUE(cor(interpolation.function$fitted, temp[, column.to.interpolate]) >= 0.99)){break}
      }

      interpolation.result <- predict(interpolation.function, newdata = reference.time, se = FALSE)

      # clamp interpolated values to the observed range of the source column
      interpolation.range <- range(temp[, column.to.interpolate])
      interpolation.result[interpolation.result < interpolation.range[1]] <- interpolation.range[1]
      interpolation.result[interpolation.result > interpolation.range[2]] <- interpolation.range[2]

      temp.interpolated[, column.to.interpolate] <- interpolation.result

    } # end of iteration through columns

    # drop the time column if present ([[<- with NULL is a safe no-op when absent)
    temp.interpolated[[time.column]] <- NULL

    datasets.list[[dataset.to.interpolate]] <- temp.interpolated

  } # end of iteration through datasets

  # ---- assembly -------------------------------------------------------------
  # all datasets were interpolated onto the same grid, so row counts must agree
  nrow.datasets <- vapply(datasets.list, FUN = nrow, FUN.VALUE = integer(1))
  if(length(unique(nrow.datasets)) != 1){
    stop("Resulting datasets don't have the same number of rows, there's something wrong with something.")
  }

  # remove the shared "time" column from every dataframe before binding
  datasets.list <- lapply(datasets.list, function(x) { x[["time"]] <- NULL; x })

  # cbind on a named list prefixes column names as datasetName.columnName
  output.dataframe <- do.call("cbind", datasets.list)

  # prepend the reference time grid as the "age" column
  output.dataframe <- data.frame(age = reference.time, output.dataframe)

  return(output.dataframe)
}
|
6aba8a13273729994e07c4ea8983b8ae6816fb2f
|
f242ea29cfd9d5cdf3fafdc3e5f72b180c35c5a5
|
/data/scripts/feature-engineering-data-classical-methods.R
|
9175514ed46ca71c3cf257777a0e5675518ad75f
|
[] |
no_license
|
jtbai/act-7009-ml
|
df445e47b24a3cfec5cf5ccd7d3db2137bceff79
|
1516d16b4ef068feca8ff6cb3b571111b39d16c8
|
refs/heads/master
| 2020-03-14T05:07:21.941912
| 2018-04-21T01:38:28
| 2018-04-21T01:38:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,447
|
r
|
feature-engineering-data-classical-methods.R
|
# -------------------------------------------------------------------------
# Title: preprocess-data
# Goal: This script is aimed to functionalize the features engineering of the classic models
# Date: April 2018
# Author: Stéphane Caron
# -------------------------------------------------------------------------
# Features engineering ----------------------------------------------------
create_features_of_classical_modeling <- function(dt, split_variables){

  # Number of sets actually played: 5 minus the number of missing per-set
  # scores. (This expression was previously duplicated six times across the
  # two branches below.)
  count_sets_played <- function(d){
    5 - (is.na(d$winner_score_1) + is.na(d$winner_score_2) + is.na(d$winner_score_3) +
           is.na(d$winner_score_4) + is.na(d$winner_score_5))
  }

  # Work on a copy so the caller's data.table is left untouched
  # (data.table := modifies by reference).
  data_featured <- copy(dt)

  # Features common to both parameterizations ------------------------------
  # Number of tie breaks: a set reaching 7 games shows a "7" in the score string.
  data_featured[, nb_tie_break := str_count(score, "7")]
  sets_played <- count_sets_played(data_featured)
  # Indicator: match went to the maximum possible number of sets.
  data_featured[, ind_max_sets := sets_played == best_of]
  # Indicator: match went to the minimum possible number of sets
  # (2 sets in best-of-3, 3 sets in best-of-5).
  data_featured[, ind_min_sets := FALSE]
  data_featured[best_of == 3 & sets_played == 2, ind_min_sets := TRUE]
  data_featured[best_of == 5 & sets_played == 3, ind_min_sets := TRUE]

  if (split_variables){
    # Per-player (winner/loser) serve ratios --------------------------------
    # % of aces over points served
    data_featured[, winner_ace_svpt := w_ace/w_svpt]
    data_featured[, loser_ace_svpt := l_ace/l_svpt]
    # % of first-serve points won over first serves in
    data_featured[, winner_1stwon_1stin := w_1stWon/w_1stIn]
    data_featured[, loser_1stwon_1stin := l_1stWon/l_1stIn]
    # % of first serves in over points served
    data_featured[, winner_1stin_svpt := w_1stIn/w_svpt]
    data_featured[, loser_1stin_svpt := l_1stIn/l_svpt]
    # % of double faults over points served
    data_featured[, winner_df_svpt := w_df/w_svpt]
    data_featured[, loser_df_svpt := l_df/l_svpt]
    # minutes played per point served
    data_featured[, winner_min_svpt := minutes/w_svpt]
    data_featured[, loser_min_svpt := minutes/l_svpt]
    # % of first-serve points won over all serve points won
    data_featured[, winner_1stwon_servewon := w_1stWon/(w_1stWon + w_2ndWon)]
    data_featured[, loser_1stwon_servewon := l_1stWon/(l_1stWon + l_2ndWon)]
    # % of serve points won over points served
    data_featured[, winner_serve_won := (w_1stWon + w_2ndWon)/w_svpt]
    data_featured[, loser_serve_won := (l_1stWon + l_2ndWon)/l_svpt]
    # breaks converted = break points faced by the opponent minus those saved
    data_featured[, winner_break_pts := (l_bpFaced - l_bpSaved)]
    data_featured[, loser_break_pts := (w_bpFaced - w_bpSaved)]

    # Missing data imputation ------------------------------------------------
    # Ratios with a zero denominator are NaN; replace them by 0.
    data_featured[w_1stIn == 0, winner_1stwon_1stin := 0]
    data_featured[l_1stIn == 0, loser_1stwon_1stin := 0]
    data_featured[(w_1stWon + w_2ndWon) == 0, winner_1stwon_servewon := 0]
    data_featured[(l_1stWon + l_2ndWon) == 0, loser_1stwon_servewon := 0]
    data_featured[w_svpt == 0, (c("winner_ace_svpt", "winner_1stin_svpt", "winner_df_svpt", "winner_min_svpt", "winner_serve_won")) := 0]
    data_featured[l_svpt == 0, (c("loser_ace_svpt", "loser_1stin_svpt", "loser_df_svpt", "loser_min_svpt", "loser_serve_won")) := 0]
  } else {
    # Pooled (winner + loser combined) serve ratios --------------------------
    # % of aces over points served
    data_featured[, ace_by_svpt := (w_ace + l_ace)/(w_svpt + l_svpt)]
    # % of first-serve points won over first serves in
    data_featured[, first_won_by_first_in := (w_1stWon + l_1stWon)/(w_1stIn + l_1stIn)]
    # % of first serves in over points served
    data_featured[, first_in_by_svpt := (w_1stIn + l_1stIn)/(w_svpt + l_svpt)]
    # % of double faults over points served
    data_featured[, df_by_svpt := (w_df + l_df)/(w_svpt + l_svpt)]
    # minutes played per point served
    data_featured[, min_by_svpt := minutes/(w_svpt + l_svpt)]
    # % of first-serve points won over all serve points won
    data_featured[, first_won_by_serve_won := (w_1stWon + l_1stWon)/(w_1stWon + w_2ndWon + l_1stWon + l_2ndWon)]
    # % of serve points won over points served
    data_featured[, serve_won_by_serve_pts := (w_1stWon + w_2ndWon + l_1stWon + l_2ndWon)/(w_svpt + l_svpt)]
    # total breaks converted by both players
    data_featured[, nb_break_pts := (w_bpFaced - w_bpSaved) + (l_bpFaced - l_bpSaved)]

    # Missing data imputation ------------------------------------------------
    # Ratios with a zero denominator are NaN; replace them by 0.
    data_featured[(w_1stIn + l_1stIn) == 0, first_won_by_first_in := 0]
    data_featured[(w_1stWon + w_2ndWon + l_1stWon + l_2ndWon) == 0, first_won_by_serve_won := 0]
    data_featured[(w_svpt + l_svpt) == 0, (c("ace_by_svpt", "first_in_by_svpt", "df_by_svpt", "min_by_svpt", "serve_won_by_serve_pts")) := 0]
  }

  # Per-set score columns are no longer needed once the indicators are built.
  variables_scores <- c(paste0("winner_score_", seq(1, 5)), paste0("loser_score_", seq(1, 5)))
  data_featured[, (variables_scores) := NULL]

  return(data_featured)
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.