blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
ae92476477cc9f4bf28339e20e7c69268ccd3422
|
8f56589e11ae57523a7d8011386738a53999a4e1
|
/man/Grey-palette.Rd
|
e5bda785bbd309a0e988e7087d0f3b146e831ef1
|
[] |
no_license
|
grattaninstitute/grattan
|
b7b5c738401850fc67f03f4003f3715f11b27406
|
56fac9813308cf695d850be84adddad61b50d774
|
refs/heads/master
| 2020-05-20T22:23:58.720935
| 2015-06-27T02:12:23
| 2015-06-27T02:12:23
| 38,221,337
| 0
| 0
| null | 2015-06-29T01:28:33
| 2015-06-29T01:28:33
| null |
UTF-8
|
R
| false
| false
| 197
|
rd
|
Grey-palette.Rd
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/grey.pal.R
\name{Grey palette}
\alias{Grey palette}
\title{The grey palette}
\description{
The grey palette
}
|
40bb96f1e056348b4abe8040974bc83cead612aa
|
3c1e397114cc63d7a75ee2e8ea68fd23b65e5577
|
/Projeto DataScience IDD/DataScienceIDD/microdados_idd_2015/microdados_idd_2015/3.Input/input_idd.R
|
8c4538248ae7781a6bdb78b66e47099f37f04897
|
[] |
no_license
|
edsonbrendon/data-science-idd
|
a70ec205ffc41db2d095bc551814dd5f64bf55ef
|
cafc25d3448e5945caa8bfc2491e38795486806e
|
refs/heads/master
| 2022-03-22T12:45:24.343837
| 2019-12-09T13:43:06
| 2019-12-09T13:43:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,343
|
r
|
input_idd.R
|
# ------------------------------------------------------------------------------
# MEC/INEP/DAES (Directorate for Higher Education Evaluation)
# General Coordination for Quality Control of Higher Education
#
# Program: input_enade.R ("INPUTS" folder)
# Purpose: read the IDD 2015 microdata.
#
# NOTE: before running this script, the file "microdados_idd_2015.csv"
# ("DADOS" folder) must be saved in the computer's "C:\" directory.
# ------------------------------------------------------------------------------
# Read the semicolon-separated microdata. header = TRUE (spelled out: T is a
# reassignable variable, TRUE is the reserved word) takes the column names
# from the file's first row.
microdados_enade <- read.table("C:/microdados_idd_2015.csv", header = TRUE, sep = ";")
|
bc56d161459169057e127f97e00febe557caefbd
|
f2f2bcd6ac1d7c385ed477daf4fc09476dfbc616
|
/us_citizen.R
|
9e491ed10f76d43eeb9b04da407a967f1a8e727f
|
[] |
no_license
|
AnuC2/academic_projects
|
459c29fb56dd66e508c65a0b8ed0c00342c17d03
|
38455d80b0d6c17020d8b643ae64416c61c602f5
|
refs/heads/master
| 2020-05-03T07:41:53.650496
| 2019-06-11T04:17:58
| 2019-06-11T04:17:58
| 178,505,404
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,907
|
r
|
us_citizen.R
|
# Core tidyverse and helpers
library(tidyverse)
library(lubridate)
library(glue)
library(forcats)
library(magrittr)
library(scales)
# Visualization (duplicate library(scales) removed)
library(ggplot2)
library(cowplot)
library(vcd)
library(grid)

# Read the raw data and keep only the analysis variables.
df <- read.csv("usa.csv")
df1 <- df[, c("student_id", "age", "full_part", "classification",
              "appl_response", "gpa", "educ", "exper", "Sex")]
head(df1)
summary(df1)

######## cleaning gpa variable ######
sum(is.na(df1$gpa))
# Check distribution of one field.
# NOTE(review): after gather(), `df1$gpa` in the y aesthetic still refers to
# the data frame in the enclosing environment, so this plots gpa against
# itself -- confirm the intended y variable.
df1["gpa"] %>%
  gather() %>%
  ggplot() +
  geom_point(mapping = aes(x = value, y = df1$gpa)) +
  facet_wrap(~ key, scales = "free")
# Replace NA with the mean value.
df1$gpa[is.na(df1$gpa)] <- mean(df$gpa, na.rm = TRUE)
# Outliers of the gpa variable: the plot above showed two outliers, so clamp
# (winsorize) values to the rounded 5th/95th percentiles with scales::squish().
df1$gpa <- squish(df1$gpa, round(quantile(df1$gpa, c(0.05, 0.95))))
summary(df$gpa)

#### clean level-of-education variable ####
summary(df$education)
# Check distribution of one field.
df["education"] %>%
  gather() %>%
  ggplot() +
  geom_point(mapping = aes(x = value, y = df$education)) +
  facet_wrap(~ key, scales = "free")
# Number of empty cells.
sum(is.na(df1))

# Convert all categorical variables to factors.
df1 <- transform(df1, full_part.f = as.factor(full_part))
df1 <- transform(df1, classification.f = as.factor(classification))
df1 <- transform(df1, appl_response.f = as.factor(appl_response))
df1 <- transform(df1, educ.f = as.factor(educ))
df1 <- transform(df1, sex.f = as.factor(Sex))
# Check the structure.
str(df1)

# Linear probability model.
lpm <- lm(classification ~ age + gpa + educ.f + sex.f + exper, data = df1)
summary(lpm)
# Logit.
logit <- glm(classification ~ age + gpa + educ.f + sex.f + exper,
             family = binomial(link = "logit"), data = df1)
summary(logit)
# Probit.
probit <- glm(classification ~ age + gpa + educ.f + sex.f + exper,
              family = binomial(link = "probit"), data = df1)
summary(probit)

# Logit with only one predictor (same gpa cleaning as above).
df2 <- df[, c("classification", "gpa")]
df2$gpa[is.na(df2$gpa)] <- mean(df$gpa, na.rm = TRUE)
df2$gpa <- squish(df2$gpa, round(quantile(df2$gpa, c(0.05, 0.95))))
logit1 <- glm(classification ~ gpa, family = binomial(link = "logit"), data = df2)
summary(logit1)

# Plotting all regressions: logit curve with the gpa variable.
# quartz() opens a graphics device on macOS only; use windows()/x11() elsewhere.
quartz(title = "admission vs. gpa")
plot(df2$gpa, df2$classification, xlab = "gpa", ylab = "Probability")
# BUG FIX: col = "blue" was previously passed to predict() instead of curve(),
# so the curve colour argument was silently ignored by predict().
curve(predict(logit1, data.frame(gpa = x), type = "resp"),
      col = "blue", add = TRUE)

# 2-way contingency tables.
xtabs(~education + gpa, data = df)
# NOTE(review): this overwrites the cleaned data frame df1 with a two-column
# matrix; downstream code uses df$ columns only, but the name reuse is fragile.
df1 <- cbind(df$gpa, df$education)
cotab_mosaic(x = df1, condvars = NULL)
# (interactive help lookups ??squish / ??mosaic / ??cotab_mosaic removed from
# the script; run them at the console if needed)

# Decision variable classification.
summary(df$classification)
# Removing rows with classification = 2
# NOTE(review): no code below actually removes those rows -- TODO confirm.
# Sample regression:
ols <- lm(df$classification ~ df$age + df$gpa + df$education)
summary(ols)
logit1 <- glm(df$classification ~ df$age + df$gpa + df$education, family = "binomial")
|
48cad6fdebbb20b0e7d30efc4bc52cdbbe6ebc5e
|
408212ab41a932e3d8f9cd6b0db64678040be63d
|
/vignettes/precompile.R
|
e8ab88e8b69f5a05133d018c3d1d00f5e11857ee
|
[
"MIT"
] |
permissive
|
ropensci/phruta
|
802ed58510e8cffee3316e9a001c47d70ddafff3
|
0000972691748baa5d178c491b660aec18b9ba39
|
refs/heads/main
| 2023-05-22T13:41:46.126313
| 2023-05-02T18:41:44
| 2023-05-02T18:41:44
| 391,212,019
| 3
| 0
|
NOASSERTION
| 2023-01-11T16:06:57
| 2021-07-30T23:51:38
|
R
|
UTF-8
|
R
| false
| false
| 892
|
r
|
precompile.R
|
# Precompile vignettes: knit each .Rmd.orig source into the .Rmd shipped with
# the package, so expensive chunks run here rather than at package build time.
# (A duplicate knit of Future_phruta.Rmd.orig was removed.)
knitr::knit("vignettes/Exporting_data_phruta.Rmd.orig", "vignettes/Exporting_data_phruta.Rmd")
knitr::knit("vignettes/Future_phruta.Rmd.orig", "vignettes/Future_phruta.Rmd")
knitr::knit("vignettes/Phruta_advanced.Rmd.orig", "vignettes/Phruta_advanced.Rmd")
knitr::knit("vignettes/phruta_targetgenes.Rmd.orig", "vignettes/phruta_targetgenes.Rmd")
knitr::knit("vignettes/phruta.Rmd.orig", "vignettes/phruta.Rmd")
knitr::knit("vignettes/Phylogenetics_phruta.Rmd.orig", "vignettes/Phylogenetics_phruta.Rmd")
knitr::knit("vignettes/usando_phruta.Rmd.orig", "vignettes/usando_phruta.Rmd")
knitr::knit("vignettes/Using_phruta.Rmd.orig", "vignettes/Using_phruta.Rmd")
# Move figures into vignettes/ folder
#figs <- list.files(pattern = ".jp")
#fs::file_move(figs, fs::path("vignettes/", figs))
|
ad90b574eb2ca0c80184fd106e42503e22e0b02e
|
c8b609bf58dab1a383bbea8b43a7bc2708adcb38
|
/R/contactsimulator-package.R
|
394de7e84c5280b66864f54e935a79b79be8ac9e
|
[] |
no_license
|
holaanna/contactsimulator
|
ce788627c12323c4ab6b3aa902da26bf3e2e4cf5
|
8bcd3f01e0bbe5fb7328d9f6beb27eb907779bdd
|
refs/heads/master
| 2022-03-17T03:25:18.841897
| 2019-11-26T18:33:29
| 2019-11-26T18:33:29
| 111,702,061
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,343
|
r
|
contactsimulator-package.R
|
##' Inference for spatio-temporal contact type models
##'
##' The \pkg{contactsimulator} package provides facilities for inference on spatio-temporal
##' data using contact type distribution models.
##' These models are also known as state-space models, hidden Markov models, or
##' nonlinear stochastic dynamical systems. One can use \pkg{contactsimulator} to fit
##' nonlinear, non-Gaussian dynamic models to time-series data. The package is
##' both a set of tools for data analysis and a platform upon which statistical
##' inference methods for contact models can be implemented.
##'
##' @name contactsimulator-package
##' @aliases contactsimulator,package contactsimulator-package
##' @docType package
##' @author Hola Adrakey
##' @family information on model implementation
##' @family contactsimulator parameter estimation methods
##' @family elementary contact methods
##'
##' @section Data analysis using \pkg{contactsimulator}:
##' \pkg{contactsimulator} provides algorithms for
##' \enumerate{
##' \item simulation of stochastic
##' dynamical systems; see \code{\link[=simulate-contactsimulator]{simulate}}
##' }
##' The package
##' also provides various tools for plotting and extracting information on
##' models and data.
#' @importFrom Rdpack reprompt
#'
#' @useDynLib contactsimulator
#' @importFrom Rcpp sourceCpp
NULL
|
3af02537cd935851607f38038acb38612711b105
|
1bf642761ba389da2f5fbcaef59aec45ea939fa5
|
/01_code/00_load_packages.R
|
ecf20c423b48293d7f07117a01680fd7a26af7c3
|
[] |
no_license
|
jaspercooper/terror_von_schirach
|
24c04df48802191a74d5b2529221f51f45d17069
|
c897e75c29fb48c3489377b5aca801218cceada6
|
refs/heads/master
| 2021-01-20T01:59:48.287914
| 2017-10-24T20:04:03
| 2017-10-24T20:04:03
| 89,358,856
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 749
|
r
|
00_load_packages.R
|
# Load packages -----------------------------------------------------------
# Install any missing packages, then attach all of them, then tidy up.
packages <- c("dplyr",
              "ggplot2",
              "knitr",
              "stargazer",
              "rvest",
              "tsModel",
              "lmtest",
              "Epi",
              "splines",
              "vcd",
              "rdd",
              "magrittr",
              "ggthemes",
              "ggrepel",
              "memisc",
              "pander"
)
# Which packages are already installed? vapply() guarantees a logical vector
# even for edge cases where sapply() would return a list.
package_installed <-
  vapply(packages, function(pack)
    pack %in% rownames(installed.packages()), logical(1))
# install.packages() is vectorized over package names, so no loop is needed.
if (any(!package_installed)) {
  install.packages(packages[!package_installed])
}
# Attach every package; character.only is required because names are strings.
sapply(packages, require, character.only = TRUE)
# Remove the helper objects so they don't linger in the workspace.
rm(packages, package_installed)
|
a135d5e70254979824cf717efc828a723e7b3045
|
9aafde089eb3d8bba05aec912e61fbd9fb84bd49
|
/codeml_files/newick_trees_processed/6453_0/rinput.R
|
ea96284aba35b75d40f47935430b4a191d7ae1ad
|
[] |
no_license
|
DaniBoo/cyanobacteria_project
|
6a816bb0ccf285842b61bfd3612c176f5877a1fb
|
be08ff723284b0c38f9c758d3e250c664bbfbf3b
|
refs/heads/master
| 2021-01-25T05:28:00.686474
| 2013-03-23T15:09:39
| 2013-03-23T15:09:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 135
|
r
|
rinput.R
|
# Read the Newick tree, strip its root, and write the unrooted tree back out.
library(ape)
rooted_tree <- read.tree("6453_0.txt")
write.tree(unroot(rooted_tree), file = "6453_0_unrooted.txt")
|
9853a2be6b4eed2dda623f850273e9e45a387276
|
7d811e8081b82aadf1f20d69dca568a1d3ebe2cb
|
/scripts/r/us_languages_functions.R
|
5abec3fe39df0b10ed35ccd10b9220f6734ca40b
|
[] |
no_license
|
pagepiccinini/us_languages
|
aa4e84edbce7def04d6bf9fe3793111196ffc3c6
|
41e645f5a6c9374ed155d7290b999116e04e8115
|
refs/heads/master
| 2021-01-20T04:51:08.019241
| 2017-04-29T17:46:41
| 2017-04-29T17:46:41
| 89,745,014
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,266
|
r
|
us_languages_functions.R
|
# Choropleth of the percentage of non-English speakers for a single year.
# `data` must carry map polygon columns (long, lat, group) plus `year` and
# `perc_noneng`; returns a ggplot object.
percnoneng_map <- function(data, plot_year) {
  year_data <- filter(data, year == plot_year)
  ggplot(year_data, aes(x = long, y = lat, group = group, fill = perc_noneng)) +
    # Title showing which year is mapped
    ggtitle(paste0("Year ", plot_year)) +
    # States drawn as polygons with white borders between them
    geom_polygon(color = "white") +
    # Legend title and sequential colour scale, fixed to the 0-50% range
    scale_fill_distiller("percentage non-English\nlanguage", palette = "PuBu",
                         direction = 1, limits = c(0, 50)) +
    # Polyconic projection to match most US maps
    coord_map(projection = "polyconic") +
    # Drop axes and background
    theme_void() +
    # Legend on top, larger text
    theme(legend.position = "top",
          text = element_text(size = 16))
}
# Map of the Nth most common non-English language per state for one year.
# Fill encodes the language; alpha (darkness) encodes the speaker percentage.
top_map <- function(data, plot_year, number) {
  ranked <- filter(data, ranking == number, year == plot_year)
  ggplot(ranked,
         aes(x = long, y = lat, group = group,
             fill = language, alpha = percentage)) +
    # Title plus a subtitle explaining the alpha encoding
    ggtitle(paste0("Top ", number, " Non-English Language for ", plot_year),
            subtitle = "Darkness function of percentage of speakers") +
    # States drawn as polygons with white borders between them
    geom_polygon(color = "white") +
    # Wrap the fill legend to roughly ten entries per row
    guides(fill = guide_legend(nrow = round(length(unique(levels(data$language))) / 10, 0))) +
    # Alpha capped at 30%, without its own legend
    scale_alpha(guide = 'none', limits = c(0, 30)) +
    # Polyconic projection to match most US maps
    coord_map(projection = "polyconic") +
    # Drop axes and background
    theme_void() +
    # Legend on top, larger text
    theme(legend.position = "top",
          text = element_text(size = 16))
}
# Barplot counting, per census year, how many states have each language as the
# Nth most common non-English language. Returns a ggplot object.
top_barplot = function(data, number) {
  data %>%
    filter(ranking == number) %>%
    ggplot(aes(x = language, fill = language)) +
    facet_wrap(~year) +
    geom_bar() +
    # Fixed typo in the displayed title: "Langauge" -> "Language".
    ggtitle(paste0("Number of States Where Language ", number, " Most Common\n(after English)")) +
    xlab("Language") +
    ylab("Number of states") +
    # Wrap the fill legend to roughly ten entries per row
    guides(fill = guide_legend(nrow = round(length(unique(levels(data$language))) / 10, 0))) +
    theme_classic() +
    # Legend on top; hide the redundant x-axis tick labels (legend names them)
    theme(text = element_text(size = 16),
          legend.position = "top",
          axis.text.x = element_blank(), axis.ticks.x = element_blank())
}
# Map of the share of speakers of one specific language in a given year.
# Returns a ggplot object.
specificlg_map <- function(data, plot_year, specific_language) {
  subset_data <- filter(data, year == plot_year, language == specific_language)
  ggplot(subset_data, aes(x = long, y = lat, group = group, fill = percentage)) +
    # Title naming the language and year
    ggtitle(paste0("Percentage of ", specific_language, " in ", plot_year)) +
    # States drawn as polygons with white borders between them
    geom_polygon(color = "white") +
    # Sequential colour scale
    scale_fill_distiller(palette = "RdPu", direction = 1) +
    # Polyconic projection to match most US maps
    coord_map(projection = "polyconic") +
    # Drop axes and background
    theme_void() +
    # Legend on top, larger text
    theme(legend.position = "top",
          text = element_text(size = 16))
}
|
41608c3e3c47c07f340fb9aea92e74f761931ee9
|
94f6936109575030a1c264bda8e7c59978e46982
|
/ch01/1_1_Variable.R
|
6a4e1c6ef411a3f04960f8636226be6dfef76231
|
[] |
no_license
|
hungry4therock/my-r
|
c5a9bc0b3afb7c8d059fd3a32a6f9cf5aca13ef3
|
9f78fcb8eba394fa6bd64a56306e1f9a62fea879
|
refs/heads/master
| 2023-06-11T16:35:21.320434
| 2021-07-08T07:01:05
| 2021-07-08T07:01:05
| 384,037,408
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 558
|
r
|
1_1_Variable.R
|
# Date: 2021-06-28
# Author: Choi Hyunjin
# Variables -- objects holding one or more values (scalars, vectors, factors).
num1 <- 1
num2 <- 2
print(num1)
print(num2)
str1 <- 'hello r!'
str2 <- '안녕 R!'
str1
str2
# Scalar variable -- holds a single value (the common case).
var1 <- 1
var2 <- 2
var3 <- var1 + var2
var3
# Vector variable.
x <- c(1, 2, 3)
y <- c('김유신', '김춘추', '장보고', '강감찬', '이순신')
x
y
x[1]
y[2]
# Factor variable -- a vector of strings used for categorical data.
# Renamed the vector from `str` to `grades` to avoid shadowing base::str().
grades <- c('A', 'B', 'C', 'D', 'E')
ft <- factor(grades)
# BUG FIX: the script previously printed the undefined object `f`; the factor
# is named `ft`.
ft
|
570e242215babf87abc39b1fc52836639f863a94
|
b851275f632ab2745d9cd7982712b2fda66b631a
|
/R/get_portfolio.r
|
e560960f7224487ef17b62bb66765d241c10f291
|
[] |
no_license
|
jguszcza/stockvis2
|
a0bf9b78ffee9a4ee51121e46b44c5e276ec8100
|
14b2449f28b7c39fe479edc1081c660767f3ab57
|
refs/heads/master
| 2016-09-16T00:37:23.496895
| 2012-12-04T22:10:51
| 2012-12-04T22:10:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,972
|
r
|
get_portfolio.r
|
#' Get historical daily returns for a portfolio of stocks or indexes
#'
#' get_portfolio retrieves historical returns data from the yahoo finance API
#' for an equal weighted portfolio of stocks. Daily closing prices are fetched
#' for the investment period from start to end and expressed as returns
#' relative to the portfolio's value on start.
#'
#' Unlike \code{\link{get_stock}}, which reports one series per stock,
#' get_portfolio reports a single series: the returns of a portfolio built by
#' investing equally in each ticker on start and never rebalancing.
#'
#' @seealso \code{\link{get_stock}}, \code{\link{get_returns}}
#'
#' @param tickers A vector of stock ticker symbols. Equal amounts of money are
#' invested in each of these stocks on start and never rebalanced. Ticker
#' symbols should comply with URL encoding standards for special characters.
#'
#' @param start A date or character string that marks the beginning of the
#' investment period. Defaults to "2007-12-01".
#'
#' @param end A date or character string that marks the end of the investment
#' period. end must occur after start. Defaults to "2012-12-01".
#'
#' @param name An optional name to give the portfolio in the results. Must be
#' a character string. Defaults to "portfolio."
#'
#' @return A data frame in the tidy format giving the daily values of the
#' portfolio as a proportion of the initial investment.
#' @export
get_portfolio <- function(tickers, start = "2007-12-01", end = "2012-12-01",
                          name = "portfolio"){
  # Fetch per-stock returns, then average across tickers within each date to
  # form the equal-weighted, never-rebalanced portfolio series.
  portfolio <- get_stocks(tickers = tickers, start = start, end = end)
  portfolio <- ddply(portfolio, "date", summarise, return = mean(return))
  # Label the series so it can be distinguished when combined with others.
  portfolio[[".id"]] <- name
  portfolio
}
|
48d03673d4fa64c4bdbe764a62eacda98b11437b
|
f3d37dbf97b62635de9a59d4eb414cc92c73fc09
|
/man/verbose.Rd
|
3df6d329f49f55376611b132ee3e9ef7f7bdfd4d
|
[
"MIT"
] |
permissive
|
r-lib/httr
|
72b44493b68f597e47bd458072cd580954985943
|
7aa629fa53668669dd2a51012877cddc96c64003
|
refs/heads/main
| 2023-08-21T00:00:14.223974
| 2023-08-15T18:19:54
| 2023-08-15T18:19:54
| 2,756,403
| 444
| 684
|
NOASSERTION
| 2023-08-12T17:36:56
| 2011-11-11T15:05:00
|
R
|
UTF-8
|
R
| false
| true
| 2,014
|
rd
|
verbose.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/verbose.r
\name{verbose}
\alias{verbose}
\title{Give verbose output.}
\usage{
verbose(data_out = TRUE, data_in = FALSE, info = FALSE, ssl = FALSE)
}
\arguments{
\item{data_out}{Show data sent to the server.}
\item{data_in}{Show data received from the server.}
\item{info}{Show informational text from curl. This is mainly useful
for debugging https and auth problems, so is disabled by default.}
\item{ssl}{Show even data sent/received over SSL connections?}
}
\description{
A verbose connection provides much more information about the flow of
information between the client and server.
}
\section{Prefixes}{
\code{verbose()} uses the following prefixes to distinguish between
different components of the http messages:
\itemize{
\item \code{*} informative curl messages
\item \verb{->} headers sent (out)
\item \verb{>>} data sent (out)
\item \verb{*>} ssl data sent (out)
\item \verb{<-} headers received (in)
\item \verb{<<} data received (in)
\item \verb{<*} ssl data received (in)
}
}
\examples{
\dontrun{
GET("http://httpbin.org", verbose())
GET("http://httpbin.org", verbose(info = TRUE))
f <- function() {
GET("http://httpbin.org")
}
with_verbose(f())
with_verbose(f(), info = TRUE)
# verbose() makes it easy to see exactly what POST requests send
POST_verbose <- function(body, ...) {
POST("https://httpbin.org/post", body = body, verbose(), ...)
invisible()
}
POST_verbose(list(x = "a", y = "b"))
POST_verbose(list(x = "a", y = "b"), encode = "form")
POST_verbose(FALSE)
POST_verbose(NULL)
POST_verbose("")
POST_verbose("xyz")
}
}
\seealso{
\code{\link[=with_verbose]{with_verbose()}} makes it easier to use verbose mode
even when the requests are buried inside another function call.
Other config:
\code{\link{add_headers}()},
\code{\link{authenticate}()},
\code{\link{config}()},
\code{\link{set_cookies}()},
\code{\link{timeout}()},
\code{\link{use_proxy}()},
\code{\link{user_agent}()}
}
\concept{config}
|
f14bad39d7f077eb1e467007239d28c800c41087
|
24431275baa11c3d471cd2838d508e14a5fc3037
|
/man/dot-new_soil_layer.Rd
|
8c32b897871f252d7959395b9520767ace38d05f
|
[
"MIT"
] |
permissive
|
brownag/soilvctrs
|
c6848ff6476658689a2a8ce715d05b6e2e6365c6
|
d4963236c5fd8e4bc37f8634d36a15df7374a833
|
refs/heads/master
| 2022-12-01T19:14:33.731567
| 2020-08-14T08:28:36
| 2020-08-14T08:33:46
| 281,048,740
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 768
|
rd
|
dot-new_soil_layer.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/soil_layer.R
\name{.new_soil_layer}
\alias{.new_soil_layer}
\title{Internal constructor for \code{soil_layer} object}
\usage{
.new_soil_layer(
x = list(pid = integer(), hid = integer(), geom = geovctrs::geo_rect()),
metadata = list(pid = "pid", hid = "hid", ztop = "ztop", zbot = "zbot")
)
}
\arguments{
\item{x}{A named list containing, at a minimum, \code{pid}, \code{hid}, and \code{geom} named vectors.}
}
\value{
A \code{soil_layer}
}
\description{
Requires minimum 3 vectors (all of same length) in a named list \code{x}, with following \code{name : class}:
}
\details{
- \code{pid : character}
- \code{hid : integer}
- \code{geom : geovctrs::geo_rect}
}
\keyword{internal}
|
358900e920e7390c0117736298e5b2cd8b809fc6
|
fe7788c1e4eba9b2668835da8535b05fcc94e32b
|
/Bin/Rscript/GR1.r
|
faaf9285ef982d6ea16d85d2e6ee35e5b402bcca
|
[] |
no_license
|
yiliao1022/Pepper3Dgenome
|
77045dd1480abdfe217d47f7c20ff360c080108b
|
d4a8bc6e181eba45db1dff405f3a179fe4e9b99c
|
refs/heads/main
| 2023-04-14T20:07:09.192776
| 2022-05-30T04:34:10
| 2022-05-30T04:34:10
| 338,714,255
| 9
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 268
|
r
|
GR1.r
|
# Run CALDER hierarchical (sub)domain calling on the GR1 sample's Chr12
# contact matrix (100 kb bins).
library(CALDER)
gr1_chr12_matrix <- "/home/yiliao/OLDISK/genome_assembly/hic_explorer/13_bnbc/100k/Chr12/matrixGR1.Chr12.csv.txt"
CALDER_main(gr1_chr12_matrix, chr = 12, bin_size = 10E4,
            out_dir = './Chr12_LJGR1', sub_domains = TRUE,
            save_intermediate_data = FALSE)
|
e171f22946a1693e96d049de916d8a1a754cbb1c
|
a6cf1c32b434836a4097d3a5567176914837a7ac
|
/R_genomics/fit_genie3.R
|
04907f38a844e5b09cef1f41ea294cc5dd885747
|
[
"CC0-1.0"
] |
permissive
|
limorigu/ComplexCauses
|
55b146f256f690138dfd1f09767982b2b82d3b76
|
e047bea494329e4c4ca0f124c1a44daf900055df
|
refs/heads/main
| 2023-05-26T03:34:21.426187
| 2021-06-10T19:46:15
| 2021-06-10T19:46:15
| 374,762,328
| 5
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 926
|
r
|
fit_genie3.R
|
# Load libraries, register cores
library(data.table)
library(randomForest)
library(foreach)  # explicit: foreach()/%dopar% come from here, not doMC
library(doMC)
registerDoMC(8)
# Set seed; "L'Ecuyer-CMRG" gives reproducible parallel RNG streams
set.seed(42, kind = "L'Ecuyer-CMRG")
# Import data, downloaded from:
# http://dreamchallenges.org/project/dream-5-network-inference-challenge/
mat <- as.matrix(fread('net3_expression_data.tsv'))
# Scale (per Huynh-Thu et al., 2010)
mat <- scale(mat)
# First 334 genes are transcription factors, rest are not
x <- mat[, seq_len(334)]
y <- mat[, 335:ncol(mat)]
# Ensure the model output directory exists before the workers write to it
# (saveRDS fails if the directory is missing).
dir.create('genie3_models', showWarnings = FALSE, recursive = TRUE)
# Fit one random forest per target gene, with hyperparameters as in
# Huynh-Thu et al., 2010; returns the permutation importance of each TF.
rf_loop <- function(gene) {
  f <- randomForest(x, y[, gene], ntree = 1000, mtry = floor(sqrt(334)),
                    importance = TRUE)
  saveRDS(f, paste0('genie3_models/G', 334 + gene, '.rds'))
  out <- f$importance[, 2]
  return(out)
}
# Execute in parallel, save the TF-by-gene importance (adjacency) matrix
imp <- foreach(g = seq_len(ncol(y)), .combine = rbind) %dopar%
  rf_loop(g)
fwrite(imp, 'adj_mat.csv')
|
18bfc90335f58687d4d7b47fd1482d2c5e6b8c6a
|
f52a881c1b5a6755c8eb46feb62adbb9302b9dfd
|
/download_statistics/download_stats.R
|
156d10e1ac1551e5fe8625a2de4e48e41024194e
|
[] |
no_license
|
CAPLTER/informatics-documentation
|
dc86ce97ee0daa64c5835ff51810077264f2929a
|
754f2a4cb4fd6460c62f5c5af1f1959aed2453c2
|
refs/heads/master
| 2022-12-11T00:27:33.032081
| 2022-11-27T19:14:07
| 2022-11-27T19:14:07
| 102,893,896
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,678
|
r
|
download_stats.R
|
# libraries
library(tidyverse)
library(RSQLite)
# options
options(dplyr.summarise.inform = FALSE)

# GIOS ----
# Loads the production MySQL connection object (`mysql`) used below.
source("~/Documents/localSettings/mysql_prod.R")
# Sanity check: peek at the three most recent access-log rows.
dbGetQuery(mysql, "SELECT * FROM gios2_production.dataset_access_log ORDER BY id DESC LIMIT 3 ;")
# Per-file access log for PUBLIC/CAPLTER datasets, excluding msb-* packages.
gios_log <- dbGetQuery(mysql, "
SELECT
CAST(datasets.package_id AS char) AS pid,
CAST(dal.file AS char) AS file,
dal.created
FROM gios2_production.dataset_access_log dal
LEFT JOIN gios2_production.dataset_files df ON (df.url = dal.file)
LEFT JOIN gios2_production.datasets ON (datasets.id = df.dataset_id)
WHERE
datasets.data_access IN ('PUBLIC', 'CAPLTER') AND
datasets.package_id NOT LIKE 'msb-%'
;")
# for viewing and error checking tally
# gios_log %>%
#   mutate(
#     created = as.POSIXct(created, format = "%Y-%m-%d %H:%M:%S"),
#     created = as.Date(created),
#     year = lubridate::year(created)
#   ) %>%
#   mutate(
#     dataset = str_extract(
#       string = str_extract(
#         string = pid,
#         pattern = "(?:knb-lter-cap\\.)([0-9]+)"
#       ),
#       pattern = "[0-9]+"
#     )
#   )
# Collapse the raw log to yearly "touches" per dataset:
#  1. bucket each access by calendar year;
#  2. count accesses per (pid, file, year);
#  3. per (year, pid), keep the max per-file count, i.e. the minimum number
#     of times that package version must have been accessed;
#  4. strip the version suffix off the package id to get the dataset number;
#  5. sum those counts across versions within each year.
gios_log <- gios_log %>%
  mutate(
    created = as.POSIXct(created, format = "%Y-%m-%d %H:%M:%S"),
    created = as.Date(created),
    year = lubridate::year(created)
  ) %>%
  group_by(pid, file, year) %>%
  summarise(
    count = n()
  ) %>%
  ungroup() %>%
  group_by(year, pid) %>%
  summarise(
    accessed_at_least = max(count, na.rm = TRUE)
  ) %>%
  ungroup() %>%
  mutate(
    dataset = str_extract(
      string = str_extract(
        string = pid,
        pattern = "(?:knb-lter-cap\\.)([0-9]+)"
      ),
      pattern = "[0-9]+"
    )
  ) %>%
  group_by(year, dataset) %>%
  summarise(
    touches = sum(accessed_at_least)
  ) %>%
  ungroup() %>%
  mutate(dataset = as.integer(dataset)) %>%
  arrange(year, dataset)
# exclude datasets that are not in EDI
# NOTE(review): `entities_summary` is only defined much later in this script,
# so this step errors if the file is sourced top-to-bottom -- confirm the
# intended execution order (or move this filter below the entities_summary
# block).
gios_log <- gios_log %>%
  filter(dataset %in% unique(entities_summary$dataset))
# match temporal range of EDI
# gios_log <- gios_log %>%
#   filter(year >= 2015)
# EDI ----
# One SQLite snapshot of EDI download statistics per calendar year.
con15 <- dbConnect(
  RSQLite::SQLite(),
  "~/Dropbox/development/dataset_statistics/year_2015/knb-lter-cap.sqlite"
)
con16 <- dbConnect(
  RSQLite::SQLite(),
  "~/Dropbox/development/dataset_statistics/year_2016/knb-lter-cap.sqlite"
)
con17 <- dbConnect(
  RSQLite::SQLite(),
  "~/Dropbox/development/dataset_statistics/year_2017/knb-lter-cap.sqlite"
)
con18 <- dbConnect(
  RSQLite::SQLite(),
  "~/Dropbox/development/dataset_statistics/year_2018/knb-lter-cap.sqlite"
)
con19 <- dbConnect(
  RSQLite::SQLite(),
  "~/Dropbox/development/dataset_statistics/year_2019/knb-lter-cap.sqlite"
)
con20 <- dbConnect(
  RSQLite::SQLite(),
  "~/Dropbox/development/dataset_statistics/year_2020/knb-lter-cap.sqlite"
)
dbListTables(con19)
# Per-entity (file) download counts for 2015-2020, tagged with their year.
entities <- rbind(
  dbGetQuery(con15, "SELECT * FROM entities ;") %>%
    mutate(year = 2015),
  dbGetQuery(con16, "SELECT * FROM entities ;") %>%
    mutate(year = 2016),
  dbGetQuery(con17, "SELECT * FROM entities ;") %>%
    mutate(year = 2017),
  dbGetQuery(con18, "SELECT * FROM entities ;") %>%
    mutate(year = 2018),
  dbGetQuery(con19, "SELECT * FROM entities ;") %>%
    mutate(year = 2019),
  dbGetQuery(con20, "SELECT * FROM entities ;") %>%
    mutate(year = 2020)
)
# edi logs: 2018 - oct_2020
edi_logs <- rbind(
  dbGetQuery(con18, "SELECT * FROM packages ;") %>%
    mutate(year = 2018),
  dbGetQuery(con19, "SELECT * FROM packages ;") %>%
    mutate(year = 2019),
  # BUG FIX: the 2020 rows were previously read from con18 (the 2018
  # snapshot); they must come from con20.
  dbGetQuery(con20, "SELECT * FROM packages ;") %>%
    mutate(year = 2020)
)
# From the access logs: total EDI file downloads (2018 - Oct 2020).
edi_logs %>% summarise(downloads = sum(count))
# From the entities tables: total EDI file downloads (2018 - Oct 2020).
entities %>% summarise(total = sum(count))
# From the two above, why are the counts different? Regardless, they are within
# ~6% of each other so we can say > 21K file downloads plus ~2K from CAP for
# the period 2018 through Oct_2020 for >23K file downloads for that period.
# Total number of downloads ranked by dataset id.
# BUG FIX: this pipeline previously referenced the undefined object `edi_log`
# (both as the pipe source and as `edi_log$pid` inside mutate()); the table is
# named `edi_logs`, and within the pipeline the column is simply `pid`.
edi_logs %>%
  mutate(
    dataset = str_extract(
      string = str_extract(
        string = pid,
        pattern = "(?:knb-lter-cap\\.)([0-9]+)"
      ),
      pattern = "[0-9]+"
    )
  ) %>%
  group_by(dataset) %>%
  summarise(downloads = sum(count)) %>%
  arrange(desc(downloads))
# The number of files downloaded that we see with edi_logs can be deceptive as
# it exaggerates the values based on the number of files in a dataset. For
# example, knb-lter-cap.514.x (Salt River rephotography) contains 42 files. As
# such, if the files in this dataset are accessed twice, that reflects a
# download total of 84. That is, of course, technically accurate - that 84
# files were downloaded. However, it over accentuates the contribution of that
# particular dataset. For example, consider a dataset with only one file for
# which the single file was downloaded 12 times. In this comparison, it is of
# greater interest for this analysis that the dataset with the single file was
# accessed 12 times and the dataset with 42 files was accessed twice. As such,
# the approach below focuses on 'touches' rather than total file downloads by
# isolating the maximum number of files downloaded for each version of dataset
# (by year as well). For example, keeping with the aforementioned rephotography
# dataset, consider that in 2018, the maximum number of files downloaded from
# version 5 (knb-lter-cap.514.5) of that dataset was 2, of version 6 was 2, and
# of version 10 was 3. As such, the total number of times that at least one
# file was accessed in dataset knb-lter-cap.514.x (any version) in 2018 is the
# sum of the maximum number of downloads per version (n = 7 in this example).
# Calculate dataset "touches" delineated by year and dataset number: per
# (year, pid) keep the maximum per-file download count, then sum those across
# versions of the same dataset within a year (rationale in the long comment
# above).
entities_summary <- entities %>%
  group_by(year, pid) %>%
  summarise(
    accessed_at_least = max(count, na.rm = TRUE)
  ) %>%
  ungroup() %>%
  mutate(
    # Strip the version suffix, e.g. "knb-lter-cap.643.5" -> "643".
    dataset = str_extract(
      string = str_extract(
        string = pid,
        pattern = "(?:knb-lter-cap\\.)([0-9]+)"
      ),
      pattern = "[0-9]+"
    )
  ) %>%
  group_by(year, dataset) %>%
  summarise(
    touches = sum(accessed_at_least)
  ) %>%
  ungroup() %>%
  mutate(dataset = as.integer(dataset)) %>%
  arrange(dataset)
# Explore the most-touched datasets (all years combined).
entities_summary %>%
  group_by(dataset) %>%
  summarise(accessed_at_least = sum(touches)) %>%
  arrange(desc(accessed_at_least))
# Example most-touched dataset (McDowell arthropods - wow!)
entities %>%
  filter(grepl("knb-lter-cap.643.", pid))
# Calculate unique datasets per year.
entities_summary %>%
  count(year)
# Assemble yearly touch totals by source:
#  - row-bind the CAP (GIOS) and EDI yearly totals;
#  - append a "total" series (CAP + EDI summed per year);
#  - add a placeholder CAP row for 2017;
#  - blank out EDI/total values for 2015-2017.
touches <- rbind(
  gios_log %>%
    group_by(year) %>%
    summarise(accessed = sum(touches)) %>%
    ungroup() %>%
    mutate(source = "CAP"),
  entities_summary %>%
    group_by(year) %>%
    summarise(accessed = sum(touches)) %>%
    ungroup() %>%
    mutate(source = "EDI")
) %>%
  rbind(
    # Same CAP + EDI union again, collapsed to a per-year grand total.
    rbind(
      gios_log %>%
        group_by(year) %>%
        summarise(accessed = sum(touches)) %>%
        ungroup() %>%
        mutate(source = "CAP"),
      entities_summary %>%
        group_by(year) %>%
        summarise(accessed = sum(touches)) %>%
        ungroup() %>%
        mutate(source = "EDI")
    ) %>%
      group_by(year) %>%
      summarise(accessed = sum(accessed)) %>%
      ungroup() %>%
      mutate(source = "total")
  ) %>%
  rename(touches = accessed) %>%
  add_row(
    year = 2017,
    touches = NA,
    source = "CAP"
  ) %>%
  mutate(
    # NOTE(review): NA_integer_ assumes `touches` is integer (it is a sum of
    # integer counts); if it ever becomes double, case_when() will error on
    # the type mismatch -- confirm.
    touches = case_when(
      year %in% c(2015, 2016, 2017) & source == "total" ~ NA_integer_,
      year %in% c(2015, 2016, 2017) & source == "EDI" ~ NA_integer_,
      TRUE ~ touches)
  )
# Number of distinct datasets (CAP + EDI combined) per year.
dataset_count <- rbind(
  gios_log,
  entities_summary
) %>%
  group_by(year) %>%
  summarise(datasets = n_distinct(dataset))
# touches plots ----
library(patchwork)
# Bar chart of yearly touches by source, 2018 onward (both sources available).
plot_touches <- ggplot() +
  geom_bar(
    mapping = aes(x = year, y = touches, fill = source),
    data = touches %>% filter(year >= 2018),
    stat = "identity",
    position = position_dodge2(preserve = "single", padding = 0),
    na.rm = FALSE
  ) +
  # geom_line(
  #   mapping = aes(x = year, y = datasets*10, colour = "a"),
  #   data = dataset_count,
  #   size = 2
  # ) +
  scale_x_continuous(
    name = "year",
    breaks = touches$year) +
  scale_fill_grey(
    name = "data source",
    start = 0.8,
    end = 0.4,
    na.value = "blue"
  ) +
  # scale_y_continuous(
  #   sec.axis = sec_axis(
  #     trans = ~ . / 10,
  #     name = "number of datasets"
  #   )
  # ) +
  # scale_colour_manual(
  #   name = "number of datasets",
  #   values = c("a" = "black"),
  #   labels = c("#")
  # ) +
  theme(
    text = element_text(face = "bold"),
    axis.text.y = element_text(face = "bold"),
    legend.position = c(0.2, 0.8)
  ) +
  ggtitle(
    label = "approximate number of accesses\nto data resources",
    subtitle = "2018 - ~current"
  )
# Line chart of the number of CAP LTER datasets in EDI per year, 2015 onward.
plot_datasets <- ggplot() +
  geom_line(
    mapping = aes(x = year, y = datasets),
    data = dataset_count %>% filter(year >= 2015),
    size = 2
  ) +
  # scale_colour_manual(
  #   name = "number of datasets",
  #   values = c("a" = "black"),
  #   labels = c("#")
  # ) +
  scale_x_continuous(
    name = "year",
    breaks = dataset_count$year) +
  theme(
    text = element_text(face = "bold"),
    axis.text.x = element_text(face = "bold", size = 10),
    axis.text.y = element_text(face = "bold")
  ) +
  ggtitle(
    label = "number of CAP LTER datasets\nin EDI",
    subtitle = "2015 - ~current"
  )
# Display the two panels side by side via patchwork.
plot_datasets + plot_touches
# NOTE(review): ggsave() is called without `plot =`, so it saves the last plot
# displayed -- the combined patchwork above. Confirm that is the intent.
ggsave(
  filename = "~/Desktop/download_stats.png",
  width = 9,
  height = 5
)
|
6b9297ed50f6b310b4f19bb9bd7013cf12781f98
|
29de68f521a4d71d89045fafecfb3acc406d8f97
|
/R/dhest.R
|
f9825340cb025e71bcfb7023da5d72efc1d243ca
|
[] |
no_license
|
evolvedmicrobe/msde
|
76a29f4383ec8c3bdb4a817f33857f144ba1ab08
|
b7982fb56612836cebbcb5f7661e4ba118ec0fe4
|
refs/heads/master
| 2021-01-21T18:00:25.373337
| 2016-08-25T04:08:39
| 2016-08-25T04:08:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,213
|
r
|
dhest.R
|
#'@name dhest
#'@title Stationary Distribution of Heston Model Log-Return Differences
#'@description Density values are roughly centered at alpha*dT. \cr
#' The density is obtained by Fourier inversion of the characteristic
#' function, i.e. numerical complex integration, currently done on a
#' regular frequency grid. \cr
#' Supplying LConst avoids recomputing the exp(1i * (px %o% y)) factor when
#' alpha is held constant across calls. \cr
#'@param x points at which to evaluate the density (shifted by alpha*dT)
#'@param alpha drift parameter; the evaluation points are centered at alpha*dT
#'@param gamma Heston model parameter (appears in the mean-reversion terms)
#'@param mu Heston model parameter; defaults to (beta + sigma^2/2)/(2*gamma)
#' when missing
#'@param beta Heston model parameter (enters only through the default mu)
#'@param sigma volatility-of-volatility parameter
#'@param rho correlation parameter
#'@param dT time increment between observations
#'@param inter endpoints of the frequency-integration interval
#'@param n number of grid points used for the numerical integration
#'@param LConst optional precomputed exp(1i * (px %o% y)) matrix
#'@param debug if TRUE, drops into browser() for interactive inspection
#'@return numeric
#'@export
dhest <- function(x, alpha, gamma, mu, beta, sigma, rho, dT,
                  inter = c(-5e3, 5e3), n = 1e3, LConst, debug = FALSE) {
  # default mu implied by the remaining parameters
  if (missing(mu)) mu <- (beta + sigma^2 / 2) / (2 * gamma)
  centered <- x - alpha * dT
  grid <- seq(inter[1], inter[2], len = n)
  step <- grid[2] - grid[1]
  if (debug) browser()
  # building blocks of the log characteristic function on the grid
  shifted <- gamma + 1i * rho * sigma * grid
  omega <- sqrt(shifted^2 + sigma^2 * (grid^2 - 1i * grid))
  kt <- omega * dT / 2
  acc <- (1 + exp(-2 * kt)) / 2 +
    (omega^2 - shifted^2 + 2 * gamma * shifted) / (2 * gamma * omega) *
      (1 - exp(-2 * kt)) / 2
  acc <- gamma * mu / sigma^2 * shifted * dT -
    2 * gamma * mu / sigma^2 * (kt + log(acc))
  # invert: grid Riemann sum of Re(exp(i * px * y) * phi(px)) / (2*pi)
  acc <- if (missing(LConst)) exp(1i * (grid %o% centered) + acc) else LConst * exp(acc)
  acc <- colSums(Re(acc)) * step / 2 / pi
}
|
133db3999fa2d7705b1de6754c0c864f27862912
|
43ac4b09b759bdb84dc254186e5fa3541c70a137
|
/plot2.R
|
59a5a5866774a0af6ca3f09208741cf1d2acc7a8
|
[] |
no_license
|
pmichene/ExData_Plotting1
|
50a34da163660662349fbfd5c4e865c8a278c67d
|
ebda984dc5d266ebe93d4d40f08ff4fbd7ff711c
|
refs/heads/master
| 2020-03-15T23:01:05.427108
| 2018-05-08T03:55:05
| 2018-05-08T03:55:05
| 132,385,065
| 0
| 0
| null | 2018-05-06T23:51:42
| 2018-05-06T23:51:42
| null |
UTF-8
|
R
| false
| false
| 1,104
|
r
|
plot2.R
|
library(data.table)

# Download AND unzip only when the extracted file is not already present.
# (Fix: unzip("data.zip") previously ran unconditionally and failed when the
# txt file existed but data.zip had been removed.)
if (!file.exists("household_power_consumption.txt")) {
  download.file(
    "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip",
    destfile = "data.zip", method = "curl", quiet = TRUE
  )
  unzip("data.zip")
}

# Read the full data set; "?" marks missing values.
data_file <- "household_power_consumption.txt"
all_data <- read.table(data_file, header = TRUE, na.strings = "?", sep = ";")

# Keep only the two days of interest.
data <- all_data[(all_data$Date == "1/2/2007" | all_data$Date == "2/2/2007"), ]

# Format the Date column as a date class.
data$Date <- as.Date(data$Date, format = "%d/%m/%Y")

# Combine the Date and Time columns into a single POSIXct date-time column.
dateTime <- paste(data$Date, data$Time)
data$DateTime <- as.POSIXct(dateTime)

# Line plot of global active power over time, copied to a 480x480 PNG.
plot(data$Global_active_power ~ data$DateTime,
     type = "l",
     ylab = "Global Active Power (kilowatts)",
     xlab = "")
dev.copy(png, file = "plot2.png", height = 480, width = 480)
dev.off()
|
58a194ea96667763e491996c3ac4412ddc7c57dc
|
862bee845b055a3cfb7c0ab5ac1c277ed35f438f
|
/library/ggthemes/examples/ex-scale_colour_gradient2_tableau.R
|
a48cb517dbb642f6fe9a3ed00d93248f93524de5
|
[
"MIT"
] |
permissive
|
mayousif/Turbidity-Cleaner
|
a4862f54ca47305cfa47d6fcfa048e1573d6a2b7
|
e756d9359f1215a4c9e6ad993eaccbc3fb6fe924
|
refs/heads/master
| 2022-05-09T07:32:55.856365
| 2022-04-22T20:05:10
| 2022-04-22T20:05:10
| 205,734,717
| 2
| 2
| null | 2020-11-02T21:31:19
| 2019-09-01T21:31:43
|
HTML
|
UTF-8
|
R
| false
| false
| 496
|
r
|
ex-scale_colour_gradient2_tableau.R
|
library("ggplot2")
# Toy data: uniform coordinates plus one diverging (z1) and one
# non-negative (z2) colour variable. Calls kept in this order so the
# RNG stream matches the original example.
df <- data.frame(
  x = runif(100),
  y = runif(100),
  z1 = rnorm(100),
  z2 = abs(rnorm(100))
)
base_plot <- ggplot(df, aes(x, y)) + geom_point(aes(colour = z2))
diverging <-
  ggthemes_data[["tableau"]][["color-palettes"]][["ordered-diverging"]]
# Preview the first few ordered-diverging Tableau palettes.
for (pal_name in head(names(diverging))) {
  print(base_plot + scale_colour_gradient2_tableau(pal_name) +
          ggtitle(pal_name))
}
# If you need to reverse a palette, use a transformation
base_plot + scale_colour_gradient2_tableau(trans = "reverse")
|
7841f37efc787605525abfb1df37642a487e2c95
|
96aba36ec950b4752423cd352e56001e4f14d33b
|
/GettingAndCleaningData/Week1/DownloadFromGoogleDrive.R
|
14b38fb1200f9fe2ff35ccbe587b66439c628e26
|
[] |
no_license
|
figoyouwei/datasciencecoursera
|
4f879e6cd23cbcd0b99981741520d9ea8443c817
|
ddd307451d8158fa6a58c655a513748a6dcfbf4e
|
refs/heads/master
| 2021-01-10T20:25:35.309704
| 2014-09-21T06:52:59
| 2014-09-21T06:52:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 786
|
r
|
DownloadFromGoogleDrive.R
|
dl_from_GoogleD <- function(key, localpath) {
  ## Download a shared Google Drive/Docs file to a local path.
  ##
  ## Arguments:
  ##   key       = Google document key (Note: file must be shareable!)
  ##   localpath = destination path for the downloaded file
  ##
  ## Side effects: writes the binary payload to `localpath` and emits a
  ## message. (Docs fix: the previous header described `output` and `format`
  ## arguments that do not exist.)
  if (!requireNamespace("RCurl", quietly = TRUE)) {
    stop("Package 'RCurl' is required.", call. = FALSE)
  }
  #cyberpath <- paste0("https://docs.google.com/file/d/", key, "/export?format=", format)
  cyberpath <- paste0("https://docs.google.com/file/d/", key)
  bin <- RCurl::getBinaryURL(cyberpath, ssl.verifypeer = FALSE)
  con <- file(localpath, open = "wb")
  # Ensure the connection is closed even if writeBin() fails.
  on.exit(close(con), add = TRUE)
  writeBin(bin, con)
  message(noquote(paste(localpath, "read into", getwd())))
}
# Example:
dl_from_GoogleD(key = "0B937jFpi7V9RN3QwOE9ZOWt3Q2c",
                localpath = "./Data/cameras.csv")
cameraData <- read.table("./Data/cameras.csv", sep = ",", header = TRUE)
|
48b7d269761d6648fdb46d6b77c0c8cc3668561c
|
4c928899f170bbd5db2e30eafcacb59aa283f204
|
/R/add_binom_gest.R
|
2201522cb5626541441d8f6643cc15aea4748f2b
|
[
"MIT"
] |
permissive
|
alexhallam/gest
|
efa2b7a6e2387885f0c5b37e4f0d0254f7ec2def
|
1be7ed995378a021f291ebeccc2b3319fde3b922
|
refs/heads/master
| 2021-01-09T17:46:36.787414
| 2020-03-19T00:42:37
| 2020-03-19T00:42:37
| 242,395,163
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,487
|
r
|
add_binom_gest.R
|
#' Add binomial G-Estimate
#'
#' @description adds an estimated shrinkage value obtained by G-Modeling
#'
#' @param tbl a table which *requires* two columns named exactly `x` and `n`:
#'   a column with the number of successes (x) and a column with totals (n).
#' @param x column with number of successes (must be named `x` in `tbl`)
#' @param n column with totals (must be named `n` in `tbl`)
#' @param cred_level level of credible interval to compute. For example
#'   and interval of .80 will return a 20th percentile lower interval and a
#'   80th percentile upper interval
#' @param theta_from start of parameter space
#' @param theta_to end of parameter space
#' @param theta_by increment of parameter space
#'
#' @return The original table with the following columns
#' \item{.gest_dist}{Full posterior distribution. Grain may be increased with the theta_* parameters}
#' \item{.raw}{Estimate (success / total)}
#' \item{.gest}{Posterior EB g-estimate}
#' \item{.lo}{Lower bound of credible interval for EB estimate}
#' \item{.hi}{Upper bound of credible interval for EB estimate}
#'
#' @importFrom dplyr mutate
#' @importFrom tibble tibble
#' @importFrom magrittr %>%
#' @importFrom rlang expr !!
#' @importFrom deconvolveR deconv
#' @importFrom purrr pmap map2_dbl map_dbl
#' @importFrom stats dbinom
#'
#' @export
#'
#' @examples
#'
#' library(tibble)
#' set.seed(2017)
#'
#' # simulate 200 random examples from a beta-binomial
#' obs <- 200
#' dat <- tibble(prob = rbeta(obs, 10, 50),
#'               n = round(rlnorm(obs, 4, 2)) + 1,
#'               x = rbinom(obs, n, prob))
#'
#' result <- add_binom_gest(dat, x, n)
#' result
add_binom_gest <- function(tbl, x, n,
                           cred_level = .80,
                           theta_from = 0.01,
                           theta_to = 0.99,
                           theta_by = 0.01
){
  # Quote the (hard-coded) column names for injection into mutate() below.
  n <- rlang::expr(n)
  x <- rlang::expr(x)

  # Discretised parameter space for the prior g(theta).
  tau <- seq(from = theta_from, to = theta_to, by = theta_by)
  data <- data.frame(n = tbl$n, x = tbl$x)
  result <- deconv(tau = tau, X = data, family = "Binomial", c0 = 1, pDegree = 6)
  theta <- result$stats[, 'theta']
  gTheta <- result$stats[, 'g']

  # Grid width used as delta_theta in the Riemann sums below.
  # (Bug fix: this was previously hard-coded to .01, which was only correct
  # for the default theta_by; the cumulative-mass thresholds were likewise
  # hard-coded to a total of 100. Behavior is unchanged at the defaults.)
  delta <- theta_by

  # Marginal likelihood f(x_k | n_k), approximated by a Riemann sum.
  f_alpha <- function(n_k, x_k) {
    sum(dbinom(x = x_k, size = n_k, prob = theta) * gTheta) * delta
  }
  # Posterior g(theta | x_k, n_k) evaluated on the theta grid.
  g_theta_hat <- function(n_k, x_k) {
    ghat <- gTheta * dbinom(x = x_k, size = n_k, prob = theta) / f_alpha(n_k, x_k)
    list_distribution <- tibble(theta = theta, ghat = ghat)
    return(list_distribution)
  }

  # Credible-interval probability levels
  # (e.g. 0.20 and 0.80 for cred_level = .80).
  alpha_lo <- 1 - cred_level
  alpha_hi <- 1 - alpha_lo

  # Posterior mode on the grid.
  get_mle <- function(.gest_dist){
    ghat_mle <- .gest_dist$theta[which(.gest_dist$ghat == max(.gest_dist$ghat))]
    return(ghat_mle)
  }
  # First grid point where the cumulative posterior mass reaches lo_val.
  get_lo <- function(.gest_dist, lo_val){
    ghat_lo <- .gest_dist$theta[which(cumsum(.gest_dist$ghat) * delta >= lo_val)[1]]
    return(ghat_lo)
  }
  # First grid point where the cumulative posterior mass reaches hi_val.
  get_hi <- function(.gest_dist, hi_val){
    ghat_hi <- .gest_dist$theta[which(cumsum(.gest_dist$ghat) * delta >= hi_val)[1]]
    return(ghat_hi)
  }

  df_density <- tbl %>%
    dplyr::mutate(.gest_dist = purrr::pmap(.l = list(tbl$n, tbl$x), .f = g_theta_hat)) %>%
    dplyr::mutate(.raw = purrr::map2_dbl(.x = !!x, .y = !!n, ~ .x/.y),
                  .gest = purrr::map_dbl(.gest_dist, get_mle),
                  .lo = purrr::map_dbl(.gest_dist, get_lo, alpha_lo),
                  .hi = purrr::map_dbl(.gest_dist, get_hi, alpha_hi)
    )
  return(df_density)
}
|
2ed4636606f24dcfa12d8db75ec367c8a9557d0b
|
14c981b309a99253d57596fdd00c391fb1aad0e2
|
/Prácticos/Práctico 2/Práctico 2.R
|
3c9e80830dc2b17ad5c1f1e088929de985e2a48c
|
[] |
no_license
|
daczarne/udelar_modelos_lineales
|
411ad1ac0b215cbe23d2db913797d3359e8fa729
|
b8d53156d7a3f37455426f5ad467dbab120b16a4
|
refs/heads/master
| 2023-06-29T15:44:38.099272
| 2021-08-02T23:06:12
| 2021-08-02T23:06:12
| 202,957,651
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,056
|
r
|
Práctico 2.R
|
####################
#### PRÁCTICO 2 ####
####################
library(pdftools)
#####################
#### EJERCICIO 5 ####
#####################
rm(list = ls())

# Data
y <- c(8, 12, 21, 31, 39, 58)
x <- c(0, 10, 20, 30, 40, 60)
n <- length(x)

# Slope estimate (beta 1 hat) from sample covariance and variance
Sxy <- cov(x, y)
Sx <- var(x)
beta1 <- Sxy / Sx

# Standard error of beta 1 hat via the design matrix
X <- cbind(rep(1, n), x)
XtX <- t(X) %*% X
XtXinv <- solve(XtX)
beta0 <- mean(y) - beta1 * mean(x)
# Fitted values (vectorised; replaces the previous element-wise loop)
yhat <- beta0 + beta1 * x
uhat <- y - yhat
sigmahat <- t(uhat) %*% uhat / (n - 2)
vb0 <- sigmahat * XtXinv[1, 1]
vb1 <- sigmahat * XtXinv[2, 2]
sb0 <- sqrt(vb0)
sb1 <- sqrt(vb1)

# 95% confidence interval for beta 1 (t with n - 2 = 4 df)
t <- qt((1 - 0.05 / 2), 4, lower.tail = TRUE)
liminf <- beta1 - t * sb1
limsup <- beta1 + t * sb1

# Check against lm()
summary(lm(y ~ x))
#####################
#### EJERCICIO 6 ####
#####################
rm(list = ls())

# Data
y <- c(60, 61, 62, 63, 65, 67, 68, 69, 70, 72, 71, 70)
x <- c(163, 163, 165, 166, 168, 169, 170, 170, 171, 172, 172, 174)
n <- length(x)

# OLS estimates via the normal equations
X <- cbind(rep(1, n), x)
beta <- solve(t(X) %*% X) %*% (t(X) %*% y)

# Residual variance (fitted values vectorised; replaces the loop)
yhat <- beta[1] + beta[2] * x
uhat <- y - yhat
sigma2hat <- as.numeric(t(uhat) %*% uhat / (n - 2))
sigmahat <- sqrt(sigma2hat)

# t-test for the significance of beta 1
# (mojibake in the printed messages repaired: "par?metro" -> "parámetro")
s2beta <- sigma2hat * solve(t(X) %*% X)
s2beta1 <- s2beta[2, 2]
e <- beta[2] / sqrt(s2beta1)
t <- qt((1 - 0.05 / 2), df = n - 2, lower.tail = TRUE)
if (abs(e) > t) {
  print("Rechazo H0 - No existe evidencia de que el parámetro no sea significativo")
} else {
  print("No Rechazo H0 - Existe evidencia de que el parámetro no es significativo")
}

# Check against lm()
summary(lm(y ~ x))
#####################
#### EJERCICIO 7 ####
#####################
rm(list = ls())

# Data
x <- c(-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5)
y <- c(1, 5, 4, 7, 10, 8, 9, 13, 14, 13, 18)
n <- length(y)
alpha <- 0.05

#### Parte a ####
# OLS estimates via the normal equations
X <- cbind(rep(1, n), x)
beta <- solve(t(X) %*% X) %*% (t(X) %*% y)
# Residual variance (fitted values vectorised; replaces the loop)
yhat <- beta[1] + beta[2] * x
uhat <- y - yhat
sigma2hat <- as.numeric(t(uhat) %*% uhat / (n - dim(beta)[1]))
sigmahat <- sqrt(sigma2hat)
# Variance of the estimators
s2beta <- sigma2hat * solve(t(X) %*% X)

#### Parte b ####
# t-test for the significance of beta 1
# (mojibake in the printed messages repaired: "par?metro" -> "parámetro")
s2beta2 <- s2beta[2, 2]
e <- beta[2] / sqrt(s2beta2)
t <- qt((1 - alpha / 2), df = n - 2, lower.tail = TRUE)
if (abs(e) > t) {
  print("Rechazo H0 - No existe evidencia de que el parámetro no sea significativo")
} else {
  print("No Rechazo H0 - Existe evidencia de que el parámetro no es significativo")
}

#### Parte c ####
# Confidence interval for E[y | x = 3]
x0 <- c(1, 3)
E_y_3 <- x0 %*% beta
(liminf_c <- E_y_3 - t * sqrt(sigma2hat * t(x0) %*% solve(t(X) %*% X) %*% x0))
(limsup_c <- E_y_3 + t * sqrt(sigma2hat * t(x0) %*% solve(t(X) %*% X) %*% x0))

#### Parte d ####
# Prediction interval for a new observation at x = 3
# (header fix: this section was mislabelled as a second "Parte c")
y_3 <- x0 %*% beta
(liminf_d <- y_3 - t * sqrt(sigma2hat * (1 + t(x0) %*% solve(t(X) %*% X) %*% x0)))
(limsup_d <- y_3 + t * sqrt(sigma2hat * (1 + t(x0) %*% solve(t(X) %*% X) %*% x0)))

#### Verificación ####
summary(lm(y ~ x))
#####################
#### EJERCICIO 8 ####
#####################
rm(list = ls())

# txt <- pdf_text("Práctico 2.pdf")
# cat(txt[2])

# Load the data
data <- read.csv("data8.csv", header = TRUE)
n <- dim(data)[1]
alpha <- 0.05

#### Parte a ####
# Coefficient estimates from lm()
beta <- summary(lm(Velocidad ~ Densidad, data = data))$coefficients[, 1]

#### Parte b ####
# Residual variance and standard errors of the coefficients
uhat <- summary(lm(Velocidad ~ Densidad, data = data))$residuals
sigma2 <- as.numeric(t(uhat) %*% uhat / (n - length(beta)))
sdbeta <- summary(lm(Velocidad ~ Densidad, data = data))$coefficients[, 2]

#### Parte c ####
# Confidence intervals for both coefficients
liminfb1 <- beta[1] - qt((1 - alpha / 2), df = n - length(beta)) * sdbeta[1]
limsupb1 <- beta[1] + qt((1 - alpha / 2), df = n - length(beta)) * sdbeta[1]
liminfb2 <- beta[2] - qt((1 - alpha / 2), df = n - length(beta)) * sdbeta[2]
limsupb2 <- beta[2] + qt((1 - alpha / 2), df = n - length(beta)) * sdbeta[2]

#### Parte d ####
# F-test of the regression; with one regressor the F critical value
# equals the squared t critical value.
anova(lm(Velocidad ~ Densidad, data = data))
qf((1 - alpha), df1 = 1, df2 = n - 2)
qt((1 - alpha / 2), df = n - 2)^2

#### Parte e ####
# 90% confidence interval for E[Velocidad | Densidad = 50]
x0 <- c(1, 50)
E_y_50 <- x0 %*% beta
X <- cbind(rep(1, n), data$Densidad)
t <- qt((1 - .1 / 2), df = n - 2)
(liminf_e <- E_y_50 - t * sqrt(sigma2 * t(x0) %*% solve(t(X) %*% X) %*% x0))
(limsup_e <- E_y_50 + t * sqrt(sigma2 * t(x0) %*% solve(t(X) %*% X) %*% x0))

#### Parte f ####
# Same analysis with sqrt(Velocidad) as the response
beta <- summary(lm(sqrt(Velocidad) ~ Densidad, data = data))$coefficients[, 1]
sigma2 <- summary(lm(sqrt(Velocidad) ~ Densidad, data = data))$sigma^2
sdbeta <- summary(lm(sqrt(Velocidad) ~ Densidad, data = data))$coefficients[, 2]
liminfb1 <- beta[1] - qt((1 - alpha / 2), df = n - length(beta)) * sdbeta[1]
limsupb1 <- beta[1] + qt((1 - alpha / 2), df = n - length(beta)) * sdbeta[1]
liminfb2 <- beta[2] - qt((1 - alpha / 2), df = n - length(beta)) * sdbeta[2]
limsupb2 <- beta[2] + qt((1 - alpha / 2), df = n - length(beta)) * sdbeta[2]
anova(lm(sqrt(Velocidad) ~ Densidad, data = data))
qf((1 - alpha), df1 = 1, df2 = n - 2)
qt((1 - alpha / 2), df = n - 2)^2
x0 <- c(1, sqrt(50))
E_y_50 <- x0 %*% beta
# NOTE(review): the model above regresses on Densidad (untransformed), yet
# x0 and X below use sqrt(Densidad) — these look inconsistent; confirm the
# intended model specification before trusting this interval.
X <- cbind(rep(1, n), sqrt(data$Densidad))
liminf_f <- E_y_50 - t * sqrt(sigma2 * t(x0) %*% solve(t(X) %*% X) %*% x0)
limsup_f <- E_y_50 + t * sqrt(sigma2 * t(x0) %*% solve(t(X) %*% X) %*% x0)

################################
#### FIN DE LA PROGRAMACIÓN ####
################################
|
2e647cb17614781e5a97a5cb9c0c3d987d8d206d
|
f8ea4e68549003c04b280ff56ee6c41b1f9b125b
|
/scripts/map_function.R
|
c16bfe469401ead5a8a6b5e6fb151d6ea8bbbacd
|
[] |
no_license
|
epahang/final-project-INFO498F
|
e51e89e317ed1ed0ef02d3ae96b62d2c6f94f121
|
77bb591eb504279e2934ebc6acc5be753d38f0d3
|
refs/heads/master
| 2021-01-10T12:37:50.527234
| 2017-06-03T00:53:29
| 2017-06-03T00:53:29
| 52,189,426
| 1
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,260
|
r
|
map_function.R
|
# Final Project group FB7
# Visualization for the distribution of the number of deaths per year

# Build a US choropleth of total deaths per state for one year and one
# cause of death.
#
# Arguments:
#   data  - data frame with (at least) columns YEAR, CAUSE_NAME, STATE and
#           DEATHS (DEATHS may contain NA or the placeholder string "x")
#   year  - year to display (matched against YEAR)
#   cause - cause of death to display (matched against CAUSE_NAME)
#
# Returns: a plotly choropleth object.
#
# NOTE(review): bare column names (DEATHS, code, STATE) inside plot_ly()
# rely on plotly's pre-4.0 non-standard evaluation; plotly >= 4.0 requires
# formulas such as z = ~DEATHS — confirm the plotly version in use.
build_map <- function(data, year, cause) {
  map_title <-
    paste("Distribution of deaths in", year, "caused by", cause)
  # Keep only the rows for the requested year and cause.
  new_data <- data %>%
    filter(YEAR == year, CAUSE_NAME == cause)
  # add new column with state abbreviations to use for the map
  new_data$code <- state.abb[match(new_data$STATE, state.name)]
  # Removes the rows with NA in it
  new_data <- new_data[complete.cases(new_data), ]
  # Removes the rows with "x" in it
  new_data <- new_data[new_data$DEATHS != "x", ]
  # give state boundaries a white border
  l <- list(color = toRGB("white"), width = 2)
  # specify some map projection/options
  g <- list(
    bgcolor = "#f8f8ff",
    scope = 'usa',
    projection = list(type = 'albers usa')
  )
  # plots map with the distribution of deaths in each state for a given year
  plot_ly(
    new_data,
    z = DEATHS,
    text = paste("There were", DEATHS, "total deaths in", STATE),
    type = 'choropleth',
    locations = code,
    locationmode = 'USA-states',
    color = "Set3",
    marker = list(line = l),
    colorbar = list(title = "Number of Deaths")
  ) %>%
    layout(title = map_title, geo = g) %>%
    return()
}
|
24bc017d38f4956c5449777df582a62d4277dee0
|
925738441839a562e4d74a5209ca279ddac7a59a
|
/results/dada2/scripts/learn_errors.R
|
254499a7f2c739bab6fc0d9a731fa3e23efa1211
|
[] |
no_license
|
paraslonic/assnake
|
23d168827530ad5363cde2081feb7cf232ad1d9c
|
42c9ee117e951ac70973542b5d19f1154b4d9e61
|
refs/heads/master
| 2020-06-02T13:58:13.805768
| 2019-10-31T11:21:41
| 2019-10-31T11:21:41
| 191,178,863
| 0
| 0
| null | 2019-06-10T17:21:56
| 2019-06-10T14:00:37
|
Python
|
UTF-8
|
R
| false
| false
| 403
|
r
|
learn_errors.R
|
# Learn dada2 error rates for one strand (R1 or R2) of a set of reads.
#
# Command-line arguments:
#   1: path to a tab-separated table with columns R1 and R2 (read file paths)
#   2: output path for the serialized error model (RDS)
#   3: strand to process, either "R1" or "R2"
args <- commandArgs(TRUE)
read_table_loc <- args[[1]]
out_loc <- args[[2]]
strand <- args[[3]]

library("dada2")

reads <- read.table(file = read_table_loc, sep = '\t', header = TRUE)

if (strand == 'R1') {
  err <- learnErrors(as.character(reads$R1), multithread = TRUE)
} else if (strand == 'R2') {
  err <- learnErrors(as.character(reads$R2), multithread = TRUE)
} else {
  # Fail fast with a clear message instead of "object 'err' not found"
  # at the saveRDS() call below.
  stop("Unknown strand '", strand, "': expected 'R1' or 'R2'.")
}

saveRDS(err, out_loc)
|
ff840c3a31b0f3ec8db795f8eae416b1b3750f67
|
ecc3f86ed2f437c34c817761799e1179f8bee275
|
/R/years.R
|
8ec00109110e6dab2a203848246be837af49bfcc
|
[] |
no_license
|
cran/relsurv
|
8e6b4821d99fda97e58ab1ac284cbd12e3c9cde1
|
20447b8198707f9cebc678ec567a02c57ea4101b
|
refs/heads/master
| 2023-01-10T22:20:36.163357
| 2022-12-22T12:30:02
| 2022-12-22T12:30:02
| 17,699,136
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 60,323
|
r
|
years.R
|
# Column-wise variances of a data frame (or list of columns).
#
# Numeric, logical and complex columns yield stats::var(); any other
# column type (character, factor, ...) yields NA.
#
# Arguments:
#   x     - a data frame (or list of equal-length columns)
#   na.rm - drop missing values before computing the variance
#
# Returns: a named vector of per-column variances.
colVars <- function(x, na.rm = FALSE){
  # Bug fix: the helper previously declared the self-referential default
  # `na.rm = na.rm`, which errors ("promise already under evaluation") if
  # the default is ever used. It now closes over colVars' own argument;
  # results are unchanged.
  f <- function(v) {
    if(is.numeric(v) || is.logical(v) || is.complex(v))
      stats::var(v, na.rm = na.rm)
    else NA
  }
  return(unlist(lapply(x, f)))
}
# Copied function from mstate:::NAfix.
mstateNAfix <- function (x, subst = -Inf)
{
spec <- max(x[!is.na(x)]) + 1
x <- c(spec, x)
while (any(is.na(x))) x[is.na(x)] <- x[(1:length(x))[is.na(x)] -
1]
x[x == spec] <- subst
x <- x[-1]
x
}
# Helper function:
nessie_spi <- function(formula = formula(data), data, ratetable = relsurv::slopop,
tis, starting.time, include.censoring=FALSE,
arg.example=FALSE, rmap){
data_orig <- data
call <- match.call()
if (!missing(rmap)) {
rmap <- substitute(rmap)
}
na.action <- NA
rform <- rformulate(formula, data, ratetable, na.action,
rmap)
data <- rform$data
data$Xs <- rep(1, nrow(data))
n_rows <- nrow(data)
# Fix demographic covariates:
if(starting.time == "left.truncated"){
rform$R[,"year"] <- rform$R[,"year"] - rform$R[,"age"]
rform$R[,"age"] <- 0
}
if(include.censoring){
# browser()
wh <- which(rform$status==1)
rform$Y[wh] <- max(rform$Y)
if(arg.example){
wh2 <- which(rform$status==1 & data$age==18262)
rform$Y[wh2] <- 1826
}
}
else{
rform$Y <- rep(max(rform$Y), length(rform$Y))
# status is not relevant in this case
}
out <- NULL
out$yi <- NULL
out$yidli <- NULL
l_tis <- length(tis)
temps <- lapply(1:n_rows, function(inx) {
temp <- exp.prep(rform$R[inx, , drop = FALSE], rform$Y[inx], rform$ratetable,
rform$status[inx], times = tis, fast = TRUE, cmp=FALSE,ys=data$start[inx])
s_pi <- exp(-cumsum(temp$yidli))
s_pi_helper <- which.min(temp$yidli==0)-1
if(s_pi_helper>1){ s_pi[1:s_pi_helper] <- 0}
if(include.censoring){ s_pi[(s_pi_helper+1):l_tis] <- pmin(s_pi[(s_pi_helper+1):l_tis],
temp$yi[(s_pi_helper+1):l_tis])}
c(s_pi, # s_pi
temp$yidli*s_pi) # l_pi * s_pi
})
temps2 <- do.call("cbind", temps)
temps2 <- rowSums(temps2)
out$yi <- temps2[1:(length(temps2)/2)]
out$yidli <- temps2[(length(temps2)/2+1):length(temps2)]
return(out)
}
# Copied scales::trans_new:
# scales_trans_new <- function (name, transform, inverse, breaks = extended_breaks(),
# minor_breaks = regular_minor_breaks(), format = format_format(),
# domain = c(-Inf, Inf))
# {
# if (is.character(transform))
# transform <- match.fun(transform)
# if (is.character(inverse))
# inverse <- match.fun(inverse)
# structure(list(name = name, transform = transform, inverse = inverse,
# breaks = breaks, minor_breaks = minor_breaks, format = format,
# domain = domain), class = "trans")
# }
#' Compute one of the life years measures
#'
#' Provides an estimate for one of the following measures: years lost (Andersen, 2013), years lost/saved (Andersen, 2017), or
#' life years difference (Manevski, Ruzic Gorenjec, Andersen, Pohar Perme, 2022).
#'
#' The life years difference (\code{measure='yd'}) is taken by default. If other
#' measures are of interest, use the \code{measure} argument.
#'
#' The follow-up time must be specified in days. The \code{ratetable}
#' being used may have different variable names and formats than the user's
#' data set, this is dealt with the \code{rmap} argument. For example, if
#' age is in years in the data but in days in the \code{ratetable} object,
#' age=age*365.241 should be used. The calendar year can be in any date format
#' (date, Date and POSIXt are allowed), the date formats in the
#' \code{ratetable} and in the data may differ.
#'
#' Numerical integration is performed; its precision is set with the
#' argument \code{precision}, which defaults to 30-day intervals for
#' integration. For higher accuracy take a smaller value (e.g. precision=1 makes
#' the integration on a daily basis).
#'
#' The observed curves are reported at event and censoring times. The
#' population curves are reported at all times used for the numerical integration.
#' Note that for the years lost (Andersen, 2013) measure, only the excess absolute risk is reported.
#'
#' @param formula a formula object, with the response as a \code{Surv} object
#' on the left of a \code{~} operator, and, \code{~1} specified on the right.
#'
#' NOTE: The follow-up time must be in days.
#' @param data a data.frame in which to interpret the variables named in the
#' \code{formula}.
#' @param measure choose which measure is used: 'yd' (life years difference; Manevski, Ruzic Gorenjec, Andersen, Pohar Perme, 2022), 'yl2017' (years lost/saved; Andersen 2017),
#' 'yl2013' (years lost/saved; Andersen 2013).
#' @param ratetable a table of event rates, organized as a \code{ratetable}
#' object, such as \code{slopop}.
#' @param rmap an optional list to be used if the variables are not organized
#' and named in the same way as in the \code{ratetable} object. See details
#' below.
#' @param var.estimator Choose the estimator for the variance ('none', 'bootstrap', 'greenwood'). Default is 'none'.
#' The 'greenwood' option is possible only for \code{measure='yd'}.
#' @param B if \code{var.estimator} is 'bootstrap'. The number of bootstrap replications. Default is 100.
#' @param precision precision for numerical integration of the population curve. Default is 30 (days).
#' The value may be decreased to get a
#' higher precision or increased to achieve a faster calculation.
#' @param add.times specific times at which the curves should be reported.
#' @param na.action a missing-data filter function. Default is \code{na.omit}.
#' @param conf.int the confidence level for a two-sided confidence interval. Default is 0.95.
#' @param timefix the timefix argument in survival::survfit.formula. Default is FALSE.
#' @param is.boot if TRUE, the function \code{years} has been called during a bootstrap replication.
#' @param first.boot if TRUE, this is the first bootstrap replication.
#' @return A list containing the years measure, the observed and population curves (or the excess curve for Andersen 2013).
#' The values are given as separate data.frames through time. Times are given in days, all areas are given in years.
#' For \code{measure='yl2017'} values are reported only at the last time point.
#' Functions \code{plot_f} and \code{plot_years} can be then used for plotting.
#' @seealso \code{\link{plot_f}}, \code{\link{plot_years}}
#' @examples
#'
#' library(relsurv)
#' # Estimate the life years difference for the rdata dataset.
#' mod <- years(Surv(time, cens)~1, data=rdata, measure='yd', ratetable=slopop,
#' rmap=list(age=age*365.241), var.estimator = 'none')
#' # Plot the absolute risk (observed and population curve):
#' plot_f(mod)
#' # Plot the life years difference estimate:
#' plot_years(mod, conf.int=FALSE)
years <- function(
formula=formula(data),
data,
measure=c('yd', 'yl2017', 'yl2013'),
# estimator=c("F_P_final"),#, "F_P_Spi", "F_P_Spi2", "F_P", "F_P2", "all"),
ratetable=relsurv::slopop,
rmap,
var.estimator=c('none', 'bootstrap', 'greenwood'),
B=100,
precision=30,
add.times,
na.action=stats::na.omit,
conf.int=0.95,
timefix=FALSE,
# admin.cens,
# cause.val,
is.boot=FALSE,
first.boot=FALSE
# ,estimator.observed='Kaplan-Meier'
){
# OLD ARGUMENTS:
# F_P_Spi: Tako kot F_P_final, ignorira censoring. Ali pa vzame samo admin cens
# F_P_Spi2: Vzame ves censoring
# @param cause.val for competing risks, to be added.
# @param admin.cens if a Date is supplied, administrative censoring is taken into account at that time
# in the population curve. Works only if there's late entry, e.g. if the formula is \code{Surv(start,stop,event)~1}.
############ #
# PREPARE OBJECTS:
############ #
estimator=c("F_P_final") # #' @param estimator which estimator should be used for calculating
# estimator <- match.arg(estimator)
arg.example <- FALSE # @param arg.example temporary argument, used for checking additionalities.
Call <- match.call()
if(!missing(rmap) & !is.boot & !first.boot) rmap <- substitute(rmap)
measure <- match.arg(measure)
var.estimator <- match.arg(var.estimator)
if(var.estimator=='bootstrap'){
bootstrap <- TRUE
} else if(var.estimator %in% c('none', 'greenwood')){
bootstrap <- FALSE
} else{
stop('Incorrect value provided in argument var.estimator.')
}
if(!is.data.frame(data)) stop('Argument data is not a data.frame object.')
data <- as.data.frame(data)
out <- NULL
late.values <- FALSE
# These were arguments. To be deleted?
exact.hazards <- FALSE # calculate hazards on a daily basis (to be checked)
find.cond.time <- FALSE # if TRUE, return time at which there are at least 5 individuals in the at-risk set.
# if(!missing(cause.val)){
# data$status <- ifelse(data$cause == cause.val, 1, 0)
# # Remove NAs:
# eniNAs <- which(is.na(data$status))
# if(length(eniNAs)>0) data <- data[-eniNAs,]
# }
# data$age <- round(data$age*365.241)
# data$stop <- round(data$stop*365.241)
# If Surv(start,stop, event) (possibly + mstate)
if_start_stop <- length(as.character(formula[[2]])) %in% c(4,5)
if(if_start_stop){
start_col <- as.character(formula[[2]])[2]
stop_col <- as.character(formula[[2]])[3]
status_col <- as.character(formula[[2]])[4]
starting_age <- as.vector(as.matrix(data[, start_col]))
} else{
stop_col <- as.character(formula[[2]])[2]
if(!(stop_col %in% colnames(data))){
stop(paste0('Instead of \'', stop_col, '\', please use a column from the data in the formula.'))
}
}
# Check if no. at risk falls to zero at some point:
if(if_start_stop){
# Prepare at-risk matrix:
find_tajms <- unique(sort(c(data[,start_col], data[,stop_col])))
mat <- lapply(1:nrow(data), function(x) ifelse((data[x, start_col] < find_tajms) & (find_tajms <= data[x, stop_col]), 1, 0))
mat2 <- matrix(unlist(mat), nrow = nrow(data), byrow = TRUE)
# The sum of the individual at-risk processes:
yi_left <- colSums(mat2)
# If there's an empty at-risk at a later timepoint, censor the data:
wh_yi <- which(yi_left==0)
if(length(wh_yi)>1){
if((!is.boot) & (!first.boot)){
warning(paste0('In the time interval ', find_tajms[wh_yi[2]-1], '-', find_tajms[wh_yi[2]],
' the at-risk sample is empty (nobody is followed). Survival cannot be estimated in this time interval.',
' The data is censored at time ', find_tajms[wh_yi[2]-1], '.'))
}
# Censor data:
data <- data[data[,start_col] <= find_tajms[wh_yi[2]-1], ]
wh_cen <- which(data[, stop_col] > find_tajms[wh_yi[2]-1])
data[wh_cen, stop_col] <- find_tajms[wh_yi[2]-1]
data[wh_cen, status_col] <- 0
if(!missing(add.times)){
if(any(add.times > find_tajms[wh_yi[2]-1])) add.times <- add.times[add.times<=find_tajms[wh_yi[2]-1]]
}
}
rm(mat,mat2)
}
data_orig <- data
# if(starting.time=="left.truncated"){
# if(!missing(admin.cens)){
# if(!inherits(admin.cens, 'Date')) warning('Object of class Date should be supplied to admin.cens.')
# end_date <- data$year+(data$stop-data$age)
# if(any(end_date > admin.cens)) warning('There are events that occur after the date of administrative censoring. Please check the values in arguments data and admin.cens.')
# id_admin_cens <- which(admin.cens==end_date)
# }
# }
if(if_start_stop){
starting.time <- 'left.truncated'
} else{
starting.time <- 'zero'
}
# Starting age
starting_age <- rep(0,nrow(data))
if(if_start_stop){
starting_age <- as.vector(as.matrix(data[, start_col]))
}
starting_age <- as.numeric(starting_age)
############ #
# YEARS ON DATA - GENERAL:
############ #
surv_obj <- as.character(formula[[2]])
if(missing(formula)){
stop('Missing formula argument value.')
} else{
if('mstate' %in% surv_obj){
juh <- 1:nrow(data)
mod <- survival::survfit.formula(as.formula(Reduce(paste, deparse(formula))), data=data, timefix=timefix, id = juh, na.action=na.action)
} else{
mod <- survival::survfit.formula(formula, data=data, timefix=timefix, na.action=na.action)
}
}
if('mstate' %in% surv_obj){
surv_obj_new <- paste0(surv_obj[1], '(', surv_obj[2], ',', surv_obj[3])
if(length(surv_obj)==5){
surv_obj_new <- paste0(surv_obj_new, ',', surv_obj[4], ')')
} else{
surv_obj_new <- paste0(surv_obj_new, ')')
}
formula <- paste0(surv_obj_new, '~1')
}
status_obj <- surv_obj[length(surv_obj)]
# if(!missing(cause.val)){
# mod$n.risk <- mod$n.risk[,1]
# mod$n.event <- mod$n.event[,cause.val+1]
# mod$surv <- 1-mod$pstate[,cause.val+1]
# mod$std.err <- mod$std.err[,cause.val+1]
# mod$cumhaz <- mod$cumhaz[,cause.val]
# }
if(!missing(add.times)){
mod_sum <- summary(mod, times = sort(unique(c(mod$time, add.times))))
if(any(!(add.times %in% mod_sum$time))){
if(!is.boot){
if(!first.boot){
warning('Some values in add.times are after the last follow-up time. All measures are extrapolated up to these times. Please consider removing them.')
}
late.values <- TRUE
miss_tajms <- add.times[!(add.times %in% mod_sum$time)]
mod_sum$time <- c(mod_sum$time, miss_tajms)
mod_sum$n.risk <- c(mod_sum$n.risk, rep(mod_sum$n.risk[length(mod_sum$n.risk)], length(miss_tajms)))
mod_sum$n.event <- c(mod_sum$n.event, rep(0, length(miss_tajms)))
mod_sum$surv <- c(mod_sum$surv, rep(mod_sum$surv[length(mod_sum$surv)], length(miss_tajms)))
mod_sum$cumhaz <- c(mod_sum$cumhaz, rep(mod_sum$cumhaz[length(mod_sum$cumhaz)], length(miss_tajms)))
# First fix std.err:
if(is.nan(mod_sum$std.err[length(mod_sum$std.err)])){
mod_sum$std.err[length(mod_sum$std.err)] <- mod_sum$std.err[length(mod_sum$std.err) - 1]
}
mod_sum$std.err <- c(mod_sum$std.err, rep(mod_sum$std.err[length(mod_sum$std.err)], length(miss_tajms)))
}
}
mod$time <- mod_sum$time
mod$n.risk <- mod_sum$n.risk
mod$n.event <- mod_sum$n.event
mod$surv <- mod_sum$surv
mod$std.err <- mod_sum$std.err
mod$cumhaz <- mod_sum$cumhaz
}
if(find.cond.time) return(mod$time[which.min(mod$n.risk<5)])
# Calculate AUC:
if(length(mod$time)>1){
if(if_start_stop){
survs <- c(1, mod$surv[1:(length(mod$surv)-1)])
t_diff <- diff(c(mod$time[1], mod$time))
} else{
survs <- mod$surv
t_diff <- diff(c(0, mod$time))
}
auc_data <- sum(t_diff*(1 - survs))
auc_data_vec <- cumsum(t_diff*(1 - survs))
} else{
auc_data <- mod$time*mod$surv
auc_data_vec <- auc_data
}
out$F_data <- 1-mod$surv
out$auc_data <- auc_data/365.241
out$auc_data_vec <- auc_data_vec/365.241
# Exact hazards:
if(exact.hazards){
mod$time <- seq(min(mod$time), max(mod$time), by=1)
mod$surv <- exp(-cumsum(rep(ratetable[1,1,1], max(mod$time)-min(mod$time)+1)))
out$F_data <- 1-exp(-cumsum(c(0, rep(ratetable[1,1,1], max(mod$time)-min(mod$time)))))
out$auc_data <- sum(out$F_data)/365.241
}
############ #
# SEPARATE YEARS FOR EVERY MEASURE:
############ #
if(measure %in% c('yl2017', 'yl2013')){
# YL_P preparation:
data_yi <- data
rform <- rformulate(formula, data, ratetable, na.action=na.action, rmap = rmap)
data <- rform$data
if(if_start_stop){
if(!(start_col %in% colnames(data))){
data[,start_col] <- data_orig[, start_col]
}
}
# Check covariates:
p <- rform$m
if (p > 0) stop("There shouldn't be any covariates in the formula. This function gives non-parametric estimates of the hazards.")
else data$Xs <- rep(1, nrow(data)) #if no covariates, just put 1
out_n <- table(data$Xs) #table of strata
out$time <- out$haz.excess <- out$haz.pop <- out$std.err <- out$strata <- NULL
kt <- 1 # the only stratum
inx <- which(data$Xs == names(out_n)[kt]) #individuals within this stratum
# tis <- sort(unique(rform$Y[inx])) #unique times
if(!if_start_stop){
tis <- rform$Y[inx] #unique times
tis_seq <- seq(0, max(rform$Y[inx]), precision)
} else{
tis <- sort(unique(c(rform$Y[inx], data[, start_col]))) #unique times
tis_seq <- seq(min(data[, start_col]), max(rform$Y[inx], data[, start_col]), precision)
}
if(!is.boot){
tis <- sort(unique(c(tis, tis_seq)))
}
if(!missing(add.times)){
tis <- sort(unique(c(tis, add.times)))
}
ltis <- length(tis)
# Fix demographic covariates:
if(if_start_stop){
rform$R[,"year"] <- rform$R[,"year"] - rform$R[,"age"]
rform$R[,"age"] <- 0
}
if(measure == 'yl2017'){
# YL_O (used only for yl2017):
if(if_start_stop){
it_auc <- rep(NA, nrow(data_orig))
mod_sum <- summary(mod, times=tis) # unique(sort(c(data_orig[,start_col], data_orig[,stop_col])))
lsurv <- length(mod_sum$surv)
val_mat <- matrix(0, nrow=nrow(data_orig), ncol=lsurv)
for(it in 1:nrow(data_orig)){
it_wh <- which(data_orig[it, start_col] == mod_sum$time)
it_surv <- mod_sum$surv[it_wh:lsurv]/mod_sum$surv[it_wh]
it_auc[it] <- sum(c(0, diff(mod_sum$time[it_wh:lsurv]))*(1 - it_surv))/365.241
val_mat[it, it_wh:lsurv] <- cumsum(c(0, diff(mod_sum$time[it_wh:lsurv]))*(1 - it_surv))/365.241
}
# spodaj <- mod_sum$n.risk + cumsum(mod_sum$n.event) + cumsum(mod_sum$n.censor)
YL_O_vec <- colMeans(val_mat) # colSums(val_mat)/spodaj
YL_O <- mean(it_auc)
F_O_time <- mod_sum$time
F_O_ext <- data.frame(time=F_O_time, area=YL_O_vec)
# Subset:
F_O_ext2 <- subset(F_O_ext, time %in% mod$time)
F_O_time <- F_O_ext2$time
YL_O_vec <- F_O_ext2$area
} else{
YL_O_vec <- out$auc_data_vec
YL_O <- out$auc_data
F_O_time <- mod$time
if(!(0 %in% F_O_time)){
F_O_time <- c(0, F_O_time)
YL_O_vec <- c(0, YL_O_vec)
}
# Prepare extended F_O object:
if(0 %in% mod$time){
F_O_temp <- data.frame(time=mod$time, surv=mod$surv)
} else{
F_O_temp <- data.frame(time=c(0, mod$time), surv=c(1, mod$surv))
}
F_O_ext <- data.frame(time=tis)
F_O_ext <- merge(F_O_ext, F_O_temp, by='time', all.x=TRUE)
F_O_ext$surv <- mstateNAfix(F_O_ext$surv, 0)
tis_diff <- diff(c(0, F_O_ext$time))
F_O_ext$area <- cumsum(tis_diff*(1 - F_O_ext$surv))/365.241
F_O_ext <- F_O_ext[,c('time', 'area')]
}
F_O <- data.frame(time=F_O_time, area=YL_O_vec)
###
# YL_P continue:
it_auc_P <- rep(NA, nrow(data))
it_auc_P_mat <- matrix(0, nrow=nrow(data), ncol=ltis)
for(it in 1:nrow(data)){
temp <- exp.prep(rform$R[it,,drop=FALSE],max(rform$Y),rform$ratetable,rform$status[it],times=tis,fast=FALSE, cmp=FALSE, ys=starting_age[it], netweiDM = FALSE)
if(if_start_stop){
it_wh <- which(data[it, start_col] == tis)
hazs <- temp$yidli[it_wh:ltis]
hazs[1] <- 0
cumhazs <- cumsum(hazs)
F_P <- 1 - exp(-cumhazs)
it_auc_P[it] <- sum(c(tis[it_wh], diff(tis[it_wh:ltis]))*c(0, F_P[1:(length(F_P)-1)]))/365.241
it_auc_P_mat[it,it_wh:ltis] <- cumsum(c(0, diff(tis[it_wh:ltis]))*c(0, F_P[1:(length(F_P)-1)]))/365.241
} else{
# it_wh <- which(data$age[it] == tis)
hazs <- temp$yidli[1:ltis]
hazs[1] <- 0
cumhazs <- cumsum(hazs)
F_P <- 1 - exp(-cumhazs)
it_auc_P[it] <- sum(c(0, diff(tis))*c(0, F_P[1:(length(F_P)-1)]))/365.241
it_auc_P_mat[it,] <- cumsum(c(0, diff(tis))*c(0, F_P[1:(length(F_P)-1)]))/365.241
}
}
YL_P <- mean(it_auc_P)
F_P <- data.frame(time=tis, area=colMeans(it_auc_P_mat))
yd_curve <- data.frame(time=tis, est=F_O_ext$area - F_P$area)
# Bootstrap:
if(bootstrap){
data_b <- data_orig
data_b$id <- 1:nrow(data_b)
yl_boot <- ylboot(theta=ylboot.iter, data=data_b, id="id",
B=B, verbose=0, #all_times = all_times,
ratetable=ratetable#, add.times=add.times
, starting.time, estimator, precision,
add.times = add.times,
formula = formula,
rmap = rmap, measure=measure
)
if(ncol(yl_boot[[2]])>nrow(F_O)){
varsincol <- colVars(yl_boot[[2]], na.rm=TRUE)^(1/2)
varsincol_df <- data.frame(time=yl_boot[[4]], area.se=varsincol)
varsincol_df <- varsincol_df[varsincol_df$time %in% F_O$time,]
F_O$area.se <- varsincol_df$area.se
} else{
F_O$area.se <- colVars(yl_boot[[2]], na.rm=TRUE)^(1/2)
}
F_P$area.se <- colVars(yl_boot[[3]], na.rm=TRUE)^(1/2)
yl_boot <- as.data.frame(t(yl_boot[[1]]))
yd_curve$est.se <- (colVars(yl_boot, na.rm=TRUE))^(1/2)
}
# Add CI:
if((!is.boot) & (!first.boot)){
if(!is.null(yd_curve$est.se)){
yd_curve$lower <- yd_curve$est - yd_curve$est.se*stats::qnorm(0.5+conf.int/2)
yd_curve$upper <- yd_curve$est + yd_curve$est.se*stats::qnorm(0.5+conf.int/2)
}
}
# Values to be reported:
if((!is.boot) & (!first.boot)){
if(if_start_stop){
# Report only at last time point - the values until this time are not suitable to report:
out <- list(years=utils::tail(yd_curve,1), F_O=utils::tail(F_O,1), F_P=utils::tail(F_P,1), measure=measure)
} else{
# Report full measures:
out <- list(years=yd_curve, F_O=F_O, F_P=F_P, measure=measure)
}
} else{
out <- list(years=yd_curve, F_O=F_O, F_P=F_P, measure=measure)
}
return(out)
} else{ # measure == 'yl2013'
temp <- exp.prep(rform$R[,,drop=FALSE],rform$Y[inx],rform$ratetable,rform$status,times=tis, fast=TRUE, cmp=FALSE, ys=starting_age)
temp$yi[temp$yi==0] <- Inf
# Calculate measures:
haz.pop <- temp$yidli/temp$yi
mod_tis <- summary(mod, times = tis)
F_E <- cumsum(mod_tis$surv*(mod_tis$n.event/mod_tis$n.risk - haz.pop))
ltis <- length(tis)
# To be checked, doesn't work ok
# # Var as in Pavlic2018:
# F_E_st <- sapply(1:ltis, function(s){
# (sum(mod_tis$surv[s:ltis]*(mod_tis$n.event[s:ltis]/mod_tis$n.risk[s:ltis] - haz.pop[s:ltis]))/mod_tis$surv[s]) # *c(0, diff(tis[s:ltis])) /365.241
# })
# # Klemnova:
# F_Ese <- (cumsum((mod_tis$surv)^2*(1 - F_E_st)^2*((mod_tis$n.event)/(mod_tis$n.risk^2))*c(0, diff(tis)))/365.241)^(1/2)
# surv_int <- rev(cumsum(rev(c(0, diff(tis))*c(1, mod_tis$surv[1:(length(mod_tis$surv)-1)])))/365.241)
#
# # Moja:
# F_E_int <- rev(cumsum(rev(c(0, diff(tis))*c(0, F_E[1:(length(F_E)-1)])))/365.241)
# F_Ese <- (cumsum((surv_int)^2*(1 - F_E_st)^2*((mod_tis$n.event)/(mod_tis$n.risk^2))*c(0, diff(tis)))/365.241)^(1/2)
#
# # Observed:
# F_Ese <- (cumsum(surv_int^2*((mod_tis$n.event)/(mod_tis$n.risk^2))*c(0, diff(tis)))/365.241)^(1/2)
#
# # Predlog glede na Andersen 2013:
# F_Ese <- (cumsum((surv_int^2*(mod_tis$n.event - temp$yidli) + F_E_int^2*temp$yidli)/(mod_tis$n.risk^2)*c(0, diff(tis)))/365.241)^(1/2)
# Calculate measures:
YL <- cumsum(F_E*c(0, diff(tis)))/365.241
F_E_area <- cumsum(c(0, diff(tis))*c(0, F_E[1:(length(F_E)-1)]))/365.241
F_E_df <- data.frame(time=tis, prob=F_E, area=F_E_area) # , prob.se=F_Ese
yd_curve <- data.frame(time=tis, est=YL)
# Bootstrap:
if(bootstrap){
data_b <- data_orig
data_b$id <- 1:nrow(data_b)
yl_boot <- ylboot(theta=ylboot.iter, data=data_b, id="id",
B=B, verbose=0, #all_times = all_times,
ratetable=ratetable#, add.times=add.times
, starting.time, estimator, precision,
add.times = add.times,
formula = formula,
rmap = rmap, measure=measure
)
# Calculate area.se:
area.se <- yl_boot[[2]]
for(itar in 1:nrow(yl_boot[[2]])){
prob_tmp <- as.vector(as.matrix(yl_boot[[2]][itar,]))
area_tmp <- cumsum(c(0, diff(tis))*c(0, prob_tmp[1:(length(prob_tmp)-1)]))/365.241
area.se[itar,] <- area_tmp
}
area.se <- as.vector(colVars(area.se, na.rm=TRUE))
F_E_df$prob.se <- (colVars(yl_boot[[2]], na.rm=TRUE))^(1/2)
F_E_df$area.se <- area.se
yl_boot <- as.data.frame(t(yl_boot[[1]]))
yd_curve$est.se <- (colVars(yl_boot, na.rm=TRUE))^(1/2)
}
if((!is.boot) & (!first.boot)){
if(!is.null(yd_curve$est.se)){
yd_curve$lower <- yd_curve$est - yd_curve$est.se*stats::qnorm(0.5+conf.int/2)
yd_curve$upper <- yd_curve$est + yd_curve$est.se*stats::qnorm(0.5+conf.int/2)
}
}
out <- list(years=yd_curve, F_E=F_E_df, measure=measure)
return(out)
}
} else{ # measure == 'yd'
################################################### #
# CIF on population:
data_yi <- data
rform <- rformulate(formula, data, ratetable, na.action=na.action, rmap = rmap)
data <- rform$data
if(if_start_stop){
if(!(start_col %in% colnames(data))){
data[,start_col] <- data_orig[, start_col]
}
}
# Check covariates:
p <- rform$m
if (p > 0) stop("There shouldn't be any covariates in the formula. This function gives non-parametric estimates of the hazards.")
else data$Xs <- rep(1, nrow(data)) #if no covariates, just put 1
out_n <- table(data$Xs) #table of strata
out$time <- out$haz.excess <- out$haz.pop <- out$std.err <- out$strata <- NULL
kt <- 1 # the only stratum
inx <- which(data$Xs == names(out_n)[kt]) #individuals within this stratum
if(!if_start_stop) tis <- sort(unique(c(rform$Y[inx], seq(0, max(rform$Y[inx]), precision)))) #unique times
else tis <- sort(unique(c(rform$Y[inx], data[, start_col], seq(min(data[, start_col]), max(rform$Y[inx], data[, start_col]), precision)))) #unique times
if(!missing(add.times)){
tis <- sort(unique(c(tis, add.times)))
}
# Fix demographic covariates:
if(if_start_stop){
rform$R[,"year"] <- rform$R[,"year"] - rform$R[,"age"]
rform$R[,"age"] <- 0
}
### #
# Greenwood Variance of area (not F):
# First prepare objects:
mod_gw <- summary(mod, times = tis)
gw_df <- data.frame(time=mod_gw$time, surv=mod_gw$surv, n.risk=mod_gw$n.risk, n.event=mod_gw$n.event)
# Then calculate:
times_all2 <- c(0, diff(gw_df$time))/365.241
surv_all <- c(1, gw_df$surv[1:(length(gw_df$surv)-1)])
auc_all <- cumsum(times_all2*surv_all)
area_var <- sapply(1:length(auc_all), function(x) {
numer <- gw_df$n.risk[1:x]*(gw_df$n.risk[1:x] - gw_df$n.event[1:x])
numer[numer==0] <- Inf
sum(((auc_all[x] - auc_all[1:x])^2*gw_df$n.event[1:x])/numer)
})
if(is.nan(area_var[length(area_var)])){
area_var[length(area_var)] <- area_var[length(area_var)-1]
}
### #
if(estimator=='F_P' | estimator=="all"){
# Prepare at-risk matrix:
# browser()
# mat <- lapply(1:nrow(data), function(x) ifelse((data$start[x] < tis) & (tis <= data$Y[x]), 1, NA))
# mat2 <- matrix(unlist(mat), nrow = nrow(data_yi), byrow = TRUE)
# # The sum of the individual at-risk processes:
# yi_left <- colSums(mat2)
# yi_left[yi_left == 0] <- Inf
#
# mat3 <- lapply(1:nrow(data), function(x) data$age[x] + c(0, diff(tis)))
if(any(rform$Y[inx]<=starting_age)) browser()
temp <- exp.prep(rform$R[inx,,drop=FALSE],rform$Y[inx],rform$ratetable,rform$status[inx],times=tis,fast=TRUE, cmp=FALSE, ys=starting_age)
# Fix at-risk process, if needed:
temp$yi[temp$yi==0] <- Inf
out$time <- c(out$time, tis) #add times
# Calculate hazards:
haz.pop <- temp$yidli/temp$yi
out$haz.pop <- c(out$haz.pop,haz.pop)
out$cum.haz.pop <- cumsum(out$haz.pop)
out$F_P <- 1-exp(-out$cum.haz.pop)
out$auc_pop <- sum(c(tis[1], diff(tis))*c(0, out$F_P[1:(length(out$F_P)-1)]))/365.241
}
data_spi2 <- data
if(estimator=='F_P_Spi2' | estimator=="all"){
if(any(data_spi2$start>=data_spi2$Y)) browser()
# Take into account censoring:
exp.surv2 <- nessie_spi(Surv(start, Y, stat)~1, data=data_spi2, ratetable=ratetable,
tis=tis, starting.time=starting.time, include.censoring = TRUE,
arg.example)
out$haz.pop.spi2 <- exp.surv2$yidli/exp.surv2$yi
out$cum.haz.pop.spi2 <- cumsum(out$haz.pop.spi2)
out$F_P_Spi2 <- 1-exp(-out$cum.haz.pop.spi2)
out$auc_pop_Spi2 <- sum(c(tis[1], diff(tis))*c(0, out$F_P_Spi2[1:(length(out$F_P_Spi2)-1)]))/365.241
}
if(estimator=='F_P_Spi' | estimator=="all"){
if(TRUE){ # (!missing(admin.cens)) - tega nimamo vec
data_spi2$stat <- 1
# data_spi2$stat[id_admin_cens] <- 0 # - tole ni bilo zakomentirano, ko smo imeli admin.cens
exp.surv <- nessie_spi(Surv(start, Y, stat)~1, data=data_spi2, ratetable=ratetable,
tis=tis, starting.time=starting.time, include.censoring = TRUE,
arg.example)
} else{
# Don't take into account censoring:
exp.surv <- nessie_spi(Surv(start, Y, stat)~1, data=data_spi2, ratetable=ratetable,
tis=tis, starting.time=starting.time, include.censoring = FALSE,
arg.example)
}
out$haz.pop.spi <- exp.surv$yidli/exp.surv$yi
out$cum.haz.pop.spi <- cumsum(out$haz.pop.spi)
out$F_P_Spi <- 1-exp(-out$cum.haz.pop.spi)
out$auc_pop_Spi <- sum(c(tis[1], diff(tis))*c(0, out$F_P_Spi[1:(length(out$F_P_Spi)-1)]))/365.241
}
if(estimator=='F_P_final'){
# Shift all to the end:
if(if_start_stop) data_yi[,stop_col] <- max(data_yi[,stop_col])
rform2 <- rform
rform <- rformulate(formula, data_yi, ratetable, na.action=na.action, rmap = rmap)
# Shift all to the end:
if(!if_start_stop){
rform$Y <- rep(max(rform$Y), length(rform$Y))
rform$data[,"Y"] <- rform$Y
}
data <- rform$data
if(if_start_stop){
if(!(start_col %in% colnames(data))){
data[,start_col] <- data_orig[, start_col]
}
}
# Check covariates:
p <- rform$m
if (p > 0) stop("There shouldn't be any covariates in the formula. This function gives non-parametric estimates of the hazards.")
else data$Xs <- rep(1, nrow(data)) #if no covariates, just put 1
out$haz.pop2 <- NULL
kt <- 1 # the only stratum
inx <- which(data$Xs == names(out_n)[kt]) #individuals within this stratum
# Fix demographic covariates:
if(if_start_stop){
rform$R[,"year"] <- rform$R[,"year"] - rform$R[,"age"]
rform$R[,"age"] <- 0
}
if(any(starting_age>=rform$Y[inx])) browser()
temp <- exp.prep(rform$R[inx,,drop=FALSE],rform$Y[inx],rform$ratetable,rform$status[inx],times=tis,fast=FALSE, cmp=FALSE, ys=starting_age, netweiDM = TRUE)
temp$sidliD[1] <- 0
# temp$sisD[1] <- 1
temp$sisD[temp$sisD==0] <- Inf
haz.pop2 <- temp$sidliD/temp$sisD
out$haz.pop2 <- c(out$haz.pop2, haz.pop2)
out$cum.haz.pop2 <- cumsum(out$haz.pop2)
out$F_P2 <- 1-exp(-out$cum.haz.pop2)
out$auc_pop2 <- sum(c(tis[1], diff(tis))*c(0, out$F_P2[1:(length(out$F_P2)-1)]))/365.241
out$sidli <- temp$sidli
out$sis <- temp$sis
# DODATEK:
haz.pop.ves.cas <- temp$sidli
haz.pop.ves.cas[1] <- 0
haz.pop.ves.cas <- haz.pop.ves.cas/temp$sis
out$cum.haz.pop.ves.cas <- cumsum(haz.pop.ves.cas)
out$F_P_ves_cas <- 1 - exp(-out$cum.haz.pop.ves.cas)
out$auc_pop_ves_cas <- sum(c(tis[1], diff(tis))*c(0, out$F_P_ves_cas[1:(length(out$F_P_ves_cas)-1)]))/365.241
}
if(estimator=='F_P2' | estimator=="all"){
# Shift all to the end:
if(if_start_stop) data_yi[,stop_col] <- max(data_yi[,stop_col])
rform2 <- rform
rform <- rformulate(formula, data_yi, ratetable, na.action=na.action, rmap = rmap)
# Shift all to the end:
if(!if_start_stop){
rform$Y <- rep(max(rform$Y), length(rform$Y))
rform$data[,"Y"] <- rform$Y
}
data <- rform$data
if(if_start_stop){
if(!(start_col %in% colnames(data))){
data[,start_col] <- data_orig[, start_col]
}
}
# Check covariates:
p <- rform$m
if (p > 0) stop("There shouldn't be any covariates in the formula. This function gives non-parametric estimates of the hazards.")
else data$Xs <- rep(1, nrow(data)) #if no covariates, just put 1
out$haz.pop2 <- NULL
kt <- 1 # the only stratum
inx <- which(data$Xs == names(out_n)[kt]) #individuals within this stratum
# Fix demographic covariates:
if(if_start_stop){
rform$R[,"year"] <- rform$R[,"year"] - rform$R[,"age"]
rform$R[,"age"] <- 0
}
if(any(starting_age>=rform$Y[inx])) browser()
# temp <- exp.prep(rform$R[inx,,drop=FALSE],rform$Y[inx],rform$ratetable,rform$status[inx],times=tis,fast=TRUE, cmp=FALSE, ys=0)
temp <- exp.prep(rform$R[inx,,drop=FALSE],rform$Y[inx],rform$ratetable,rform$status[inx],times=tis,fast=TRUE, cmp=FALSE, ys=starting_age)
# Fix at-risk process, if needed:
temp$yi[temp$yi==0] <- Inf
# Calculate hazards:
haz.pop2 <- temp$yidli/temp$yi
out$haz.pop2 <- c(out$haz.pop2, haz.pop2)
out$cum.haz.pop2 <- cumsum(out$haz.pop2)
out$F_P2 <- 1-exp(-out$cum.haz.pop2)
# out$auc_pop2 <- sum(c(tis[1], diff(tis))*out$F_P2)/365.241
out$auc_pop2 <- sum(c(tis[1], diff(tis))*c(0, out$F_P2[1:(length(out$F_P2)-1)]))/365.241
}
###
# Bootstrap:
if(bootstrap){
# browser()
data_b <- data_orig
data_b$id <- 1:nrow(data_b)
yl_boot <- ylboot(theta=ylboot.iter, data=data_b, id="id",
B=B, verbose=0, #all_times = all_times,
ratetable=ratetable#, add.times=add.times
, starting.time, estimator, precision,
add.times = add.times,
formula = formula,
rmap = rmap, measure=measure
)
L_OP <- yl_boot[[3]]
F_boot <- yl_boot[[2]]
yl_boot <- as.data.frame(t(yl_boot[[1]]))
}
###
estimator.orig <- estimator
if(estimator=='F_P_final') estimator = 'F_P2'
out$strata <- c(out$strata, length(tis)) #number of times in this strata
names(out$strata) <- names(out_n)
out$strata <- NULL
out$auc <- c(auc_data=out$auc_data, auc_pop=out$auc_pop, auc_pop2=out$auc_pop2, auc_pop_Spi=out$auc_pop_Spi, auc_pop_Spi2=out$auc_pop_Spi2)
if(estimator=='all'){
F_P_final <- data.frame(time=out$time,F_P=out$F_P, F_P2=out$F_P2, F_P_Spi=out$F_P_Spi, F_P_Spi2=out$F_P_Spi2)
} else if(estimator=='F_P'){
F_P_final <- data.frame(time=tis,prob=out$F_P)
} else if(estimator=='F_P2'){
F_P_final <- data.frame(time=tis,prob=out$F_P2)
} else if(estimator=='F_P_Spi'){
F_P_final <- data.frame(time=tis,prob=out$F_P_Spi)
} else if(estimator=='F_P_Spi2'){
F_P_final <- data.frame(time=tis,prob=out$F_P_Spi2)
}
# YD through time:
F_data_yd <- data.frame(time=mod$time, F_data=out$F_data)
pop.times <- F_P_final$time[!(F_P_final$time %in% mod$time)]
if(length(pop.times) > 0){
F_data_yd_tmp <- data.frame(time=pop.times, F_data=NA)
F_data_yd <- rbind(F_data_yd, F_data_yd_tmp)
F_data_yd <- F_data_yd[order(F_data_yd$time),]
F_data_yd$F_data <- mstateNAfix(F_data_yd$F_data, 0)
}
F_data_yd$var <- area_var
yd_data <- cumsum(c(F_data_yd$time[1], diff(F_data_yd$time))*c(0, F_data_yd$F_data[1:(nrow(F_data_yd)-1)]))/365.241
# Population part:
F_P_yd <- F_P_final
yd_pop <- cumsum(c(F_P_yd$time[1], diff(F_P_yd$time))*c(0, F_P_yd$prob[1:(nrow(F_P_yd)-1)]))/365.241
yd_curve <- data.frame(time=F_data_yd$time, yd=yd_data - yd_pop,
obs_var=F_data_yd$var,
# obs_var22=obs_var_time22,
yd_data=yd_data,
yd_pop=yd_pop
)
###
# Greenwood for prob:
greenwood_est <- (mod$surv^2*cumsum(mod$n.event/((mod$n.risk - mod$n.event)*mod$n.risk)))^(1/2)
# If Surv(t)=0 in the end, take the last var estimate:
if(any(rev(mod$surv)==0)){
greenwood_wh <- which(mod$surv==0)
greenwood_est[greenwood_wh] <- greenwood_est[greenwood_wh[1]-1]
}
F_data_tmp <- data.frame(time=mod$time,
prob=out$F_data,
prob.se=greenwood_est,
area=NA,
area.se=NA)
# Add values at time zero:
F_tmp <- F_data_tmp[1,]
F_tmp$time <- min(starting_age)
F_tmp$prob <- 0
F_tmp$prob.se <- 0
if(!(F_tmp$time %in% F_data_tmp$time)) F_data_tmp <- rbind(F_tmp, F_data_tmp)
if(!if_start_stop){
F_P_final_tmp <- F_P_final[1,]
F_P_final_tmp$time <- min(starting_age)
F_P_final_tmp$prob <- 0
if(!(F_P_final_tmp$time %in% F_P_final$time)) F_P_final <- rbind(F_P_final_tmp, F_P_final)
}
yd_curve_tmp <- yd_curve[1,]
yd_curve_tmp$time <- min(starting_age)
yd_curve_tmp[,2:ncol(yd_curve_tmp)] <- 0
if(!(yd_curve_tmp$time %in% yd_curve$time)) yd_curve <- rbind(yd_curve_tmp, yd_curve)
# Bootstrap:
if(bootstrap){
yd_curve$boot_var <- colVars(yl_boot, na.rm=TRUE)
if(late.values){
last_val <- utils::tail(yd_curve$boot_var[!is.na(yd_curve$boot_var)],1)
yd_curve$boot_var[is.na(yd_curve$boot_var)] <- last_val
}
yl_sd_boot <- stats::sd(yl_boot[, ncol(yl_boot)], na.rm=TRUE)
}
# Add areas:
F_data_tmp$area <- yd_curve$yd_data[yd_curve$time %in% F_data_tmp$time]
F_P_final$area <- yd_curve$yd_pop#[yd_curve$time %in% F_P_final$time]
F_data_tmp$area.se <- yd_curve$obs_var[yd_curve$time %in% F_data_tmp$time]^(1/2)
# If, add boot variance:
if(bootstrap & (!is.boot)){
F_data_tmp$prob.se <- (F_boot$F_data[F_boot$time %in% F_data_tmp$time])^(1/2)
F_P_final$prob.se <- (F_boot$F_P#[F_boot$time %in% F_P_final$time]
)^(1/2)
F_data_tmp$area.se <- L_OP$L_O[L_OP$time %in% F_data_tmp$time]^(1/2)
F_P_final$area.se <- L_OP$L_P^(1/2)
}
# Column order:
F_data_tmp <- F_data_tmp[, c('time', 'prob', 'area', 'prob.se', 'area.se')]
# Choose relevant columns:
if(bootstrap){
yd_curve <- yd_curve[,c('time', 'yd', 'boot_var')]
} else{
yd_curve <- yd_curve[,c('time', 'yd', 'obs_var')]
}
yd_curve[,3] <- yd_curve[,3]^(1/2)
colnames(yd_curve)[2:3] <- c('est', 'est.se')
yd_curve$lower <- yd_curve$est - yd_curve$est.se*stats::qnorm(0.5+conf.int/2)
yd_curve$upper <- yd_curve$est + yd_curve$est.se*stats::qnorm(0.5+conf.int/2)
return_obj <- list(F_data=F_data_tmp,
F_P=F_P_final,
auc=out$auc,
yd_curve=yd_curve,
starting.time=starting.time,
estimator=estimator.orig,
out=out
)
if(bootstrap){
return_obj[[length(return_obj)+1]] <- F_boot
names(return_obj)[length(return_obj)] <- 'F_boot'
return_obj[[length(return_obj)+1]] <- L_OP
names(return_obj)[length(return_obj)] <- 'L_OP'
return_obj <- append(return_obj, yl_sd_boot)
names(return_obj)[length(return_obj)] <- 'yl_sd_boot'
}
return_short <- list(years=return_obj$yd_curve, F_O=return_obj$F_data, F_P=return_obj$F_P, measure=measure)
if((bootstrap & (!is.boot)) #| ((!bootstrap) & (!is.boot))
){
return_obj <- return_short
}
if((!bootstrap) & (!is.boot)){
return_obj <- return_short
}
if(is.boot){
return_obj <- return_short
}
if(var.estimator=='none'){
return_obj$years <- return_obj$years[,1:2]
find_cols <- (!grepl('.se', colnames(return_obj[[2]])))
return_obj[[2]] <- return_obj[[2]][,find_cols]
if(length(return_obj)==4){
find_cols <- (!grepl('.se', colnames(return_obj[[3]])))
return_obj[[3]] <- return_obj[[3]][,find_cols]
}
}
return(return_obj)
}
}
# Declare column/aesthetic names used via non-standard evaluation (inside
# ggplot2 aes() and subset() calls below) so that R CMD check does not
# report them as undefined global variables.
utils::globalVariables(c("time", "prob", "Curve", "est", "lower", "upper"))
# Bootstrap driver for the years() measures.
#
# Draws B resamples (with replacement) of the subjects identified by the
# `id` column of `data`, re-estimates the chosen measure on every resample
# through ylboot.apply()/ylboot.iter(), and collects the replicate curves.
# `theta` is kept for interface compatibility; the estimation routine is
# invoked directly. The return value depends on `measure`:
#   'yl2013': list(replicate estimates, replicate F_E probabilities)
#   'yl2017': list(replicate estimates, observed areas, population areas,
#             time grid)
#   otherwise ('yd'): list(replicate estimates, column variances of the
#             F curves, column variances of the observed/population areas)
ylboot <- function(theta, data, B = 5, id = "id", verbose = 0,
                   #all_times,
                   ratetable=relsurv::slopop, #add.times,
                   starting.time, estimator, precision,
                   add.times,
                   formula,
                   rmap, measure,
                   ...){
  subject_ids <- unique(data[, id])
  # Reference fit on the original data; its time grid anchors every replicate.
  if(missing(add.times)){
    th <- ylboot.iter(formula, data, starting.time = starting.time, estimator = estimator, precision = precision,
                      ratetable=ratetable, first=TRUE, rmap = rmap, measure=measure, ...)
  } else{
    th <- ylboot.iter(formula, data, starting.time = starting.time, estimator = estimator, precision = precision,
                      ratetable=ratetable, first=TRUE, add.times = add.times, rmap = rmap, measure=measure, ...)
  }
  # TRUE when the caller supplied extra evaluation times:
  simple_par <- !missing(add.times)
  # Common evaluation grid for all replicates:
  if(simple_par){
    add.times <- sort(unique(c(th[[1]]$time, add.times)))
  } else{
    add.times <- th[[1]]$time
  }
  n_grid <- length(add.times)
  # Replicate containers, one row per bootstrap sample:
  na_frame <- function(n_cols) data.frame(matrix(NA, nrow=B, ncol=n_cols))
  res <- na_frame(nrow(th[[1]]))
  Fdata <- na_frame(n_grid)
  Fo <- na_frame(nrow(th[[2]]))
  Fp <- na_frame(n_grid)
  L_O <- na_frame(n_grid)
  L_P <- na_frame(n_grid)
  F_E <- na_frame(n_grid)
  # Bootstrap replications:
  for (b in 1:B) {
    one_rep <- ylboot.apply(formula, b, verbose, subject_ids, data, id, add.times, starting.time, estimator, precision, ratetable, th, simple_par, rmap, measure, ...)
    res[b, 1:length(one_rep[[1]])] <- one_rep[[1]]
    if(measure=='yl2013'){
      F_E[b, 1:length(one_rep[[2]])] <- one_rep[[2]]
    }
    if(measure=='yl2017'){
      Fo[b, 1:length(one_rep[[2]])] <- one_rep[[2]]
      Fp[b, 1:length(one_rep[[3]])] <- one_rep[[3]]
    }
    if(measure=='yd'){
      # Keep only the rows on the common grid before storing:
      obs_part <- subset(one_rep[[2]], time %in% add.times)
      obs_idx <- 1:nrow(obs_part)
      Fdata[b, obs_idx] <- obs_part$F_data
      Fp[b, obs_idx] <- obs_part$F_P
      area_part <- subset(one_rep[[3]], time %in% add.times)
      area_idx <- 1:nrow(area_part)
      L_O[b, area_idx] <- area_part$yd_data
      L_P[b, area_idx] <- area_part$yd_pop
    }
  }
  # One column per replicate after transposition:
  res <- as.data.frame(t(res))
  if(measure == 'yl2013'){
    return(list(res, F_E))
  }
  if(measure == 'yl2017'){
    return(list(res, Fo, Fp, add.times))
  }
  if (verbose)
    cat("\n")
  # 'yd': report the bootstrap column variances of the curves:
  F_obj <- data.frame(time=add.times,
                      F_data=colVars(Fdata, na.rm = TRUE),
                      F_P=colVars(Fp, na.rm = TRUE))
  L_OP <- data.frame(time=add.times,
                     L_O=colVars(L_O, na.rm = TRUE),
                     L_P=colVars(L_P, na.rm = TRUE))
  list(res, F_obj, L_OP)
}
# Run one bootstrap replication for ylboot(): resample subjects with
# replacement, re-estimate the measure on the resampled data via
# ylboot.iter(), and align the replicate's results with the time grid of
# the original (non-bootstrap) fit stored in `th`.
#
# Arguments are forwarded from ylboot(); `b` is the replication index
# (used for verbose output only). Returns a list whose structure depends
# on `measure`:
#   'yl2013': list(est vector, prob vector)
#   'yl2017': list(est vector, observed-area vector, population-area vector)
#   otherwise ('yd'): list(est vector, F data.frame, L_OP data.frame)
ylboot.apply <- function(formula, b, verbose, ids, data, id, add.times, starting.time, estimator, precision, ratetable, th, simple_par,
                         rmap, measure,
                         ...){
  # Names of the time columns, taken from the Surv() call in the formula:
  if(starting.time=='left.truncated'){
    start_col <- as.character(formula[[2]])[2]
    stop_col <- as.character(formula[[2]])[3]
  } else{
    stop_col <- as.character(formula[[2]])[2]
  }
  if (verbose > 0) {
    cat("\nBootstrap replication", b, "\n")
  }
  # Resample subjects (all rows of a drawn subject are kept together):
  bootdata <- NULL
  bids <- sample(ids, replace = TRUE)
  bidxs <- unlist(sapply(bids, function(x) which(x ==
                                                   data[, id])))
  bootdata <- data[bidxs, ]
  if (verbose > 0) {
    cat("applying theta ...")
  }
  # A resample consisting of a single subject cannot be analysed.
  # BUGFIX: the original code called `next` here, which is invalid inside
  # a function body ("no loop for break/next" error); redraw the sample
  # until it contains more than one subject instead.
  while(length(unique(bootdata[,id]))==1){
    if(length(ids)==1) stop("Bootstrap requires more than one subject in the data.", call. = FALSE)
    bids <- sample(ids, replace = TRUE)
    bidxs <- unlist(sapply(bids, function(x) which(x == data[, id])))
    bootdata <- data[bidxs, ]
  }
  # Evaluation grid for this replicate (scalar condition, hence &&):
  if(!missing(add.times) && simple_par){
    add.times.arg <- sort(unique(c(th[[1]]$time, add.times)))
  } else{
    add.times.arg <- th[[1]]$time
  }
  add.times.arg2 <- add.times.arg
  # Remove times after the last follow-up time in the resampled data:
  if(starting.time == 'left.truncated'){
    add.times.arg <- add.times.arg[add.times.arg<=max(bootdata[,stop_col])]
  } else{
    add.times.arg <- add.times.arg[add.times.arg<=max(bootdata[,stop_col])]# - bootdata[,start_col])]
  }
  thstar <- ylboot.iter(formula, bootdata, starting.time = starting.time, estimator = estimator, precision = precision,
                        ratetable=ratetable, add.times=add.times.arg, rmap=rmap, measure=measure, ...)
  if(measure == 'yl2013'){
    return(list(thstar[[1]]$est, thstar[[2]]$prob))
  }
  if(measure == 'yl2017'){
    FoO <- thstar[[2]]
    FpP <- thstar[[3]]
    thstar <- thstar[[1]]
    # if(nrow(th[[1]]) != nrow(thstar)) browser()
    # Pad the observed-area curve with reference times it is missing:
    if(nrow(FoO) < nrow(th[[2]])){
      mis.tajms <- th[[2]]$time[!(th[[2]]$time %in% FoO$time)]
      mis.tajms <- mis.tajms[mis.tajms <= max(FoO$time)]
      temp_df <- data.frame(time=mis.tajms, area=NA)
      FoO <- rbind(FoO, temp_df)
      FoO <- FoO[order(FoO$time),]
      FoO$area <- mstateNAfix(FoO$area, 0)
    }
    if(nrow(th[[1]]) < nrow(thstar)){
      thstar <- thstar[thstar$time %in% th[[1]]$time, ]
      FpP <- FpP[FpP$time %in% th[[1]]$time, ]
      # BUGFIX: the original referenced the undefined object `foO` here;
      # `FoO` was intended.
      FoO <- FoO[FoO$time %in% th[[1]]$time, ]
    }
    # Debugging guard left in place: grids should now be aligned.
    if(length(th[[1]]$time[th[[1]]$time <= max(thstar$time)]) != length(thstar$time)) browser()
    pogoj <- any(th[[1]]$time[th[[1]]$time <= max(thstar$time)] != thstar$time)
    if(pogoj){
      missing_times <- th[[1]]$time[which(!(th[[1]]$time %in% thstar$time))]
      if(length(missing_times)>0){
        # There are times missing in thstar, add them:
        add_df <- thstar[1:length(missing_times),]
        add_df$time <- missing_times
        add_df$yd <- NA
        add_df$obs_var <- NA
        add_df$yd_data <- NA
        thstar <- rbind(thstar, add_df)
        thstar <- thstar[order(thstar$time),] # redundant
        thstar$yd <- mstateNAfix(thstar$yd, 0)
        thstar$obs_var <- mstateNAfix(thstar$obs_var, 0)
        thstar$yd_data <- mstateNAfix(thstar$yd_data, 0)
        if(nrow(th[[1]]) < nrow(thstar)){
          thstar <- thstar[thstar$time %in% th[[1]]$time, ]
        }
        if(nrow(th[[1]]) != nrow(thstar)) browser()
      } else{
        # This means there's more times in thstar than needed. Remove unnecessary times:
        thstar <- thstar[-which(!(thstar$time %in% th[[1]]$time)),]
        FpP <- FpP[-which(!(FpP$time %in% th[[1]]$time)),]
        # BUGFIX: `foO` -> `FoO` (undefined object in the original).
        FoO <- FoO[-which(!(FoO$time %in% th[[1]]$time)),]
        if(nrow(th[[1]]) != nrow(thstar)) browser()
      }
    }
    return(list(thstar$est, FoO$area, FpP$area))
  }
  # measure == 'yd':
  L_OP <- thstar[[3]]
  Fobj <- thstar[[2]]
  thstar <- thstar[[1]]
  if(nrow(th[[1]]) < nrow(thstar)){
    thstar <- thstar[thstar$time %in% th[[1]]$time, ]
    L_OP <- L_OP[L_OP$time %in% th[[1]]$time, ]
    Fobj <- Fobj[Fobj$time %in% th[[1]]$time, ]
  }
  # Are any values missing:
  if(length(th[[1]]$time[th[[1]]$time <= max(thstar$time)]) != length(thstar$time)) browser()
  pogoj <- any(th[[1]]$time[th[[1]]$time <= max(thstar$time)] != thstar$time)
  if(pogoj){
    missing_times <- th[[1]]$time[which(!(th[[1]]$time %in% thstar$time))]
    if(length(missing_times)>0){
      # There are times missing in thstar, add them:
      add_df <- thstar[1:length(missing_times),]
      add_df$time <- missing_times
      add_df$yd <- NA
      add_df$obs_var <- NA
      add_df$yd_data <- NA
      thstar <- rbind(thstar, add_df)
      thstar <- thstar[order(thstar$time),] # redundant
      thstar$yd <- mstateNAfix(thstar$yd, 0)
      thstar$obs_var <- mstateNAfix(thstar$obs_var, 0)
      thstar$yd_data <- mstateNAfix(thstar$yd_data, 0)
      if(nrow(th[[1]]) < nrow(thstar)){
        thstar <- thstar[thstar$time %in% th[[1]]$time, ]
      }
      if(nrow(th[[1]]) != nrow(thstar)) browser()
    } else{
      # This means there's more times in thstar than needed. Remove unnecessary times:
      thstar <- thstar[-which(!(thstar$time %in% th[[1]]$time)),]
      L_OP <- L_OP[-which(!(L_OP$time %in% th[[1]]$time)),]
      Fobj <- Fobj[-which(!(Fobj$time %in% th[[1]]$time)),]
      if(nrow(th[[1]]) != nrow(thstar)) browser()
    }
  }
  # thstar$b <- b
  # Save result:
  # res[b,] <-
  list(thstar$est, Fobj, L_OP)
}
# One estimation run used by the bootstrap machinery: calls years() on
# `data` and reshapes its output into the pieces that ylboot() /
# ylboot.apply() collect.
#
# first = TRUE marks the initial run on the original data (the reference
# fit); any later call is treated as a bootstrap replication (the flags
# is.boot/first.boot passed to years() control its reporting behavior).
# Returns, depending on `measure`:
#   'yd':     first run: list(years curve, F_O); replication: list(years
#             curve, F_data/F_P probability frame, observed/population
#             area frame), both merged on the population time grid.
#   'yl2013': list(years curve, F_E)
#   otherwise ('yl2017'): list(years curve, F_O, F_P)
ylboot.iter <- function(formula, data, #all_times,
                        starting.time, estimator, precision,
                        ratetable=relsurv::slopop,
                        first=FALSE, add.times,
                        rmap, measure
){
  if(!missing(rmap)) rmap <- as.call(rmap)
  # The reference fit is not a bootstrap run; every other call is.
  is.boot <- !first
  first.boot <- first
  # add.times can only be forwarded when it was actually supplied:
  if(missing(add.times)){
    object <- years(formula = formula, data = data, ratetable = ratetable,
                    precision=precision, var.estimator='greenwood', is.boot=is.boot, first.boot = first.boot, rmap = rmap, measure=measure)
    # estimator = estimator,
  } else{
    object <- years(formula = formula, data = data, ratetable = ratetable,
                    precision=precision, var.estimator='greenwood', add.times=add.times, is.boot=is.boot, first.boot = first.boot, rmap = rmap, measure=measure)
    # estimator = estimator,
  }
  if(measure=='yd'){
    if(first) return(list(object$years, object$F_O))
    # Align observed (F_O) and population (F_P) curves on the population grid:
    Fobj <- merge(object$F_P[,c('time','prob')], object$F_O[,c('time','prob')], by='time', all.x=TRUE)
    Fobj <- Fobj[,c(1,3,2)]
    colnames(Fobj)[2:3] <- c('F_data','F_P')
    L_OP <- merge(object$F_P[,c('time','area')], object$F_O[,c('time','area')], by='time', all.x = TRUE)
    L_OP <- L_OP[,c(1,3,2)]
    colnames(L_OP)[2:3] <- c('yd_data', 'yd_pop')
    return(list(object$years,
                Fobj,
                L_OP))
  } else if(measure=='yl2013'){
    return(list(object$years, object$F_E))
  } else{
    return(list(object$years, object$F_O, object$F_P))
  }
}
# Build a polygon-shaped data frame for shading the area under a curve.
#
# Extracts the `time`/`prob` columns of years[[obj]], converts time from
# days to years (365.241 days per year), and pads the curve with
# zero-probability points at the first and last time so the shaded region
# closes down to the x axis.
plot.helper <- function(years, obj){
  curve_df <- data.frame(time = years[[obj]]$time/365.241,
                         prob = years[[obj]]$prob)
  first_pt <- curve_df[1, ]
  last_pt <- curve_df[nrow(curve_df), ]
  first_pt$prob <- 0
  last_pt$prob <- 0
  rbind(first_pt, curve_df, last_pt)
}
# Emulate ggplot2's default discrete colour palette.
#
# @param n Number of colours to generate.
# @return A character vector of n hex colour codes: evenly spaced hues on
#   the HCL colour wheel (luminance 65, chroma 100), matching the colours
#   ggplot2 assigns to a discrete scale with n levels.
gg_color_hue <- function(n) {
  # n + 1 hues are generated because the first and last (15 and 375
  # degrees) coincide on the colour wheel; only the first n are kept.
  hues <- seq(15, 375, length = n + 1)
  grDevices::hcl(h = hues, l = 65, c = 100)[1:n]
}
#' Plot the absolute risk (observed and population curves)
#'
#' Plots the estimated observed and population curves for the
#' life years difference (Manevski, Ruzic Gorenjec, Andersen, Pohar Perme, 2022).
#'
#' A ggplot2 implementation for plotting the observed and population curves. The type of curves is
#' dependent upon the measure calculated using \code{years} function (argument \code{measure}).
#' @param years the object obtained using function \code{years}.
#' @param xlab a title for the x axis.
#' @param ylab a title for the y axis.
#' @param xbreak the breaks on the x axis (this is supplied to \code{scale_x_continuous}).
#' @param ybreak the breaks on the y axis (this is supplied to \code{scale_y_continuous}).
#' @param xlimits define the limits on the x axis (this is supplied to \code{scale_x_continuous}).
#' @param ylimits define the limits on the y axis (this is supplied to \code{scale_y_continuous}).
#' @param show.legend if TRUE, the legend is shown on the graph.
#' @return A ggplot object
#' @seealso \code{\link{years}}, \code{\link{plot_years}}
#'
plot_f <- function(years, xlab='Time interval', ylab='Absolute risk', xbreak, ybreak, xlimits, ylimits, show.legend=TRUE){
  # Plot the observed and population absolute-risk curves stored in a
  # `years` object. Only defined for objects computed with measure = 'yd'.
  #
  # Args:
  #   years:       object returned by the years() function (measure = 'yd').
  #   xlab, ylab:  axis titles.
  #   xbreak, ybreak:   optional axis breaks (passed to scale_*_continuous).
  #   xlimits, ylimits: optional axis limits; default to the observed time
  #                     range (in years) and [0, 1.1 * max prob].
  #   show.legend: whether to draw the legend below the plot.
  #
  # Returns: a ggplot object.
  if(years$measure != 'yd'){
    stop("The plot_f function is available only for the YD measure (argument measure='yd' in the years function).")
  }
  # Stack both curves into long format; `Curve` labels the legend entries.
  out <- rbind(
    cbind(years$F_O[,c('time', 'prob')], Curve='Observed'),
    cbind(years$F_P[,c('time', 'prob')], Curve='Population')
  )
  if(missing(xlimits)){
    # Times are stored in days; axes are drawn in years.
    xlimits <- c(min(out$time), max(out$time))/365.241
  }
  if(missing(ylimits)){
    ylimits <- c(0,max(out$prob))*1.1
  }
  # Use the first and third default ggplot hues so colors match other plots.
  colorji <- gg_color_hue(3)
  colorji <- colorji[c(1,3)]
  g <- ggplot2::ggplot(out)+
    ggplot2::geom_step(ggplot2::aes(time/365.241, prob, color=Curve)#, size=1.001
                       )+
    ggplot2::scale_color_manual(values=colorji)+
    ggplot2::xlab(xlab)+
    ggplot2::ylab(ylab)
  # BUG FIX: removed two dead calls to plot.helper('F_O'/'F_P') whose results
  # (poly_data, poly_P) were computed but never used; the ribbons below are
  # drawn directly from `out`.
  g <- g+
    pammtools::geom_stepribbon(ggplot2::aes(x=time/365.241, ymin=0, ymax=prob, fill=Curve), alpha=0.3, linetype='dashed')+
    ggplot2::scale_fill_manual(values = colorji)
  if(!missing(xbreak)){
    g <- g +
      ggplot2::scale_x_continuous(expand = c(0, 0), limits=xlimits, breaks = xbreak)
  } else{
    g <- g +
      ggplot2::scale_x_continuous(expand = c(0, 0), limits=xlimits)
  }
  if(!missing(ybreak)){
    g <- g +
      ggplot2::scale_y_continuous(expand = c(0, 0), limits=ylimits, breaks = ybreak)
  } else{
    g <- g +
      ggplot2::scale_y_continuous(expand = c(0, 0), limits=ylimits)
  }
  g <- g +
    ggplot2::theme_bw()+
    ggplot2::theme(legend.position = 'bottom',
                   legend.title = ggplot2::element_blank())+
    ggplot2::theme(text = ggplot2::element_text(size=14))+
    ggplot2::theme(
      panel.grid.major.x = ggplot2::element_line(linetype='dashed', colour = 'grey85'),
      panel.grid.minor.x = ggplot2::element_line(linetype='dashed', colour = 'grey85'),
      panel.grid.major.y = ggplot2::element_line(linetype='dashed', colour = 'grey85'),
      panel.grid.minor.y = ggplot2::element_line(linetype='dashed', colour = 'grey85'))
  if(!show.legend){
    g <- g +
      ggplot2::theme(legend.position = 'none')
  }
  g
}
#' Plot the years measure
#'
#' Plot the years measure obtained from the \code{years} function.
#'
#' A ggplot2 implementation for plotting the years measure. The type of curve is
#' dependent upon the measure calculated using the \code{years} function (argument \code{measure}).
#' @param years the object obtained using function \code{years}.
#' @param xlab a title for the x axis.
#' @param ylab a title for the y axis.
#' @param xbreak the breaks on the x axis (this is supplied to \code{scale_x_continuous}).
#' @param ybreak the breaks on the y axis (this is supplied to \code{scale_y_continuous}).
#' @param xlimits define the limits on the x axis (this is supplied to \code{scale_x_continuous}).
#' @param ylimits define the limits on the y axis (this is supplied to \code{scale_y_continuous}).
#' @param conf.int if TRUE, the confidence interval is plotted.
#' @param ymirror mirror the y values (w.r.t. the x axis).
#' @param yminus use function y -> -y when plotting.
#' @return A ggplot object
#' @seealso \code{\link{years}}, \code{\link{plot_f}}
#'
plot_years <- function(years, xlab='Time interval', ylab='Years', xbreak, ybreak, xlimits, ylimits, conf.int=FALSE, ymirror=FALSE, yminus=FALSE){
  # Plot the years measure (years$years) as a step function over time.
  #
  # Args:
  #   years:       object returned by the years() function.
  #   xlab, ylab:  axis titles.
  #   xbreak, ybreak:   optional axis breaks (passed to scale_*_continuous).
  #   xlimits, ylimits: optional axis limits; defaults cover the data
  #                     (and CI bounds, when present) with 10% headroom.
  #   conf.int:    draw dashed lower/upper confidence bounds.
  #   ymirror:     mirror the y axis (values drawn downward, labels kept).
  #   yminus:      negate the estimate (and swap/negate the CI bounds).
  #
  # Returns: a ggplot object.
  out <- years$years
  if(conf.int){
    if(is.null(out$lower)){
      stop('Confidence intervals not present in the years object. Please set conf.int=FALSE or use the var.estimator argument in the years function.')
    }
  }
  # Idiom fix: scalar conditions use && (short-circuit) instead of &.
  if(years$measure=='yl2017' && nrow(out)==1){
    stop('The years measure is reported at the end of follow-up thus it is not plotted.')
  }
  if(yminus){
    out$est <- -out$est
    if(!is.null(out$lower)){
      # Negating swaps the roles of the bounds, so exchange them as well.
      tmp_lower <- out$lower
      out$lower <- -out$upper
      out$upper <- -tmp_lower
    }
  }
  if(missing(xlimits)){
    # Times are stored in days; axes are drawn in years.
    # (Simplified from min(out$time[1]) — min() of a single value is a no-op.)
    xlimits <- c(out$time[1], max(out$time))/365.241
  }
  if(missing(ylimits)){
    # Include the CI bounds (when present) in the default y range.
    tmp_vec <- out$est
    if(!is.null(out$lower)) tmp_vec <- c(out$est, out$lower, out$upper)
    ymax <- max(tmp_vec)
    ymin <- min(tmp_vec)
    ylimits <- c(ymin,ymax)*1.1
  }
  g <- ggplot2::ggplot(out)+
    ggplot2::geom_step(ggplot2::aes(time/365.241, est)#, size=1.001
                       )
  if(conf.int){
    g <- g+
      ggplot2::geom_step(ggplot2::aes(time/365.241, lower), linetype='dashed')+
      ggplot2::geom_step(ggplot2::aes(time/365.241, upper), linetype='dashed')
  }
  g <- g+
    ggplot2::xlab(xlab)+
    ggplot2::ylab(ylab)
  if(!missing(xbreak)){
    g <- g+
      ggplot2::scale_x_continuous(expand = c(0, 0), limits=xlimits, breaks = xbreak)
  } else{
    g <- g+
      ggplot2::scale_x_continuous(expand = c(0, 0), limits=xlimits)
  }
  # Helper transformation used only when ymirror = TRUE: flips the axis
  # direction while keeping the original tick labels.
  trans <- function(x) -x
  inv <- function(x) -x
  reverse_fun <- scales::trans_new(name = "reverse_new",
                                   transform = trans,
                                   inverse = inv
  )
  if(!missing(ybreak)){
    g <- g +
      ggplot2::scale_y_continuous(expand = c(0, 0), limits = ylimits, breaks = ybreak)
  } else{
    g <- g +
      ggplot2::scale_y_continuous(expand = c(0, 0), limits = ylimits)
  }
  if(ymirror){
    g <- g +
      ggplot2::coord_trans(y=reverse_fun)
  }
  g <- g +
    ggplot2::theme_bw()+
    ggplot2::theme(text = ggplot2::element_text(size=14))+
    ggplot2::expand_limits(y = 0)+
    ggplot2::theme(
      panel.grid.major.x = ggplot2::element_line(linetype='dashed', colour = 'grey85'),
      panel.grid.minor.x = ggplot2::element_line(linetype='dashed', colour = 'grey85'),
      panel.grid.major.y = ggplot2::element_line(linetype='dashed', colour = 'grey85'),
      panel.grid.minor.y = ggplot2::element_line(linetype='dashed', colour = 'grey85'))
  g
}
|
8eda172e46ea238e35239298136d63a84bfa8667
|
6cbc6e80ae07b8fb1fff0a5cad4ddcd29c358c0a
|
/R/ezr.crosstab.R
|
7d8a2f3d95777ef2b1e7e91ce73ad948098f5c54
|
[] |
no_license
|
lenamax2355/easyr
|
d99638b84fd9768774fa7ede84d257b10e0bacf6
|
37ab2fe5c28e83b9b5b3c0e3002f2df45708016b
|
refs/heads/master
| 2022-01-09T20:43:17.801623
| 2019-05-13T02:49:48
| 2019-05-13T02:49:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,172
|
r
|
ezr.crosstab.R
|
#' Title Cross Tab (Table)
#'
#' Generate a cross table of the fields you are interested in. Input of fields should be with names in quotes or as a result of a dplyr pipe similar to mtcars %>% select(vs). dataset$field is not supported
#'
#' @param dataset Dataframe
#' @param row_field Ideally place the grouping variable here.
#' @param column_field Ideally place the target or outcome you are interested in comparing here.
#' @param percent_denominator Valid values are 'row','all','col'. What is the denominator for percents? Row is default so place target in column.
#' @param rounding_digits Default is 0 so whole percents are shown
#' @param add_totals Add the totals? Default is true.
#' @param numerical_breaks N breaks for numerical values
#' @param round_breaks Digits to round numerical values to.
#' @param style Valid values are 'equal','pretty','quantile','percentile'. Default is 'equal'
#' @param position Default is front. Determines what is placed in the ()s
#'
#' @return Returns the cross tab results...
#'
#' @examples ezr.crosstab(mtcars, mtcars %>% select(cyl), mtcars %>% select(vs))
#' ezr.crosstab(mtcars, 'cyl', 'vs')
ezr.crosstab = function(dataset, row_field, column_field, percent_denominator='row', rounding_digits=0, position = 'front', add_totals=TRUE, numerical_breaks = 5, round_breaks=0, style='equal'){
  # Cross-tabulate row_field against column_field with percentages and Ns.
  # Numeric fields with more than `numerical_breaks` distinct values are
  # binned first (via ezr.add_bins) so the table stays readable.
  #
  # Args:
  #   dataset:             data frame to tabulate.
  #   row_field, column_field: column names (strings).
  #   percent_denominator: 'row', 'col', or 'all' — denominator for percents.
  #   rounding_digits:     digits shown on percents.
  #   position:            where adorn_ns puts the counts ('front'/'rear').
  #   add_totals:          append row/column totals.
  #   numerical_breaks:    bin numeric fields with more distinct values than this.
  #   round_breaks:        digits for bin boundaries.
  #   style:               binning style passed to ezr.add_bins.
  #
  # Returns: the adorned tabyl (data frame).
  library(janitor)  # also provides the `%>%` pipe used below

  # BUG FIX: the original tested n_distinct() on row_field in this branch,
  # so a many-valued numeric column_field was only binned when the *row*
  # field happened to have many distinct values.
  if(is.numeric(dataset[[column_field]]) && dplyr::n_distinct(dataset[[column_field]]) > numerical_breaks){
    # BUG FIX: honor the caller's `style` instead of hard-coding 'equal'
    # (default is unchanged, so existing calls behave identically).
    dataset=ezr.add_bins(dataset = dataset, style = style, n_breaks = numerical_breaks, round_breaks = round_breaks,
                         column = column_field)
    column_field=paste0('bucket_',column_field)
  }
  if(is.numeric(dataset[[row_field]]) && dplyr::n_distinct(dataset[[row_field]]) > numerical_breaks){
    dataset=ezr.add_bins(dataset = dataset, style = style, n_breaks = numerical_breaks, round_breaks = round_breaks,
                         column = row_field)
    row_field=paste0('bucket_',row_field)
  }

  if(add_totals==TRUE){
    result = dataset %>%
      janitor::tabyl(!!rlang::sym(row_field), !!rlang::sym(column_field), show_missing_levels=TRUE) %>%
      janitor::adorn_totals(where=c('row','col'), na.rm=FALSE) %>%
      janitor::adorn_percentages(denominator = percent_denominator, na.rm = FALSE) %>%
      janitor::adorn_pct_formatting(rounding = "half up", digits = rounding_digits) %>%
      janitor::adorn_ns(position = position)
  } else {
    result = dataset %>%
      janitor::tabyl(!!rlang::sym(row_field), !!rlang::sym(column_field), show_missing_levels=TRUE) %>%
      janitor::adorn_percentages(denominator = percent_denominator, na.rm = FALSE) %>%
      janitor::adorn_pct_formatting(rounding = "half up", digits = rounding_digits) %>%
      janitor::adorn_ns(position = position)
  }
  print(paste0('Row counts are: ', toupper(row_field), '...... Columns counts are: ', toupper(column_field)))
  return(result)
}
|
2c4cb3831c164941db7fbc6250df87d9455a65b9
|
912ce0acd46b9c077abc0c1106aaf9acaca1d126
|
/archive/Archive151004/pytsop/docks_csvs_refresh.R
|
28b7e2f53816a1a71e06791ff839d598f1718757
|
[] |
no_license
|
kerazarek/TaylorLab
|
3cafe4ae514defe541d98d28cfc307bf8bfcd0ca
|
b2230bb3f209ad0fe5f60e229cef19c69bbc02b5
|
refs/heads/master
| 2021-01-18T23:50:17.937434
| 2016-09-22T05:18:10
| 2016-09-22T05:18:10
| 43,645,604
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,477
|
r
|
docks_csvs_refresh.R
|
# Refresh the per-sheet CSV exports of the docking workbook (docks.xlsx).
# IN
# docks_read <- "~/lab/Docking/docks.xlsx"
home_dir <- commandArgs(trailingOnly = TRUE)  # lab root directory, passed on the command line
# home_dir <- "~/lab"
docking <- paste(sep = "/", home_dir, "Docking")
docks_read <- paste(sep = "/", docking, "docks.xlsx" )
# docks_read
# OUT: one CSV per worksheet, written under Docking/docks_csvs/.
docks_csvs <- paste(sep = "/", docking, "docks_csvs" )
docks_ligsets_csv <- paste(sep = "/", docks_csvs, "docks_ligsets.csv")
docks_gridboxes_csv <- paste(sep = "/", docks_csvs, "docks_gridboxes.csv")
docks_pdbs_csv <- paste(sep = "/", docks_csvs, "docks_pdbs.csv")
docks_p300_csv <- paste(sep = "/", docks_csvs, "docks_p300.csv")
docks_hepi_csv <- paste(sep = "/", docks_csvs, "docks_hepi.csv")
# BUG FIX: use library() instead of require(). require() only returns FALSE
# when xlsx is unavailable, so the script would fail later with a confusing
# "could not find function read.xlsx"; library() errors immediately.
library(xlsx)
# Read each worksheet, restricting to its known column range.
docks_ligsets <- read.xlsx(docks_read, sheetName = "ligsets", colIndex = 1:5)
docks_gridboxes <- read.xlsx(docks_read, sheetName = "gridboxes", colIndex = 1:11)
docks_pdbs <- read.xlsx(docks_read, sheetName = "pdbs", colIndex = 1:6)
docks_p300 <- read.xlsx(docks_read, sheetName = "p300", colIndex = 1:12)
docks_hepi <- read.xlsx(docks_read, sheetName = "hepi", colIndex = 1:12)
write.csv(docks_ligsets, file = docks_ligsets_csv)
write.csv(docks_gridboxes, file = docks_gridboxes_csv)
write.csv(docks_pdbs, file = docks_pdbs_csv)
write.csv(docks_p300, file = docks_p300_csv)
write.csv(docks_hepi, file = docks_hepi_csv)
# (Exploratory code retained below, commented out.)
# print(docks_p300)
# if (dock == "p0") {
# dock_sheet <- docks_p300
# }
#
# dock_docks <- dock_sheet["DOCK"]
# # print(dock_docks)
#
# # grep(dock, dock_docks)
# print(dock)
# cbind
|
e50e9a1b9010fd126bbd5f4be59e0b80f5b25756
|
d4cd3909b5c5ff996e405a9dbcdb830a9f18599f
|
/consumer index.R
|
1e67502eeb422b96c764410ce5157b228e43fda8
|
[] |
no_license
|
jevzimite/Projects.compiled
|
6bb39df27ed44871c240fea4408967248f76293d
|
df1fdcaa12bf8d339a2ca782e28c425a44c12409
|
refs/heads/main
| 2023-05-01T13:30:24.549058
| 2021-05-25T22:11:07
| 2021-05-25T22:11:07
| 332,641,146
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,347
|
r
|
consumer index.R
|
# Consumer basket price trends, April-August 2020 (COVID period).
library(readxl)
basket <- read_excel("~/Downloads/basket.xlsx")
View(basket)
cln<-c("bathroom","shampoo","deodorant", "toothpaste")
colnames(basket)<- cln
rwn<-c("April","May", "June", "July", "August")
# NOTE(review): read_excel() returns a tibble, where setting rownames is
# deprecated and they are dropped on most operations — confirm this is
# intentional (rwn is otherwise unused after the fix below).
rownames(basket)<- rwn
one2five<-c(1,2,3,4,5)  # numeric month index used as the shared x axis
basket
# months<-c("Apr-20", "May-20", "Jun-20", "Jul-20", "Aug-20")
library(ggplot2)
ggplot(data=basket)+
  # BUG FIX: the first layer used x=rwn (a character vector), which sets up
  # a discrete x scale and then clashes with the continuous x=one2five used
  # by every other layer ("Discrete value supplied to continuous scale").
  geom_jitter(mapping=aes(x=one2five, y=bathroom), color="darkorange3")+
  geom_smooth(mapping=aes(x=one2five, y=bathroom),method="lm", alpha=0, size=.3, linetype=5, color="darkorange3")+
  geom_jitter(mapping=aes(x=one2five, y=shampoo), color="firebrick4")+
  geom_smooth(mapping=aes(x=one2five, y=shampoo),method="lm", alpha=0, size=.3, linetype=5, color="firebrick4")+
  geom_jitter(mapping=aes(x=one2five, y=deodorant), color="deepskyblue4")+
  geom_smooth(mapping=aes(x=one2five, y=deodorant),method="lm", alpha=0, size=.3, linetype=5, color="deepskyblue4")+
  geom_jitter(mapping=aes(x=one2five, y=toothpaste), color="palegreen4")+
  # BUG FIX: removed a stray ", ," (empty argument) that made this call error.
  geom_smooth(mapping=aes(x=one2five, y=toothpaste),method="lm", alpha=0, size=.3, linetype=5, color="palegreen4")+
  geom_vline(xintercept = 0)+ geom_hline(yintercept = 0)+
  ggtitle("Price changes across April-August (COVID)")+
  labs(x="Months", y="Price ($)")+
  theme(panel.background = element_rect(fill = 'white', colour = 1))
|
0aaf905aaf5dc5bb2837e3e554a9bc4183170e8e
|
698c0d49aa96238fed1d46f9ac061c5c0ed7ce9e
|
/Data & Codes/Pairs Trading Strategy/Simulation.R
|
e59a15f7603373eec9971d025a69403a61b0b434
|
[] |
no_license
|
Timmer13/Trading_Strategy_Cryptocurrency
|
4b309996471cd71853efd6e7eaac3c078e4d1016
|
f36e1abef74a34b4c0fc8aee329895aea670273e
|
refs/heads/master
| 2020-05-14T16:48:18.705152
| 2019-04-17T11:44:09
| 2019-04-17T11:44:09
| 181,879,131
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,254
|
r
|
Simulation.R
|
setwd("~/Documents/MATH_5010/project")
library("ggplot2")
library(reshape2)
# Load daily price histories for four cryptocurrencies and keep only the
# closing prices. Only PETH and PBTC are used below; PLTC/PXRP appear to be
# loaded for symmetry — confirm before removing.
ETH <- read.csv("ETH.csv")
BTC <- read.csv("BTC.csv")
LTC <- read.csv("LTC.csv")
XRP <- read.csv("XRP.csv")
PETH <- ETH[,"close"]
PBTC <- BTC[,"close"]
PLTC <- LTC[,"close"]
PXRP <- XRP[,"close"]
GBM <- function(mu,s,d,z){
  # One Euler step of a geometric-Brownian-motion log return over a time
  # step of length d: drift mu*d plus diffusion s*sqrt(d)*z, where z is a
  # standard normal draw supplied by the caller.
  drift <- mu * d
  diffusion <- s * sqrt(d) * z
  drift + diffusion
}
nGBM <- function(mu,s,z){
  # Non-stochastic-time variant of GBM(): a unit-step log return, i.e.
  # mean mu plus s times the supplied standard normal draw z.
  mu + s * z
}
# Simulate n future price paths for BTC (data1) and ETH (data2) using
# correlated GBM steps (same normal draw z feeds both series each step).
data1 <- PBTC
data2 <- PETH
# Daily log returns of each historical series.
r1 <- c()
for(i in 1:(length(data1)-1)){
  r1 <- append(r1,log(data1[i+1]/data1[i]))
}#calculate log return
r2 <- c()
for(i in 1:(length(data2)-1)){
  r2 <- append(r2,log(data2[i+1]/data2[i]))
}
# Empirical drift and volatility of the daily log returns.
mu1 <- mean(r1)
sigma1 <- sd(r1)
mu2 <- mean(r2)
sigma2 <- sd(r2)
# NOTE(review): `T` shadows the built-in shorthand for TRUE for the rest of
# the script.
T <- 250 #total time
delta = 1 #step size
h <- T/delta #num of steps
n=100 #num of paths
x1 <- matrix(nrow = n,ncol = h+1)
x2 <- matrix(nrow = n,ncol = h+1)
# NOTE(review): `1:h+1` parses as (1:h)+1, i.e. 2:(h+1) — here that happens
# to fill exactly columns 2..h+1, leaving column 1 (the start) NA, which
# matches how sim1/sim2 are used below. Confirm this was intended rather
# than 1:(h+1).
for (i in 1:h+1){#simulate future log return
  for(j in 1:n){
    zz <- rnorm(1)
    x1[j,i] <- GBM(mu = mu1,s = sigma1, d = delta, z=zz) #simulate using stochastic method w/ same z
    x2[j,i] <- GBM(mu = mu2,s = sigma2, d = delta, z=zz) #simulate using stochastic method w/ same z
  }
}
sim1 <- matrix(rep(data1[length(data1)],n*(h+1)),nrow = n, ncol = h+1)#create a matrix w/ init. price
sim2 <- matrix(rep(data2[length(data2)],n*(h+1)),nrow = n, ncol = h+1)#create a matrix w/ init. price
for(i in 1:n){
  # NOTE(review): `2:h+1` parses as (2:h)+1, i.e. 3:(h+1), so column 2 is
  # never updated and stays at the initial price (x1[,2] goes unused).
  # The intent was almost certainly 2:(h+1) — confirm before fixing, as it
  # changes the simulated paths.
  for(j in 2:h+1){
    sim1[i,j] <- sim1[i,j-1]*exp(x1[i,j]) #get back price using log return
    sim2[i,j] <- sim2[i,j-1]*exp(x2[i,j]) #get back price using log return
  }
}
ndata1 <- matrix(rep(data1,n),byrow = TRUE, nrow = n) #replicate original price for n paths
total1 <- as.matrix(data.frame(ndata1,sim1)) #put original price and simulation together
rownames(total1) <- NULL
colnames(total1) <- NULL
#total 1 is the total price movement+simulated price for data 1
ndata2 <- matrix(rep(data2,n),byrow = TRUE, nrow = n) #replicate original price for n paths
total2 <- as.matrix(data.frame(ndata2,sim2)) #put original price and simulation together
rownames(total2) <- NULL
colnames(total2) <- NULL
#total 2 is the total price movement+simulated price for data 2
# NOTE(review): the hard-coded 2001 assumes the historical series has
# exactly 2000 observations — confirm against the CSVs.
sim_BTC <- total1[,2001:ncol(total1)] #take out only simulated values
sim_ETH <- total2[,2001:ncol(total2)]
find.closetime <- function(St, close.target = 0){
  # Find all indices i at which the series crosses (or touches)
  # `close.target` between St[i] and St[i+1], in either direction.
  #
  # Vectorized rewrite of the original grow-a-vector loop (an O(n^2)
  # anti-pattern). Behavior-compatible for all callers, which only test
  # membership with `%in%`: this version returns integer(0) instead of
  # NULL when there is no crossing, and reports an index once where the
  # original could append it twice (when both endpoints equal the target).
  if (length(St) < 2) {
    return(integer(0))
  }
  lhs <- St[-length(St)]  # St[i]
  rhs <- St[-1]           # St[i+1]
  which((lhs <= close.target & rhs >= close.target) |
          (lhs >= close.target & rhs <= close.target))
}
find.low.opentime <- function(St, q = 0){
  # Find all indices where the spread is below the lower threshold -q,
  # i.e. candidate times to open a "low" position.
  #
  # Vectorized rewrite of the original grow-a-vector loop. Returns
  # integer(0) instead of NULL when nothing qualifies; callers only use
  # the result with `%in%`, for which the two are equivalent.
  which(St < -q)
}
find.high.opentime <- function(St, q = 0){
  # Find all indices where the spread is above the upper threshold q,
  # i.e. candidate times to open a "high" position.
  #
  # Vectorized rewrite of the original grow-a-vector loop. Returns
  # integer(0) instead of NULL when nothing qualifies; callers only use
  # the result with `%in%`, for which the two are equivalent.
  which(St > q)
}
profit <- function(St, lopen, hopen, close, i = 1, p = 0, n = 0, coinA, coinB){
  # Walk the spread series St once and accumulate pairs-trading P&L:
  # open a position (short the overvalued coin, long gamma shares of the
  # other) when i falls in lopen/hopen while flat, close it when i falls
  # in `close`, and force-close any open position at the final index.
  #
  # Args:
  #   St:            spread series (only its length drives the loop).
  #   lopen, hopen:  index sets from find.low.opentime / find.high.opentime.
  #   close:         index set from find.closetime.
  #   i, p, n:       initial index, profit accumulator, and position flag
  #                  (n == 0 flat, n == 1 open).
  #   coinA, coinB:  price series used to value the legs.
  #                  NOTE(review): indexed with the same i as St — assumes
  #                  equal lengths; the call sites pass historical prices
  #                  against a simulated-length St, confirm this is intended.
  #
  # Returns: total profit p.
  # NOTE(review): the scalar conditions below use `&`; `&&` would be the
  # idiomatic (short-circuiting) choice but behavior is the same here.
  while(i < length(St)){
    if(i %in% lopen & n==0){ #A overvalued
      A <- -1 #Short 1 share of A
      B <- coinA[i]/coinB[i] #Long gamma share of B
      n <- 1 #Change position
    }
    if(i %in% hopen & n==0){ #B overvalued
      B <- -1 #Short 1 share of B
      A <- coinB[i]/coinA[i] #Long gamma share of A
      n <- 1 #Change position
    }
    if(i %in% close & n==1){
      # Mark the open position to market and go flat.
      p <- p +A*coinA[i] + B*coinB[i]
      n <- 0
    }
    i=i+1
    # Force-close a position still open at the end of the series.
    if (i == length(St) & n==1){
      p <- p +A*coinA[i] + B*coinB[i]
    }
  }
  return(p)
}
# Sweep 21 candidate open thresholds per simulated path and record the
# pairs-trading profit for each (path, threshold) combination.
q_var <- matrix(nrow = 100, ncol = 21) #100 paths, 20 possible threshholds
for(i in 1:100){
  # Spread = minus the residual of regressing log simulated BTC on log
  # simulated ETH for this path.
  # NOTE(review): `$residual` relies on partial matching of lm()'s
  # `residuals` component — works, but `$residuals` would be explicit.
  St <- -lm(log(sim_BTC[i,])~log(sim_ETH[i,]))$residual
  close.target <- 0
  close <- find.closetime(St = St, close.target = close.target)
  # Thresholds span the observed spread range in 20 equal steps.
  threshold <- seq(min(St),max(St),by=abs(max(St)-min(St))/20)
  for(t in 1:length(threshold)){
    lopen <- find.low.opentime(St = St, q = threshold[t])
    hopen <- find.high.opentime(St = St, q = threshold[t])
    # NOTE(review): prices passed here are the historical PETH/PBTC series,
    # not the simulated ones St was built from, and their length differs
    # from St's — confirm this is intentional.
    p <- profit(St = St, lopen = lopen, hopen = hopen, close = close,
                i = 1, p = 0, n = 0, coinA = PETH, coinB = PBTC)
    q_var[i,t] <- p
  }
}
plot(q_var[1,],type = "l") #sample test for first pair of simulated data
rownames(q_var) <- NULL
colnames(q_var) <- NULL
#plot to test threshhold(ignore the actual threshhold value)
q<-melt(as.matrix(q_var))
# NOTE(review): melt() yields 100*21 rows, so 1:100 recycles here; the
# recycling happens to label each path consistently because melt is
# column-major — confirm.
q$rowid <- 1:100
ggplot(q, aes(Var2, value, group=factor(rowid))) +
  geom_line(aes(color=factor(rowid))) +
  labs(colour = "Sample",x="threshhold", y="profit",title = "Test for different threshhold")
# Best profit per path across thresholds, and its average over paths.
# NOTE(review): `max` shadows base::max for the rest of the script.
max <- apply(q_var[, 1:21], 1, max)
mean(max)
#plot of data 1 price movement
data1_sim<-melt(as.matrix(total1))
data1_sim$rowid <- 1:100
ggplot(data1_sim, aes(Var2, value, group=factor(rowid))) +
  geom_line(aes(color=factor(rowid)), show.legend=F) +
  labs(x="Time",title = "Simulated BTC price")
#plot of data 2 price movement
data2_sim<-melt(as.matrix(total2))
data2_sim$rowid <- 1:100
ggplot(data2_sim, aes(Var2, value, group=factor(rowid))) +
  geom_line(aes(color=factor(rowid)), show.legend=F) +
  labs(colour = "Sample",x="Time",title = "Simulated ETH price")
|
e3f3e07e7929a9dda24d5a224dffa99b3d3bb809
|
c47a9dab242120ea05ad8877af7c7efdec48dcb8
|
/egssimtools/man/read_arch_network.Rd
|
9fc32334a76e828e72df55d1291a3a121779e55f
|
[] |
no_license
|
rscherrer/ExplicitGenomeSpeciation
|
4ee1b8fdf3c19a085d750e73ce94ae4f2b25a070
|
55b3d4cf79f97bf6c91d366ce7274e411d612510
|
refs/heads/raph
| 2021-08-10T20:54:15.903930
| 2021-04-14T14:12:54
| 2021-04-14T14:12:54
| 184,590,896
| 1
| 1
| null | 2021-04-14T14:12:55
| 2019-05-02T13:59:12
|
C++
|
UTF-8
|
R
| false
| true
| 811
|
rd
|
read_arch_network.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/read_arch_network.R
\name{read_arch_network}
\alias{read_arch_network}
\title{Read gene network architecture}
\usage{
read_arch_network(folder, filename = "architecture.txt", as_list = FALSE)
}
\arguments{
\item{folder}{Path to the simulation}
\item{filename}{Optional architecture file name}
\item{as_list}{Whether to return the output at a list of two tibbles
instead of a \code{tbl_graph}.}
}
\value{
A \code{tbl_graph} object (useful for plotting using the \code{ggraph}
package), or a list of data frames if applicable
}
\description{
Reads a genetic architecture and stores it into a \code{tbl_graph} object
}
\examples{
\dontrun{
# Location of the simulation folder
root <- "data/example_1"
read_arch_network(root)
}
}
|
f0bf0303adb80b4edc64c88552d0b237eae55d2c
|
3dfb28e48a4d4e9495e3df3b5e7a8c541c2f6494
|
/house-prices/data-prep.R
|
fd8f1ee6e153021c7da839e5eb3cbc2b0c4134ef
|
[] |
no_license
|
amey-joshi/statistics
|
c4ecba0cb124d431b5b1db74278722df9c7c7692
|
830d51a3f67ef0229e0e44ae0070019f8a60e150
|
refs/heads/master
| 2021-07-22T08:38:03.518668
| 2020-10-07T12:56:24
| 2020-10-07T12:56:24
| 222,046,046
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,606
|
r
|
data-prep.R
|
# Configuration flags controlling the output written at the end of the script.
DROP_COLS <- FALSE # Should columns that are transformed be dropped?
ONLY_COMPLETE_CASES <- FALSE
# Columns retained from the raw training data.
# The list of variables was generated using the shell command
# cat variables.txt | grep Keep | cut -d":" -f1 | sed 's/\([[:alnum:]][[:alnum:]]*\)/"\1",/g'
var.list <- c(
  "Id",
  "MSSubClass",
  "MSZoning",
  "LotShape",
  "LandContour",
  "LotConfig",
  "Neighborhood",
  "Condition1",
  "BldgType",
  "HouseStyle",
  "OverallQual",
  "OverallCond",
  "RoofStyle",
  "ExterQual",
  "ExterCond",
  "Foundation",
  "BsmtQual",
  "BsmtCond",
  "BsmtExposure",
  "TotalBsmtSF",
  "HeatingQC",
  "CentralAir",
  "Electrical",
  "GrLivArea",
  "BsmtFullBath",
  "FullBath",
  "KitchenQual",
  "TotRmsAbvGrd",
  "Functional",
  "FireplaceQu",
  "GarageFinish",
  "GarageCond",
  "PavedDrive",
  "YearBuilt",
  "YrSold",
  "SaleType",
  "SaleCondition",
  "SalePrice"
)
# Columns dropped later when DROP_COLS is TRUE.
# Generated using
# cat level1.vars.txt | grep drop | sed "s/  */ /g" | cut -d" " -f2 | sed 's/\([[:alnum:]][[:alnum:]]*\)/"\1",/g'
drop.list <- c(
  "LotShape",
  "Condition1",
  "BldgType",
  "HouseStyle",
  "OverallCond",
  "RoofStyle",
  "ExterQual",
  "ExterCond",
  "BsmtQual",
  "BsmtCond",
  "KitchenQual",
  "GarageCond",
  "GarageFinish",
  "FireplaceQu"
)
training.data <- read.csv("train.csv")
raw.data <- training.data[, var.list]
# Compute age and split age into bins
# 10-year age buckets; the -10 lower bound covers houses sold before completion
# (negative age) — confirm that interpretation.
max.age <- max(raw.data$YrSold - raw.data$YearBuilt)
raw.data$Age.Lvl <- cut(raw.data$YrSold - raw.data$YearBuilt,
                        breaks = seq(from = -10, to = round(max.age, -1), by = 10))
# ---- Ordinal recoding of categorical predictors ----
# Each variable below is replaced by a coarser ordered factor "<var>.1":
# its levels are ranked by mean SalePrice in the training data and grouped
# according to the hand-chosen code vectors (identical groupings to the
# original nine copy-pasted sections, now expressed once).
recode_by_price <- function(data, var, codes, all.x = FALSE) {
  # Mean sale price per level of `var`; used only to order the levels.
  X <- aggregate(as.formula(paste0("cbind(avg.price = SalePrice) ~ ", var)),
                 training.data, mean)
  mapper <- setNames(
    data.frame(X[order(X$avg.price), var], as.factor(codes)),
    c(var, paste0(var, ".1"))
  )
  # all.x = TRUE keeps rows whose level is NA (needed for Electrical,
  # which has one missing value in the training data).
  merge(data, mapper, all.x = all.x)
}
# Grouping codes per variable, in price order of the original levels.
recodings <- list(
  MSZoning      = c(1, 2, 2, 3, 4),
  LotConfig     = c(rep(1, 3), 2, 2),
  Neighborhood  = c(rep(1, 3), rep(2, 3), rep(3, 6), rep(4, 5), rep(5, 5), rep(6, 3)),
  OverallQual   = c(2, 2, 3, 4, 5, 6, 7, 8, 9, 10),
  Electrical    = c(1, rep(2, 3), 3),
  BsmtFullBath  = c(rep(0, 2), rep(1, 2)),
  Functional    = c(1, 1, rep(2, 4), 3),
  SaleType      = c(1, rep(2, 3), 3, rep(4, 2), rep(5, 2)),
  SaleCondition = c(1, 2, 2, 3, 3, 4)
)
for (var in names(recodings)) {
  raw.data <- recode_by_price(raw.data, var, recodings[[var]],
                              all.x = (var == "Electrical"))
}
# Original columns that have been replaced by their ".1" recodings.
drop.list.1 <- names(recodings)
# ---- Final type conversions, binning, and output ----
# Drop the old columns
# Convert a few variables into levels, which they are
raw.data$MSSubClass <- as.factor(raw.data$MSSubClass)
raw.data$FullBath <- as.factor(raw.data$FullBath)
raw.data$TotRmsAbvGrd <- as.factor(raw.data$TotRmsAbvGrd)
# Convert GrLivArea into as factor: 100-sqft bins spanning the rounded range.
low.limit <- round(min(raw.data$GrLivArea), -2)
high.limit <- round(max(raw.data$GrLivArea), -2) + 100
raw.data$GrLivArea.Lvl <- cut(raw.data$GrLivArea,
                              breaks = seq(low.limit, high.limit, by = 100))
# BUG FIX: the sale-year column is named "YrSold" (see var.list above), not
# "YearSold"; the old value silently matched nothing, so the column was
# never dropped.
drop.list.2 <- c("YearBuilt", "YrSold")
if (DROP_COLS) {
  raw.data <- raw.data[,!colnames(raw.data) %in% drop.list]
  raw.data <- raw.data[,!(colnames(raw.data) %in% drop.list.1)]
  raw.data <- raw.data[, !(colnames(raw.data) %in% drop.list.2)]
}
if (ONLY_COMPLETE_CASES) {
  # We will use only complete cases for our analysis.
  saveRDS(raw.data[complete.cases(raw.data), ], "model_data.Rds")
} else {
  saveRDS(raw.data, "model_data.Rds")
}
rm(list = ls())
|
c59c1fcfa1c5e3ec4d2205bab0d2d912fbddbffd
|
0a906cf8b1b7da2aea87de958e3662870df49727
|
/grattan/inst/testfiles/IncomeTax/libFuzzer_IncomeTax/IncomeTax_valgrind_files/1610383012-test.R
|
19972961d07de85a567820cf2091d29776c31b5d
|
[] |
no_license
|
akhikolla/updated-only-Issues
|
a85c887f0e1aae8a8dc358717d55b21678d04660
|
7d74489dfc7ddfec3955ae7891f15e920cad2e0c
|
refs/heads/master
| 2023-04-13T08:22:15.699449
| 2021-04-21T16:25:35
| 2021-04-21T16:25:35
| 360,232,775
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 373
|
r
|
1610383012-test.R
|
# Auto-generated libFuzzer reproduction case: replays a captured argument
# list (including NaN and denormal doubles) against grattan::IncomeTax and
# prints the structure of the result. Do not edit the values by hand.
testlist <- list(rates = 0, thresholds = numeric(0), x = c(2.33598865781438e-310, 4.39013136496848e-259, NaN, -2.41844193503962e+24, 2.00134474032677e-256, NaN, 3.05162830045447e-05, NaN, -1.43713943880526e+241, 5.54095540936847e-310, 4.77023936047618e-299, 7.29112201952987e-304, 1.59070796007927e-310, 0, 0))
result <- do.call(grattan::IncomeTax,testlist)
str(result)
|
4586218e5a3a1e1837d3993e65df5ee14f6236a1
|
7f72ac13d08fa64bfd8ac00f44784fef6060fec3
|
/RGtk2/man/gtkTextViewScrollToIter.Rd
|
9582412221da77d388ef37ed371b65ebc70abaeb
|
[] |
no_license
|
lawremi/RGtk2
|
d2412ccedf2d2bc12888618b42486f7e9cceee43
|
eb315232f75c3bed73bae9584510018293ba6b83
|
refs/heads/master
| 2023-03-05T01:13:14.484107
| 2023-02-25T15:19:06
| 2023-02-25T15:20:41
| 2,554,865
| 14
| 9
| null | 2023-02-06T21:28:56
| 2011-10-11T11:50:22
|
R
|
UTF-8
|
R
| false
| false
| 1,614
|
rd
|
gtkTextViewScrollToIter.Rd
|
\alias{gtkTextViewScrollToIter}
\name{gtkTextViewScrollToIter}
\title{gtkTextViewScrollToIter}
\description{Scrolls \code{text.view} so that \code{iter} is on the screen in the position
indicated by \code{xalign} and \code{yalign}. An alignment of 0.0 indicates
left or top, 1.0 indicates right or bottom, 0.5 means center.
If \code{use.align} is \code{FALSE}, the text scrolls the minimal distance to
get the mark onscreen, possibly not scrolling at all. The effective
screen for purposes of this function is reduced by a margin of size
\code{within.margin}.}
\usage{gtkTextViewScrollToIter(object, iter, within.margin, use.align = FALSE,
xalign = 0.5, yalign = 0.5)}
\arguments{
\item{\verb{object}}{a \code{\link{GtkTextView}}}
\item{\verb{iter}}{a \code{\link{GtkTextIter}}}
\item{\verb{within.margin}}{margin as a [0.0,0.5) fraction of screen size}
\item{\verb{use.align}}{whether to use alignment arguments (if \code{FALSE},
just get the mark onscreen)}
\item{\verb{xalign}}{horizontal alignment of mark within visible area}
\item{\verb{yalign}}{vertical alignment of mark within visible area}
}
\details{Note that this function uses the currently-computed height of the
lines in the text buffer. Line heights are computed in an idle
handler; so this function may not have the desired effect if it's
called before the height computations. To avoid oddness, consider
using \code{\link{gtkTextViewScrollToMark}} which saves a point to be
scrolled to after line validation.}
\value{[logical] \code{TRUE} if scrolling occurred}
\author{Derived by RGtkGen from GTK+ documentation}
\keyword{internal}
|
38c593c98cec936fc45dfc24c1a22dd54025ae6a
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/CVTuningCov/examples/CVTuningCov-package.Rd.R
|
9f854f99a1e2cd28da8aa6a041f0533c347cf26d
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 547
|
r
|
CVTuningCov-package.Rd.R
|
# Example extracted from the CVTuningCov package documentation: choose a
# tapering tuning parameter by cross-validation under the Frobenius norm.
library(CVTuningCov)
### Name: CVTuningCov-package
### Title: Select Tuning Parameters based on CV in Regularized Estimators
###   of Covariance Matrices
### Aliases: CVTuningCov-package CVTuningCov
### Keywords: banding covariance matrix cross-validation Frobenius norm
###   operator norm thresholding
### ** Examples
library(MASS);
n <- 50;
p <- 50;
fold <- 3;
# Candidate tapering parameters 0..2(p-1).
k.grid <- seq(0,2*(p-1),by=1);
# Simulate n draws from a p-dimensional AR(1) covariance structure.
Sigma <- AR1(p, rho=0.6);
X <- mvrnorm(n,rep(0,p),Sigma);
# Cross-validated selection; CV.k holds the chosen tuning parameter.
CV.F.fit <- regular.CV(X,k.grid, method='Tapering',fold=fold,norm='F');
CV.F.fit$CV.k;
|
fae09ebb86d82bc5b6870abb031fe4698af3a7c7
|
f57e3e8ea8228d7fdd7c745b0f2419ee265794e8
|
/Long_data_transition.r
|
c55c1badeef26a44afcad0bc861239835483c397
|
[] |
no_license
|
STEMpersistence/Cam-Long-Data-Transition
|
16466998af1f89a0b90a1ea5ed204d84c16416a5
|
00d36a999576bd36ff2d83da939922b4f1739d23
|
refs/heads/master
| 2021-04-28T15:36:46.130586
| 2018-03-02T02:45:10
| 2018-03-02T02:45:10
| 121,993,035
| 0
| 1
| null | 2018-02-18T21:53:09
| 2018-02-18T21:37:25
|
R
|
UTF-8
|
R
| false
| false
| 756
|
r
|
Long_data_transition.r
|
library(dplyr)
library(tidyr)
data <- read_excel("stem_persistence_update.xlsx") # read in data
data2 <- data %>% separate("graduationsemester", c("gradyear", "gradsem"), sep = 4, remove = FALSE) # seperate columns into year and (semester and campus)
data4 <- data2 %>% separate("gradsem", c("gradsem", "gradcampus"), sep = 1, remove = TRUE) # seperate columns into semester and campus
data3 <- data2 %>% unite("graduationsemester", c("gradyear", "gradsem"), sep = "", remove = TRUE) # reunite year and semester
#This will reassign numeric semester values to corresponding word values
data4$gradsem[which(data4$gradsem==1)] <- "fall"
data4$gradsem[which(data4$gradsem==2)] <- "summer"
data4$gradsem[which(data4$gradsem==3)] <- "spring"
|
24821c4d5a923e301105e497a4576df24f2bd941
|
a5c5b2178d3c32d41f22fccfe6db98b5f6661d0a
|
/man/meowR.Rd
|
c26e7a0d7511d4469b2bcd2513c2a25791601df5
|
[
"MIT"
] |
permissive
|
IndrajeetPatil/kittyR
|
d3dca59b364135fe2b191f2ebe9bc2a5844462b8
|
12e8258e0335e551f40ff80cbc60022ddc369bc1
|
refs/heads/main
| 2023-05-23T16:39:57.944996
| 2022-08-16T19:03:29
| 2022-08-16T19:03:29
| 169,513,858
| 50
| 7
|
NOASSERTION
| 2022-08-16T18:56:22
| 2019-02-07T03:27:31
|
R
|
UTF-8
|
R
| false
| true
| 729
|
rd
|
meowR.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/meowR.R
\name{meowR}
\alias{meowR}
\title{Play a short kitty meow sound}
\usage{
meowR(sound = 3)
}
\arguments{
\item{sound}{A character string or a number specifying which sound is to be
played by specifying one of the built-in sounds (stored in \code{.wav} files).
There are currently 6 meows included. The default is third one (\code{3}).}
}
\value{
No return value, called for side effect (sound).
}
\description{
\code{meowR} is inspired by \code{beepr::beep}, which plays a short sound to notify
you, for example, when a script has finished. Instead of a beep, this
function produced a meow.
}
\examples{
\donttest{
kittyR::meowR(sound = 4)
}
}
|
8b0073d05b766374c187ec4e1f593095740d008a
|
be788a483220254205bfe9556c6ae0a45d30c6c5
|
/CVPAT_example.R
|
2f886f92606233d2de93d0013d39ef03e591d162
|
[] |
no_license
|
ECONshare/CVPAT
|
7a974a246309310ed995070fe4ce8e0df20b2aee
|
e9fd077f5561a56378eb362367671a360cf3aa28
|
refs/heads/master
| 2021-07-13T11:30:29.829865
| 2021-03-03T09:57:19
| 2021-03-03T09:57:19
| 239,035,147
| 4
| 2
| null | 2020-02-08T16:12:39
| 2020-02-07T22:40:02
|
R
|
UTF-8
|
R
| false
| false
| 3,193
|
r
|
CVPAT_example.R
|
#IMPORTANT: SET YOUR PATH TO THE WORKING DIRECTORY
#WHERE THE "corp_rep.rda" AND "CVPAT.R" FILES ARE LOCATED
setwd("C:/CVPAT")
# Install required packages --------------------------------------------------
#install.packages("devtools")
#library(devtools)
#install_version("matrixpls", version = "1.0.11", repos = "http://cran.us.r-project.org")
#install.packages("cvTools")
# Load required packages --------------------------------------------------
library(matrixpls)
library(cvTools)
# Load data ---------------------------------------------------------------
load(file="corp_rep.rda")
# matrixpls specification for competing models ----------------------------
var_names <- colnames(corp_rep)
# Model specifications, Established Model (EM)
CSOR = c(0,0,0,0,0,0,0,0)
ATTR = c(0,0,0,0,0,0,0,0)
PERF = c(0,0,0,0,0,0,0,0)
QUAL = c(0,0,0,0,0,0,0,0)
LIKE = c(1,1,1,1,0,0,0,0)
COMP = c(1,1,1,1,0,0,0,0)
CUSA = c(0,0,0,0,1,1,0,0)
CUSL = c(0,0,0,0,1,1,1,0)
inner_EM = rbind(CSOR, ATTR, PERF, QUAL, LIKE, COMP, CUSA, CUSL)
colnames(inner_EM) <- rownames(inner_EM)
# Reflective measurement model
reflective<-matrix(0,length(var_names),ncol(inner_EM))
reflective[22:24,5] <-1
reflective[25:27,6] <-1
reflective[28,7] <-1
reflective[29:31,8] <-1
dimnames(reflective) <- list(var_names,colnames(inner_EM))
# Formative measurement model
formative <- (reflective*0)
formative[1:5,1] <-1
formative[6:8,2] <-1
formative[9:13,3] <-1
formative[14:21,4] <-1
formative <- t(formative)
# Model relations summarized in list for matrixpls package, EM
EM <- list(inner = inner_EM,reflective = reflective,formative = formative)
# Model specifications Alternative Model (AM)
CSOR = c(0,0,0,0,0,0,0,0)
ATTR = c(0,0,0,0,0,0,0,0)
PERF = c(0,0,0,0,0,0,0,0)
QUAL = c(0,0,0,0,0,0,0,0)
LIKE = c(1,1,1,1,0,0,0,0)
COMP = c(1,1,1,1,0,0,0,0)
CUSA = c(1,1,1,1,1,1,0,0)
CUSL = c(1,1,1,1,1,1,1,0)
inner_AM = rbind(CSOR, ATTR, PERF, QUAL, LIKE, COMP, CUSA, CUSL)
colnames(inner_AM) <- rownames(inner_AM)
# Model relations summarized in list for matrixpls package, AM
AM <- list(inner = inner_AM,reflective = reflective,formative = formative)
# Model comparison with CVPAT ---------------------------------------------
# Load CVPAT function
source("CVPAT.R") #The "CVPAT.R" file must be located in the working directory.
# Run CVPAT; change settings as need; see README.txt for details
# The alternative model (AM) is given as Model 1 and the established model (EM) as Model 2.
res_CVPAT <- CVPAT(MV = corp_rep,CVFolds = 10,
Model1 = AM,Model2 = EM,
hypothesis = "M1_better_out_of_sample_than_M2",
BootSamp = 2000,boot.Di = T,seed=42)
# Results extraction ------------------------------------------------------
# Average losses for for Model 1 (M1) = AM
res_CVPAT$losses$avg_losses$avg_losses_M1
# Average losses for Model 2 (M2) = EM
res_CVPAT$losses$avg_losses$avg_losses_M2
# Bootstrapped p-values, testing if AM is significantly better out of sample than EM
res_CVPAT$boot.p.values
# non-bootstrapped p-value, testing if AM is significantly better out of sample than EM
res_CVPAT$p.value
# non-bootstrapped confidence interval
res_CVPAT$conf.int
|
181013b9961cfb6a6db886aa128d6ef500f85d4a
|
0a04c131b8d20e7fef14dd4780cadd9aff3fe673
|
/STAT4201 Advanced Data Analysis/Homework8/Q6.R
|
720ee6b1ac94931604ea1df8238d16ecacf819f6
|
[] |
no_license
|
parth0708/Coursework
|
d49fb62cfcce71e754f3b2fb881a7b7c014757ce
|
749ec8703999706cdd6954fe765c9215628e5b00
|
refs/heads/master
| 2021-01-19T04:27:56.185484
| 2016-06-12T19:51:59
| 2016-06-12T19:51:59
| 53,802,301
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 150
|
r
|
Q6.R
|
library(MASS)
library(leaps)
X <- c(-2,-1,1,2)
Y <- c(0,0,1,1)
dat <- data.frame(X,Y)
model <- glm(Y~X-1,data = dat, family = binomial)
summary(model)
|
3b8982071cbb7002e9e8df938bb4a872987f1be7
|
aa29f617c5d9dbbf581968b1aed01ee7891b9f3f
|
/r/day5/day5_script1.R
|
21df7ab2448850d82c49918bce72390c23e4e845
|
[] |
no_license
|
lhg96/R
|
f0883d9b1f81d9dc0c2cc36058bd5c93f1636884
|
1fea1629cc477fef840e60b242673ea95e95d3c6
|
refs/heads/master
| 2022-12-04T23:47:40.787959
| 2020-08-28T11:12:55
| 2020-08-28T11:12:55
| 289,908,762
| 0
| 0
| null | null | null | null |
UHC
|
R
| false
| false
| 2,944
|
r
|
day5_script1.R
|
#결측치 표기 - 대문자 NA
df <- data.frame(gender = c("M", "F", NA, "M", "F"),
score = c(5, 4, 3, 4, NA))
df
is.na(df) # 결측치 확인 table(is.na(df)) # 결측치 빈도 출력
#변수별로 결측치 확인하기
table(is.na(df$gender)) # gender 결측치 빈도 출력
table(is.na(df$score)) # score 결측치 빈도 출력
#결측치 포함된 상태로 분석
mean(df$score) # 평균 산출
sum(df$score) # 합계 산출
#결측치 행 제거하기
library(dplyr) # dplyr 패키지 로드
df %>% filter(is.na(score)) # score가 NA인 데이터만 출력
df %>% filter(!is.na(score)) # score 결측치 제거
#결측치 제외한 데이터로 분석하기
df_perfect <- df %>% filter(!is.na(score)) # score 결측치 제거
df_perfect
# score, gender 결측치 제외
df_perfect <- df %>% filter(!is.na(score) & !is.na(gender))
df_perfect
#결측치가 하나라도 있으면 제거하기
df_perfect1 <- na.omit(df) # 모든 변수에 결측치 없는 데이터 추출
mean(df_perfect1$score, na.rm = T)
mean(df$score, na.rm = T)
#이상치(outlier) 제거
outlier <- data.frame(gender = c(1, 2, 1, 3, 2, 1),
score = c(5, 4, 3, 4, 2, 6))
outlier
#이상치 확인하기 table(outlier$score)
#결측 처리하기 - gennder(3이면 NA 할당)
outlier$gender <- ifelse(outlier$gender == 3, NA, outlier$gender)
outlier
#이상치 확인하기
table(outlier$gender)
table(outlier$score)
#결측 처리하기 - gender
#gender가 3이면 NA 할당
outlier$gender <- ifelse(outlier$gender == 3, NA, outlier$gender)
outlier
#결측 처리하기 - score
# score가 1~5 아니면 NA 할당
outlier$score <- ifelse(outlier$score > 5, NA, outlier$score)
outlier
#결측치 제외하고 분석 outlier %>%
filter(!is.na(gender) & !is.na(score)) %>%
group_by(gender) %>%
summarise(mean_score = mean(score))
#이상치 확인하기
table(outlier$gender)
table(outlier$score)
#결측 처리하기 - gender
#gender가 3이면 NA 할당
outlier$gender <- ifelse(outlier$gender == 3, NA, outlier$gender)
outlier
#결측 처리하기 - score
# score가 1~5 아니면 NA 할당
outlier$score <- ifelse(outlier$score > 5, NA, outlier$score)
outlier
#결측치 제외하고 분석 outlier %>%
filter(!is.na(gender) & !is.na(score)) %>%
group_by(gender) %>%
summarise(mean_score = mean(score))
#이상치 제거하기 #상자그림 생성
mpg <- as.data.frame(ggplot2::mpg)
boxplot(mpg$hwy)
#상자그림 통계치 출력 boxplot(mpg$hwy)
boxplot(mpg$hwy)$stats # 상자그림 통계치 출력 boxplot(mpg$hwy)$stats[5,]
#결측 처리하기
# 12~37 벗어나면 NA 할당
mpg$hwy <- ifelse(mpg$hwy < 12 | mpg$hwy > 37, NA, mpg$hwy)
table(is.na(mpg$hwy))
#결측치 제외하고 분석하기 mpg %>%
group_by(drv) %>%
summarise(mean_hwy = mean(hwy, na.rm = T))
|
e93e0e9eced5745e8c18353c0d6e0fe702b6da5e
|
321d36ce4f9b3c1371003ed7620c2aa873032755
|
/R/dwmHelper.R
|
4bb5a98b7d1871cd7307eb3fd838cb174f1a27cd
|
[] |
no_license
|
yangdi1031/rFIA
|
b318f81ec9c79942ddde994239297c23dda6138d
|
aaa0ff429696a1c94f81525d142d269f6f7676a5
|
refs/heads/master
| 2023-03-09T19:14:01.312116
| 2021-03-03T18:50:16
| 2021-03-03T18:50:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 58,082
|
r
|
dwmHelper.R
|
dwmHelper1 <- function(x, plts, db, grpBy, byPlot){
## Selecting the plots for one county
db$PLOT <- plts[[x]]
### Only joining tables necessary to produce plot level estimates, adjusted for non-response
data <- db$PLOT %>%
left_join(db$COND, by = c('PLT_CN')) %>%
left_join(db$COND_DWM_CALC, by = c('PLT_CN', 'CONDID'))
## Comprehensive indicator function
data$aDI <- data$landD * data$aD_p * data$aD_c * data$sp
if (byPlot){
grpBy <- c('YEAR', grpBy, 'PLOT_STATUS_CD')
grpSyms <- syms(grpBy)
t <- data %>%
mutate(YEAR = INVYR) %>%
distinct(PLT_CN, CONDID, .keep_all = TRUE) %>%
lazy_dt() %>%
group_by(!!!grpSyms, PLT_CN) %>%
summarize(VOL_1HR = sum(FWD_SM_VOLCF_ADJ * aDI, na.rm = TRUE),
VOL_10HR = sum(FWD_MD_VOLCF_ADJ * aDI, na.rm = TRUE),
VOL_100HR = sum(FWD_LG_VOLCF_ADJ * aDI, na.rm = TRUE),
VOL_1000HR = sum(CWD_VOLCF_ADJ * aDI, na.rm = TRUE),
VOL_PILE = sum(PILE_VOLCF_ADJ * aDI, na.rm = TRUE),
BIO_DUFF = sum(DUFF_BIOMASS* aDI / 2000, na.rm = TRUE),
BIO_LITTER = sum(LITTER_BIOMASS * aDI / 2000, na.rm = TRUE),
BIO_1HR = sum(FWD_SM_DRYBIO_ADJ * aDI / 2000, na.rm = TRUE),
BIO_10HR = sum(FWD_MD_DRYBIO_ADJ * aDI / 2000, na.rm = TRUE),
BIO_100HR = sum(FWD_LG_DRYBIO_ADJ * aDI / 2000, na.rm = TRUE),
BIO_1000HR = sum(CWD_DRYBIO_ADJ * aDI / 2000, na.rm = TRUE),
BIO_PILE = sum(PILE_DRYBIO_ADJ * aDI / 2000, na.rm = TRUE),
CARB_DUFF = sum(DUFF_CARBON* aDI / 2000, na.rm = TRUE),
CARB_LITTER = sum(LITTER_CARBON * aDI / 2000, na.rm = TRUE),
CARB_1HR = sum(FWD_SM_CARBON_ADJ * aDI / 2000, na.rm = TRUE),
CARB_10HR = sum(FWD_MD_CARBON_ADJ * aDI / 2000, na.rm = TRUE),
CARB_100HR = sum(FWD_LG_CARBON_ADJ * aDI / 2000, na.rm = TRUE),
CARB_1000HR = sum(CWD_CARBON_ADJ * aDI / 2000, na.rm = TRUE),
CARB_PILE = sum(PILE_CARBON_ADJ * aDI / 2000, na.rm = TRUE)) %>%
mutate(BIO = BIO_LITTER + BIO_DUFF + BIO_1HR + BIO_10HR + BIO_100HR + BIO_1000HR + BIO_PILE,
VOL = VOL_1HR + VOL_10HR+VOL_100HR+VOL_1000HR+VOL_PILE,
CARB = CARB_LITTER + CARB_DUFF + CARB_1HR + CARB_10HR + CARB_100HR + CARB_1000HR + CARB_PILE) %>%
as.data.frame()
} else {
grpSyms <- syms(grpBy)
# Compute estimates
t <- data %>%
distinct(STRATUM_CN, PLT_CN, CONDID, COND_STATUS_CD, .keep_all = TRUE) %>%
lazy_dt() %>%
group_by(STRATUM_CN, PROP_BASIS, PLT_CN, !!!grpSyms) %>%
summarize(vsmPlot = sum(FWD_SM_VOLCF_ADJ * aDI, na.rm = TRUE),
vmdPlot = sum(FWD_MD_VOLCF_ADJ * aDI, na.rm = TRUE),
vlgPlot = sum(FWD_LG_VOLCF_ADJ * aDI, na.rm = TRUE),
vcPlot = sum(CWD_VOLCF_ADJ * aDI, na.rm = TRUE),
vpPlot = sum(PILE_VOLCF_ADJ * aDI, na.rm = TRUE),
bdPlot = sum(DUFF_BIOMASS* aDI / 2000, na.rm = TRUE),
blPlot = sum(LITTER_BIOMASS * aDI / 2000, na.rm = TRUE),
bsmPlot = sum(FWD_SM_DRYBIO_ADJ * aDI / 2000, na.rm = TRUE),
bmdPlot = sum(FWD_MD_DRYBIO_ADJ * aDI / 2000, na.rm = TRUE),
blgPlot = sum(FWD_LG_DRYBIO_ADJ * aDI / 2000, na.rm = TRUE),
bcPlot = sum(CWD_DRYBIO_ADJ * aDI / 2000, na.rm = TRUE),
bpPlot = sum(PILE_DRYBIO_ADJ * aDI / 2000, na.rm = TRUE),
cdPlot = sum(DUFF_CARBON* aDI / 2000, na.rm = TRUE),
clPlot = sum(LITTER_CARBON * aDI / 2000, na.rm = TRUE),
csmPlot = sum(FWD_SM_CARBON_ADJ * aDI / 2000, na.rm = TRUE),
cmdPlot = sum(FWD_MD_CARBON_ADJ * aDI / 2000, na.rm = TRUE),
clgPlot = sum(FWD_LG_CARBON_ADJ * aDI / 2000, na.rm = TRUE),
ccPlot = sum(CWD_CARBON_ADJ * aDI / 2000, na.rm = TRUE),
cpPlot = sum(PILE_CARBON_ADJ * aDI / 2000, na.rm = TRUE),
fa = sum(CONDPROP_UNADJ * aDI, na.rm = TRUE),
plotIn = ifelse(sum(aDI > 0, na.rm = TRUE), 1,0)) %>%
mutate(vPlot = vsmPlot + vmdPlot+ vlgPlot+ vcPlot+ vpPlot,
bPlot = bdPlot+ blPlot+ bsmPlot+ bmdPlot+ blgPlot+ bcPlot+ bpPlot,
cPlot = cdPlot+ clPlot+ csmPlot+ cmdPlot + clgPlot + ccPlot+ cpPlot) %>%
as.data.frame()
}
pltOut <- list(t = t)
return(pltOut)
}
dwmHelper2 <- function(x, popState, t, grpBy, method){
## DOES NOT MODIFY OUTSIDE ENVIRONMENT
if (str_to_upper(method) %in% c("SMA", 'EMA', 'LMA', 'ANNUAL')) {
grpBy <- c(grpBy, 'INVYR')
popState[[x]]$P2POINTCNT <- popState[[x]]$P2POINTCNT_INVYR
popState[[x]]$P1POINTCNT <- popState[[x]]$P1POINTCNT_INVYR
popState[[x]]$p2eu <- popState[[x]]$p2eu_INVYR
}
grpSyms = syms(grpBy)
## Strata level estimates
tEst <- t %>%
lazy_dt() %>%
## Rejoin with population tables
inner_join(select(popState[[x]], -c(STATECD)), by = c('STRATUM_CN', 'PLT_CN')) %>%
mutate(
## AREA
aAdj = dplyr::case_when(
## When NA, stay NA
is.na(PROP_BASIS) ~ NA_real_,
## If the proportion was measured for a macroplot,
## use the macroplot value
PROP_BASIS == 'MACR' ~ as.numeric(ADJ_FACTOR_MACR),
## Otherwise, use the subpplot value
PROP_BASIS == 'SUBP' ~ ADJ_FACTOR_SUBP),
fa = fa * aAdj) %>%
group_by(ESTN_UNIT_CN, ESTN_METHOD, STRATUM_CN, !!!grpSyms) %>%
summarize(nh = dplyr::first(P2POINTCNT),
a = dplyr::first(AREA_USED),
w = dplyr::first(P1POINTCNT) / dplyr::first(P1PNTCNT_EU),
p2eu = dplyr::first(p2eu),
## dtplyr is fast, but requires a few extra steps, so we'll finish
## means and variances in subseqent mutate step
aStrat = sum(fa , na.rm = TRUE),
vsmStrat = sum(vsmPlot , na.rm = TRUE),
vmdStrat = sum(vmdPlot , na.rm = TRUE),
vlgStrat = sum(vlgPlot , na.rm = TRUE),
vcStrat = sum(vcPlot , na.rm = TRUE),
vpStrat = sum(vpPlot , na.rm = TRUE),
vStrat = sum(vPlot , na.rm = TRUE),
bdStrat = sum(bdPlot , na.rm = TRUE),
blStrat = sum(blPlot , na.rm = TRUE),
bsmStrat = sum(bsmPlot , na.rm = TRUE),
bmdStrat = sum(bmdPlot , na.rm = TRUE),
blgStrat = sum(blgPlot , na.rm = TRUE),
bcStrat = sum(bcPlot , na.rm = TRUE),
bpStrat = sum(bpPlot , na.rm = TRUE),
bStrat = sum(bPlot , na.rm = TRUE),
cdStrat = sum(cdPlot , na.rm = TRUE),
clStrat = sum(clPlot , na.rm = TRUE),
csmStrat = sum(csmPlot , na.rm = TRUE),
cmdStrat = sum(cmdPlot , na.rm = TRUE),
clgStrat = sum(clgPlot , na.rm = TRUE),
ccStrat = sum(ccPlot , na.rm = TRUE),
cpStrat = sum(cpPlot , na.rm = TRUE),
cStrat = sum(cPlot , na.rm = TRUE),
plotIn = sum(plotIn, na.rm = TRUE),
# ## Strata level variances
av = sum(fa^2, na.rm = TRUE),
vsmVar = sum(vsmPlot^2, na.rm = TRUE),
vmdVar = sum(vmdPlot^2, na.rm = TRUE),
vlgVar = sum(vlgPlot^2, na.rm = TRUE),
vcVar = sum(vcPlot^2, na.rm = TRUE),
vpVar = sum(vpPlot^2, na.rm = TRUE),
vVar = sum(vPlot^2, na.rm = TRUE),
bdVar = sum(bdPlot^2, na.rm = TRUE),
blVar = sum(blPlot^2, na.rm = TRUE),
bsmVar = sum(bsmPlot^2, na.rm = TRUE),
bmdVar = sum(bmdPlot^2, na.rm = TRUE),
blgVar = sum(blgPlot^2, na.rm = TRUE),
bcVar = sum(bcPlot^2, na.rm = TRUE),
bpVar = sum(bpPlot^2, na.rm = TRUE),
bVar = sum(bPlot^2, na.rm = TRUE),
cdVar = sum(cdPlot^2, na.rm = TRUE),
clVar = sum(clPlot^2, na.rm = TRUE),
csmVar = sum(csmPlot^2, na.rm = TRUE),
cmdVar = sum(cmdPlot^2, na.rm = TRUE),
clgVar = sum(clgPlot^2, na.rm = TRUE),
ccVar = sum(ccPlot^2, na.rm = TRUE),
cpVar = sum(cpPlot^2, na.rm = TRUE),
cVar = sum(cPlot^2, na.rm = TRUE),
# ## Strata level co-variances
cvStrat_vsm = sum(vsmPlot*fa, na.rm = TRUE),
cvStrat_vmd = sum(vmdPlot*fa, na.rm = TRUE),
cvStrat_vlg = sum(vlgPlot*fa, na.rm = TRUE),
cvStrat_vc = sum(vcPlot*fa, na.rm = TRUE),
cvStrat_vp = sum(vpPlot*fa, na.rm = TRUE),
cvStrat_v = sum(vPlot*fa, na.rm = TRUE),
cvStrat_bd = sum(bdPlot*fa, na.rm = TRUE),
cvStrat_bl = sum(blPlot*fa, na.rm = TRUE),
cvStrat_bsm = sum(bsmPlot*fa, na.rm = TRUE),
cvStrat_bmd = sum(bmdPlot*fa, na.rm = TRUE),
cvStrat_blg = sum(blgPlot*fa, na.rm = TRUE),
cvStrat_bc = sum(bcPlot*fa, na.rm = TRUE),
cvStrat_bp = sum(bpPlot*fa, na.rm = TRUE),
cvStrat_b = sum(bPlot*fa, na.rm = TRUE),
cvStrat_cd = sum(cdPlot*fa, na.rm = TRUE),
cvStrat_cl = sum(clPlot*fa, na.rm = TRUE),
cvStrat_csm = sum(csmPlot*fa, na.rm = TRUE),
cvStrat_cmd = sum(cmdPlot*fa, na.rm = TRUE),
cvStrat_clg = sum(clgPlot*fa, na.rm = TRUE),
cvStrat_cc = sum(ccPlot*fa, na.rm = TRUE),
cvStrat_cp = sum(cpPlot*fa, na.rm = TRUE),
cvStrat_c = sum(cPlot*fa, na.rm = TRUE)) %>%
mutate(aStrat = aStrat / nh,
vsmStrat = vsmStrat / nh,
vmdStrat = vmdStrat / nh,
vlgStrat = vlgStrat / nh,
vcStrat = vcStrat / nh,
vpStrat = vpStrat / nh,
vStrat = vStrat / nh,
bdStrat = bdStrat / nh,
blStrat = blStrat / nh,
bsmStrat = bsmStrat / nh,
bmdStrat = bmdStrat / nh,
blgStrat = blgStrat / nh,
bcStrat = bcStrat / nh,
bpStrat = bpStrat / nh,
bStrat = bStrat / nh,
cdStrat = cdStrat / nh,
clStrat = clStrat / nh,
csmStrat = csmStrat / nh,
cmdStrat = cmdStrat / nh,
clgStrat = clgStrat / nh,
ccStrat = ccStrat / nh,
cpStrat = cpStrat / nh,
cStrat = cStrat / nh,
adj = nh * (nh-1),
# ## Strata level variances
av = (av - (nh*aStrat^2)) / adj,
vsmVar = (vsmVar - (nh*vsmStrat^2)) / adj,
vmdVar =(vmdVar - (nh*vmdStrat^2)) / adj,
vlgVar = (vlgVar - (nh*vlgStrat^2)) / adj,
vcVar = (vcVar - (nh*vcStrat^2)) / adj,
vpVar = (vpVar - (nh*vpStrat^2)) / adj,
vVar = (vVar - (nh*vStrat^2)) / adj,
bdVar = (bdVar - (nh*bdStrat^2)) / adj,
blVar = (blVar - (nh*blStrat^2)) / adj,
bsmVar = (bsmVar - (nh*bsmStrat^2)) / adj,
bmdVar = (bmdVar - (nh*bmdStrat^2)) / adj,
blgVar = (blgVar - (nh*blgStrat^2)) / adj,
bcVar = (bcVar - (nh*bcStrat^2)) / adj,
bpVar = (bpVar - (nh*bpStrat^2)) / adj,
bVar = (bVar - (nh*bStrat^2)) / adj,
cdVar = (cdVar - (nh*cdStrat^2)) / adj,
clVar = (clVar - (nh*clStrat^2)) / adj,
csmVar = (csmVar - (nh*csmStrat^2)) / adj,
cmdVar = (cmdVar - (nh*cmdStrat^2)) / adj,
clgVar = (clgVar - (nh*clgStrat^2)) / adj,
ccVar = (ccVar - (nh*ccStrat^2)) / adj,
cpVar = (cpVar - (nh*cpStrat^2)) / adj,
cVar = (cVar - (nh*cStrat^2)) / adj,
# ## Strata level co-variances
cvEst_vsm = (cvStrat_vsm - (nh * vsmStrat * aStrat)) / adj,
cvEst_vmd = (cvStrat_vmd - (nh * vmdStrat * aStrat)) / adj,
cvEst_vlg = (cvStrat_vlg - (nh * vlgStrat * aStrat)) / adj,
cvEst_vc = (cvStrat_vc - (nh * vcStrat * aStrat)) / adj,
cvEst_vp = (cvStrat_vp - (nh * vpStrat * aStrat)) / adj,
cvEst_v = (cvStrat_v - (nh * vStrat * aStrat)) / adj,
cvEst_bd = (cvStrat_bd - (nh * bdStrat * aStrat)) / adj,
cvEst_bl = (cvStrat_bl - (nh * blStrat * aStrat)) / adj,
cvEst_bsm = (cvStrat_bsm - (nh * bsmStrat * aStrat)) / adj,
cvEst_bmd = (cvStrat_bmd - (nh * bmdStrat * aStrat)) / adj,
cvEst_blg = (cvStrat_blg - (nh * blgStrat * aStrat)) / adj,
cvEst_bc = (cvStrat_bc - (nh * bcStrat * aStrat)) / adj,
cvEst_bp = (cvStrat_bp - (nh * bpStrat * aStrat)) / adj,
cvEst_b = (cvStrat_b - (nh * bStrat * aStrat)) / adj,
cvEst_cd = (cvStrat_cd - (nh * cdStrat * aStrat)) / adj,
cvEst_cl = (cvStrat_cl - (nh * clStrat * aStrat)) / adj,
cvEst_csm = (cvStrat_csm - (nh * csmStrat * aStrat)) / adj,
cvEst_cmd = (cvStrat_cmd - (nh * cmdStrat * aStrat)) / adj,
cvEst_clg = (cvStrat_clg - (nh * clgStrat * aStrat)) / adj,
cvEst_cc = (cvStrat_cc - (nh * ccStrat * aStrat)) / adj,
cvEst_cp = (cvStrat_cp - (nh * cpStrat * aStrat)) / adj,
cvEst_c = (cvStrat_c - (nh * cStrat * aStrat)) / adj) %>%
as.data.frame() %>%
## Estimation unit
group_by(ESTN_UNIT_CN, .dots = grpBy) %>%
summarize(aEst = unitMean(ESTN_METHOD, a, nh, w, aStrat),
vsmEst = unitMean(ESTN_METHOD, a, nh, w, vsmStrat),
vmdEst = unitMean(ESTN_METHOD, a, nh, w, vmdStrat),
vlgEst = unitMean(ESTN_METHOD, a, nh, w, vlgStrat),
vcEst = unitMean(ESTN_METHOD, a, nh, w, vcStrat),
vpEst = unitMean(ESTN_METHOD, a, nh, w, vpStrat),
vEst = unitMean(ESTN_METHOD, a, nh, w, vStrat),
bdEst = unitMean(ESTN_METHOD, a, nh, w, bdStrat),
blEst = unitMean(ESTN_METHOD, a, nh, w, blStrat),
bsmEst = unitMean(ESTN_METHOD, a, nh, w, bsmStrat),
bmdEst = unitMean(ESTN_METHOD, a, nh, w, bmdStrat),
blgEst = unitMean(ESTN_METHOD, a, nh, w, blgStrat),
bcEst = unitMean(ESTN_METHOD, a, nh, w, bcStrat),
bpEst = unitMean(ESTN_METHOD, a, nh, w, bpStrat),
bEst = unitMean(ESTN_METHOD, a, nh, w, bStrat),
cdEst = unitMean(ESTN_METHOD, a, nh, w, cdStrat),
clEst = unitMean(ESTN_METHOD, a, nh, w, clStrat),
csmEst = unitMean(ESTN_METHOD, a, nh, w, csmStrat),
cmdEst = unitMean(ESTN_METHOD, a, nh, w, cmdStrat),
clgEst = unitMean(ESTN_METHOD, a, nh, w, clgStrat),
ccEst = unitMean(ESTN_METHOD, a, nh, w, ccStrat),
cpEst = unitMean(ESTN_METHOD, a, nh, w, cpStrat),
cEst = unitMean(ESTN_METHOD, a, nh, w, cStrat),
plotIn = sum(plotIn, na.rm = TRUE),
N = dplyr::first(p2eu),
# Estimation of unit variance
aVar = unitVarNew(method = 'var', ESTN_METHOD, a, nh, dplyr::first(p2eu), w, av, aStrat, aEst),
vsmVar = unitVarNew(method = 'var', ESTN_METHOD, a, nh, dplyr::first(p2eu), w, vsmVar, vsmStrat, vsmEst),
vmdVar = unitVarNew(method = 'var', ESTN_METHOD, a, nh, dplyr::first(p2eu), w, vmdVar, vmdStrat, vmdEst),
vlgVar = unitVarNew(method = 'var', ESTN_METHOD, a, nh, dplyr::first(p2eu), w, vlgVar, vlgStrat, vlgEst),
vcVar = unitVarNew(method = 'var', ESTN_METHOD, a, nh, dplyr::first(p2eu), w, vcVar, vcStrat, vcEst),
vpVar = unitVarNew(method = 'var', ESTN_METHOD, a, nh, dplyr::first(p2eu), w, vpVar, vpStrat, vpEst),
vVar = unitVarNew(method = 'var', ESTN_METHOD, a, nh, dplyr::first(p2eu), w, vVar, vStrat, vEst),
bdVar = unitVarNew(method = 'var', ESTN_METHOD, a, nh, dplyr::first(p2eu), w, bdVar, bdStrat, bdEst),
blVar = unitVarNew(method = 'var', ESTN_METHOD, a, nh, dplyr::first(p2eu), w, blVar, blStrat, blEst),
bsmVar = unitVarNew(method = 'var', ESTN_METHOD, a, nh, dplyr::first(p2eu), w, bsmVar, bsmStrat, bsmEst),
bmdVar = unitVarNew(method = 'var', ESTN_METHOD, a, nh, dplyr::first(p2eu), w, bmdVar, bmdStrat, bmdEst),
blgVar = unitVarNew(method = 'var', ESTN_METHOD, a, nh, dplyr::first(p2eu), w, blgVar, blgStrat, blgEst),
bcVar = unitVarNew(method = 'var', ESTN_METHOD, a, nh, dplyr::first(p2eu), w, bcVar, bcStrat, bcEst),
bpVar = unitVarNew(method = 'var', ESTN_METHOD, a, nh, dplyr::first(p2eu), w, bpVar, bpStrat, bpEst),
bVar = unitVarNew(method = 'var', ESTN_METHOD, a, nh, dplyr::first(p2eu), w, bVar, bStrat, cbEst),
cdVar = unitVarNew(method = 'var', ESTN_METHOD, a, nh, dplyr::first(p2eu), w, cdVar, cdStrat, cdEst),
clVar = unitVarNew(method = 'var', ESTN_METHOD, a, nh, dplyr::first(p2eu), w, clVar, clStrat, clEst),
csmVar = unitVarNew(method = 'var', ESTN_METHOD, a, nh, dplyr::first(p2eu), w, csmVar, csmStrat, csmEst),
cmdVar = unitVarNew(method = 'var', ESTN_METHOD, a, nh, dplyr::first(p2eu), w, cmdVar, cmdStrat, cmdEst),
clgVar = unitVarNew(method = 'var', ESTN_METHOD, a, nh, dplyr::first(p2eu), w, clgVar, clgStrat, clgEst),
ccVar = unitVarNew(method = 'var', ESTN_METHOD, a, nh, dplyr::first(p2eu), w, ccVar, ccStrat, ccEst),
cpVar = unitVarNew(method = 'var', ESTN_METHOD, a, nh, dplyr::first(p2eu), w, cpVar, cpStrat, cpEst),
cVar = unitVarNew(method = 'var', ESTN_METHOD, a, nh, dplyr::first(p2eu), w, cVar, cStrat, cEst),
# Estimation of unit covariance
cvEst_vsm = unitVarNew(method = 'cov', ESTN_METHOD, a, nh, dplyr::first(p2eu), w, cvEst_vsm, vsmStrat, vsmEst, aStrat, aEst),
cvEst_vmd = unitVarNew(method = 'cov', ESTN_METHOD, a, nh, dplyr::first(p2eu), w, cvEst_vmd, vmdStrat, vmdEst, aStrat, aEst),
cvEst_vlg = unitVarNew(method = 'cov', ESTN_METHOD, a, nh, dplyr::first(p2eu), w, cvEst_vlg, vlgStrat, vlgEst, aStrat, aEst),
cvEst_vc = unitVarNew(method = 'cov', ESTN_METHOD, a, nh, dplyr::first(p2eu), w, cvEst_vc, vcStrat, vcEst, aStrat, aEst),
cvEst_vp = unitVarNew(method = 'cov', ESTN_METHOD, a, nh, dplyr::first(p2eu), w, cvEst_vp, vpStrat, vpEst, aStrat, aEst),
cvEst_v = unitVarNew(method = 'cov', ESTN_METHOD, a, nh, dplyr::first(p2eu), w, cvEst_v, vStrat, vEst, aStrat, aEst),
cvEst_bd = unitVarNew(method = 'cov', ESTN_METHOD, a, nh, dplyr::first(p2eu), w, cvEst_bd, bdStrat, bdEst, aStrat, aEst),
cvEst_bl = unitVarNew(method = 'cov', ESTN_METHOD, a, nh, dplyr::first(p2eu), w, cvEst_bl, blStrat, blEst, aStrat, aEst),
cvEst_bsm = unitVarNew(method = 'cov', ESTN_METHOD, a, nh, dplyr::first(p2eu), w, cvEst_bsm, bsmStrat, bsmEst, aStrat, aEst),
cvEst_bmd = unitVarNew(method = 'cov', ESTN_METHOD, a, nh, dplyr::first(p2eu), w, cvEst_bmd, bmdStrat, bmdEst, aStrat, aEst),
cvEst_blg = unitVarNew(method = 'cov', ESTN_METHOD, a, nh, dplyr::first(p2eu), w, cvEst_blg, blgStrat, blgEst, aStrat, aEst),
cvEst_bc = unitVarNew(method = 'cov', ESTN_METHOD, a, nh, dplyr::first(p2eu), w, cvEst_bc, bcStrat, bcEst, aStrat, aEst),
cvEst_bp = unitVarNew(method = 'cov', ESTN_METHOD, a, nh, dplyr::first(p2eu), w, cvEst_bp, bpStrat, bpEst, aStrat, aEst),
cvEst_b = unitVarNew(method = 'cov', ESTN_METHOD, a, nh, dplyr::first(p2eu), w, cvEst_b, bStrat, cbEst, aStrat, aEst),
cvEst_cd = unitVarNew(method = 'cov', ESTN_METHOD, a, nh, dplyr::first(p2eu), w, cvEst_cd, cdStrat, cdEst, aStrat, aEst),
cvEst_cl = unitVarNew(method = 'cov', ESTN_METHOD, a, nh, dplyr::first(p2eu), w, cvEst_cl, clStrat, clEst, aStrat, aEst),
cvEst_csm = unitVarNew(method = 'cov', ESTN_METHOD, a, nh, dplyr::first(p2eu), w, cvEst_csm, csmStrat, csmEst, aStrat, aEst),
cvEst_cmd = unitVarNew(method = 'cov', ESTN_METHOD, a, nh, dplyr::first(p2eu), w, cvEst_cmd, cmdStrat, cmdEst, aStrat, aEst),
cvEst_clg = unitVarNew(method = 'cov', ESTN_METHOD, a, nh, dplyr::first(p2eu), w, cvEst_clg, clgStrat, clgEst, aStrat, aEst),
cvEst_cc = unitVarNew(method = 'cov', ESTN_METHOD, a, nh, dplyr::first(p2eu), w, cvEst_cc, ccStrat, ccEst, aStrat, aEst),
cvEst_cp = unitVarNew(method = 'cov', ESTN_METHOD, a, nh, dplyr::first(p2eu), w, cvEst_cp, cpStrat, cpEst, aStrat, aEst),
cvEst_c = unitVarNew(method = 'cov', ESTN_METHOD, a, nh, dplyr::first(p2eu), w, cvEst_c, cStrat, cEst, aStrat, aEst))
out <- list(tEst = tEst)
return(out)
}
dwmHelper <- function(x, combos, data, grpBy, totals, SE){
# Update domain indicator for each each column speficed in grpBy
ad = 1
for (n in 1:ncol(combos[[x]])){
# Area domain indicator for each column in
aObs <- as.character(combos[[x]][[grpBy[n]]]) == as.character(data[[grpBy[n]]])
if (length(which(is.na(aObs))) == length(aObs)) aObs <- 1
ad <- data$aDI * aObs * ad
}
if(SE){
data$aDI <- ad
data$aDI[is.na(data$aDI)] <- 0
cwd <- data %>%
distinct(PLT_CN, CONDID, EVALID, COND_STATUS_CD, .keep_all = TRUE) %>%
group_by(ESTN_UNIT_CN, ESTN_METHOD, STRATUM_CN, PLT_CN) %>%
summarize(vsmPlot = sum(FWD_SM_VOLCF_ADJ * aDI, na.rm = TRUE),
vmdPlot = sum(FWD_MD_VOLCF_ADJ * aDI, na.rm = TRUE),
vlgPlot = sum(FWD_LG_VOLCF_ADJ * aDI, na.rm = TRUE),
vcPlot = sum(CWD_VOLCF_ADJ * aDI, na.rm = TRUE),
vpPlot = sum(PILE_VOLCF_ADJ * aDI, na.rm = TRUE),
vPlot = sum(vsmPlot, vmdPlot, vlgPlot, vcPlot, vpPlot, na.rm = TRUE),
bdPlot = sum(DUFF_BIOMASS* aDI / 2000, na.rm = TRUE),
blPlot = sum(LITTER_BIOMASS * aDI / 2000, na.rm = TRUE),
bsmPlot = sum(FWD_SM_DRYBIO_ADJ * aDI / 2000, na.rm = TRUE),
bmdPlot = sum(FWD_MD_DRYBIO_ADJ * aDI / 2000, na.rm = TRUE),
blgPlot = sum(FWD_LG_DRYBIO_ADJ * aDI / 2000, na.rm = TRUE),
bcPlot = sum(CWD_DRYBIO_ADJ * aDI / 2000, na.rm = TRUE),
bpPlot = sum(PILE_DRYBIO_ADJ * aDI / 2000, na.rm = TRUE),
bPlot = sum(bdPlot, blPlot, bsmPlot, bmdPlot, blgPlot, bcPlot, bpPlot, na.rm = TRUE),
cdPlot = sum(DUFF_CARBON* aDI / 2000, na.rm = TRUE),
clPlot = sum(LITTER_CARBON * aDI / 2000, na.rm = TRUE),
csmPlot = sum(FWD_SM_CARBON_ADJ * aDI / 2000, na.rm = TRUE),
cmdPlot = sum(FWD_MD_CARBON_ADJ * aDI / 2000, na.rm = TRUE),
clgPlot = sum(FWD_LG_CARBON_ADJ * aDI / 2000, na.rm = TRUE),
ccPlot = sum(CWD_CARBON_ADJ * aDI / 2000, na.rm = TRUE),
cpPlot = sum(PILE_CARBON_ADJ * aDI / 2000, na.rm = TRUE),
cPlot = sum(cdPlot, clPlot, csmPlot, cmdPlot, clgPlot, ccPlot, cpPlot, na.rm = TRUE),
fa = sum(CONDPROP_UNADJ * aDI * aAdj, na.rm = TRUE),
plotIn = ifelse(sum(aDI > 0, na.rm = TRUE), 1,0),
a = dplyr::first(AREA_USED),
p1EU = dplyr::first(P1PNTCNT_EU),
p1 = dplyr::first(P1POINTCNT),
p2 = dplyr::first(P2POINTCNT)) %>%
# Stratum level
group_by(ESTN_UNIT_CN, ESTN_METHOD, STRATUM_CN) %>%
summarize(aStrat = mean(fa, na.rm = TRUE),
vsmStrat = mean(vsmPlot, na.rm = TRUE),
vmdStrat = mean(vmdPlot, na.rm = TRUE),
vlgStrat = mean(vlgPlot, na.rm = TRUE),
vcStrat = mean(vcPlot, na.rm = TRUE),
vpStrat = mean(vpPlot, na.rm = TRUE),
vStrat = mean(vPlot, na.rm = TRUE),
bdStrat = mean(bdPlot, na.rm = TRUE),
blStrat = mean(blPlot, na.rm = TRUE),
bsmStrat = mean(bsmPlot, na.rm = TRUE),
bmdStrat = mean(bmdPlot, na.rm = TRUE),
blgStrat = mean(blgPlot, na.rm = TRUE),
bcStrat = mean(bcPlot, na.rm = TRUE),
bpStrat = mean(bpPlot, na.rm = TRUE),
bStrat = mean(bPlot, na.rm = TRUE),
cdStrat = mean(cdPlot, na.rm = TRUE),
clStrat = mean(clPlot, na.rm = TRUE),
csmStrat = mean(csmPlot, na.rm = TRUE),
cmdStrat = mean(cmdPlot, na.rm = TRUE),
clgStrat = mean(clgPlot, na.rm = TRUE),
ccStrat = mean(ccPlot, na.rm = TRUE),
cpStrat = mean(cpPlot, na.rm = TRUE),
cStrat = mean(cPlot, na.rm = TRUE),
plotIn = sum(plotIn, na.rm = TRUE),
a = dplyr::first(a),
w = dplyr::first(p1) / dplyr::first(p1EU), # Stratum weight
nh = dplyr::first(p2), # Number plots in stratum
# Strata level variances
av = ifelse(first(ESTN_METHOD == 'simple'),
var(fa * dplyr::first(a) / nh),
(sum(fa^2) - sum(nh * aStrat^2)) / (nh * (nh-1))),
vsmV = ifelse(first(ESTN_METHOD == 'simple'),
var(vsmPlot * dplyr::first(a) / nh),
(sum(vsmPlot^2) - sum(nh * vsmStrat^2)) / (nh * (nh-1))), # Stratified and double cases
vmdV = ifelse(first(ESTN_METHOD == 'simple'),
var(vmdPlot * dplyr::first(a) / nh),
(sum(vmdPlot^2) - sum(nh * vmdStrat^2)) / (nh * (nh-1))), # Stratified and double cases
vlgV = ifelse(first(ESTN_METHOD == 'simple'),
var(vlgPlot * dplyr::first(a) / nh),
(sum(vlgPlot^2) - sum(nh * vlgStrat^2)) / (nh * (nh-1))), # Stratified and double cases
vcV = ifelse(first(ESTN_METHOD == 'simple'),
var(vcPlot * dplyr::first(a) / nh),
(sum(vcPlot^2) - sum(nh * vcStrat^2)) / (nh * (nh-1))), # Stratified and double cases
vpV = ifelse(first(ESTN_METHOD == 'simple'),
var(vpPlot * dplyr::first(a) / nh),
(sum(vpPlot^2) - sum(nh * vpStrat^2)) / (nh * (nh-1))), # Stratified and double cases
vV = ifelse(first(ESTN_METHOD == 'simple'),
var(vPlot * dplyr::first(a) / nh),
(sum(vPlot^2) - sum(nh * vStrat^2)) / (nh * (nh-1))), # Stratified and double cases
bdV = ifelse(first(ESTN_METHOD == 'simple'),
var(bdPlot * dplyr::first(a) / nh),
(sum(bdPlot^2) - sum(nh * bdStrat^2)) / (nh * (nh-1))), # Stratified and double cases
blV = ifelse(first(ESTN_METHOD == 'simple'),
var(blPlot * dplyr::first(a) / nh),
(sum(blPlot^2) - sum(nh * blStrat^2)) / (nh * (nh-1))), # Stratified and double cases
bsmV = ifelse(first(ESTN_METHOD == 'simple'),
var(bsmPlot * dplyr::first(a) / nh),
(sum(bsmPlot^2) - sum(nh * bsmStrat^2)) / (nh * (nh-1))), # Stratified and double cases
bmdV = ifelse(first(ESTN_METHOD == 'simple'),
var(bmdPlot * dplyr::first(a) / nh),
(sum(bmdPlot^2) - sum(nh * bmdStrat^2)) / (nh * (nh-1))), # Stratified and double cases
blgV = ifelse(first(ESTN_METHOD == 'simple'),
var(blgPlot * dplyr::first(a) / nh),
(sum(blgPlot^2) - sum(nh * blgStrat^2)) / (nh * (nh-1))), # Stratified and double cases
bcV = ifelse(first(ESTN_METHOD == 'simple'),
var(bcPlot * dplyr::first(a) / nh),
(sum(bcPlot^2) - sum(nh * bcStrat^2)) / (nh * (nh-1))), # Stratified and double cases
bpV = ifelse(first(ESTN_METHOD == 'simple'),
var(bpPlot * dplyr::first(a) / nh),
(sum(bpPlot^2) - sum(nh * bpStrat^2)) / (nh * (nh-1))), # Stratified and double cases
bV = ifelse(first(ESTN_METHOD == 'simple'),
var(bPlot * dplyr::first(a) / nh),
(sum(bPlot^2) - sum(nh * bStrat^2)) / (nh * (nh-1))), # Stratified and double cases
cdV = ifelse(first(ESTN_METHOD == 'simple'),
var(cdPlot * dplyr::first(a) / nh),
(sum(cdPlot^2) - sum(nh * cdStrat^2)) / (nh * (nh-1))), # Stratified and double cases
clV = ifelse(first(ESTN_METHOD == 'simple'),
var(clPlot * dplyr::first(a) / nh),
(sum(clPlot^2) - sum(nh * clStrat^2)) / (nh * (nh-1))), # Stratified and double cases
csmV = ifelse(first(ESTN_METHOD == 'simple'),
var(csmPlot * dplyr::first(a) / nh),
(sum(csmPlot^2) - sum(nh * csmStrat^2)) / (nh * (nh-1))), # Stratified and double cases
cmdV = ifelse(first(ESTN_METHOD == 'simple'),
var(cmdPlot * dplyr::first(a) / nh),
(sum(cmdPlot^2) - sum(nh * cmdStrat^2)) / (nh * (nh-1))), # Stratified and double cases
clgV = ifelse(first(ESTN_METHOD == 'simple'),
var(clgPlot * dplyr::first(a) / nh),
(sum(clgPlot^2) - sum(nh * clgStrat^2)) / (nh * (nh-1))), # Stratified and double cases
ccV = ifelse(first(ESTN_METHOD == 'simple'),
var(ccPlot * dplyr::first(a) / nh),
(sum(ccPlot^2) - sum(nh * ccStrat^2)) / (nh * (nh-1))), # Stratified and double cases
cpV = ifelse(first(ESTN_METHOD == 'simple'),
var(cpPlot * dplyr::first(a) / nh),
(sum(cpPlot^2) - sum(nh * cpStrat^2)) / (nh * (nh-1))), # Stratified and double cases
cV = ifelse(first(ESTN_METHOD == 'simple'),
var(cPlot * dplyr::first(a) / nh),
(sum(cPlot^2) - sum(nh * cStrat^2)) / (nh * (nh-1))), # Stratified and double cases
# Strata level covariances
vsmCV = ifelse(first(ESTN_METHOD == 'simple'),
cov(fa,vsmPlot),
(sum(fa*vsmPlot) - sum(nh * aStrat *vsmStrat)) / (nh * (nh-1))), # Stratified and double cases
vmdCV = ifelse(first(ESTN_METHOD == 'simple'),
cov(fa,vmdPlot),
(sum(fa*vmdPlot) - sum(nh * aStrat *vmdStrat)) / (nh * (nh-1))), # Stratified and double cases
vlgCV = ifelse(first(ESTN_METHOD == 'simple'),
cov(fa,vlgPlot),
(sum(fa*vlgPlot) - sum(nh * aStrat *vlgStrat)) / (nh * (nh-1))), # Stratified and double cases
vcCV = ifelse(first(ESTN_METHOD == 'simple'),
cov(fa,vcPlot),
(sum(fa*vcPlot) - sum(nh * aStrat *vcStrat)) / (nh * (nh-1))), # Stratified and double cases
vpCV = ifelse(first(ESTN_METHOD == 'simple'),
cov(fa,vpPlot),
(sum(fa*vpPlot) - sum(nh * aStrat *vpStrat)) / (nh * (nh-1))), # Stratified and double cases
vCV = ifelse(first(ESTN_METHOD == 'simple'),
cov(fa,vPlot),
(sum(fa*vPlot) - sum(nh * aStrat *vStrat)) / (nh * (nh-1))), # Stratified and double cases
bdCV = ifelse(first(ESTN_METHOD == 'simple'),
cov(fa,bdPlot),
(sum(fa*bdPlot) - sum(nh * aStrat *bdStrat)) / (nh * (nh-1))), # Stratified and double cases
blCV = ifelse(first(ESTN_METHOD == 'simple'),
cov(fa,blPlot),
(sum(fa*blPlot) - sum(nh * aStrat *blStrat)) / (nh * (nh-1))), # Stratified and double cases
bsmCV = ifelse(first(ESTN_METHOD == 'simple'),
cov(fa,bsmPlot),
(sum(fa*bsmPlot) - sum(nh * aStrat *bsmStrat)) / (nh * (nh-1))), # Stratified and double cases
bmdCV = ifelse(first(ESTN_METHOD == 'simple'),
cov(fa,bmdPlot),
(sum(fa*bmdPlot) - sum(nh * aStrat *bmdStrat)) / (nh * (nh-1))), # Stratified and double cases
blgCV = ifelse(first(ESTN_METHOD == 'simple'),
cov(fa,blgPlot),
(sum(fa*blgPlot) - sum(nh * aStrat *blgStrat)) / (nh * (nh-1))), # Stratified and double cases
bcCV = ifelse(first(ESTN_METHOD == 'simple'),
cov(fa,bcPlot),
(sum(fa*bcPlot) - sum(nh * aStrat *bcStrat)) / (nh * (nh-1))), # Stratified and double cases
bpCV = ifelse(first(ESTN_METHOD == 'simple'),
cov(fa,bpPlot),
(sum(fa*bpPlot) - sum(nh * aStrat *bpStrat)) / (nh * (nh-1))), # Stratified and double cases
bCV = ifelse(first(ESTN_METHOD == 'simple'),
cov(fa,bPlot),
(sum(fa*bPlot) - sum(nh * aStrat *bStrat)) / (nh * (nh-1))), # Stratified and double cases
cdCV = ifelse(first(ESTN_METHOD == 'simple'),
cov(fa,cdPlot),
(sum(fa*cdPlot) - sum(nh * aStrat *cdStrat)) / (nh * (nh-1))), # Stratified and double cases
clCV = ifelse(first(ESTN_METHOD == 'simple'),
cov(fa,blPlot),
(sum(fa*clPlot) - sum(nh * aStrat *clStrat)) / (nh * (nh-1))), # Stratified and double cases
csmCV = ifelse(first(ESTN_METHOD == 'simple'),
cov(fa,csmPlot),
(sum(fa*csmPlot) - sum(nh * aStrat *csmStrat)) / (nh * (nh-1))), # Stratified and double cases
cmdCV = ifelse(first(ESTN_METHOD == 'simple'),
cov(fa,cmdPlot),
(sum(fa*cmdPlot) - sum(nh * aStrat *cmdStrat)) / (nh * (nh-1))), # Stratified and double cases
clgCV = ifelse(first(ESTN_METHOD == 'simple'),
cov(fa,clgPlot),
(sum(fa*clgPlot) - sum(nh * aStrat *clgStrat)) / (nh * (nh-1))), # Stratified and double cases
ccCV = ifelse(first(ESTN_METHOD == 'simple'),
cov(fa,ccPlot),
(sum(fa*ccPlot) - sum(nh * aStrat *ccStrat)) / (nh * (nh-1))), # Stratified and double cases
cpCV = ifelse(first(ESTN_METHOD == 'simple'),
cov(fa,cpPlot),
(sum(fa*cpPlot) - sum(nh * aStrat *cpStrat)) / (nh * (nh-1))), # Stratified and double cases
cCV = ifelse(first(ESTN_METHOD == 'simple'),
cov(fa,cPlot),
(sum(fa*cPlot) - sum(nh * aStrat *cStrat)) / (nh * (nh-1)))) %>% # Stratified and double cases
## Estimation unit level
group_by(ESTN_UNIT_CN) %>%
summarize(aEst = unitMean(ESTN_METHOD, a, nh, w, aStrat),
vsmEst = unitMean(ESTN_METHOD, a, nh, w, vsmStrat),
vmdEst = unitMean(ESTN_METHOD, a, nh, w, vmdStrat),
vlgEst = unitMean(ESTN_METHOD, a, nh, w, vlgStrat),
vcEst = unitMean(ESTN_METHOD, a, nh, w, vcStrat),
vpEst = unitMean(ESTN_METHOD, a, nh, w, vpStrat),
vEst = unitMean(ESTN_METHOD, a, nh, w, vStrat),
bdEst = unitMean(ESTN_METHOD, a, nh, w, bdStrat),
blEst = unitMean(ESTN_METHOD, a, nh, w, blStrat),
bsmEst = unitMean(ESTN_METHOD, a, nh, w, bsmStrat),
bmdEst = unitMean(ESTN_METHOD, a, nh, w, bmdStrat),
blgEst = unitMean(ESTN_METHOD, a, nh, w, blgStrat),
bcEst = unitMean(ESTN_METHOD, a, nh, w, bcStrat),
bpEst = unitMean(ESTN_METHOD, a, nh, w, bpStrat),
bEst = unitMean(ESTN_METHOD, a, nh, w, bStrat),
cdEst = unitMean(ESTN_METHOD, a, nh, w, cdStrat),
clEst = unitMean(ESTN_METHOD, a, nh, w, clStrat),
csmEst = unitMean(ESTN_METHOD, a, nh, w, csmStrat),
cmdEst = unitMean(ESTN_METHOD, a, nh, w, cmdStrat),
clgEst = unitMean(ESTN_METHOD, a, nh, w, clgStrat),
ccEst = unitMean(ESTN_METHOD, a, nh, w, ccStrat),
cpEst = unitMean(ESTN_METHOD, a, nh, w, cpStrat),
cEst = unitMean(ESTN_METHOD, a, nh, w, cStrat),
plotIn = sum(plotIn, na.rm = TRUE),
# Estimation of unit variance
aVar = unitVar(method = 'var', ESTN_METHOD, a, nh, w, av, aStrat, aEst),
vsmVar = unitVar(method = 'var', ESTN_METHOD, a, nh, w, vsmV, vsmStrat, vsmEst),
vmdVar = unitVar(method = 'var', ESTN_METHOD, a, nh, w, vmdV, vmdStrat, vmdEst),
vlgVar = unitVar(method = 'var', ESTN_METHOD, a, nh, w, vlgV, vlgStrat, vlgEst),
vcVar = unitVar(method = 'var', ESTN_METHOD, a, nh, w, vcV, vcStrat, vcEst),
vpVar = unitVar(method = 'var', ESTN_METHOD, a, nh, w, vpV, vpStrat, vpEst),
vVar = unitVar(method = 'var', ESTN_METHOD, a, nh, w, vV, vStrat, vEst),
bdVar = unitVar(method = 'var', ESTN_METHOD, a, nh, w, bdV, bdStrat, bdEst),
blVar = unitVar(method = 'var', ESTN_METHOD, a, nh, w, blV, blStrat, blEst),
bsmVar = unitVar(method = 'var', ESTN_METHOD, a, nh, w, bsmV, bsmStrat, bsmEst),
bmdVar = unitVar(method = 'var', ESTN_METHOD, a, nh, w, bmdV, bmdStrat, bmdEst),
blgVar = unitVar(method = 'var', ESTN_METHOD, a, nh, w, blgV, blgStrat, blgEst),
bcVar = unitVar(method = 'var', ESTN_METHOD, a, nh, w, bcV, bcStrat, bcEst),
bpVar = unitVar(method = 'var', ESTN_METHOD, a, nh, w, bpV, bpStrat, bpEst),
bVar = unitVar(method = 'var', ESTN_METHOD, a, nh, w, bV, bStrat, cbEst),
cdVar = unitVar(method = 'var', ESTN_METHOD, a, nh, w, cdV, cdStrat, cdEst),
clVar = unitVar(method = 'var', ESTN_METHOD, a, nh, w, clV, clStrat, clEst),
csmVar = unitVar(method = 'var', ESTN_METHOD, a, nh, w, csmV, csmStrat, csmEst),
cmdVar = unitVar(method = 'var', ESTN_METHOD, a, nh, w, cmdV, cmdStrat, cmdEst),
clgVar = unitVar(method = 'var', ESTN_METHOD, a, nh, w, clgV, clgStrat, clgEst),
ccVar = unitVar(method = 'var', ESTN_METHOD, a, nh, w, ccV, ccStrat, ccEst),
cpVar = unitVar(method = 'var', ESTN_METHOD, a, nh, w, cpV, cpStrat, cpEst),
cVar = unitVar(method = 'var', ESTN_METHOD, a, nh, w, cV, cStrat, cEst),
# Estimation of unit covariance
vsmCV = unitVar(method = 'cov', ESTN_METHOD, a, nh, w, vsmCV, vsmStrat, vsmEst, aStrat, aEst),
vmdCV = unitVar(method = 'cov', ESTN_METHOD, a, nh, w, vmdCV, vmdStrat, vmdEst, aStrat, aEst),
vlgCV = unitVar(method = 'cov', ESTN_METHOD, a, nh, w, vlgCV, vlgStrat, vlgEst, aStrat, aEst),
vcCV = unitVar(method = 'cov', ESTN_METHOD, a, nh, w, vcCV, vcStrat, vcEst, aStrat, aEst),
vpCV = unitVar(method = 'cov', ESTN_METHOD, a, nh, w, vpCV, vpStrat, vpEst, aStrat, aEst),
vCV = unitVar(method = 'cov', ESTN_METHOD, a, nh, w, vCV, vStrat, vEst, aStrat, aEst),
bdCV = unitVar(method = 'cov', ESTN_METHOD, a, nh, w, bdCV, bdStrat, bdEst, aStrat, aEst),
blCV = unitVar(method = 'cov', ESTN_METHOD, a, nh, w, blCV, blStrat, blEst, aStrat, aEst),
bsmCV = unitVar(method = 'cov', ESTN_METHOD, a, nh, w, bsmCV, bsmStrat, bsmEst, aStrat, aEst),
bmdCV = unitVar(method = 'cov', ESTN_METHOD, a, nh, w, bmdCV, bmdStrat, bmdEst, aStrat, aEst),
blgCV = unitVar(method = 'cov', ESTN_METHOD, a, nh, w, blgCV, blgStrat, blgEst, aStrat, aEst),
bcCV = unitVar(method = 'cov', ESTN_METHOD, a, nh, w, bcCV, bcStrat, bcEst, aStrat, aEst),
bpCV = unitVar(method = 'cov', ESTN_METHOD, a, nh, w, bpCV, bpStrat, bpEst, aStrat, aEst),
bCV = unitVar(method = 'cov', ESTN_METHOD, a, nh, w, bCV, bStrat, cbEst, aStrat, aEst),
cdCV = unitVar(method = 'cov', ESTN_METHOD, a, nh, w, cdCV, cdStrat, cdEst, aStrat, aEst),
clCV = unitVar(method = 'cov', ESTN_METHOD, a, nh, w, clCV, clStrat, clEst, aStrat, aEst),
csmCV = unitVar(method = 'cov', ESTN_METHOD, a, nh, w, csmCV, csmStrat, csmEst, aStrat, aEst),
cmdCV = unitVar(method = 'cov', ESTN_METHOD, a, nh, w, cmdCV, cmdStrat, cmdEst, aStrat, aEst),
clgCV = unitVar(method = 'cov', ESTN_METHOD, a, nh, w, clgCV, clgStrat, clgEst, aStrat, aEst),
ccCV = unitVar(method = 'cov', ESTN_METHOD, a, nh, w, ccCV, ccStrat, ccEst, aStrat, aEst),
cpCV = unitVar(method = 'cov', ESTN_METHOD, a, nh, w, cpCV, cpStrat, cpEst, aStrat, aEst),
cCV = unitVar(method = 'cov', ESTN_METHOD, a, nh, w, cCV, cStrat, cEst, aStrat, aEst)) %>%
# Compute totals
summarize(AREA_TOTAL = sum(aEst, na.rm = TRUE),
VOL_DUFF = NA,
VOL_LITTER = NA,
VOL_1HR = sum(vsmEst, na.rm = TRUE),
VOL_10HR = sum(vmdEst, na.rm = TRUE),
VOL_100HR = sum(vlgEst, na.rm = TRUE),
VOL_1000HR = sum(vcEst, na.rm = TRUE),
VOL_PILE = sum(vpEst, na.rm = TRUE),
VOL = sum(vEst, na.rm = TRUE),
BIO_DUFF = sum(bdEst, na.rm = TRUE),
BIO_LITTER = sum(blEst, na.rm = TRUE),
BIO_1HR = sum(bsmEst, na.rm = TRUE),
BIO_10HR = sum(bmdEst, na.rm = TRUE),
BIO_100HR = sum(blgEst, na.rm = TRUE),
BIO_1000HR = sum(bcEst, na.rm = TRUE),
BIO_PILE = sum(bpEst, na.rm = TRUE),
BIO = sum(bEst, na.rm = TRUE),
CARB_DUFF = sum(cdEst, na.rm = TRUE),
CARB_LITTER = sum(clEst, na.rm = TRUE),
CARB_1HR = sum(csmEst, na.rm = TRUE),
CARB_10HR = sum(cmdEst, na.rm = TRUE),
CARB_100HR = sum(clgEst, na.rm = TRUE),
CARB_1000HR = sum(ccEst, na.rm = TRUE),
CARB_PILE = sum(cpEst, na.rm = TRUE),
CARB = sum(cEst, na.rm = TRUE),
nPlots = sum(plotIn, na.rm = TRUE),
# Per Acre
VOL_DUFF_ACRE = NA,
VOL_LITTER_ACRE = NA,
VOL_1HR_ACRE = VOL_1HR / AREA_TOTAL,
VOL_10HR_ACRE = VOL_10HR / AREA_TOTAL,
VOL_100HR_ACRE = VOL_100HR / AREA_TOTAL,
VOL_1000HR_ACRE = VOL_1000HR / AREA_TOTAL,
VOL_PILE_ACRE = VOL_PILE / AREA_TOTAL,
VOL_ACRE = VOL / AREA_TOTAL,
BIO_DUFF_ACRE = BIO_DUFF / AREA_TOTAL,
BIO_LITTER_ACRE = BIO_LITTER / AREA_TOTAL,
BIO_1HR_ACRE = BIO_1HR / AREA_TOTAL,
BIO_10HR_ACRE = BIO_10HR / AREA_TOTAL,
BIO_100HR_ACRE = BIO_100HR / AREA_TOTAL,
BIO_1000HR_ACRE = BIO_1000HR / AREA_TOTAL,
BIO_PILE_ACRE = BIO_PILE / AREA_TOTAL,
BIO_ACRE = BIO / AREA_TOTAL,
CARB_DUFF_ACRE = CARB_DUFF / AREA_TOTAL,
CARB_LITTER_ACRE = CARB_LITTER / AREA_TOTAL,
CARB_1HR_ACRE = CARB_1HR / AREA_TOTAL,
CARB_10HR_ACRE = CARB_10HR / AREA_TOTAL,
CARB_100HR_ACRE = CARB_100HR / AREA_TOTAL,
CARB_1000HR_ACRE = CARB_1000HR / AREA_TOTAL,
CARB_PILE_ACRE = CARB_PILE / AREA_TOTAL,
CARB_ACRE = CARB / AREA_TOTAL,
# Sampling Errors totals
AREA_TOTAL_SE = sqrt(sum(aVar, na.rm = TRUE)) / AREA_TOTAL * 100,
VOL_DUFF_SE = NA,
VOL_LITTER_SE = NA,
VOL_1HR_SE = sqrt(sum(vsmVar, na.rm = TRUE)) / VOL_1HR * 100,
VOL_10HR_SE = sqrt(sum(vmdVar, na.rm = TRUE)) / VOL_10HR * 100,
VOL_100HR_SE = sqrt(sum(vlgVar, na.rm = TRUE)) / VOL_100HR * 100,
VOL_1000HR_SE = sqrt(sum(vcVar, na.rm = TRUE)) / VOL_1000HR * 100,
VOL_PILE_SE = sqrt(sum(vpVar, na.rm = TRUE)) / VOL_PILE * 100,
VOL_SE = sqrt(sum(vVar, na.rm = TRUE)) / VOL * 100,
BIO_DUFF_SE = sqrt(sum(bdVar, na.rm = TRUE)) / BIO_DUFF * 100,
BIO_LITTER_SE = sqrt(sum(blVar, na.rm = TRUE)) / BIO_LITTER * 100,
BIO_1HR_SE = sqrt(sum(bsmVar, na.rm = TRUE)) / BIO_1HR * 100,
BIO_10HR_SE = sqrt(sum(bmdVar, na.rm = TRUE)) / BIO_10HR * 100,
BIO_100HR_SE = sqrt(sum(blgVar, na.rm = TRUE)) / BIO_100HR * 100,
BIO_1000HR_SE = sqrt(sum(bcVar, na.rm = TRUE)) / BIO_1000HR * 100,
BIO_PILE_SE = sqrt(sum(bpVar, na.rm = TRUE)) / BIO_PILE * 100,
BIO_SE = sqrt(sum(bVar, na.rm = TRUE)) / BIO * 100,
CARB_DUFF_SE = sqrt(sum(cdVar, na.rm = TRUE)) / CARB_DUFF * 100,
CARB_LITTER_SE = sqrt(sum(clVar, na.rm = TRUE)) / CARB_LITTER * 100,
CARB_1HR_SE = sqrt(sum(csmVar, na.rm = TRUE)) / CARB_1HR * 100,
CARB_10HR_SE = sqrt(sum(cmdVar, na.rm = TRUE)) / CARB_10HR * 100,
CARB_100HR_SE = sqrt(sum(clgVar, na.rm = TRUE)) / CARB_100HR * 100,
CARB_1000HR_SE = sqrt(sum(ccVar, na.rm = TRUE)) / CARB_1000HR * 100,
CARB_PILE_SE = sqrt(sum(cpVar, na.rm = TRUE)) / CARB_PILE * 100,
CARB_SE = sqrt(sum(cVar, na.rm = TRUE)) / CARB * 100,
# Per Acre variances
vsmVar = (1/AREA_TOTAL^2) * (sum(vsmVar, na.rm = TRUE) + (VOL_1HR_ACRE^2 * sum(aVar, na.rm = TRUE) - 2 * VOL_1HR_ACRE * sum(vsmCV, na.rm = TRUE))),
vmdVar = (1/AREA_TOTAL^2) * (sum(vmdVar, na.rm = TRUE) + (VOL_10HR_ACRE^2 * sum(aVar, na.rm = TRUE) - 2 * VOL_10HR_ACRE * sum(vmdCV, na.rm = TRUE))),
vlgVar = (1/AREA_TOTAL^2) * (sum(vlgVar, na.rm = TRUE) + (VOL_100HR_ACRE^2 * sum(aVar, na.rm = TRUE) - 2 * VOL_100HR_ACRE * sum(vlgCV, na.rm = TRUE))),
vcVar = (1/AREA_TOTAL^2) * (sum(vcVar, na.rm = TRUE) + (VOL_1000HR_ACRE^2 * sum(aVar, na.rm = TRUE) - 2 * VOL_1000HR_ACRE *sum(vcCV, na.rm = TRUE))),
vpVar = (1/AREA_TOTAL^2) * (sum(vpVar, na.rm = TRUE) + (VOL_PILE_ACRE^2 * sum(aVar, na.rm = TRUE) - 2 * VOL_PILE_ACRE * sum(vpCV, na.rm = TRUE))),
vVar = (1/AREA_TOTAL^2) * (sum(vVar, na.rm = TRUE) + (VOL_ACRE^2 * sum(aVar, na.rm = TRUE) - 2 * VOL_ACRE * sum(vCV, na.rm = TRUE))),
bdVar = (1/AREA_TOTAL^2) * (sum(bdVar, na.rm = TRUE) + (BIO_DUFF_ACRE^2 * sum(aVar, na.rm = TRUE) - 2 * BIO_DUFF_ACRE * sum(bdCV, na.rm = TRUE))),
blVar = (1/AREA_TOTAL^2) * (sum(blVar, na.rm = TRUE) + (BIO_LITTER_ACRE^2 * sum(aVar, na.rm = TRUE) - 2 * BIO_LITTER_ACRE * sum(blCV, na.rm = TRUE))),
bsmVar = (1/AREA_TOTAL^2) * (sum(bsmVar, na.rm = TRUE) + (BIO_1HR_ACRE^2 * sum(aVar, na.rm = TRUE) - 2 * BIO_1HR_ACRE * sum(bsmCV, na.rm = TRUE))),
bmdVar = (1/AREA_TOTAL^2) * (sum(bmdVar, na.rm = TRUE) + (BIO_10HR_ACRE^2 * sum(aVar, na.rm = TRUE) - 2 * BIO_10HR_ACRE * sum(bmdCV, na.rm = TRUE))),
blgVar = (1/AREA_TOTAL^2) * (sum(blgVar, na.rm = TRUE) + (BIO_100HR_ACRE^2 * sum(aVar, na.rm = TRUE) - 2 * BIO_100HR_ACRE * sum(blgCV, na.rm = TRUE))),
bcVar = (1/AREA_TOTAL^2) * (sum(bcVar, na.rm = TRUE) + (BIO_1000HR_ACRE^2 * sum(aVar, na.rm = TRUE) - 2 * BIO_1000HR_ACRE * sum(bcCV, na.rm = TRUE))),
bpVar = (1/AREA_TOTAL^2) * (sum(bpVar, na.rm = TRUE) + (BIO_PILE_ACRE^2 * sum(aVar, na.rm = TRUE) - 2 * BIO_PILE_ACRE * sum(bpCV, na.rm = TRUE))),
bVar = (1/AREA_TOTAL^2) * (sum(bVar, na.rm = TRUE) + (BIO_ACRE^2 * sum(aVar, na.rm = TRUE) - 2 * BIO_ACRE * sum(bCV, na.rm = TRUE))),
cdVar = (1/AREA_TOTAL^2) * (sum(cdVar, na.rm = TRUE) + (CARB_DUFF_ACRE^2 * sum(aVar, na.rm = TRUE) - 2 * CARB_DUFF_ACRE * sum(cdCV, na.rm = TRUE))),
clVar = (1/AREA_TOTAL^2) * (sum(clVar, na.rm = TRUE) + (CARB_LITTER_ACRE^2 * sum(aVar, na.rm = TRUE) - 2 * CARB_LITTER_ACRE * sum(clCV, na.rm = TRUE))),
csmVar = (1/AREA_TOTAL^2) * (sum(csmVar, na.rm = TRUE) + (CARB_1HR_ACRE^2 * sum(aVar, na.rm = TRUE) - 2 * CARB_1HR_ACRE * sum(csmCV, na.rm = TRUE))),
cmdVar = (1/AREA_TOTAL^2) * (sum(cmdVar, na.rm = TRUE) + (CARB_10HR_ACRE^2 * sum(aVar, na.rm = TRUE) - 2 * CARB_10HR_ACRE * sum(cmdCV, na.rm = TRUE))),
clgVar = (1/AREA_TOTAL^2) * (sum(clgVar, na.rm = TRUE) + (CARB_100HR_ACRE^2 * sum(aVar, na.rm = TRUE) - 2 * CARB_100HR_ACRE * sum(clgCV, na.rm = TRUE))),
ccVar = (1/AREA_TOTAL^2) * (sum(ccVar, na.rm = TRUE) + (CARB_1000HR_ACRE^2 * sum(aVar, na.rm = TRUE) - 2 * CARB_1000HR_ACRE * sum(ccCV, na.rm = TRUE))),
cpVar = (1/AREA_TOTAL^2) * (sum(cpVar, na.rm = TRUE) + (CARB_PILE_ACRE^2 * sum(aVar, na.rm = TRUE) - 2 * CARB_PILE_ACRE * sum(cpCV, na.rm = TRUE))),
cVar = (1/AREA_TOTAL^2) * (sum(cVar, na.rm = TRUE) + (CARB_ACRE^2 * sum(aVar, na.rm = TRUE) - 2 * CARB_ACRE * sum(cCV, na.rm = TRUE))),
# Per acre sampling errors
VOL_DUFF_ACRE_SE = NA,
VOL_LITTER_ACRE_SE = NA,
VOL_1HR_ACRE_SE = sqrt(sum(vsmVar, na.rm = TRUE)) / VOL_1HR_ACRE * 100,
VOL_10HR_ACRE_SE = sqrt(sum(vmdVar, na.rm = TRUE)) / VOL_10HR_ACRE * 100,
VOL_100HR_ACRE_SE = sqrt(sum(vlgVar, na.rm = TRUE)) / VOL_100HR_ACRE * 100,
VOL_1000HR_ACRE_SE = sqrt(sum(vcVar, na.rm = TRUE)) / VOL_1000HR_ACRE * 100,
VOL_PILE_ACRE_SE = sqrt(sum(vpVar, na.rm = TRUE)) / VOL_PILE_ACRE * 100,
VOL_ACRE_SE = sqrt(sum(vVar, na.rm = TRUE)) / VOL_ACRE * 100,
BIO_DUFF_ACRE_SE = sqrt(sum(bdVar, na.rm = TRUE)) / BIO_DUFF_ACRE * 100,
BIO_LITTER_ACRE_SE = sqrt(sum(blVar, na.rm = TRUE)) / BIO_LITTER_ACRE * 100,
BIO_1HR_ACRE_SE = sqrt(sum(bsmVar, na.rm = TRUE)) / BIO_1HR_ACRE * 100,
BIO_10HR_ACRE_SE = sqrt(sum(bmdVar, na.rm = TRUE)) / BIO_10HR_ACRE * 100,
BIO_100HR_ACRE_SE = sqrt(sum(blgVar, na.rm = TRUE)) / BIO_100HR_ACRE * 100,
BIO_1000HR_ACRE_SE = sqrt(sum(bcVar, na.rm = TRUE)) / BIO_1000HR_ACRE * 100,
BIO_PILE_ACRE_SE = sqrt(sum(bpVar, na.rm = TRUE)) / BIO_PILE_ACRE * 100,
BIO_ACRE_SE = sqrt(sum(bVar, na.rm = TRUE)) / BIO_ACRE * 100,
CARB_DUFF_ACRE_SE = sqrt(sum(cdVar, na.rm = TRUE)) / CARB_DUFF_ACRE * 100,
CARB_LITTER_ACRE_SE = sqrt(sum(clVar, na.rm = TRUE)) / CARB_LITTER_ACRE * 100,
CARB_1HR_ACRE_SE = sqrt(sum(csmVar, na.rm = TRUE)) / CARB_1HR_ACRE * 100,
CARB_10HR_ACRE_SE = sqrt(sum(cmdVar, na.rm = TRUE)) / CARB_10HR_ACRE * 100,
CARB_100HR_ACRE_SE = sqrt(sum(clgVar, na.rm = TRUE)) / CARB_100HR_ACRE * 100,
CARB_1000HR_ACRE_SE = sqrt(sum(ccVar, na.rm = TRUE)) / CARB_1000HR_ACRE * 100,
CARB_PILE_ACRE_SE = sqrt(sum(cpVar, na.rm = TRUE)) / CARB_PILE_ACRE * 100,
CARB_ACRE_SE = sqrt(sum(cVar, na.rm = TRUE)) / CARB_ACRE * 100)
if (totals) {
cwd <- cwd %>%
select(names(cwd)[str_detect(names(cwd), 'Var', negate = TRUE)], nPlots)
} else {
cwd <- cwd %>%
select(names(cwd)[str_detect(names(cwd), 'Var', negate = TRUE) & str_detect(names(cwd), 'ACRE')], nPlots)
}
# Rejoin w/ groupby names
cwd <- data.frame(combos[[x]], cwd)
} else {
### BELOW DOES NOT PRODUCE SAMPLING ERRORS, use EXPNS instead (much quicker)
cwd <- data %>%
filter(EVAL_TYP == 'EXPDWM') %>%
distinct(ESTN_UNIT_CN, STRATUM_CN, PLT_CN, CND_CN, .keep_all = TRUE) %>%
group_by(.dots = grpBy, PLT_CN) %>%
summarize(VOL_1HR = sum(FWD_SM_VOLCF_ADJ * aDI * EXPNS, na.rm = TRUE),
VOL_10HR = sum(FWD_MD_VOLCF_ADJ * aDI * EXPNS, na.rm = TRUE),
VOL_100HR = sum(FWD_LG_VOLCF_ADJ * aDI * EXPNS, na.rm = TRUE),
VOL_1000HR = sum(CWD_VOLCF_ADJ * aDI * EXPNS, na.rm = TRUE),
VOL_PILE = sum(PILE_VOLCF_ADJ * aDI * EXPNS, na.rm = TRUE),
VOL = sum(VOL_1HR, VOL_10HR, VOL_100HR, VOL_1000HR, VOL_PILE, na.rm = TRUE),
BIO_DUFF = sum(DUFF_BIOMASS* aDI * EXPNS / 2000, na.rm = TRUE),
BIO_LITTER = sum(LITTER_BIOMASS * aDI * EXPNS / 2000, na.rm = TRUE),
BIO_1HR = sum(FWD_SM_DRYBIO_ADJ * aDI * EXPNS / 2000, na.rm = TRUE),
BIO_10HR = sum(FWD_MD_DRYBIO_ADJ * aDI * EXPNS / 2000, na.rm = TRUE),
BIO_100HR = sum(FWD_LG_DRYBIO_ADJ * aDI * EXPNS / 2000, na.rm = TRUE),
BIO_1000HR = sum(CWD_DRYBIO_ADJ * aDI * EXPNS / 2000, na.rm = TRUE),
BIO_PILE = sum(PILE_DRYBIO_ADJ * aDI * EXPNS / 2000, na.rm = TRUE),
BIO = sum(BIO_DUFF, BIO_LITTER, BIO_1HR, BIO_10HR, BIO_100HR, BIO_1000HR, BIO_PILE, na.rm = TRUE),
CARB_DUFF = sum(DUFF_CARBON* aDI * EXPNS / 2000, na.rm = TRUE),
CARB_LITTER = sum(LITTER_CARBON * aDI * EXPNS / 2000, na.rm = TRUE),
CARB_1HR = sum(FWD_SM_CARBON_ADJ * aDI * EXPNS / 2000, na.rm = TRUE),
CARB_10HR = sum(FWD_MD_CARBON_ADJ * aDI * EXPNS / 2000, na.rm = TRUE),
CARB_100HR = sum(FWD_LG_CARBON_ADJ * aDI * EXPNS / 2000, na.rm = TRUE),
CARB_1000HR = sum(CWD_CARBON_ADJ * aDI * EXPNS / 2000, na.rm = TRUE),
CARB_PILE = sum(PILE_CARBON_ADJ * aDI * EXPNS / 2000, na.rm = TRUE),
CARB = sum(CARB_DUFF, CARB_LITTER, CARB_1HR, CARB_10HR, CARB_100HR, CARB_1000HR, CARB_PILE, na.rm = TRUE),
fa = sum(CONDPROP_UNADJ * aDI * aAdj * EXPNS, na.rm = TRUE),
plotIn = ifelse(sum(aDI > 0, na.rm = TRUE), 1,0)) %>%
group_by(.dots = grpBy) %>%
summarize(AREA_TOTAL = sum(fa, na.rm = TRUE),
VOL_DUFF = NA,
VOL_LITTER = NA,
VOL_1HR = sum(VOL_1HR, na.rm = TRUE),
VOL_10HR = sum(VOL_10HR, na.rm = TRUE),
VOL_100HR = sum(VOL_100HR, na.rm = TRUE),
VOL_1000HR = sum(VOL_1000HR, na.rm = TRUE),
VOL_PILE = sum(VOL_PILE, na.rm = TRUE),
VOL = sum(VOL, na.rm = TRUE),
BIO_DUFF = sum(BIO_DUFF, na.rm = TRUE),
BIO_LITTER = sum(BIO_LITTER, na.rm = TRUE),
BIO_1HR = sum(BIO_1HR, na.rm = TRUE),
BIO_10HR = sum(BIO_10HR, na.rm = TRUE),
BIO_100HR = sum(BIO_100HR, na.rm = TRUE),
BIO_1000HR = sum(BIO_1000HR, na.rm = TRUE),
BIO_PILE = sum(BIO_PILE, na.rm = TRUE),
BIO = sum(BIO, na.rm = TRUE),
CARB_DUFF = sum(CARB_DUFF, na.rm = TRUE),
CARB_LITTER = sum(CARB_LITTER, na.rm = TRUE),
CARB_1HR = sum(CARB_1HR, na.rm = TRUE),
CARB_10HR = sum(CARB_10HR, na.rm = TRUE),
CARB_100HR = sum(CARB_100HR, na.rm = TRUE),
CARB_1000HR = sum(CARB_1000HR, na.rm = TRUE),
CARB_PILE = sum(CARB_PILE, na.rm = TRUE),
CARB = sum(CARB, na.rm = TRUE),
VOL_DUFF_ACRE = NA,
VOL_LITTER_ACRE = NA,
VOL_1HR_ACRE = VOL_1HR / AREA_TOTAL,
VOL_10HR_ACRE = VOL_10HR / AREA_TOTAL,
VOL_100HR_ACRE = VOL_100HR / AREA_TOTAL,
VOL_1000HR_ACRE = VOL_1000HR / AREA_TOTAL,
VOL_PILE_ACRE = VOL_PILE / AREA_TOTAL,
VOL_ACRE = VOL / AREA_TOTAL,
BIO_DUFF_ACRE = BIO_DUFF / AREA_TOTAL,
BIO_LITTER_ACRE = BIO_LITTER / AREA_TOTAL,
BIO_1HR_ACRE = BIO_1HR / AREA_TOTAL,
BIO_10HR_ACRE = BIO_10HR / AREA_TOTAL,
BIO_100HR_ACRE = BIO_100HR / AREA_TOTAL,
BIO_1000HR_ACRE = BIO_1000HR / AREA_TOTAL,
BIO_PILE_ACRE = BIO_PILE / AREA_TOTAL,
BIO_ACRE = BIO / AREA_TOTAL,
CARB_DUFF_ACRE = CARB_DUFF / AREA_TOTAL,
CARB_LITTER_ACRE = CARB_LITTER / AREA_TOTAL,
CARB_1HR_ACRE = CARB_1HR / AREA_TOTAL,
CARB_10HR_ACRE = CARB_10HR / AREA_TOTAL,
CARB_100HR_ACRE = CARB_100HR / AREA_TOTAL,
CARB_1000HR_ACRE = CARB_1000HR / AREA_TOTAL,
CARB_PILE_ACRE = CARB_PILE / AREA_TOTAL,
CARB_ACRE = CARB / AREA_TOTAL,
nPlots = sum(plotIn, na.rm = TRUE))
# Remove the total values if told to do so
if (totals) {
cwd <- cwd %>%
select(grpBy, names(cwd)[str_detect(names(cwd), 'Var', negate = TRUE)], nPlots)
} else {
cwd <- cwd %>%
select(grpBy, names(cwd)[str_detect(names(cwd), 'Var', negate = TRUE) & str_detect(names(cwd), 'ACRE')], nPlots)
}
} # End SE Conditional
# Do some cleanup
#gc()
#Return a dataframe
cwd
}
|
c43c7adf9b6af88790c91678d727fdfead166ec2
|
d48e34adc6063a5ca3dbfd772ad186fb93922f50
|
/testPackage/checkTestsPackage.R
|
c1dad96768c309129fd18dcc791882bba5eb5945
|
[] |
no_license
|
Lion666/clinDataReview
|
080832a95b74bebb595d59796758b9e8b4cf4e18
|
2876140f36c6bfe94d8626038d32b2f3f9477697
|
refs/heads/master
| 2023-08-05T18:55:26.658847
| 2021-10-01T16:55:27
| 2021-10-02T10:41:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,317
|
r
|
checkTestsPackage.R
|
# Run the test suite and measure test coverage for the 'medicalMonitoring'
# package.
#
# Author: Laure Cougnaud
###############################################################################

library(vdiffr)
library(devtools)
library(covr)

pkgDir <- "../package/medicalMonitoring/"

## Reference figures for vdiffr::expect_doppelganger
# Use the same collate order as R CMD check when creating the reference
# figures: the collation locale affects element ordering via 'reorder', and a
# mismatch can change the order of the parameters on the y-axis of the plots.
Sys.setlocale(category = "LC_COLLATE", locale = "C")
# Write the reference figures into the package's 'tests/fig' directory
validate_cases(collect_cases(package = pkgDir))

## Build the source tarball (re-compressing the datasets)
pkgTarball <- build(pkg = pkgDir, args = "--resave-data")

## Check the built tarball, writing the check output next to the package
outDir <- normalizePath(dirname(pkgDir))
check_built(
  path = pkgTarball,
  args = paste0("--output=", outDir)
)

## Measure test coverage across tests, vignettes and examples
coverage <- package_coverage(
  path = pkgDir,
  type = c("tests", "vignettes", "examples"),
  # function_exclusions = "^(?!subjectProfile.*Plot)",
  combine_types = TRUE # sum the coverage of all types into one object
)
report(x = coverage, file = "testCoverage-medicalMonitoring.html")
|
7c9475418d9f01472ff93b91e1d2004d0b63f66b
|
35de14603463a45028bd2aca76fa336c41186577
|
/man/find_topConsensus.Rd
|
fd8790a7e683d30555a21240ddf383e8c7626bf0
|
[
"MIT"
] |
permissive
|
UKDRI/echolocatoR
|
e3cf1d65cc7113d02b2403960d6793b9249892de
|
0ccf40d2f126f755074e731f82386e4e01d6f6bb
|
refs/heads/master
| 2023-07-14T21:55:27.825635
| 2021-08-28T17:02:33
| 2021-08-28T17:02:33
| 416,442,683
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 436
|
rd
|
find_topConsensus.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/find_topConsensus.R
\name{find_topConsensus}
\alias{find_topConsensus}
\title{Find the top Consensus SNP}
\usage{
find_topConsensus(dat, top_N = 1, grouping_vars = c("Locus"))
}
\description{
Identify the \code{top_N} Consensus SNP(s) per Locus,
defined as the Consensus SNPs with the highest mean PP across all fine-mapping tools used.
}
\keyword{internal}
|
865dd751fd311d5c14f4bace1725b2689c413430
|
7d40b1c2f63e2860712493d156aaf5c6f51e8d25
|
/man/Tasmania.Rd
|
58245c84c7de8401abcaa3a8170784f48ec5f2e9
|
[] |
no_license
|
Epiconcept-Paris/sftoolkit
|
2fcb1d31ac41a74e16c9a40b13edc7c0ab62d58b
|
e46ab4fa781d7ebb0566759091ba0b9b4a5401bb
|
refs/heads/master
| 2020-04-26T22:26:41.753060
| 2019-03-21T08:45:54
| 2019-03-21T08:45:54
| 173,872,662
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 676
|
rd
|
Tasmania.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{Tasmania}
\alias{Tasmania}
\title{Map of Tasmania (AUS)}
\format{sf, data frame with 30 rows and 6 variables + geometry.
\describe{
\item{ISO}{ISO code of Australia: AUS}
\item{NAME_0}{"Australia"}
\item{NAME_1}{"Tasmania"}
\item{NAME_2}{Names of Municipalities}
\item{TYPE_2}{"Municipality"}
\item{ENGTYPE_2}{"Municipality"}
\item{geometry}{sfc_MULTIPOLYGON}
}}
\source{
\url{http://www.diamondse.info/}
}
\usage{
Tasmania
}
\description{
An object of class "sf" "data.frame" with geometry and other attributes
of the map of Tasmania.
}
\keyword{datasets}
|
8ec572c9f287a06865ccf2d3a035fff11980d2e8
|
de135f7c6453465b6b56a0bcb8dd8d421cf92908
|
/R/HiCcompare.r
|
f724d0dff029ec733ded1144db6ffa7bcf735da3
|
[
"MIT"
] |
permissive
|
dozmorovlab/HiCcompare
|
f555032a9ae9cbcc101ccb199f8b96bf5c63857f
|
310a927372e836fdd0438452e4790d4d2a611c1b
|
refs/heads/master
| 2023-06-08T14:25:06.340538
| 2023-06-04T01:46:25
| 2023-06-04T01:46:25
| 95,918,953
| 14
| 5
|
NOASSERTION
| 2022-11-28T21:25:44
| 2017-06-30T19:44:45
|
R
|
UTF-8
|
R
| false
| false
| 710
|
r
|
HiCcompare.r
|
# Package-level documentation block. The "_PACKAGE" sentinel after the roxygen
# comments tells roxygen2 to attach this help topic to the package itself
# (i.e. it generates the ?HiCcompare package help page) rather than to a
# function. Do not insert anything between the roxygen block and "_PACKAGE",
# or the association is lost.
#' HiCcompare
#'
#' HiCcompare provides functions for joint normalization and difference detection in multiple Hi-C datasets.
#' HiCcompare operates on processed Hi-C data in the form of chromosome-specific chromatin interaction matrices.
#' It accepts three-column tab-separated text files storing chromatin interaction matrices in a sparse matrix format
#' which are available from several sources. HiCcompare is designed to give the user the ability to perform a comparative
#' analysis on the 3-Dimensional structure of the genomes of cells in different biological states.
#'
#' To learn more about HiCcompare, start with the vignettes:
#' `browseVignettes(package = "HiCcompare")`
"_PACKAGE"
|
2685975133a4e9e7416d0bd37a5dde6ed0f21252
|
ddbfc51c7f00e40d6f8b0ced2b07ec31b25489f0
|
/Graph Plotter/ggplot graph.R
|
b2b66328a854d60178769e458a8cec568503e942
|
[] |
no_license
|
mygit-kapil/R_Codes
|
53d894a4c56403d0284a9e99fe5c3378c165f24a
|
8f6747ea1473029de0817f2fa6dfdf59b26c20d6
|
refs/heads/master
| 2020-03-15T16:46:08.076412
| 2018-05-05T11:38:18
| 2018-05-05T11:38:18
| 132,242,470
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,482
|
r
|
ggplot graph.R
|
ggplot_graphs <- function(file)
{
  # Interactively prompt the user for a plot type and column name(s), then
  # draw the requested ggplot from columns of `file`.
  #
  # Args:
  #   file: a data frame (or list of columns) whose columns are selected by
  #         name at the interactive prompt.
  # Side effects: prints the assembled data frame (point/line) and the plot;
  # re-prompts recursively on an unrecognized plot type.
  library(ggplot2)
  library(quantreg) # attached by the original code; not used directly here
  graph_type <- readline(prompt="Enter the type of ggplot graph (point/line/histogram): ")
  if (graph_type == "point" || graph_type == "line")
  {
    # Point and line plots share all of their setup; only the geom differs,
    # so the two branches of the original are merged here.
    xline <- readline(prompt="Enter the x co-ordinate: ")
    n <- as.integer(nrow(file))
    DF <- data.frame(A = numeric(n), B = numeric(n))
    DF$A <- file[[xline]]
    yline <- readline(prompt="Enter the y co-ordinate: ")
    DF$B <- file[[yline]]
    # Sort by x so geom_line draws left-to-right instead of in row order
    DF <- DF[order(DF$A), ]
    print(DF)
    geom <- if (graph_type == "point") geom_point() else geom_line()
    p <- ggplot(DF, aes(x = A, y = B)) + geom + labs(x = xline, y = yline)
    print(p)
  }
  else if (graph_type == "histogram")
  {
    xline <- readline(prompt="Enter the x co-ordinate: ")
    n <- as.integer(nrow(file))
    DF <- data.frame(A = numeric(n))
    DF$A <- file[[xline]]
    # NOTE: geom_histogram() plus stat_bin(binwidth = 0.01) adds two binning
    # layers, reproducing the original behavior on purpose.
    p <- ggplot(DF, aes(x = A)) + geom_histogram() + labs(x = xline) + stat_bin(binwidth = 0.01)
    print(p)
  }
  else
  {
    print("Invalid Input")
    ggplot_graphs(file) # re-prompt until a valid plot type is entered
  }
}
|
eae7198d28038578bede2c2eb3dd15330093d1be
|
ff6d90e2f2cb21dea429c55838054d01071d6cd7
|
/CNNModels/faceModelCNN.R
|
f86990594e3b1a277690ecabf3b57857250445e5
|
[] |
no_license
|
JeffSandov6/facialRecognitionUCI
|
8d764b0e6e7c5c2aa207a1c1f3d4c7f858ba143d
|
7bbe4b0a4a6fb6b7a0ae595d085597ac54cd42b2
|
refs/heads/master
| 2020-04-10T01:32:43.493238
| 2018-12-06T19:07:54
| 2018-12-06T19:07:54
| 160,718,837
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,248
|
r
|
faceModelCNN.R
|
# Model for Faces CNN
# Builds, trains, and evaluates a convolutional neural network that classifies
# faces from 128x120 single-channel images (input_shape = c(128, 120, 1)).
use_session_with_seed(1) # fix the RNG seed so training is reproducible

# Hidden-layer width heuristic: midpoint of (input feature count + classes).
Face_hiddenLayerNodes <- (499 + 20) / 2

faceModel <- keras_model_sequential()
# Configuring the Model: three conv/max-pool stages, then a dense classifier
# sized to the one-hot label matrix.
faceModel %>%
  layer_conv_2d(filters=32,kernel_size=c(3,3),padding="same",input_shape=c(128,120,1),activation = "relu") %>%
  layer_max_pooling_2d(pool_size=c(2,2)) %>%
  layer_conv_2d(filters=32,kernel_size=c(3,3), activation = "relu") %>%
  layer_max_pooling_2d(pool_size=c(2,2)) %>%
  layer_conv_2d(filters=32,kernel_size=c(3,3), activation = "relu") %>%
  layer_max_pooling_2d(pool_size=c(2,2)) %>%
  layer_flatten() %>%
  layer_dense(units = Face_hiddenLayerNodes,activation = "relu") %>%
  layer_dense(units = ncol(train_faces_onehot),activation = 'softmax')

compile(faceModel,
        optimizer ='adam',
        loss='categorical_crossentropy',
        metrics = 'accuracy')

# FIX: the original called fit(model, ...), but no object named 'model' exists
# in this script -- the network defined and compiled above is 'faceModel'.
# NOTE(review): 'train_images_CNN', 'train_faces_onehot' and the 'early_stop'
# callback are assumed to be defined by an earlier setup script -- confirm.
faceHistory <- fit(faceModel,
                   train_images_CNN, train_faces_onehot,
                   validation_split = 0.2, batch_size=32,
                   epochs = 500,
                   callbacks = list(early_stop))

score <- evaluate(faceModel,
                  test_images_CNN, test_faces_onehot)
cat('Test loss:', score$loss, "\n")
cat('Test accuracy:', score$acc, "\n")
|
d70480c8b80c2cfe518deb4b04f6a00d8cf21f10
|
976b640f30bfa8d8c2b65ca804e2baeb05e40f4b
|
/man/vulnerability_signature.Rd
|
64dc69546cc159126bd05c02b96b8fbd2e20f2cf
|
[] |
no_license
|
mrtnnrdlnd/hrgraphsign
|
eea1a81eed8c00e08ea30a9f1123fc5527321a21
|
8e1c9ead8f445e8ed1d79d51fb9d461f5d76425d
|
refs/heads/main
| 2023-06-03T15:16:43.455542
| 2021-06-21T07:56:35
| 2021-06-21T07:56:35
| 363,876,487
| 0
| 0
| null | 2021-06-21T07:56:36
| 2021-05-03T09:18:14
|
R
|
UTF-8
|
R
| false
| true
| 573
|
rd
|
vulnerability_signature.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/signatures.R
\name{vulnerability_signature}
\alias{vulnerability_signature}
\title{Extract Vulnerability Signature}
\usage{
vulnerability_signature(graph, membership, weights = NULL)
}
\arguments{
\item{graph}{The input graph.}
\item{membership}{Vertex attribute to cluster by.}
\item{weights}{Weight vector. If the graph has a weight edge attribute, then this is used by default.}
}
\value{
A tibble of vertices with high vulnerability scores
}
\description{
Extract Vulnerability Signature
}
|
473d0f10d776fd5ef014ae618e9b80092b02dd9a
|
f7e7df7cb9bd7a18ceead5dcc366c8fd1bd216d4
|
/scraper/events/eventshigh.R
|
acc72b34f69395369f6515e8fd5f2aae9d045b4d
|
[] |
no_license
|
vm-natarajan/r-scraper
|
72d007919d61f0fc244dec944be325e83bc12c4c
|
b61efbe50445c2ae63d6104f6ccda1427bd78dbb
|
refs/heads/master
| 2020-04-01T07:23:13.704269
| 2018-11-26T04:22:27
| 2018-11-26T04:22:27
| 152,987,946
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,997
|
r
|
eventshigh.R
|
getEventsHighEventDetails <- function(location,url_section,section){
  # Scrape one city/section listing page from eventshigh.com and return a
  # data frame with columns: source, section, date, time, price, location,
  # place, title, url, img. (The unused `stories` skeleton data frame from
  # the original version has been removed.)
  url <- paste0('https://www.eventshigh.com/',location,'/',url_section);
  print(url);
  page_source <- read_html(x = url);
  # Card text looks like "Wed, 21 Nov , 7:00 PM": split into date and time.
  event_date <- page_source %>% html_nodes(css = 'div.browse-card.d-inline-block>a:nth-of-type(2) div:nth-child(6)') %>% html_text(trim = TRUE) %>% strsplit(split = ' , ');
  event_time <- sapply(event_date, "[", 2);
  event_date <- sapply(event_date, "[", 1);
  event_date <- as.Date(strptime(event_date,format = '%a, %d %b'));
  event_date <- format(event_date, format="%Y-%m-%d");
  current_date <- Sys.Date();
  # Unparseable dates default to today. BUG FIX: the original used
  # ifelse(is.na(...), yes = event_date <- current_date, ...), whose
  # assignment side effect overwrote EVERY date with a single scalar as
  # soon as one entry was NA. Replace only the NA entries instead.
  event_date[is.na(event_date)] <- format(current_date, "%Y-%m-%d");
  # strptime() without a year assumes the current year, so a listed date
  # already in the past must belong to next year: bump it by one year.
  for(x in seq_along(event_date)){
    if(event_date[x] < current_date){
      tmp <- as.POSIXlt(event_date[x]);
      tmp$year <- tmp$year+1;
      # BUG FIX: as.character() keeps the "%Y-%m-%d" form. Assigning a
      # Date object straight into a character vector (as the original did)
      # coerces it to its underlying numeric day count as a string.
      event_date[x] <- as.character(as.Date(tmp));
    }
  }
  # Price, venue, title, image and detail-page URL for each card.
  event_price <- page_source %>% html_nodes(css = 'div.browse-card.d-inline-block>a:nth-of-type(2) div:nth-child(8)') %>% html_text(trim = TRUE) %>% gsub(pattern = '\u20b9',replacement = 'Rs.') %>% gsub(pattern = 'Free',replacement = '0')
  event_place <- page_source %>% html_nodes(css = 'div.browse-card.d-inline-block>a:nth-of-type(2) div:nth-child(4)') %>% html_text(trim = TRUE);
  event_title <- page_source %>% html_nodes(css = 'div.browse-card.d-inline-block>a:nth-of-type(2) div:nth-child(2)') %>% html_text(trim = TRUE);
  event_img <- page_source %>% html_nodes(css = 'div.browse-card.d-inline-block div.lazy-load-image') %>% html_attr('data-src') %>% gsub(pattern = '=.*',replacement = '');
  event_url <- page_source %>% html_nodes(css = 'div.browse-card.d-inline-block>a:nth-child(1)') %>% html_attr('href') %>% gsub(pattern = '\\?.*',replacement = '');
  event_url <- paste0('https://www.eventshigh.com',event_url);
  # Assemble one row per event card.
  events <- as.data.frame(cbind(source = 'EventsHigh',section = section,date = event_date,time = event_time,price = event_price, location = location,place = event_place,title = event_title,url = event_url,img = event_img));
  return(events);
}
getEHEvents <- function(){
  # Scrape every configured city/section combination from EventsHigh and
  # combine the results into a single data frame.
  locations <- c('chennai','mumbai');
  url_sections <- c('food','concerts+and+shows','parties+and+nightlife','adventure+and+sports','classes+and+workshops');
  sections <- c('food-and-drinks','entertainment','entertainment','sports-and-wellness','conference-and-workshops');
  #'Food & Drinks','Entertainment','Sports & Wellness','Meetups','Conference & Workshops','Education','Art & Music'
  # Collect each page's result in a preallocated list and bind once at the
  # end, instead of growing a data frame with rbind() inside the loop
  # (which copies the accumulated frame on every iteration).
  pieces <- vector("list", length(locations) * length(sections));
  i <- 0L;
  for(li in seq_along(locations)){
    for(x in seq_along(sections)){
      i <- i + 1L;
      pieces[[i]] <- getEventsHighEventDetails(locations[li],url_sections[x],sections[x]);
    }
  }
  eh_set <- do.call(rbind, pieces);
  return(eh_set);
}
|
72301d3fb1b4b9adbd5823885db1a2667513b25f
|
e63967e692fb616e22685b1b5792f1b5e53ae330
|
/20170117-Putschko-ShinyR-SMS-Analysis/app.R
|
c39ca97e5f9171bc20ae3c146b7a07afe11a6e4d
|
[
"MIT"
] |
permissive
|
WestMichiganRUserGroup/Meeting-Materials
|
27d9a0bb8fd9b3923664b3e4e84b2f13e042a72d
|
6a792773b7e8e8cc23f78d6d92239410fbb1ae60
|
refs/heads/master
| 2022-11-13T10:42:58.616639
| 2022-11-12T15:08:00
| 2022-11-12T15:08:00
| 79,477,514
| 2
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 39,739
|
r
|
app.R
|
# SMS Analysis ------------------------------------------------------------
# ---- The Setup ----
# Install these packages if you haven't already.
# One-time bootstrap: install any listed package that is missing, then
# attach everything with pacman. The manual install runs first because
# pacman itself must be present before pacman::p_load can be called below.
list.of.packages <- c("dplyr", "tidyr", "readr", "lubridate", "stringr", "tibble", "forcats",
                      "ggplot2", "viridis", "ggthemes",
                      "plotly","pacman",
                      "shiny", "shinydashboard", "DT")
# Packages in the list that installed.packages() does not report as present.
new.packages <- list.of.packages[!(list.of.packages %in% installed.packages()[,"Package"])]
if(length(new.packages)) install.packages(new.packages)
# --- Packages ---
# Attach all packages for this session.
pacman::p_load(dplyr, tidyr, readr, lubridate, stringr, tibble, forcats,
               ggplot2, viridis, ggthemes,
               plotly,
               shiny, shinydashboard, DT)
# ---- Loading Data ----
# Message-level data: one row per message. Downstream code uses the
# columns Date, Contact, Type ("Sent"/"Received") and Text.
SMS_Analysis <-
  # - Original Data -
  # read_csv(list.files(pattern = "Full SMS.csv")) %>% tbl_df()
  # - Anonymous Data -
  # read_csv(list.files(pattern = "Anon SMS.csv")) %>% tbl_df()
  # - Load in RDS file -
  readRDS("Anonymous SMS.RDS")
# Date Range
# First and last message dates in the data; used to initialise the
# sidebar date-range picker and its preset buttons.
Data_Date_Range <-
  SMS_Analysis %>%
  distinct(Date) %>%
  summarize(Max = max(as.Date(Date)),
            Min = min(as.Date(Date)))
# ---- Dashboard Header ----
# Dashboard title bar.
Header <-
  dashboardHeader(title = "Message Exploration")
# ---- Dashboard Sidebar ----
# Navigation menu plus the global inputs (contact picker, top-N slider,
# date range and preset-date buttons) that every tab reacts to.
Sidebar <- dashboardSidebar(
  # * Sidebar Links ----
  sidebarMenu(
    menuItem(
      "Descriptives",
      tabName = "Descriptives"
    ),
    menuItem(
      "Top Contacts",
      tabName = "Top_Contacts"
    ),
    menuItem(
      "Selected Contact",
      tabName = "Selected_Contact"
    ),
    menuItem(
      "Messages Sent",
      tabName = "Messages_Sent"
    )
  ),
  # * Sidebar Inputs ----
  # Selected Contact -- choices are replaced server-side with the current
  # top-N contacts via updateSelectInput().
  selectInput("Contact_Select",
              "Select Contact",
              choices = "None Selected"),
  # Number of Top Contacts
  sliderInput("Top_Contacts_N", label = "Number of Top Contacts",
              min = 10, max = 40, value = 20),
  # Date Slider -- initialised to the full span of the loaded data.
  dateRangeInput(
    "Slider_Date_Range",
    label = "Date Range",
    start = Data_Date_Range$Min,
    end = Data_Date_Range$Max,
    startview = "year"),
  # Date Buttons -- each resets the date range server-side to a preset window.
  column(10,
         align = "center",
         offset = 1,
         actionButton("Default_Dates",
                      label = "All Dates",
                      width = "100%")),
  column(10,
         align = "center",
         offset = 1,
         actionButton("Last_Month",
                      width = "100%",
                      label = "Last Month")),
  column(10,
         align = "center",
         offset = 1,
         actionButton("Three_Months",
                      width = "100%",
                      label = "Last Three Months")),
  column(10,
         align = "left",
         offset = 1,
         actionButton("Six_Months",
                      width = "100%",
                      label = "Last Six Months"))
)
# Dashboard Body ----
# Dashboard body: one tabItem per sidebar link; output IDs here are
# rendered by the matching output$... blocks in the server function.
Body <- dashboardBody(
  tabItems(
    # * Descriptives ----
    # Summary tab: data-set vitals in info/value boxes plus a per-contact table.
    tabItem(tabName = "Descriptives",
            # First Row
            fluidRow(
              infoBoxOutput("Date_Updated"),
              infoBoxOutput("Days_Recorded"),
              infoBoxOutput("Contacts_Recorded")
            ),
            # Second Row
            fluidRow(
              valueBoxOutput("Texts_Number"),
              valueBoxOutput("Sent_Number"),
              valueBoxOutput("Received_Number")
            ),
            # Third Row
            dataTableOutput("Data_Descriptives")
    ),
    # * Top Contacts ----
    # Compares the top-N contacts: daily counts, sent-vs-received bars,
    # activity date ranges, and a difference scatter with a custom tooltip.
    tabItem(tabName = "Top_Contacts",
            # First Row
            fluidRow(
              box(
                plotlyOutput("Line_Chart"),
                width = 12,
                title = "Message Count by Day",
                collapsible = T
              )
            ),
            # Second Row
            fluidRow(
              # First Column
              tabBox(width = 9,
                     # Panel 1
                     tabPanel(
                       "Sent vs Received",
                       plotlyOutput("Bar_Charts", height = 700)),
                     # Panel 2
                     tabPanel(
                       "Date Ranges",
                       plotOutput("Range_Chart", height = 650)),
                     # Panel 3
                     tabPanel(
                       "Differences",
                       # Hover Overlay: the relative-positioned div anchors
                       # the absolutely-positioned tooltip (uiOutput below)
                       # over the plot; hover events are debounced.
                       div(
                         style = "position:relative",
                         plotOutput("Difference_Chart",
                                    height = 650,
                                    hover = hoverOpts("plot_hover",
                                                      delay = 100,
                                                      delayType = "debounce")),
                         uiOutput("hover_info")))
              ),
              # Second Column
              box(
                width = 3,
                solidHeader = T,
                title = "Top Contacts",
                div(style = "font-size:90%",
                    dataTableOutput("Data_Top_Contacts")))
            )
    ),
    # * Selected Contact ----
    # Drill-down for the contact chosen in the sidebar picker.
    tabItem(tabName = "Selected_Contact",
            # First Row
            fluidRow(
              # First Column
              valueBoxOutput("Info_Selected_Contact",
                             width = 3),
              # Second Column
              box(
                width = 9,
                collapsible = TRUE,
                title = "Contact Details",
                div(style = "font-size:100%",
                    dataTableOutput("Data_Contact_Details")))
            ),
            # Second Row
            fluidRow(
              # First Column
              box(
                width = 4,
                collapsible = T,
                title = "Sent and Received",
                plotOutput("Pie_Chart",
                           height = 200,
                           width = "100%")),
              # Second Column
              box(
                width = 8,
                collapsible = T,
                title = "Messages per Day",
                plotOutput("Echo_Chart",
                           height = 150,
                           width = "100%"))
            ),
            # Third Row
            fluidRow(
              tabBox(
                width = 12,
                title = "Messages by Week",
                selected = "Message Length",
                # First Panel
                tabPanel("Message Length",
                         plotOutput("Line_Chart_Single_Count")),
                # Second Panel
                tabPanel("Mean Message Length",
                         plotOutput("Line_Chart_Single_Length")))
            ),
            # Fourth Row
            fluidRow(
              tabBox(
                width = 12,
                title = "Weekday by Hour",
                # First Panel
                tabPanel("Heatmap",
                         plotOutput("Heatmap",
                                    height = 500)),
                # Second Panel
                tabPanel("Scatter Plot",
                         plotOutput("Hour_Scatter",
                                    height = 350)),
                # Third Panel
                tabPanel("Line Plot",
                         plotOutput("Hour_Line",
                                    width = 500,
                                    height = 500)))
            )
    ),
    # * Me ----
    # Outgoing-message summaries for the phone's owner ("Sent" messages only).
    tabItem(tabName = "Messages_Sent",
            # First Row
            fluidRow(
              valueBoxOutput("Info_Me_Count",
                             width = 3),
              valueBoxOutput("Info_Me_Mean_Count_Daily",
                             width = 3),
              valueBoxOutput("Info_Me_Mean_Length_Daily",
                             width = 3),
              valueBoxOutput("Info_Me_Mean_Message_Length",
                             width = 3)
            ),
            # Second Row
            fluidRow(
              box(plotOutput("Me_Line"),
                  width = 12,
                  title = "Sent Messages",
                  collapsible = T,
                  collapsed = T)
            ),
            # Third Row
            fluidRow(
              box(plotOutput("Me_Overtime",
                             height = 500),
                  width = 12,
                  title = "Measures Over Time",
                  collapsible = T,
                  collapsed = T)
            ),
            # Fourth Row
            fluidRow(
              box(plotOutput("Me_Heatmap"),
                  width = 12,
                  title = "Message Count",
                  collapsible = T,
                  collapsed = F))
    )
  )
)
# --- Create UI ---
# Assemble the dashboard page from the three components defined above.
UI <- dashboardPage(Header, Sidebar, Body)
# Server ------------------------------------------------------------------
Server <- function(input, output, session) {
# Action: Update Dates ----
observeEvent(
input$Last_Month, {
updateDateRangeInput(
session,
"Slider_Date_Range",
label = "Date Range",
start = floor_date(floor_date(Data_Date_Range$Max, "month") - 1, "month"),
end = floor_date(Data_Date_Range$Max, "month") - 1
)
})
# Default Dates ---
observeEvent(
input$Default_Dates, {
updateDateRangeInput(
session,
"Slider_Date_Range",
label = "Date Range",
start = Data_Date_Range$Min,
end = Data_Date_Range$Max
)
})
# Three Months ---
observeEvent(
input$Three_Months, {
updateDateRangeInput(
session,
"Slider_Date_Range",
label = "Date Range",
start = floor_date(Data_Date_Range$Max %m-% months(3), "month"),
end = floor_date(Data_Date_Range$Max, "month") %m-% days(1)
)
})
# Six Months ---
observeEvent(
input$Six_Months, {
updateDateRangeInput(
session,
"Slider_Date_Range",
label = "Date Range",
start = floor_date(Data_Date_Range$Max %m-% months(6), "month"),
end = floor_date(Data_Date_Range$Max, "month") %m-% days(1)
)
})
# Data: Raw Date Filter ----
Data_Raw_Date_Filter <-
reactive({
SMS_Analysis %>%
filter(Date >= input$Slider_Date_Range[1],
Date <= input$Slider_Date_Range[2])
})
# Data: Full Dates ----
Data_Full_Dates <- reactive({
data_frame(
Day = seq.Date(input$Slider_Date_Range[1],
input$Slider_Date_Range[2],
"days"),
Week = floor_date(Day, "week"),
Month = floor_date(Day, "month")
)
})
# Data: Descriptives ----
Data_Descriptives <-
reactive({
Data_Raw_Date_Filter() %>%
group_by(Contact, Type) %>%
summarise(
Count = n(),
Length = str_count(Text) %>% sum(),
Mean_Length = str_count(Text) %>% mean() %>% round(digits = 2)
) %>%
full_join(
Data_Raw_Date_Filter() %>%
mutate(Date = floor_date(Date, unit = "days")) %>%
distinct(Date, Contact) %>%
count(Contact) %>%
rename(Days_Active = n)
) %>%
mutate(Messages_per_Day = round(Count / Days_Active, 2)) %>%
full_join(
Data_Raw_Date_Filter() %>%
mutate(Day = floor_date(Date, "day")) %>%
group_by(Contact, Type) %>%
summarise(First_Contact = format(min(Day), "%Y-%m-%d"),
Last_Contact = format(max(Day), "%Y-%m-%d"))
)
})
# Data: Top Contacts ----
Data_Top_Contacts <- reactive({
Data_Raw_Date_Filter() %>%
count(Contact) %>%
arrange(desc(n)) %>%
top_n(input$Top_Contacts_N) %>%
left_join(Data_Descriptives()) %>%
select(Contact,
Days_Active,
Total = n,
Type,
Count,
Length,
Mean_Length)
})
# Data: Daily ----
Data_Daily <- reactive({
Data_Raw_Date_Filter() %>%
mutate(Date = as.Date(floor_date(Date, unit = "day")),
Text_Length = str_count(Text)) %>%
group_by(Date, Contact) %>%
summarise(Count = n(),
Length = sum(Text_Length),
Mean_Length = mean(Text_Length) %>% round(digits = 2)) %>%
right_join(Data_Top_Contacts() %>%
distinct(Contact))
})
# Data: Difference ----
Data_Difference <- reactive({
Data_Top_Contacts() %>%
gather(Attribute, Value, Count:Mean_Length) %>%
arrange(Contact) %>%
mutate(Attribute_2 = str_c(Attribute, Type, sep = "_")) %>%
select(Contact, Attribute_2, Value) %>%
spread(Attribute_2, Value) %>%
mutate(Count_Difference = Count_Sent - Count_Received,
Length_Difference = Length_Sent - Length_Received,
Mean_Length_Difference = Mean_Length_Sent - Mean_Length_Received)
})
# Data: Order ----
Data_Order_Date <- reactive({
Data_Raw_Date_Filter() %>%
distinct(Contact) %>%
rownames_to_column("Date_Order_A") %>%
mutate(Date_Order_D = rev(Date_Order_A))
})
Data_Order_Rank <- reactive({
Data_Top_Contacts() %>%
distinct(Contact) %>%
rownames_to_column("Rank_Order_A") %>%
mutate(Rank_Order_D = rev(Rank_Order_A))
})
Data_Order <- reactive({
full_join(Data_Order_Date(), Data_Order_Rank()) %>%
group_by(Contact) %>%
mutate_all(as.numeric) %>%
select(Contact, contains("Date"), contains("Rank"))
})
# Data: Daily Top ----
Data_Daily_Top <- reactive({
Data_Daily() %>%
group_by(Date, Contact) %>%
summarise(Count = sum(Count),
Length = sum(Length)) %>%
right_join(Data_Order()) %>%
filter(!is.na(Rank_Order_A))
})
# Data: Contact Daily ----
Data_Contact_Daily <- reactive({
Data_Daily() %>%
ungroup() %>%
filter(Contact == input$Contact_Select) %>%
right_join(Data_Full_Dates() %>% select(Day), by = c("Date" = "Day")) %>%
complete(Date, Type, fill = list(Count = 0,
Length = 0,
Mean_Length = 0)) %>%
arrange(Date)
})
# Data: Echo Chart ----
Data_Echo_Chart <- reactive({
Data_Raw_Date_Filter() %>%
filter(Contact == input$Contact_Select) %>%
mutate(Date = as.Date(floor_date(Date, unit = "day"))) %>%
group_by(Date, Contact) %>%
summarise(Count = n()) %>%
right_join(Data_Full_Dates() %>% select(Day), by = c("Date" = "Day")) %>%
arrange(Date) %>%
replace_na(replace = list(Count = 0))
})
# Data: Single Line Chart ----
Data_Single_Line_Chart <- reactive({
# -- Weekly --
# Data_Raw_Date_Filter() %>%
# filter(Contact == input$Contact_Select) %>%
# mutate(Date = as.Date(floor_date(Date, unit = "week")),
# Text_Length = str_count(Text)) %>%
# group_by(Date, Contact, Type) %>%
# summarise(Count = n(),
# Length = sum(Text_Length),
# Mean_Length = mean(Text_Length) %>% round(digits = 2)) %>%
# ungroup() %>%
# right_join(Data_Full_Dates() %>% select(Week), by = c("Date" = "Week")) %>%
# complete(Date, Type, fill = list(Count = 0,
# Length = 0,
# Mean_Length = 0)) %>%
# arrange(Date) %>%
# distinct()
# -- Daily --
Data_Raw_Date_Filter() %>%
filter(Contact == input$Contact_Select) %>%
mutate(Date = as.Date(floor_date(Date, unit = "day")),
Text_Length = str_count(Text)) %>%
group_by(Date, Contact, Type) %>%
summarise(Count = n(),
Length = sum(Text_Length),
Mean_Length = mean(Text_Length) %>% round(digits = 2)) %>%
ungroup() %>%
right_join(Data_Full_Dates() %>% select(Day), by = c("Date" = "Day")) %>%
complete(Date, Type, fill = list(Count = 0,
Length = 0,
Mean_Length = 0)) %>%
arrange(Date) %>%
distinct()
})
# Data: Heatmap / Boxplot ----
Data_Heatmap_Boxplot <- reactive({
Data_Raw_Date_Filter() %>%
filter(Contact == input$Contact_Select) %>%
transmute(
Day = floor_date(Date, unit = "days"),
Weekday = wday(Date, label = T, abbr = F),
Time = hour(Date) + minute(Date) / 60,
Hour = hour(Date),
Type,
Length = str_count(Text))
})
# Data: Me ----
Data_Me <- reactive({
Data_Raw_Date_Filter() %>%
filter(Type == "Sent") %>%
mutate(Date = floor_date(Date, "days") %>% as.Date()) %>%
group_by(Date, Type) %>%
summarise(
Message_Count = n(),
Message_Length = str_count(Text) %>% sum(),
Mean_Length = str_count(Text) %>% mean() %>% round(digits = 2)
) %>%
full_join(
Data_Raw_Date_Filter() %>%
filter(Type == "Sent") %>%
mutate(Date = floor_date(Date, "day") %>% as.Date()) %>%
distinct(Date, Contact) %>%
group_by(Date) %>%
summarise(Contacts = n())
) %>%
select(Date, Type, Contacts,
Message_Count, Message_Length, Mean_Length) %>%
right_join(Data_Full_Dates() %>% select(Day),
by = c("Date" = "Day")) %>%
replace_na(
list(Type = "Sent",
Contacts = 0,
Message_Count = 0,
Message_Length = 0,
Mean_Length = 0))
})
# Data: Me Descriptive ----
Data_Me_Descriptive <- reactive({
Data_Me() %>%
filter(Type == "Sent") %>%
summarise(Total_Count = sum(Message_Count),
Mean_Count_Daily = mean(Message_Count),
Mean_Length_Daily = mean(Message_Length),
Mean_Message_Length = mean(Mean_Length))
})
# Menu: Filter Contact ----
observe({
input$Top_Contacts_N
updateSelectInput(session,
"Contact_Select",
choices = c("None Selected",
Data_Top_Contacts() %>% distinct(Contact))
)
})
# Value: Top N Order ----
Order <- reactive({
Data_Top_Contacts() %>% distinct(Contact)
})
# * Data View: Placeholder ----
# output$Placeholder <- renderDataTable({
# datatable(
# SMS_Me_Hourly_Heatmap()
# )
# })
# Data View: Descriptives ----
output$Data_Descriptives <- renderDataTable({
datatable(Data_Descriptives(),
caption = "Contact Details",
options = list(pageLength = 10),
colnames = c("Contact", "Message Type", "Message Count",
"Message Length", "Mean Message Length",
"Days Active", "Messages per Day",
"First Contact", "Last Contact"))
})
# Data View: Top Contacts ----
output$Data_Top_Contacts <- renderDataTable({
datatable(Data_Top_Contacts() %>% distinct(Contact, Total),
rownames = T,
options = list(paging = F,
searching = F))
})
# Data View: Contact Details ----
output$Data_Contact_Details <- renderDataTable({
datatable(
Data_Descriptives() %>%
filter(Contact == input$Contact_Select) %>%
ungroup() %>%
select(-Contact),
rownames = F,
options = list(paging = F,
searching = F),
colnames = c("Message Type", "Message Count",
"Message Length", "Mean Message Length",
"Days Active", "Messages per Day",
"First Contact", "Last Contact")
)
})
# Value View: Date Updated ----
output$Date_Updated <- renderInfoBox({
infoBox(
"Date Updated",
value = Data_Date_Range$Max,
icon = icon("download"),
color = "olive"
)
})
# Value View: Contacts Recorded ----
output$Contacts_Recorded <- renderInfoBox({
infoBox(
"Contacts Recorded",
value = Data_Raw_Date_Filter() %>% distinct(Contact) %>% nrow(),
icon = icon("users"),
color = "olive"
)
})
# Value View: Days Recorded ----
output$Days_Recorded <- renderInfoBox({
infoBox(
"Days Recorded",
value = Data_Raw_Date_Filter() %>%
mutate(Date = as.Date(Date)) %>%
distinct(Date) %>%
nrow(),
icon = icon("calendar"),
color = "olive"
)
})
# Value View: Text Count ----
output$Texts_Number <- renderValueBox({
valueBox(
"Total Messages",
value = Data_Raw_Date_Filter() %>% nrow() %>%
formatC(format = "d", big.mark = ","),
icon = icon("list"),
color = "blue"
)
})
# Value View: Sent Count ----
output$Sent_Number <- renderValueBox({
valueBox(
"Messages Sent",
value = Data_Raw_Date_Filter() %>% filter(Type == "Sent") %>% nrow() %>%
formatC(format = "d", big.mark = ","),
icon = icon("sign-out"),
color = "blue"
)
})
# Value View: Received Count ----
output$Received_Number <- renderValueBox({
valueBox(
"Messages Received",
value = Data_Raw_Date_Filter() %>% filter(Type == "Received") %>% nrow() %>%
formatC(format = "d", big.mark = ","),
icon = icon("sign-in"),
color = "blue"
)
})
# Value View: Selected Contact ----
output$Info_Selected_Contact <- renderValueBox({
infoBox(
"Selected Contact",
value = input$Contact_Select,
icon = icon("user"),
color = "olive"
)
})
# Value View: Me Count ----
output$Info_Me_Count <- renderValueBox(
valueBox("Messages Sent",
value =
Data_Me_Descriptive() %>%
transmute(Total_Count %>% formatC(format = "d", big.mark = ",")),
icon = icon("sign-out"),
color = "blue")
)
output$Info_Me_Mean_Count_Daily <- renderValueBox(
valueBox("Messages per Day",
value =
Data_Me_Descriptive() %>%
transmute(Mean_Count_Daily %>% round(digits = 2)),
color = "blue",
icon = icon("envelope"))
)
output$Info_Me_Mean_Length_Daily <- renderValueBox(
valueBox("Characters per Day",
value =
Data_Me_Descriptive() %>%
transmute(Mean_Length_Daily %>% round(digits = 2)),
color = "blue",
icon = icon("edit"))
)
output$Info_Me_Mean_Message_Length <- renderValueBox(
valueBox("Characters per Message",
value = Data_Me_Descriptive() %>%
transmute(Mean_Message_Length %>% round(digits = 2)),
color = "blue",
icon = icon("edit"))
)
# Plotly: Top - Bar Charts ----
output$Bar_Charts <- renderPlotly({
Bar_Chart_Base <-
Data_Top_Contacts() %>%
group_by(Contact) %>%
mutate(Prop_Count = round(Count / sum(Count), 2),
Prop_Length = round(Length / sum(Length), 2)) %>%
ungroup() %>%
plot_ly(
y = ~Contact,
color = ~Type,
colors = c("#FF6347", "#4682B4")) %>%
layout(barmode = "stack",
yaxis = list(title = "",
tickangle = 0,
categoryorder = "trace",
autorange = "reversed"),
margin = list(l = 150,
b = 100,
pad = 10))
subplot(
Bar_Chart_Base %>% add_bars(x = ~Count) %>%
layout(xaxis = list(title = "Count")),
Bar_Chart_Base %>% add_bars(x = ~Length) %>%
layout(xaxis = list(title = "Length")),
shareX = FALSE,
shareY = TRUE,
titleX = T,
nrows = 1
)
})
# Plotly: Top - Line Charts ----
output$Line_Chart <- renderPlotly({
Plotly_Line_Group <-
Data_Daily() %>%
ungroup() %>%
group_by(Contact) %>%
plot_ly(x = ~Date, y = ~Count) %>%
add_lines(alpha = 0.2,
name = "All Contacts",
hoverinfo = "none")
Plotly_Line_Group %>%
filter(Contact == input$Contact_Select) %>%
add_lines(name = input$Contact_Select,
line = list(shape = "spline")) %>%
layout(showlegend = F,
title = str_c("Selected Contact: ", input$Contact_Select))
})
# ggPlot: Top - Range Chart ----
output$Range_Chart <- renderPlot(
Data_Daily_Top() %>%
ggplot(aes(x = Date, y = reorder(Contact, Date_Order_D))) +
geom_point(shape = 15,
alpha = .4) +
scale_x_date(date_labels = "%b '%y",
date_minor_breaks = "1 month",
position = "top",
name = NULL) +
scale_y_discrete(name = NULL) +
theme_minimal() +
theme(
panel.grid.major.y = element_line(linetype = 1, color = "gray"),
panel.grid.major.x = element_line(linetype = 3, color = "black"),
panel.grid.minor.x = element_line(linetype = 3, color = "gray"),
axis.line.y = element_line(linetype = 1),
plot.margin = unit(c(.5, 1, .5, .5), "cm"),
axis.text = element_text(size = rel(1.1)),
axis.title = element_text(size = rel(1.1))
)
)
# ggPlot: Top - Difference Chart ----
output$Difference_Chart <- renderPlot({
# GG_Difference <-
Data_Difference() %>%
ggplot(aes(x = Mean_Length_Received, y = Mean_Length_Sent,
fill = abs(Count_Difference), size = abs(Length_Difference),
text = Contact)) +
geom_point(shape = 22, alpha = .75) +
scale_fill_viridis(name = "Count \nDifference", direction = -1, option = "plasma") +
scale_size(name = "Length \nDifference", range = c(2, 12)) +
geom_abline(slope = 1, intercept = 0, linetype = 2) +
theme_minimal() +
labs(
x = "Received",
y = "Sent",
title = "Average Number of Characters per Message"
) +
theme(
axis.text = element_text(size = rel(1.1)),
axis.title.x = element_text(size = rel(1.1),
face = "bold"),
axis.title.y = element_text(size = rel(1.1),
face = "bold")
)
# GG_Difference_Count %>% ggplotly()
})
# * ggPlot: Hover Info ----
output$hover_info <- renderUI({
hover <- input$plot_hover
point <- nearPoints(Data_Difference(), hover, threshold = 5, maxpoints = 1, addDist = TRUE)
if (nrow(point) == 0) return(NULL)
# calculate point position INSIDE the image as percent of total dimensions from left (horizontal) and from top (vertical)
left_pct <- (hover$x - hover$domain$left) / (hover$domain$right - hover$domain$left)
top_pct <- (hover$domain$top - hover$y) / (hover$domain$top - hover$domain$bottom)
# calculate distance from left and bottom side of the picture in pixels
left_px <- hover$range$left + left_pct * (hover$range$right - hover$range$left)
top_px <- hover$range$top + top_pct * (hover$range$bottom - hover$range$top)
# create style property fot tooltip
# background color is set so tooltip is a bit transparent
# z-index is set so we are sure are tooltip will be on top
style <- str_c("position:absolute; z-index:100; background-color: rgba(245, 245, 245, 0.85); ",
"left:", left_px + 2, "px; top:", top_px + 2, "px;")
# actual tooltip created as wellPanel
wellPanel(
style = style,
p(HTML(str_c("<b> Contact: </b>", point$Contact, "<br/>",
"<b> Mean Received Length: </b>", point$Mean_Length_Received, "<br/>",
"<b> Mean Sent Length: </b>", point$Mean_Length_Sent, "<br/>",
"<b> Count Difference: </b>", abs(point$Count_Difference), "<br />",
"<b> Length Difference: </b>", abs(point$Length_Difference), "<br />"
)))
)
})
# ggPlot: Single - Echo Chart ----
output$Echo_Chart <- renderPlot({
Data_Echo_Chart() %>%
group_by(Date) %>%
summarise(
Count = Count %>% sum()
) %>%
ggplot(
aes(
x = Date,
y = factor(1))) +
geom_point(
aes(size = Count),
shape = 15,
alpha = .1,
color = "black") +
scale_size_area(
max_size = 24) +
scale_x_date(
name = NULL,
date_labels = "%b %Y") +
scale_y_discrete(
name = NULL,
breaks = NULL) +
theme_minimal() +
guides(size = F) +
theme(
panel.background = element_rect(color = "gray"),
panel.grid.major.x = element_line(color = "gray", linetype = 2),
panel.grid.minor.x = element_line(color = "gray", linetype = 2),
axis.text = element_text(size = 12)
)
})
# ggPlot: Single - Pie Chart ----
output$Pie_Chart <- renderPlot({
Data_Raw_Date_Filter() %>%
ungroup() %>%
filter(Contact == input$Contact_Select) %>%
group_by(Contact, Type) %>%
summarise(
Count = n(),
`Mean Length` = str_count(Text) %>% mean() %>% round(digits = 2)) %>%
gather(Measure, Value, Count:`Mean Length`) %>%
ggplot(aes(x = Measure,
y = Value,
fill = Type,
label = Value)) +
geom_bar(stat = "identity",
position = "fill",
color = "black",
width = 1) +
geom_text(position = position_fill(vjust = .5),
size = 5,
fontface = "bold",
color = "white") +
facet_grid(. ~ Measure, scales = "free") +
coord_polar(theta = "y", start = 0) +
theme_minimal() +
labs(x = NULL, y = NULL) +
theme(axis.text = element_blank(),
strip.text = element_text(face = "bold", size = 11),
legend.position = "bottom",
legend.title = element_blank(),
strip.placement = "inside")
})
# ggPlot: Single - Line Chart - Mean Length ----
output$Line_Chart_Single_Length <- renderPlot({
Data_Single_Line_Chart() %>%
ggplot(aes(x = Date, y = Mean_Length)) +
geom_point(aes(fill = Type, size = Count),
alpha = .5,
color = "black",
shape = 22) +
scale_size_area(max_size = 12) +
geom_smooth(span = 0.25,
se = F,
method = "loess",
color = "black",
fullrange = T) +
scale_x_date(name = NULL,
date_minor_breaks = "1 month") +
scale_y_log10(name = NULL) +
theme_minimal() +
guides(size = guide_legend(title = "Message Count",
title.position = "top"),
fill = guide_legend(title = "Message Type",
title.position = "top")) +
theme(
legend.position = "bottom"
)
})
# ggPlot: Single - Line Chart - Length ----
output$Line_Chart_Single_Count <- renderPlot({
Data_Single_Line_Chart() %>%
ggplot(aes(x = Date, y = Length)) +
geom_point(aes(fill = Type,
size = Count),
alpha = .4,
shape = 22) +
scale_size_area(max_size = 12) +
geom_smooth(
span = 0.25,
se = F,
method = "loess",
color = "black") +
scale_x_date(
name = NULL,
date_minor_breaks = "1 month") +
guides(
size = guide_legend(title = "Message Count",
title.position = "top"),
fill = guide_legend(title = "Message Length",
title.position = "top")) +
labs(y = NULL) +
theme_minimal() +
theme(
legend.position = "bottom"
)
})
# ggPlot: Single - Heatmap ----
output$Heatmap <- renderPlot({
Data_Heatmap_Boxplot() %>%
group_by(Weekday, Hour, Type) %>%
summarise(Hourly_Count = n(),
Hourly_Length = sum(Length),
Mean_Hourly_Length = mean(Length)) %>%
full_join(tibble(Hour = seq(0, 23, by = 1))) %>% ungroup() %>%
complete(Hour, Weekday, Type,
fill = list(Hourly_Count = 0,
Hourly_Length = 0,
Mean_Hourly_Length = 0)) %>%
mutate(Hour = as.factor(str_pad(Hour, 2, "left", "0"))) %>%
ggplot(aes(x = Hour, y = Weekday, fill = Hourly_Length)) +
geom_tile(color = "white", size = .5) +
coord_equal() +
facet_grid(Type ~ .) +
scale_fill_viridis(name = "Message\nLength", option = "D", direction = -1) +
theme_tufte(base_family = "Helvetica", ticks = F) +
scale_y_discrete(limits = rev(levels(Data_Heatmap_Boxplot()$Weekday))) +
labs(x = NULL, y = NULL, title = "Message Length by Weekday and Hour") +
theme(
legend.title = element_text(size = 12),
legend.text = element_text(size = 12),
legend.key.height = unit(10, "mm"),
legend.title.align = 1,
strip.text = element_text(size = 12),
axis.text = element_text(size = 12)
)
})
# ggPlot: Single - Hour Scatter ----
output$Hour_Scatter <- renderPlot({
Data_Heatmap_Boxplot( ) %>%
ggplot(aes(x = Type, y = Time, fill = Type,
alpha = Length, size = Length)) +
geom_count(position = position_jitter(width = .4),
shape = 22) +
scale_alpha_continuous(range = c(.25, .75)) +
scale_size_area(max_size = 6) +
facet_grid(~ Weekday) +
labs(x = NULL, y = NULL, "Message Length by Weekday and Hour") +
guides(size = guide_legend(title = "Message Length"),
fill = guide_legend(title = "Message Type"),
alpha = F) +
scale_x_discrete(breaks = NULL) +
scale_y_reverse(breaks = seq(0, 24, by = 6)) +
theme_minimal() +
theme(
panel.background = element_rect(color = "gray"),
legend.title = element_text(size = 12),
legend.text = element_text(size = 12),
legend.key.height = unit(10, "mm"),
legend.title.align = 1,
strip.text = element_text(size = 12),
axis.text = element_text(size = 12)
)
})
# ggPlot: Single - Hour Line ----
output$Hour_Line <- renderPlot({
Data_Heatmap_Boxplot() %>%
group_by(
Weekday,
Hour) %>%
summarise(
Count = n(),
Length = sum(Length),
Mean_Length = mean(Length)) %>%
ggplot(
aes(x = Hour,
y = Length,
alpha = Count)) +
geom_pointrange(
aes(ymin = 0, ymax = Length),
fatten = 2.5,
linetype = 1) +
scale_alpha(
range = c(.2, .8)) +
guides(
alpha = F) +
scale_y_continuous(
name = NULL,
breaks = NULL) +
theme_minimal() +
theme(
strip.text.y = element_text(size = 12, angle = 180, face = "bold"),
panel.background = element_rect(color = "gray"),
axis.text = element_text(size = 12)) +
facet_grid(
Weekday ~ .,
switch = "both") +
scale_x_continuous(
name = NULL,
breaks = seq(0, 24, by = 3))
})
# ggPlot: Me - Line ----
# Scatter of daily sent-message counts over time with a loess trend line.
# Point fill encodes total message length; point size encodes the number
# of distinct contacts messaged that day.
output$Me_Line <- renderPlot({
  Data_Me() %>%
    filter(Type == "Sent") %>%
    ggplot(aes(x = Date, y = Message_Count)) +
    geom_point(shape = 22,  # filled square so `fill` applies
               aes(fill = Message_Length,
                   size = Contacts),
               alpha = .75) +
    geom_smooth(se = F, method = "loess", span = .4) +
    scale_fill_viridis(direction = -1) +
    guides(fill = guide_colorbar(title = "Message \nLength"),
           size = guide_legend(title = "Number of \nContacts")) +
    theme_minimal() +
    labs(x = NULL, y = "Count") +
    scale_x_date(date_minor_breaks = "1 month",
                 date_labels = "%b %Y") +
    theme(
      legend.text = element_text(size = 12),
      legend.title = element_text(size = 12, face = "bold"),
      legend.title.align = .5,
      legend.key.height = unit(10, "mm"),
      axis.text = element_text(size = 12),
      axis.title = element_text(size = 12, face = "bold")
    )
})
# ggPlot: Me - Over Time ----
# Stacked time-series panels (contacts, mean length, message count, total
# length) for sent messages, each with its own y scale and a loess smoother.
output$Me_Overtime <- renderPlot({
  Data_Me() %>%
    filter(Type == "Sent") %>%
    # reshape wide -> long so each measure becomes its own facet row
    gather(Measure, Value, Contacts:Mean_Length) %>%
    ggplot(aes(x = Date, y = Value)) +
    facet_grid(Measure ~ ., scales = "free_y", switch = "y",
               # human-readable row labels for the measure names
               labeller = as_labeller(
                 c("Contacts" = "Number of \nContacts Messaged",
                   "Mean_Length" = "Average Length \nof Message",
                   "Message_Count" = "Number \nof Messages",
                   "Message_Length" = "Length \nof Messages"))) +
    geom_line(alpha = .5) +
    geom_smooth(se = F, method = "loess", span = .1) +
    scale_x_date(date_minor_breaks = "1 month",
                 date_labels = "%b %Y") +
    scale_y_continuous(expand = c(0.15, 0)) +
    theme_minimal() +
    theme(
      strip.text.y = element_text(angle = 180, face = "bold", size = 12),
      panel.background = element_rect(color = "gray"),
      panel.grid.major.x = element_line(color = "gray", linetype = 2),
      panel.grid.minor.x = element_line(color = "gray", linetype = 3),
      strip.placement = "outside",
      axis.title = element_blank(),
      axis.text.x = element_text(size = 12)
    )
})
# ggPlot: Me - Heatmap ----
# Per-message table of sent messages with day/weekday/hour breakdowns and
# the character count of each text; feeds the hourly heatmap below.
SMS_Me_Hourly <- reactive({
  Data_Raw_Date_Filter() %>%
    filter(Type == "Sent") %>%
    transmute(
      Day = floor_date(Date, unit = "days"),
      Weekday = wday(Date, label = T, abbr = F),
      Time = hour(Date) + minute(Date) / 60,  # fractional hour of day
      Hour = hour(Date),
      Type,
      Length = str_count(Text))  # message length in characters
})
# Aggregate sent messages to weekday x hour cells, filling in combinations
# with no messages (zeros rather than missing tiles) so the heatmap grid
# is complete.
SMS_Me_Hourly_Heatmap <- reactive({
  SMS_Me_Hourly() %>%
    group_by(Weekday, Hour) %>%
    summarise(Hourly_Count = n(),
              Hourly_Length = sum(Length),
              Mean_Hourly_Length = mean(Length)) %>%
    # make sure every hour 0-23 is represented even if never observed
    full_join(tibble(Hour = seq(0, 23, by = 1))) %>%
    complete(Hour,
             Weekday,
             fill = list(Mean_Hourly_Length = 0,
                         Hourly_Count = 0,
                         Hourly_Length = 0)) %>%
    # zero-pad so hours sort/display as "00".."23" on a discrete axis
    mutate(Hour = as.factor(str_pad(Hour, 2, "left", "0"))) %>%
    ungroup() %>%
    distinct()
})
# Weekday x hour tile heatmap of sent-message counts.
output$Me_Heatmap <- renderPlot({
  SMS_Me_Hourly_Heatmap() %>%
    ggplot(
      aes(x = Hour,
          y = Weekday,
          fill = Hourly_Count)) +
    geom_tile(
      color = "white",  # white gridlines between tiles
      size = .5) +
    coord_equal() +  # square tiles
    scale_fill_viridis(
      name = "Message \nCount",
      option = "D",
      direction = -1) +
    theme_tufte(base_family = "Helvetica",
                ticks = F) +
    # reverse the weekday factor order so the first level sits at the top
    scale_y_discrete(
      limits = rev(levels(SMS_Me_Hourly_Heatmap()$Weekday))) +
    labs(
      x = NULL,
      y = NULL,
      title = "By Weekday and Hour") +
    theme(
      legend.title = element_text(size = 12),
      legend.text = element_text(size = 12),
      legend.title.align = .5,
      legend.key.height = unit(10, "mm"),
      axis.text = element_text(size = 12)
    )
})
}
# --- Run the App ---
shinyApp(UI, Server)  # launch the app with the UI and Server defined above
|
b67a94baeab3bbbb497a593b77c52013fdf84784
|
914c515a6c85356ee148950f5d93be59ee112b4c
|
/tests/testthat/test_reg_ladlasso.R
|
24fd682936f7510a65a4df099f6fe0b9fe39662b
|
[] |
no_license
|
Mufabo/Rrobustsp
|
917fb8e9a353ffc7a838544fa5cd1d39eae34f6c
|
1700f4fed27e63cec6dfb67a14a208d8f46e2418
|
refs/heads/master
| 2022-06-14T22:15:50.045970
| 2022-06-05T12:13:32
| 2022-06-05T12:13:32
| 222,286,547
| 0
| 3
| null | 2019-12-19T14:17:46
| 2019-11-17T17:36:00
|
R
|
UTF-8
|
R
| false
| false
| 2,898
|
r
|
test_reg_ladlasso.R
|
library(Rrobustsp)
library(MASS)
library(Matrix)
test_that('ladlasso1 p>1, no intcpt', {
  # LAD-lasso on the noisy image data, compared against a saved reference
  # solution (sol) and a reference iteration count.
  data('images')
  load(path_test('Blas'))
  load(path_test('sol1'))

  y20n <- unlist(unname(images['y20n']))

  lambda1 <- 124
  intcpt <- FALSE  # FALSE instead of F: T/F are reassignable bindings

  Rsol <- ladlasso(y20n, diag(1, 400, 400), lambda1, intcpt, Blas)
  R_sol <- Rsol[[1]]
  R_it <- Rsol[[2]]

  expect_equal(R_it, 11)
  # agree with the reference up to 3 decimal places
  expect_equal(round(R_sol - sol, digits = 3), rep(0, length(sol)))
})
test_that('ladlasso2 intcpt, p>1', {
  # Same data as the previous test but with an intercept term.
  data('images')

  y20n <- unlist(unname(images['y20n']))

  lambda1 <- 124
  intcpt <- TRUE  # TRUE instead of T: T/F are reassignable bindings

  load(path_test('sol2'))

  Rsol <- ladlasso(y20n, diag(1, 400, 400), lambda1, intcpt)
  R_it <- Rsol[[2]]
  R_sol <- Rsol[[1]] # N x 1 matrix, whereas sol is N

  expect_equal(round(R_sol - sol, digits = 3), rep(0, length(sol))) # one of 401 mismatches with a difference of 0.003
  expect_equal(R_it, 20) # returns 41 instead of 20 ...
})
#test_that('ladlasso 3, intcpt, p=1'{
# eye-check, is correct
#})
test_that('rankflasso test 1 long', {
  skip('takes forever')
  # Args ----
  X <- diag(1, 400, 400)
  load('~/Rrobustsp/data/images.RData')
  load(path_test('blas'))

  y <- unlist(unname(images['y20n']))
  lambda2 <- 340
  lambda1 <- 124
  b0 <- Blas
  printitn <- 0
  # ----
  # Inlined rankflasso: build the pairwise-difference design and solve
  # the fused-lasso problem with ladlasso.
  n <- nrow(X)
  p <- ncol(X)
  intcpt <- FALSE  # FALSE instead of F: T/F are reassignable bindings

  if (is.null(b0)) {
    b0 <- MASS::ginv(cbind(rep(1, n), X)) %*% y #qr.solve(cbind(rep(1, n), X), y)
    # BUG FIX: was `bo[2:length(b0)]` — `bo` is an undefined name (typo
    # for `b0`), which would error whenever b0 had to be computed here.
    b0 <- b0[2:length(b0)]
  }

  # index pairs (a, b) with a < b for all row differences
  B <- repmat(1:n, n)
  A <- t(B)
  a <- A[A < B]
  b <- B[A < B]

  # difference operator for the fusion penalty
  D <- diag(-1, p-1, p-1)
  D[seq(p, (p-1)^2, p)] <- 1
  D <- cbind(D, c(rep(0, p-2), 1))

  ytilde <- c(y[a] - y[b], rep(0, p-1))
  Xtilde <- rbind(X[a,] - X[b,], lambda2 * D)

  if (printitn > 0) sprintf('rankflasso: starting iterations\n')

  r <- ladlasso(ytilde, Xtilde, lambda1, FALSE, b0, printitn = printitn)
  iter <- r[[2]]
  r <- r[[1]]
  r[abs(r) < 1e-7] <- 0  # snap numerically-zero coefficients to zero

  expect_equal(iter, 11)

  load(path_test('reg_rankflasso_1'))
  expect_equal(r, reg_rankflasso_1)
})
test_that('rankflasso short', {
  skip('takes forever')
  # Args ----
  X <- diag(1, 6, 3)
  y <- c(0.01739562 ,-1.28630053, -1.64060553 , 0.45018710, -0.01855983 ,-0.31806837)
  lambda2 <- 3
  lambda1 <- 1
  b0 <- NULL
  printitn <- 0
  # ----
  # Small inlined rankflasso instance with an explicit expected solution.
  n <- nrow(X)
  p <- ncol(X)

  if (is.null(b0)) {
    # initial estimate from an LS fit with intercept, intercept dropped
    b0 <- qr.solve(cbind(rep(1, n), X), y)
    b0 <- b0[2:length(b0)]
  }

  # index pairs (a, b) with a < b for all row differences
  B <- repmat(1:n, n)
  A <- t(B)
  a <- A[A < B]
  b <- B[A < B]

  # difference operator for the fusion penalty
  D <- diag(-1, p-1, p-1)
  D[seq(p, (p-1)^2, p)] <- 1
  D <- cbind(D, c(rep(0, p-2), 1))

  ytilde <- c(y[a] - y[b], rep(0, p-1)) # stimmt
  Xtilde <- rbind(X[a,] - X[b,], lambda2 * D) # stimmt

  if (printitn > 0) sprintf('rankflasso: starting iterations\n')

  # FALSE instead of F: T/F are reassignable bindings
  r <- ladlasso(ytilde, Xtilde, lambda1, FALSE, b0, printitn)
  iter <- r[[2]]
  r <- r[[1]]
  r[abs(r) < 1e-7] <- 0  # snap numerically-zero coefficients to zero

  expect_equal(round(r, digits = 4), c(0, -1.2255, -1.3225))
  expect_equal(iter, 49)
})
|
718803f1a4e8ff249c207e5aef784e1b448d4f52
|
70ea7b064ab882412346d701ee9896f6a0728b0f
|
/code.R
|
e190e6e4adfee23d1bbe62bcb9cf4e6397338e83
|
[] |
no_license
|
AkshayaA8/Policy-coverage-option-prediction
|
e4c350c40deba0eb3d9ecd099e9427cf91abfbab
|
2ad6b68e67c5f9b17e5ab25b96b43dc27afc3f6c
|
refs/heads/master
| 2020-04-13T07:11:40.740773
| 2018-12-25T03:47:09
| 2018-12-25T03:47:09
| 163,043,569
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,140
|
r
|
code.R
|
#get the train data, datatable is my_data
# Keep only the purchase records (record_type == 1) and drop the first
# seven columns (IDs/timestamps, based on the column positions used below).
plcy_data <- my_data[my_data$record_type==1,]
plcy_data <- plcy_data[,-c(1:7)]
str(plcy_data)

#converting independent variables to appropriate datatype
# columns 2 and 8 are treated as categorical predictors
col_names <- colnames(plcy_data[,c(2,8)])
for(i in col_names){
  plcy_data[[i]] <- as.factor(plcy_data[[i]])
}
plcy_data$C_previous <- ordered(plcy_data$C_previous)

#converting dependent variables to appropriate datatype
# columns 11:17 — presumably the coverage options A-G fitted below; each
# becomes an ordered factor as required by polr(). TODO confirm positions.
col_names <- colnames(plcy_data[,c(11:17)])
for(i in col_names){
  plcy_data[[i]] <- ordered(plcy_data[[i]])
}

#sampling the data
# NOTE(review): no set.seed() before sample(), so this 20k subset is not
# reproducible across runs.
plcy_data_sample = plcy_data[sample(nrow(plcy_data),20000),]
#checking for missing values
# Column-by-column NA counts (observed counts noted inline).
sum(is.na(plcy_data_sample$group_size))
sum(is.na(plcy_data_sample$homeowner))
sum(is.na(plcy_data_sample$car_age))
sum(is.na(plcy_data_sample$car_value))
sum(is.na(plcy_data_sample$risk_factor)) #7028 missing values
sum(is.na(plcy_data_sample$age_oldest))
sum(is.na(plcy_data_sample$age_youngest))
sum(is.na(plcy_data_sample$married_couple))
sum(is.na(plcy_data_sample$C_previous)) #175 missing values
sum(is.na(plcy_data_sample$duration_previous)) #175 missing values
sum(is.na(plcy_data_sample$cost))

#handling missing values
# Impute with predictive mean matching (3 imputations, 3 iterations),
# seeded for reproducibility, keeping the first completed dataset.
library(mice)
set.seed(1000)
plcy_data_mice = mice(plcy_data_sample,maxit = 3,m=3, method = 'pmm')
final_data = data.frame(complete(plcy_data_mice,1))
str(final_data)
str(final_data)
#Removing age_oldest & age_youngest
final_data$age_diff = final_data$age_oldest - final_data$age_youngest
final_data <- final_data[,-c(6:7)]
#Grouping car value b,c & i to a and h
final_data$car_value[final_data$car_value=='b' | final_data$car_value=='c'] = 'a'
final_data$car_value[final_data$car_value=='i'] = 'h'
final_data$car_value <- factor(final_data$car_value,levels = c('a','d','e','f','g','h'))
table(final_data$car_value)
#checking for outliers
bp = boxplot(final_data$car_age)
summary(bp)
length(boxplot.stats(final_data$car_age)$out) #203 outliers
hist(final_data$car_age)
sort(bp$out,decreasing = TRUE)
#correlation
library(corrplot)
library(RColorBrewer)
corr = cor(final_data[,c(1,3,8,17)])
corr_m = cor.mtest(final_data[,c(1,3,8,17)],conf.level = 0.95)
corrplot(corr, type='upper',method='number',p.mat = corr_m$p, col = brewer.pal(n = 8, name = "RdYlBu"))
View(corr)
#------------ORDINAL LOGISTIC REGRESSION to predict coverage options-----------
require(MASS)
require(caret)
# NOTE(review): train_data / test_data are created further down this script
# (the set.seed(1000) split in the linear-model section) — that split must
# be run before these model fits.

#Product A
# Proportional-odds model for option A; Hess=TRUE keeps the Hessian so
# summary() can compute standard errors.
modelA <- polr(A ~ group_size+ homeowner + car_age + car_value + risk_factor +
                 married_couple + C_previous + duration_previous, data = train_data, Hess=TRUE)
summary(modelA)
# two-sided p-values from the t statistics (polr does not report them)
ctable <- coef(summary(modelA))
p <- pnorm(abs(ctable[, "t value"]), lower.tail = FALSE) * 2
(ctable <- cbind(ctable, "p value" = p))
# profile and Wald confidence intervals, plus odds ratios
(ci <- confint(modelA))
confint.default(modelA)
exp(coef(modelA))
exp(cbind(OR = coef(modelA), ci))
confusionMatrix(predict(modelA, newdata = test_data), test_data$A, dnn = c("Predictions", "Actual Values"))
#Product B
# Binary coverage option: logistic regression.
modelB <- glm(B ~ group_size+ homeowner + car_age + car_value + risk_factor +
                married_couple + C_previous + duration_previous,
              family = binomial(link = 'logit'), data = train_data)
# BUG FIX: predict.glm defaults to the link (log-odds) scale, so tabulating
# the raw predictions against the observed class produced a meaningless
# table. Predict probabilities and threshold at 0.5 before tabulating.
pred = ifelse(predict(modelB, newdata = test_data, type = "response") > 0.5,
              levels(test_data$B)[2], levels(test_data$B)[1])
accuracy <- table(pred, test_data$B)
sum(diag(accuracy))/sum(accuracy)
#Product C
# Proportional-odds (ordinal) model for option C.
modelC <- polr(C ~ group_size+ homeowner + car_age + car_value + risk_factor +
                 married_couple + C_previous + duration_previous, data = train_data, Hess=TRUE)
summary(modelC)
confusionMatrix(predict(modelC, newdata = test_data), test_data$C, dnn = c("Predictions", "Actual Values"))

#Product D
# Proportional-odds (ordinal) model for option D.
modelD <- polr(D ~ group_size+ homeowner + car_age + car_value + risk_factor +
                 married_couple + C_previous + duration_previous, data = train_data, Hess=TRUE)
summary(modelD)
confusionMatrix(predict(modelD, newdata = test_data), test_data$D, dnn = c("Predictions", "Actual Values"))
#Product E
# Binary coverage option: logistic regression.
modelE <- glm(E ~ group_size+ homeowner + car_age + car_value + risk_factor +
                married_couple + C_previous + duration_previous,
              family = binomial(link = 'logit'), data = train_data)
# BUG FIXES: (1) the accuracy table compared against test_data$B — a
# copy-paste leftover from the Product B block — instead of test_data$E;
# (2) predictions were on the link scale, so predict probabilities
# (type = "response") and threshold at 0.5 before tabulating.
pred = ifelse(predict(modelE, newdata = test_data, type = "response") > 0.5,
              levels(test_data$E)[2], levels(test_data$E)[1])
accuracy <- table(pred, test_data$E)
sum(diag(accuracy))/sum(accuracy)
#Product F
# Proportional-odds (ordinal) model for option F.
modelF <- polr(F ~ group_size+ homeowner + car_age + car_value + risk_factor +
                 married_couple + C_previous + duration_previous, data = train_data, Hess=TRUE)
summary(modelF)
confusionMatrix(predict(modelF, newdata = test_data), test_data$F, dnn = c("Predictions", "Actual Values"))

#Product G
# Proportional-odds (ordinal) model for option G.
modelG <- polr(G ~ group_size+ homeowner + car_age + car_value + risk_factor +
                 married_couple + C_previous + duration_previous, data = train_data, Hess=TRUE)
summary(modelG)
confusionMatrix(predict(modelG, newdata = test_data), test_data$G, dnn = c("Predictions", "Actual Values"))

# Merge level 2 of G into level 1 (G_new) and refit.
# NOTE(review): `train_data$G[i] == 2` compares an ordered factor against 2
# via its label, and the else-branch stores factor elements into a fresh
# column (which keeps the underlying integer codes). This only behaves as
# intended if G's levels are exactly "1".."4" — confirm before trusting,
# and if confirmed, a vectorized ifelse() could replace these row loops.
for (i in 1:nrow(train_data)){
  if (train_data$G[i] == 2){
    train_data$G_new[i] = 1
  } else {train_data$G_new[i] = train_data$G[i]}
}
summary(train_data$G_new)
for (i in 1:nrow(test_data)){
  if (test_data$G[i] == 2){
    test_data$G_new[i] = 1
  } else {test_data$G_new[i] = test_data$G[i]}
}
summary(test_data$G_new)
train_data$G_new = factor(train_data$G_new)
modelG_new <- polr(G_new ~ group_size+ homeowner + car_age + car_value + risk_factor +
                     married_couple + C_previous + duration_previous, data = train_data, Hess=TRUE)
summary(modelG_new)
# NOTE(review): predictions on the merged scale are compared against the
# unmerged test_data$G — confirm this comparison is intended.
confusionMatrix(predict(modelG_new, newdata = test_data), test_data$G, dnn = c("Predictions", "Actual Values"))
#---------------LINEAR MODEL to predict cost----------------------------
# 70/30 train/test split (seeded for reproducibility); cost is modelled
# on the log10 scale.
set.seed(1000)
index = sample(2,nrow(final_data),replace=TRUE,prob = c(0.7,0.3))
train_data = final_data[index==1,]
test_data = final_data[index==2,]
train_data$cost = log10(train_data$cost)
test_data$cost = log10(test_data$cost)

lm_model = lm(cost~.,data=train_data)
summary(lm_model)

# Backward stepwise selection starting from the full model.
no_model = lm(cost~1,data=train_data)
step(lm_model,scope = list(lower = no_model,upper = lm_model),direction = "backward")

# Reduced model with the variables retained by the stepwise search.
lm_model_new = lm(cost ~ homeowner + car_age + car_value + risk_factor +
                    married_couple + C_previous + duration_previous + A +
                    C + D + E + F + G + age_diff, data = train_data)
summary(lm_model_new)
plot(lm_model_new)

pred_lm = predict(lm_model_new,test_data)
# BUG FIX: sqrt(mean(pred - actual)^2) is |mean error| (bias), not RMSE —
# the residuals must be squared BEFORE averaging.
sqrt(mean((pred_lm - test_data$cost)^2))
#------------RANDOM FOREST to predict cost-------------------
library(randomForest)
rf_model = randomForest(cost ~ group_size + homeowner + car_age + car_value + risk_factor +
                          married_couple + C_previous + duration_previous + A +
                          C + E + F + G + age_diff, data = train_data, mtry = 10, ntree=100)
pred_rf = predict(rf_model,test_data)
# BUG FIX: square the residuals before averaging to get the true RMSE
# (the original computed the absolute mean error instead).
sqrt(mean((pred_rf - test_data$cost)^2))

#extracting some decision rules
# Convert the forest to a rule list, score the rules on the training data,
# and render them in readable form.
library(inTrees)
X <- train_data[,c(2:15,17)]
treeList = RF2List(rf_model)
exec <- extractRules(treeList, X)
ruleMetric <- getRuleMetric(exec,X,train_data$cost)
readableRules <- presentRules(ruleMetric, colnames(X))
|
7770f92e29bbb0e9427af89d1baa51c33bdaafa7
|
0ab9d648721011f72953ae128d3c8595a91cdd97
|
/codes/create_grid.R
|
9390e02bed263afae77b2a8b67c32de0a2b67fb0
|
[] |
no_license
|
jms5151/Fore-C
|
9484e0759a25917252dbaf40668b3faf89092690
|
4de43fdf9696edacbc8ebe2fba70450e8c45c209
|
refs/heads/master
| 2021-08-04T10:39:18.746093
| 2021-07-12T04:40:09
| 2021-07-12T04:40:09
| 215,233,961
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,258
|
r
|
create_grid.R
|
# create grid for fore-c map ----------------------------------------------------
library(raster)
library(leaflet)

# Read in reef mask CSV. 0 = water, 1 = reef
# dims are 3600 rows (lats) 7200 cols (lons)
reefs <- read.csv("Data/Reef_grid/reef_plus_survey_gbr_deep_reef_remove_20210308.csv", header = FALSE)
regions_df <- read.csv("Data/big_region_coords.csv")

# Make 5km Grid
# cell-center coordinates of a global 0.05-degree grid
lon5km <- -179.975+0.05*seq(0,7199) # Columns
lat5km <- -89.975+0.05*seq(0,3599) # Rows

# Get reef pixel Lats and Lons
# matrix indices of reef cells -> lat/lon of those cell centers
inds <- which(reefs == 1, arr.ind=TRUE)
reefLat <- lat5km[inds[,1]]
reefLon<- lon5km[inds[,2]]
reefsDF <- data.frame("Longitude" = reefLon, "Latitude" = reefLat)
# Subset df to the points strictly inside the given lat/lon bounding box,
# tag them with the region name, and assign sequential numeric IDs whose
# prefix is the region number (e.g. region 3 -> 30001, 30002, ...).
set_regional_ids <- function(df, minLat, maxLat, minLon, maxLon, regionName, regionNumber){
  in_box <- df$Latitude > minLat & df$Latitude < maxLat &
    df$Longitude > minLon & df$Longitude < maxLon
  region_px <- df[which(in_box), ]
  region_px$Region <- regionName
  first_id <- as.numeric(paste0(as.character(regionNumber), "0001"))
  region_px$ID <- seq(first_id, first_id + nrow(region_px) - 1)
  region_px
}
# Tag each reef pixel with its region and region-specific ID, creating one
# data frame per region (named after the region via assign()).
for(i in 1:nrow(regions_df)){
  x <- set_regional_ids(reefsDF, regions_df$lat1[i], regions_df$lat2[i], regions_df$lon1[i], regions_df$lon2[i], regions_df$region[i], regions_df$region_id[i])
  assign(regions_df$region[i], x)
  x <- NULL
}

# combine above datasets
reefsDF <- do.call(rbind, list(gbr, guam_cnmi, hawaii, johnston, prias, samoas, wake))

# save grid as csv
# NOTE(review): despite the comment, this saves an .RData file, not a CSV.
save(reefsDF, file = "Compiled_data/grid.RData")

# to display over antimeridian in leaflap maps, add +360 to longitudes below zero
reefsDF$Longitude <- ifelse(reefsDF$Longitude < 0, reefsDF$Longitude + 360, reefsDF$Longitude)

# create raster from point data
reefsDF2 <- rasterFromXYZ(reefsDF, crs = "+init=epsg:4326")

# add sim prev data
# simulated disease-risk values (clipped at zero) for testing the display
reefsDF$drisk <- rnorm(nrow(reefsDF), mean = 0.10, sd = 0.05)
reefsDF$drisk[reefsDF$drisk < 0] <- 0
rr <- rasterize(reefsDF[,c("Longitude", "Latitude")], reefsDF2, field = reefsDF[,c("ID", "drisk")])

# create spatial polygon from raster
reefs2 <- as(rr, "SpatialPolygonsDataFrame") # reefsDF2 go back to this when removing simulated prevalence

# save spatial polygon
save(reefs2, file = "Compiled_data/spatial_grid.Rds")

# check map
leaflet() %>%
  addTiles(group = "OpenStreetMap") %>%
  addPolygons(data = reefs2)
|
bcdfc4d973582c9d1c0d1ec314decb38b44dd4e1
|
402733edf0ee3946671f7c8272b9fbd51f21420e
|
/good_data/frog_reg7/check_outlier.r
|
59f682744f188902f3b2eb8cd608680da484807c
|
[] |
no_license
|
amphibian-exeff/vanmeter_bodyburdens_envpoll2014
|
a55cb6cf5fb2ce793c9a23f5033bd0d3705b5273
|
ddfeaf59f7408f718010e75d6c053acd768ccdb5
|
refs/heads/master
| 2023-07-12T12:48:26.664855
| 2021-08-09T03:59:00
| 2021-08-09T03:59:00
| 41,526,957
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,176
|
r
|
check_outlier.r
|
library(mvoutlier)

# NOTE(review): hard-coded Windows/Dropbox path; it also points at
# frog_reg6 while this file lives under frog_reg7 — confirm which dataset
# is intended.
frog_root<-"D:\\Dropbox\\Robin_data_test\\frog_reg6\\" #PC path
setwd(frog_root)
raw_data<-read.table(paste(frog_root,"good_data.csv",sep=""), header = TRUE, sep = ",") #Read data into a variable

raw_data_t1 = raw_data[which(raw_data$good==1),] #select good data
Chemical_selec_index=is.element(raw_data_t1$Chemical,c('Imidacloprid','Pendimethalin','Total Atrazine',
                                                       'Total Fipronil','Total Triadimefon'))
raw_data_t2 = raw_data_t1[Chemical_selec_index,] #select only five chemicals
row.names(raw_data_t2)<-NULL
raw_data_t2$bodyweight2=raw_data_t2$bodyweight/raw_data_t2$X #average the weight by number of frogs
raw_data_t2$SA_cm2_2=raw_data_t2$SA_cm2/raw_data_t2$X #average the surface area by number of frogs

# keep only the modelling variables
mlrfrog = subset(raw_data_t2, select=c(Species, app_rate_g_cm2, Chemical, AppFactor, TissueConc, SoilConc, logKow, BCF, VapPrs_mPa, Koc_Lab,
                                       HabFac, Hlaw, bodyweight2, SA_cm2_2, molmass_gmol, Solat20C_mgL, Density_gcm3))

# log-transform concentration/partition columns and rename them (renames
# are by column position, so they depend on the subset() order above)
mlrfrog$Koc_Lab=as.numeric(as.character(mlrfrog$Koc_Lab)) #convert factors to numerics
mlrfrog$Koc_Lab=log(mlrfrog$Koc_Lab)
names(mlrfrog)[10]='logkoc'
mlrfrog$AppFactor=log(mlrfrog$AppFactor)
names(mlrfrog)[4]='logAppFactor'
mlrfrog$SoilConc=log(mlrfrog$SoilConc)
names(mlrfrog)[6]='logSoilConc'
mlrfrog$TissueConc=log(mlrfrog$TissueConc)
names(mlrfrog)[5]='logTissueConc'
mlrfrog$BCF=log(mlrfrog$BCF)
names(mlrfrog)[8]='logBCF'
# Flag multivariate outliers in (logAppFactor, logBCF) for one chemical:
# adds an `outliers` flag from mvoutlier::aq.plot() (adjusted quantile
# plot, alpha = 0.01) and a robust Mahalanobis distance column `MD` based
# on the MCD covariance estimate (alpha = 0.5).
#
# Replaces five copy-pasted per-chemical blocks with a single helper.
flag_chemical_outliers <- function(df, chemical) {
  chem_data <- df[which(df$Chemical == chemical), ]
  scores <- subset(chem_data, select = c(logAppFactor, logBCF))
  chem_data$outliers <- aq.plot(scores, alpha = 0.01)$outliers
  covr <- covMcd(scores, alpha = 0.5)
  chem_data$MD <- mahalanobis(scores, center = covr$center, cov = covr$cov)
  chem_data
}

data_Imi <- flag_chemical_outliers(mlrfrog, "Imidacloprid")
# (exploratory chi-square quantile code kept from the original; it
# referred to the former per-chemical globals and was already disabled)
# s <- sort(dist, index = TRUE)
# q <- (0.5:length(dist))/length(dist)
# qchi <- qchisq(q, df = 2)
# data_Imi$qchi=qchi

data_Pen <- flag_chemical_outliers(mlrfrog, "Pendimethalin")
data_Atr <- flag_chemical_outliers(mlrfrog, "Total Atrazine")
data_Fip <- flag_chemical_outliers(mlrfrog, "Total Fipronil")
data_Tri <- flag_chemical_outliers(mlrfrog, "Total Triadimefon")
|
f289528b79725572a209354368d30651dcf205aa
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/ade4/examples/fission.Rd.R
|
b27459ec33538c58d3d3a80e06c289ced2e16946
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 319
|
r
|
fission.Rd.R
|
library(ade4)
### Name: fission
### Title: Fission pattern and heritable morphological traits
### Aliases: fission
### Keywords: datasets
### ** Examples
# Build a phylog object from the Newick tree, plot the trait table against
# the phylogeny (rows ordered by leaf order), then test for phylogenetic
# autocorrelation with the Geary-Moran statistic.
data(fission)
fis.phy <- newick2phylog(fission$tre)
table.phylog(fission$tab[names(fis.phy$leaves),], fis.phy, csi = 2)
gearymoran(fis.phy$Amat, fission$tab)
|
a2646b4ba7975ee90e2dc64112d24904d54c2894
|
fb44f713c154e24a8059d47ec5d2d57d3c36e817
|
/Lab7/2.3.r
|
c9f7b741cd1f6c32dc3871c57c755b537f6a2bae
|
[] |
no_license
|
danghermang/ps
|
98448f6e6975cdb11306298fa6593890b61d943d
|
8f8badc78144beead27f0f09b056c267b068708b
|
refs/heads/master
| 2020-06-21T08:09:17.257427
| 2019-07-17T13:05:23
| 2019-07-17T13:05:23
| 197,391,140
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 433
|
r
|
2.3.r
|
# Chi-square goodness-of-fit (multinomial) test.
#
# Args:
#   alfa: significance level (e.g. 0.05).
#   o:    vector of observed counts per category.
#   p:    vector of hypothesised category probabilities.
#
# Prints the critical value and the test statistic, then prints the
# decision; returns the decision string invisibly (print() returns its
# argument invisibly).
multinomial_test <- function(alfa, o, p) {
  # guard against mismatched inputs (silently wrong E otherwise)
  stopifnot(length(o) == length(p))
  s <- length(o)                 # number of categories
  n <- sum(o)                    # total number of observations
  E <- n * p                     # expected counts under H0
  critical_Chi_square <- qchisq(1 - alfa, s - 1)
  Chi_square_score <- sum((o - E)^2 / E)
  print(critical_Chi_square)
  print(Chi_square_score)
  if (Chi_square_score > critical_Chi_square) {
    print("Ipoteza nula respinsa")        # "null hypothesis rejected"
  } else {
    print("Nu se poate respinge ipoteza")  # "cannot reject the hypothesis"
  }
}

o <- c(21, 36, 12, 19)
p <- c(0.16, 0.44, 0.27, 0.13)
multinomial_test(0.05, o, p)
multinomial_test(0.01, o, p)
|
efd72143d2119f578e10ccd8a01b4555beed146f
|
b2f61fde194bfcb362b2266da124138efd27d867
|
/code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1+A1/Database/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/falsequ_falsequ_1344/falsequ_falsequ_1344.R
|
c492c6da3f7406b2587cbd8406907aaed634a62e
|
[] |
no_license
|
arey0pushpa/dcnf-autarky
|
e95fddba85c035e8b229f5fe9ac540b692a4d5c0
|
a6c9a52236af11d7f7e165a4b25b32c538da1c98
|
refs/heads/master
| 2021-06-09T00:56:32.937250
| 2021-02-19T15:15:23
| 2021-02-19T15:15:23
| 136,440,042
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 65
|
r
|
falsequ_falsequ_1344.R
|
0726a4c6f669df200ada40747e772389 falsequ_falsequ_1344.qdimacs 1 1
|
1f76a573d1dff9dc671193c097d6555b7d8f387e
|
8ef3e422e2f5618fdb0bcb99574cc771962ee99e
|
/man/Indcpr.Rd
|
957547f7b2f33d245eb3b3a89078879f680f8e09
|
[] |
no_license
|
cran/tdm
|
f09134e94f9fb4d60133486d72dc84e1c80f99d2
|
293cc0a7e6d9641de189077cb5f46f6c146b5689
|
refs/heads/master
| 2016-08-03T21:51:21.507848
| 2014-12-16T00:00:00
| 2014-12-16T00:00:00
| 17,700,401
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 228
|
rd
|
Indcpr.Rd
|
\encoding{UTF-8}
\name{Indcpr}
\alias{Indcpr}
\title{Equation for the calculated concentration of indinavir at steady-state}
\description{
  Equation for the calculated steady-state concentration of indinavir.
}
\keyword{misc}
|
24097eb061397edba936784b15c7b2a074d37a1b
|
c38d689c67e668c1bb7b04002eadfbcdffd72d5f
|
/plot4.R
|
15ea69c29b20f32e81d2f6a9f9966df28a27c2cb
|
[] |
no_license
|
jefriyang/ExData_Plotting1
|
1c3b5b7f958bc4fef8df0e717123ee424ba60c7c
|
54b83b26d099dc3ec20d28ee2e6cc2b448ca16e9
|
refs/heads/master
| 2021-01-15T18:41:29.999528
| 2015-02-08T06:46:38
| 2015-02-08T06:46:38
| 30,439,289
| 0
| 0
| null | 2015-02-07T00:23:08
| 2015-02-07T00:23:07
| null |
UTF-8
|
R
| false
| false
| 1,364
|
r
|
plot4.R
|
# Reproduce "plot4.png": a 2x2 panel of household power-consumption charts
# for 2007-02-01 and 2007-02-02.

#Read Data (semicolon-separated; "?" marks missing values)
file <- "./household_power_consumption.txt"
obs <- read.table(file, header=TRUE,sep=";",na.strings="?", stringsAsFactors = FALSE)

# Convert to Date/Time format
obs$Date <- as.Date(obs$Date, "%d/%m/%Y")

#Subset Data
# Select the two target days in one vectorized pass (the original built
# the same rows via rbind() of two separate subset() calls).
reqobs <- subset(obs, Date %in% as.Date(c("2007-02-01", "2007-02-02")))

#Add new DateTime Column for the x axes
DateTime <- paste(reqobs$Date, reqobs$Time)
DateTime <- as.POSIXct(DateTime, format="%Y-%m-%d %H:%M:%S")
reqobs <- cbind(reqobs, DateTime)

#Plot linechart and save to png file
png(file="plot4.png",bg="transparent")
par(mfcol=c(2,2))  # fill column-wise: left column first, top to bottom

#topleft plot
plot(reqobs$DateTime,reqobs$Global_active_power,type="l",
     xlab="",ylab="Global Active Power")

#bottomleft plot
# empty frame first, then one line per sub-meter
plot(reqobs$DateTime,reqobs$Sub_metering_1,type="n",
     xlab="",ylab="Energy sub metering")
lines(reqobs$DateTime,reqobs$Sub_metering_1,col="black")
lines(reqobs$DateTime,reqobs$Sub_metering_2,col="red")
lines(reqobs$DateTime,reqobs$Sub_metering_3,col="blue")
legend("topright",lty=c(1,1,1),col=c("black","red","blue"),
       legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),text.font=2,cex=0.75,bty="n")

#topright plot
plot(reqobs$DateTime,reqobs$Voltage,xlab="datetime",ylab="Voltage",type="l")

#bottomright plot
plot(reqobs$DateTime,reqobs$Global_reactive_power,xlab="datetime",ylab="Global_reactive_power",type="l")

dev.off()  # close the PNG device so the file is flushed to disk
|
db00d6fb988b4a751ef0eec5108eddb30e589d5c
|
23d811eebd90cd24b622e8ef808ce09eb943e05f
|
/melt_and_munge_cases-and-interventions.R
|
aee511cf01d3f4223005c98d60517d7c47228572
|
[] |
no_license
|
stevenpollack/TMR
|
6c8fda79bbc733d33332832a06d5dfcf28385cfb
|
c98b5a877ee5de4c69d90be7fc2576a0817b1594
|
refs/heads/master
| 2022-05-28T04:11:15.358636
| 2020-05-01T03:26:14
| 2020-05-01T03:26:14
| 256,673,786
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,740
|
r
|
melt_and_munge_cases-and-interventions.R
|
library(googlesheets4)
library(reshape2)
library(dplyr)
library(stringr)
library(data.table)
library(lubridate)

# Pull COVID case/intervention worksheets from a Google Sheet, melt each
# wide region-per-column sheet into long format, join them on Date+region,
# and write the combined long table to CSV and back to the sheet.
sheets_auth()
ssID <- "1gR03lPiOBDiQOd3XFhSkYWJWi1vG8Wf_J-nmtSRaXbk"
worksheets <- c("daily new infections", "interventions", "melted viz data")

dailyNewInfections <- read_sheet(ss = ssID,sheet = worksheets[1])
interventions <- read_sheet(ss = ssID, sheet = worksheets[2])

epidemicStart <- ymd("2020-01-24")  # NOTE(review): defined but unused below

idVars <- c("Date")
keyVars <- c(idVars, "region")
regions <- c("NSW", "WA", "VIC", "SA", "ACT", "NT", "QLD", "TAS", "AUS")
newInfections <- str_c(regions, c(" new"))  # "NSW new", "WA new", ...

# cumulative confirmed cases, long format keyed by Date+region
totalCases <- reshape2::melt(
  dailyNewInfections,
  measure.vars = regions,
  id.vars = idVars,
  variable.name = "region",
  value.name = "total confirmed cases"
) %>% data.table(key = keyVars)

# daily new cases (from the "<region> new" columns)
newDailyCases <- reshape2::melt(
  dailyNewInfections,
  measure.vars = newInfections,
  id.vars = idVars,
  variable.name = "region",
  value.name = "new cases"
) %>% data.table(., key = keyVars)

# strip "new" from levels of "region"
levels(newDailyCases$region) <- regions

meltedInterventions <- reshape2::melt(
  interventions,
  measure.vars = regions,
  id.vars = idVars,
  variable.name = "region",
  value.name = "interventions"
) %>% data.table(., key = keyVars)

# NOTE(review): the result of this keyed data.table join is discarded;
# the equivalent join is redone with left_join below — confirm this line
# is only a leftover interactive check.
totalCases[newDailyCases[meltedInterventions]]

totalNewAndInterventions <-
  left_join(totalCases, newDailyCases, by = keyVars) %>%
  left_join(y = meltedInterventions, by = keyVars) %>%
  data.table

totalNewAndInterventions %>%
  write.csv(file="./TMR stuff/TMR/covid-19/code/meltedData.csv",
            row.names = FALSE)

# write melted data to gsheet
ss <- googledrive::drive_get(id=ssID)
sheet_write(data = totalNewAndInterventions, ss = ss, sheet = worksheets[3])
|
d4322a3d179ca4fdc0359deaf1087a726756caac
|
ef9ffa3aebc43e28724a7ccb22f1839932a4818b
|
/listdemo-R.R
|
b087c1d532df330835f5339985588a19a8a51d0d
|
[] |
no_license
|
KJROH/proj180714
|
341fd2824aa5d943fced3aaeaa3dd5b99de1dc09
|
2c48b126fe582d50d7ca2499c13bb88174533440
|
refs/heads/master
| 2020-03-23T01:44:12.861540
| 2018-07-14T09:41:34
| 2018-07-14T09:41:34
| 140,934,311
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 396
|
r
|
listdemo-R.R
|
# list is recursive vectors
# vectors is atomic vectors
# point (scalar) -> a return(......)
# line - plane - cube (dimension analogy)
## p81
x <- c(1,2,3)
x
# c() coerces mixed types, so every element becomes character here
x <- c("홍길동",175,6,5000,T)
# list() keeps each element's own type
lst <- list("홍길동",175,6,5000,T)
x
lst
lst[[1]]  # [[ extracts the element itself, not a one-element sub-list
lst <- list(name="홍길동",height=175,6,sal=5000,
            employee=T) # metadata=attribute=column
lst
lst$name # $ = pick only the named element inside the list
length(lst)
|
8e1a74f5eedcc1392a86bfa75bc27635d3a0832e
|
9800983fb761b6cd588c93c719a97f3c9269d18b
|
/man/getTensorList.Rd
|
e694efc1a43ae5924e8f319d834a92b313c3859c
|
[] |
no_license
|
priscillafialho/RcppSimpleTensor
|
551f10d4115ddad928ebc76422961c9574934501
|
358d491e6a2c8c4f630850993108ca2cc5e84b03
|
refs/heads/master
| 2021-01-12T22:21:19.781510
| 2013-08-29T12:39:19
| 2013-08-29T12:39:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 296
|
rd
|
getTensorList.Rd
|
\name{getTensorList}
\alias{getTensorList}
\title{Returns the list of currently available compiled C++ tensors}
\usage{
getTensorList()
}
\description{
  Returns the list of currently available compiled C++
tensors
}
\examples{
getTensorList()
}
\keyword{compile}
\keyword{cpp}
\keyword{tensor}
|
8d669240cad5e9857606a5a96e78df06cb02ae34
|
2d34708b03cdf802018f17d0ba150df6772b6897
|
/googlepredictionv15.auto/man/Analyze.Rd
|
198480555be087c461a113f98487f6ae7c544cea
|
[
"MIT"
] |
permissive
|
GVersteeg/autoGoogleAPI
|
8b3dda19fae2f012e11b3a18a330a4d0da474921
|
f4850822230ef2f5552c9a5f42e397d9ae027a18
|
refs/heads/master
| 2020-09-28T20:20:58.023495
| 2017-03-05T19:50:39
| 2017-03-05T19:50:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 3,897
|
rd
|
Analyze.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/prediction_objects.R
\name{Analyze}
\alias{Analyze}
\title{Prediction API Objects
Lets you access a cloud hosted machine learning service that makes it easy to build smart apps}
\usage{
Analyze(Analyze.dataDescription = NULL,
Analyze.dataDescription.features = NULL,
Analyze.dataDescription.features.categorical = NULL,
Analyze.dataDescription.features.categorical.values = NULL,
Analyze.dataDescription.features.numeric = NULL,
Analyze.dataDescription.features.text = NULL,
Analyze.dataDescription.outputFeature = NULL,
Analyze.dataDescription.outputFeature.numeric = NULL,
Analyze.dataDescription.outputFeature.text = NULL,
Analyze.modelDescription = NULL,
Analyze.modelDescription.confusionMatrix = NULL,
Analyze.modelDescription.confusionMatrixRowTotals = NULL,
dataDescription = NULL, errors = NULL, id = NULL,
modelDescription = NULL, selfLink = NULL)
}
\arguments{
\item{Analyze.dataDescription}{The \link{Analyze.dataDescription} object or list of objects}
\item{Analyze.dataDescription.features}{The \link{Analyze.dataDescription.features} object or list of objects}
\item{Analyze.dataDescription.features.categorical}{The \link{Analyze.dataDescription.features.categorical} object or list of objects}
\item{Analyze.dataDescription.features.categorical.values}{The \link{Analyze.dataDescription.features.categorical.values} object or list of objects}
\item{Analyze.dataDescription.features.numeric}{The \link{Analyze.dataDescription.features.numeric} object or list of objects}
\item{Analyze.dataDescription.features.text}{The \link{Analyze.dataDescription.features.text} object or list of objects}
\item{Analyze.dataDescription.outputFeature}{The \link{Analyze.dataDescription.outputFeature} object or list of objects}
\item{Analyze.dataDescription.outputFeature.numeric}{The \link{Analyze.dataDescription.outputFeature.numeric} object or list of objects}
\item{Analyze.dataDescription.outputFeature.text}{The \link{Analyze.dataDescription.outputFeature.text} object or list of objects}
\item{Analyze.modelDescription}{The \link{Analyze.modelDescription} object or list of objects}
\item{Analyze.modelDescription.confusionMatrix}{The \link{Analyze.modelDescription.confusionMatrix} object or list of objects}
\item{Analyze.modelDescription.confusionMatrixRowTotals}{The \link{Analyze.modelDescription.confusionMatrixRowTotals} object or list of objects}
\item{dataDescription}{Description of the data the model was trained on}
\item{errors}{List of errors with the data}
\item{id}{The unique name for the predictive model}
\item{modelDescription}{Description of the model}
\item{selfLink}{A URL to re-request this resource}
}
\value{
Analyze object
}
\description{
Auto-generated code by googleAuthR::gar_create_api_objects
at 2017-03-05 20:06:39
filename: /Users/mark/dev/R/autoGoogleAPI/googlepredictionv15.auto/R/prediction_objects.R
api_json: api_json
}
\details{
Objects for use by the functions created by googleAuthR::gar_create_api_skeleton
Analyze Object
Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
No description
}
\seealso{
Other Analyze functions: \code{\link{Analyze.dataDescription.features.categorical.values}},
\code{\link{Analyze.dataDescription.features.categorical}},
\code{\link{Analyze.dataDescription.features.numeric}},
\code{\link{Analyze.dataDescription.features.text}},
\code{\link{Analyze.dataDescription.features}},
\code{\link{Analyze.dataDescription.outputFeature.numeric}},
\code{\link{Analyze.dataDescription.outputFeature.text}},
\code{\link{Analyze.dataDescription.outputFeature}},
\code{\link{Analyze.dataDescription}},
\code{\link{Analyze.modelDescription.confusionMatrixRowTotals}},
\code{\link{Analyze.modelDescription.confusionMatrix}},
\code{\link{Analyze.modelDescription}}
}
|
6d6a4950813f006431487905baa30ef27b4e2ef4
|
6c7f2ba2dc6618164b635995e65c07c9efc3504f
|
/TP01/scripts/notes.r
|
cece3c8bd296653f17331aef0b29bc26aaa6d2d4
|
[] |
no_license
|
laetibouud/SY09
|
b37f564325d0c805c5550053cc83e1fdd28c7390
|
fc6239459ceb0251f30dd7891a3e8f278fdac3d3
|
refs/heads/master
| 2020-12-31T00:12:40.750167
| 2017-03-29T08:34:31
| 2017-03-29T08:34:31
| 86,553,430
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,406
|
r
|
notes.r
|
# Exploratory analysis of SY02 exam marks (sy02-p2016.csv, one row per
# student). Empty strings are read as NA; columns are normalised to factors.
notes <- read.csv("sy02-p2016.csv", na.strings="", header=T)
# Keep student names in file order (assumes names are unique -- confirm).
notes$nom <- factor(notes$nom, levels=notes$nom)
# Midterm and final graders share a fixed level set so the two columns are
# directly comparable (and concatenable below).
notes$correcteur.median <- factor(notes$correcteur.median, levels=c("Cor1","Cor2","Cor3","Cor4","Cor5","Cor6","Cor7","Cor8"))
notes$correcteur.final <- factor(notes$correcteur.final, levels=c("Cor1","Cor2","Cor3","Cor4","Cor5","Cor6","Cor7","Cor8"))
# Study level and final letter grade are ordinal (F < Fx < ... < A).
notes$niveau <- factor(notes$niveau, ordered=T)
notes$resultat <- factor(notes$resultat, levels=c("F","Fx","E","D","C","B","A"), ordered=T)
# Plot 1: final mark by speciality; legend drawn outside the plot region
# (xpd) in the widened right margin.
x11()
par(mar=c(5, 4, 4, 8), xpd=TRUE)
boxplot(notes$note.total~notes$specialite,col=c("blue","pink","green","yellow","red","purple","white","orange","cyan3"), main="Notes finales des etudiants en fonction
de leur branche")
legend(11,16,xpd=NA,inset=0.05, title="Legende",c("GB", "GI","GM", "GP","GSM" ,"", "Hutech","","TC"),
fill=c("blue","pink","green","yellow","red","purple","white","orange","cyan3"), horiz=FALSE)
# Plot 2: grouped bar chart of final grade by last diploma obtained
# (restricted to BAC / CPGE / DUT rows of the contingency table).
x11()
counts <- table(notes$dernier.diplome.obtenu, notes$resultat)
selectcounts <- counts[c("BAC","CPGE","DUT"),]
barplot(selectcounts, main="Notes finales de SY02 en fonction du
dernier diplome obtenu de l'etudiant",
xlab="Note finale", col=c("darkblue","cyan3","pink"),
legend = rownames(selectcounts), beside=TRUE)
# Plot 3: marks (midterm then final stacked into one vector) by grader.
# NOTE(review): c() on two factors relies on R >= 4.1 semantics to keep
# levels; on older R this yields integer codes -- confirm the target R version.
x11()
N <- c(notes$note.median,notes$note.final)
C <- c(notes$correcteur.median,notes$correcteur.final)
boxplot(N~C)
|
81831f3d7d1d7b7c4dfcfeadb54d8820409c7d94
|
a00b74d82bfad4d0c6d432304e4deb3b30b455a8
|
/old_or_unused/reproduction_methods.R
|
429e509e9ba4225f0080db69f8f83f1cc7df954e
|
[] |
no_license
|
pilarcormo/fragmented_genome_with_snps
|
e97b02272dcfbd887a0e44232ff66a0cd54d7edb
|
daeb1883a4f8f2a4c8096a62efd1e69cc0257a4e
|
refs/heads/master
| 2021-01-18T21:22:54.370306
| 2014-12-28T12:16:43
| 2014-12-28T12:16:43
| 24,980,602
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 550
|
r
|
reproduction_methods.R
|
#' Plot per-generation fitness values with standard-error crossbars and save
#' the figure as a PNG under the user's home directory.
#'
#' @param x_gen Generation labels, one per point; coerced to a factor.
#' @param y_fits Fitness values to plot on the y axis.
#' @param se Standard error of each value (crossbar half-height).
#' @param group Grouping variable mapped to colour; coerced to a factor.
#' @param location Sub-path under "~/" to write into (include trailing "/").
#' @param filename Output file name, without the ".png" extension.
#' @return The value of ggsave(), invisibly; called for its side effect.
reproduction_plot <- function(x_gen, y_fits, se, group, location, filename){
  # Load ggplot2 from the default library search path. The previous
  # hard-coded lib.loc pointed at one machine's R 3.0 installation and made
  # the function fail everywhere else.
  library(ggplot2)
  df <- data.frame(
    generation = factor(x_gen),
    y_fits = y_fits,
    group = factor(group),
    se = se
  )
  # Crossbar limits: mean +/- one standard error.
  limits <- aes(ymax = y_fits + se, ymin = y_fits - se)
  p <- ggplot(df, aes(colour = group, y = y_fits, x = generation)) +
    geom_crossbar(limits, width = 0.2)
  # Same output path as before: ~/<location><filename>.png
  ggsave(p, file = paste0("~/", location, filename, ".png"))
}
st_err <- function(a) sd(a)/sqrt(length(a))
|
34552075e3f6dcedac58f31f169b87e63d769ed4
|
094f81c31a3cfd560b24280e476d5af4fb52b9e3
|
/R/fixviolation.R
|
6d5a7ce2a223d078525c8d1da2bb4c95583f1e40
|
[
"MIT"
] |
permissive
|
PJOssenbruggen/Basic
|
6c2343dcb135cb364d059160925ded5cb43b5455
|
1885fa40d3318cc554b4dd80154b263baef19ac4
|
refs/heads/master
| 2021-01-25T11:57:19.583401
| 2019-01-04T13:03:32
| 2019-01-04T13:03:32
| 123,449,454
| 0
| 0
| null | 2018-03-05T12:26:55
| 2018-03-01T14:56:48
|
R
|
UTF-8
|
R
| false
| false
| 11,031
|
r
|
fixviolation.R
|
#' \code{fixviolation} produces a safe-headway trajectory for a following vehicle
#'
#' @return \code{fixviolation} creates a following-vehicle trajectory that satisfies the safe headway rule.
#' @param veh vehicle, a number
#' @param zone from \code{dfcrit} matrix for vehicle \code{veh}, a number
#' @param df1df2 leading and following vehicle information, a matrix
#' @param dfcrit critical times, a matrix
#' @param delt time-step, a number
#' @param tend.0 end time for over the long time range, a number
#' @param leff effective vehicle length, a number
#' @param xfunnel location where the lane drop is located, a number
#' @usage fixviolation(veh, zone, df1df2, dfcrit, delt, tend.0, leff, xfunnel)
# #' @examples
# #' fixviolation(veh, zone, df1df2, dfcrit, delt, tend.0, leff, xfunnel)
#' @export
fixviolation <- function(veh, zone, df1df2, dfcrit, delt, tend.0, leff, xfunnel) {
  # NOTE(review): findviolation(), hsafe(), xabparam(), xab() and uab() are
  # package helpers defined elsewhere. The arguments `zone` and `xfunnel`
  # are accepted but never referenced in this body -- confirm before
  # removing or relying on them.
  # create df1 and df2
  # df1df2 stores 3 columns per vehicle (apparently speed u, position x and
  # a third column y). df1 is the leader (veh - 1), df2 the follower (veh);
  # both get the common time grid t as their first column.
  ucol <- 3*(veh-1) - 2
  xcol <- 3*(veh-1) - 1
  ycol <- 3*(veh-1)
  df1 <- cbind(df1df2[,ucol], df1df2[,xcol], df1df2[,ycol])
  ucol. <- 3*veh - 2
  xcol. <- 3*veh - 1
  ycol. <- 3*veh
  df2 <- cbind(df1df2[,ucol.], df1df2[,xcol.], df1df2[,ycol.])
  t <- seq(0,tend.0,delt)
  df1 <- cbind(t,df1)
  df2 <- cbind(t,df2)
  # Positions of leader and follower at the vehicle's critical time
  # (dfcrit column 4). xcrit1./xcrit2. are computed but not used below.
  tcrit. <- as.numeric(dfcrit[veh,4])
  xcrit1. <- df1[df1[,1]== tcrit.,3]
  xcrit2. <- df2[df2[,1]== tcrit.,3]
  leadveh <- veh - 1
  # NOTE(review): each iteration overwrites df1., so only the last leader's
  # trajectory survives this loop -- and df1. is never read afterwards.
  # Confirm whether this loop is vestigial.
  for(v in 1:leadveh) {
    if(v == 1) {
      ucol <- 1
      xcol <- 2
      ycol <- 3
      df1. <- cbind(df1df2[,ucol], df1df2[,xcol], df1df2[,ycol])
      df1. <- cbind(t,df1.)
    } else {
      ucol <- 3*v - 2
      xcol <- 3*v - 1
      ycol <- 3*v
      df1. <- cbind(df1df2[,ucol], df1df2[,xcol], df1df2[,ycol])
      df1. <- cbind(t,df1.)
    }
  }
  # Zone 2 #############################################################################
  # Count headway violations in zone 3 (dfcrit cols 4-5) and zone 2
  # (dfcrit cols 3-4). If neither zone has violations, the follower's
  # trajectory is already safe and is returned unchanged.
  df2.fix <- df2
  zone.df <- NA
  tstart <- as.numeric(dfcrit[veh,4])
  tend <- as.numeric(dfcrit[veh,5])
  X3 <- findviolation(tstart, tend, tend.0, df1[,-1], df2.fix[,-1], delt, leff)[,1]
  tcrit3 <- findviolation(tstart, tend, tend.0, df1[,-1], df2.fix[,-1], delt, leff)[1,6]
  tstart <- as.numeric(dfcrit[veh,3])
  tend <- as.numeric(dfcrit[veh,4])
  X2 <- findviolation(tstart, tend, tend.0, df1[,-1], df2.fix[,-1], delt, leff)[,1]
  tcrit2 <- findviolation(tstart, tend, tend.0, df1[,-1], df2.fix[,-1], delt, leff)[1,6]
  crittable <- data.frame(t1 = as.numeric(dfcrit[veh,3]), tcrit2, sum.X2 = sum(X2), tcrit3, sum.X3 = sum(X3))
  if(sum(X2) == 0 & sum(X3) == 0) return(df2.fix[,-1])
  # Zone 2 df2.fix
  # Iteratively shrink the follower's start speed (fraction sweep, steps of
  # 0.001) and re-fit a smooth speed/position curve via xabparam()/xab()/
  # uab() until findviolation() reports no violations (or no improvement).
  if(sum(X2) > 0) {
    fraction <- seq(1,0,-0.001)
    tseq <- seq(tstart,tend,delt)
    for(j in 1:length(fraction)) {
      X <- findviolation(tstart, tend, tend.0, df1[,-1], df2.fix[,-1], delt, leff)[,1]
      tcrit <- findviolation(tstart, tend, tend.0, df1[,-1], df2.fix[,-1], delt, leff)[1,6]
      sum.X <- sum(X)
      if(j == 1) {
        # First pass: anchor the boundary conditions. If an endpoint itself
        # violates the headway rule, pull it back to a safe gap hsafe()
        # behind the leader.
        ustart <- as.numeric(df2.fix[df2.fix[,1] == tstart,2])
        xstart <- as.numeric(df2.fix[df2.fix[,1] == tstart,3])
        if(X[1] == 1) xstart <- xstart - hsafe(ustart, leff)
        uend <- as.numeric(df2.fix[df2.fix[,1] == tend,2])
        xend <- as.numeric(df2.fix[df2.fix[,1] == tend,3])
        xlen <- length(X)
        if(X[xlen] == 1) {
          uend <- as.numeric(df1[df1[,1] == tend,2])
          xend <- as.numeric(df1[df1[,1] == tend,3]) - hsafe(uend, leff)
        }
        if(X[1] == 1) {
          ustart <- as.numeric(df1[df1[,1] == tstart,2])
          xstart <- as.numeric(df1[df1[,1] == tstart,3]) - hsafe(ustart, leff)
        }
      } else {
        # Subsequent passes: damp the start speed and back off the end point.
        ustart <- ustart * fraction[j]
        xend <- xend - hsafe(ustart, leff)
      }
      ab <- xabparam(tstart, tend, ustart, uend, xstart, xend)
      a <- ab[1]
      b <- ab[2]
      tseq <- seq(tstart,tend,delt)
      t0 <- tstart
      x.fix <- xab(xstart,ustart,a,b,t = tseq,t0)
      u.fix <- uab(ustart,a,b,t = tseq,t0)
      # Zone 2
      # Splice the re-fitted speed/position into the follower's trajectory.
      df2.fix[df2.fix[,1] >= tstart & df2.fix[,1] <= tend,2] <- u.fix
      df2.fix[df2.fix[,1] >= tstart & df2.fix[,1] <= tend,3] <- x.fix
      X <- findviolation(tstart, tend, tend.0, df1[,-1], df2.fix[,-1], delt, leff)[,1]
      tcrit <- findviolation(tstart, tend, tend.0, df1[,-1], df2.fix[,-1], delt, leff)[1,6]
      sum.X.rev <- sum(X)
      ustart <- u.fix[1]
      xstart <- x.fix[1]
      uend <- u.fix[length(tseq)]
      xend <- x.fix[length(tseq)]
      df.12 <- data.frame("Zone 2",j,tstart, tend, ustart, uend, xstart, xend)
      if(sum(X) == 0) {
        zone.df <- rbind(zone.df, data.frame(Zone = 2, tstart, tend, ustart, uend, xstart, xend))
        break
      }
      # Stop early when the revision no longer reduces the violation count.
      if(sum.X.rev == sum.X) {
        break
      }
    }
  }
  # Zone 3 #################################################################################
  # From the end of zone 2 onward, project the follower forward at constant
  # speed (x2sight) and find the first time `test` it would catch the
  # leader; fit a curve up to that point, then track the leader at a safe
  # gap for the remainder of the horizon.
  if(sum(X3) >= 0) {
    test <- NA
    tstart <- tend
    if(sum(X2) == 0) {
      # No zone-2 fix was applied: start from the last time the follower is
      # still upstream of position 0.
      tstart <- max(df2.fix[df2.fix[,3] <= 0,1])
      ustart <- as.numeric(df2.fix[df2.fix[,1] == tstart,2])
      xstart <- as.numeric(df2.fix[df2.fix[,1] == tstart,3])
    } else {
      ustart <- uend
      xstart <- xend
    }
    tseq <- seq(0,tend.0,delt)
    tlen <- length(tseq)
    x2sight <- rep(NA,tlen)
    for(i in 1:tlen) x2sight[i] <- xstart + ustart * (tseq[i] - tstart)
    viol <- findviolation(tstart, tend.0, tend.0, df1[,-1], df2.fix[,-1], delt, leff)
    df12 <- cbind(df1[,c(1:3)],df2.fix[,c(2,3)],x2sight)
    colnames(df12) <- c("t", "u1", "x1", "u2", "x2","x2sight")
    tseq <- seq(tstart,tend.0,delt)
    # dx2x1 <= 0 means the constant-speed projection has reached the leader.
    dx2x1 <- df12[,3] - df12[,6]
    df12 <- cbind(df12,dx2x1)
    df12. <- df12[df12[,1] > tstart,]
    if(tstart == tend.0) {
      tstart <- tend
      test <- Inf
    } else {
      if(length(df12.[df12.[,7] <= 0,1]) == 0) test <- tend.0 else test <- min(df12.[df12.[,7] <= 0,1], na.rm = TRUE)
      xtest <- as.numeric(df12[df12[,1] == test,6])
    }
    if(!is.infinite(test)) {
      ustart <- as.numeric(df2.fix[df2.fix[,1] == tstart,2])
      xstart <- as.numeric(df2.fix[df2.fix[,1] == tstart,3])
      tend <- test
      uend <- min(as.numeric(df1[df1[,1] == tend,2]))
      xend <- xtest - hsafe(uend,leff)
      uend2 <- as.numeric(df2.fix[df2.fix[,1] == tend,2])
      xend2 <- as.numeric(df2.fix[df2.fix[,1] == tend,3])
      if(tend == tend.0 & xend2 < xend) uend <- uend2
      if(tend == tend.0 & xend2 < xend) xend <- xend2
      if(tstart < tend) {
        ab <- xabparam(tstart, tend, ustart, uend, xstart, xend)
        a <- ab[1]
        b <- ab[2]
        tseq <- seq(tstart,tend,delt)
        t0 <- tstart
        x.fix <- xab(xstart,ustart,a,b,t = tseq,t0)
        u.fix <- uab(ustart,a,b,t = tseq,t0)
        tlen <- length(tseq)
        # Fallback: if the fitted curve moves backwards or produces a
        # negative speed, use constant-speed motion instead.
        if(x.fix[tlen] < xstart | min(u.fix) < 0) {
          for(i in 1:tlen) {
            u.fix[i] <- ustart
            x.fix[i] <- xstart + ustart * (tseq[i] - tstart)
          }
        }
        df2.fix[df2.fix[,1] >= tstart & df2.fix[,1] <= tend,2] <- u.fix
        df2.fix[df2.fix[,1] >= tstart & df2.fix[,1] <= tend,3] <- x.fix
      }
      # After `test`, follow the leader at a safe gap (position monotone,
      # same constant-speed fallback if a negative speed appears).
      tseq. <- seq(test,tend.0,delt)
      tlen. <- length(tseq.)
      if(test < tend.0) {
        u.fix <- x.fix <- rep(NA,tlen.)
        for(i in 1:tlen.) {
          u.fix[i] <- as.numeric(df1[df1[,1] == tseq.[i],2])
          x.fix[i] <- as.numeric(df1[df1[,1] == tseq.[i],3]) - hsafe(u.fix[i],leff)
        }
        for(i in 2:tlen.) {
          if(x.fix[i] < x.fix[i-1]) x.fix[i] <- x.fix[i-1]
        }
        if(min(u.fix) < 0) {
          for(i in 1:tlen.) {
            u.fix[i] <- ustart
            x.fix[i] <- xstart + ustart * (tseq.[i] - tstart)
          }
        }
        df2.fix[df2.fix[,1] >= test,2] <- u.fix
        df2.fix[df2.fix[,1] >= test,3] <- x.fix
      }
    }
    zone.df <- rbind(zone.df, data.frame(Zone = 3, tstart, tend, ustart, uend, xstart, xend))[-1,]
  }
  # Zone 1 ################################################################################
  # Re-fit the follower's trajectory from t = 0 up to the start of the
  # zone-2/3 fixes. With more than 5 dfcrit rows a single curve fit is used;
  # otherwise the fit is split at tcrit and the start speed is damped until
  # no violations remain.
  if(dim(dfcrit)[1] > 5) {
    tend <- tstart
    uend <- ustart
    xend <- xstart
    tstart <- 0
    ustart <- as.numeric(df2.fix[1,2])
    xstart <- as.numeric(df2.fix[1,3])
    tseq <- seq(tstart,tend,delt)
    ab <- xabparam(tstart, tend, ustart, uend, xstart, xend)
    a <- ab[1]
    b <- ab[2]
    t0 <- tstart
    x.fix <- xab(xstart,ustart,a,b,t = tseq,t0)
    u.fix <- uab(ustart,a,b,t = tseq,t0)
    df2.fix[df2.fix[,1] >= tstart & df2.fix[,1] <= tend,2] <- u.fix
    df2.fix[df2.fix[,1] >= tstart & df2.fix[,1] <= tend,3] <- x.fix
  } else {
    df.01 <- data.frame("Zone 1", NA, tstart, tend, ustart, uend, xstart, xend)
    fraction <- seq(1,0,-0.001)
    tend <- tstart
    uend <- ustart
    xend <- xstart
    tstart <- 0
    ustart <- as.numeric(df2.fix[1,2])
    xstart <- as.numeric(df2.fix[1,3])
    tseq <- seq(tstart,tend,delt)
    tlen <- length(tseq)
    X <- findviolation(tstart, tend, tend.0, df1[,-1], df2.fix[,-1], delt, leff)[,1]
    tcrit <- findviolation(tstart, tend, tend.0, df1[,-1], df2.fix[,-1], delt, leff)[1,6]
    ucrit <- as.numeric(df1[df1[,1] == tcrit,2])
    xcrit <- as.numeric(df1[df1[,1] == tcrit,3]) - hsafe(ucrit,leff)
    if(sum(X) > 0) {
      for(j in 1:length(fraction)) {
        # Two-piece fit: [tstart, tcrit] then [tcrit, tend], pinned at the
        # safe point (tcrit, xcrit) just behind the leader.
        tseq <- seq(tstart,tcrit,delt)
        if(tcrit > tstart) {
          ab <- xabparam(tstart, tcrit, ustart, ucrit, xstart, xcrit)
          a <- ab[1]
          b <- ab[2]
          t0 <- tstart
          x.fix <- xab(xstart,ustart,a,b,t = tseq,t0)
          u.fix <- uab(ustart,a,b,t = tseq,t0)
          df2.fix[df2.fix[,1] >= tstart & df2.fix[,1] <= tcrit,2] <- u.fix
          df2.fix[df2.fix[,1] >= tstart & df2.fix[,1] <= tcrit,3] <- x.fix
        }
        tseq <- seq(tcrit,tend,delt)
        if(tcrit < tend) {
          ab <- xabparam(tcrit, tend, ucrit, uend, xcrit, xend)
          a <- ab[1]
          b <- ab[2]
          t0 <- tcrit
          x.fix <- xab(xcrit,ucrit,a,b,t = tseq,t0)
          u.fix <- uab(ucrit,a,b,t = tseq,t0)
          df2.fix[df2.fix[,1] >= tcrit & df2.fix[,1] <= tend,2] <- u.fix
          df2.fix[df2.fix[,1] >= tcrit & df2.fix[,1] <= tend,3] <- x.fix
        }
        X <- findviolation(tstart, tend, tend.0, df1[,-1], df2.fix[,-1], delt, leff)[,1]
        tcrit <- findviolation(tstart, tend, tend.0, df1[,-1], df2.fix[,-1], delt, leff)[1,6]
        ucrit <- as.numeric(df1[df1[,1] == tcrit,2])
        # 1.05 widens the safe gap slightly on each retry.
        xcrit <- as.numeric(df1[df1[,1] == tcrit,3]) - 1.05 * hsafe(ucrit,leff)
        ustart <- fraction[j] * ustart
        if(sum(X) == 0) break
      }
    }
  }
  df.01 <- data.frame("Zone 1", tstart, tend, ustart, uend, xstart, xend)
  zone.df <- rbind(data.frame(Zone = 1, tstart, tend, ustart, uend, xstart, xend), zone.df)
  # Return the corrected follower trajectory (u, x, y), without the time
  # column; zone.df/crittable are diagnostics that are not returned.
  return(df2.fix[,-1])
}
|
1f6b9f566b3d15e07828600d0007d97db87f54af
|
21f54b374eb4a60d8e3e418185b78e65e747b6e6
|
/prog_Lect8.R
|
7052cf6580f664387296f5b24a3d6defa81028b4
|
[] |
no_license
|
marcos0318/FinancialDataAnalysis
|
479855291a0863b43d0a1a01ebe2e00c5c4cc8a2
|
d4298bd3eb706e2052f277a6dca7b6403a450e07
|
refs/heads/master
| 2020-03-28T15:15:35.434989
| 2018-11-01T12:44:35
| 2018-11-01T12:44:35
| 148,572,617
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,944
|
r
|
prog_Lect8.R
|
# Lecture 8 script, part 1: least-squares regression of Berkshire Hathaway
# (BH) on the S&P 500, first in price levels, then in simple returns, with
# residual autocorrelation diagnostics.
# NOTE(review): rm(list = ls()), setwd() with a placeholder path, and
# attach() are all discouraged in scripts; the "...//" paths must be edited
# to real locations before this runs.
rm(list = ls())
setwd("...//Lect5") # set directory
BHData <- read.table("...//SP_BH.csv",sep=',',header=TRUE)
attach(BHData)
### original price
par(mfrow=c(1,2))
plot(SP,BH, main="BH vs SP500")
BHSPl2 <- lsfit(SP,BH)
abline(BHSPl2)
plot(BHSPl2$residuals,type="l",main="Residual Plot of LS Regression of BH Against SP500 ")
### to see positive association btw adjacent residuals
# Lag-1 scatter and correlation of the residuals, plus the full ACF.
par(mfrow=c(1,1))
n <- length(BH)
plot(BHSPl2$residuals[-n],BHSPl2$residuals[-1],main="Residual against previous residual ")
cor(BHSPl2$residuals[-n],BHSPl2$residuals[-1])
acf(BHSPl2$residuals, type = "correlation", plot = TRUE)
# returns
# Simple (arithmetic) returns: r_t = (P_t - P_{t-1}) / P_{t-1}.
rBH<-diff(BH)/BH[-n]
rSP<-diff(SP)/SP[-n]
par(mfrow=c(1,2))
plot(rSP, rBH, main="Returns on BH vs returns on SP500")
# ls fit
rBHSPl2 <- lsfit(rSP,rBH)
abline(rBHSPl2)
plot(rBHSPl2$residuals,type="l",main="Residual Plot of LS Regression of rBH Against rSP")
### association btw adjacent residuals
# Same lag-1 diagnostics for the return regression (n shrinks by one
# because diff() drops the first observation).
par(mfrow=c(1,1))
n <- length(BH) - 1
plot(rBHSPl2$residuals[-n],rBHSPl2$residuals[-1],main="Residual against previous residual ")
cor(rBHSPl2$residuals[-n],rBHSPl2$residuals[-1])
acf(rBHSPl2$residuals, type = "correlation", plot = TRUE)
########## Factor Models #########
# Part 2: one-factor (CAPM-style) and Fama-French three-factor regressions
# of BH's excess return on market excess return, size (SmB) and value (HmL)
# factors, with standardized/studentized residual diagnostics and a nested
# model comparison via anova().
BHData <- read.table("...//BH2009-2018.csv",sep=',',header=TRUE)
summary(BHData)
rBH <- BHData$rBH
rf <- BHData$rf
# Excess return of BH over the risk-free rate.
rBH_ex <- rBH - rf
rM_ex <- BHData$rM_ex
rSmB <- BHData$rSmB
rHmL <- BHData$rHmL
#One factor model
Onefactor <- lm(rBH_ex ~ rM_ex)
summary(Onefactor)
# With one regressor, R^2 equals the squared correlation -- sanity check.
c(summary(Onefactor)$r.squared,cor(rBH_ex,rM_ex)^2)
#Model diagnostics
# MASS provides stdres() and studres().
library(MASS)
par(mfrow=c(2,1))
plot(stdres(Onefactor),type="l",main="Standardized Residuals")
plot(studres(Onefactor),type="l",main="Studentized Residuals")
# Lag-1 residual association and ACF for serial correlation.
n <- length(rBH_ex) - 1
plot(stdres(Onefactor)[-n],stdres(Onefactor)[-1],main="Standardized residual against previous one ")
cor(stdres(Onefactor)[-n],stdres(Onefactor)[-1])
plot(studres(Onefactor)[-n],studres(Onefactor)[-1],main="Studentized residual against previous one ")
cor(studres(Onefactor)[-n],studres(Onefactor)[-1])
acf(stdres(Onefactor), type = "correlation", plot = TRUE)
acf(studres(Onefactor), type = "correlation", plot = TRUE)
# Normality check of the residuals against the reference line y = x.
par(mfrow=c(1,2))
qqnorm(stdres(Onefactor),main="Q-Q Plot of Standardized Residuals")
abline(0,1,col="red")
qqnorm(studres(Onefactor),main="Q-Q Plot of Studentized Residuals")
abline(0,1,col="red")
### 3-factor model
pairs(cbind(rBH_ex,rM_ex,rSmB,rHmL))
FF3factor <- lm(rBH_ex ~ rM_ex + rSmB + rHmL)
summary(FF3factor)
#Model diagnostics
par(mfrow=c(2,1))
plot(stdres(FF3factor),type="l",main="Standardized Residuals")
plot(studres(FF3factor),type="l",main="Studentized Residuals")
par(mfrow=c(1,2))
qqnorm(stdres(FF3factor),main="Q-Q Plot of Standardized Residuals")
abline(0,1,col="red")
qqnorm(studres(FF3factor),main="Q-Q Plot of Studentized Residuals")
abline(0,1,col="red")
# F-test of the one-factor model nested in the three-factor model, and a
# side-by-side comparison of the two R^2 values.
anova(Onefactor,FF3factor)
round(c(summary(Onefactor)$r.squared, summary(FF3factor)$r.squared),3)
|
d91dc6593fa9435b4f3e217a9c1d9427b844b4b6
|
60e830955164b8f0ff21a3dbd850319bae68567e
|
/WaveformCode/AnalysisofDiscreteHeartRateStatisticalApproach.R
|
ee11bb5cd243043e23e42487514b62f98bc10332
|
[] |
no_license
|
BenLopez/UHSM_BHF
|
739ed912182c36f2bf719d771c6392a1dfbe6f47
|
4afad6662f95f1d20f7270113c80a1836d035028
|
refs/heads/master
| 2021-05-26T07:31:07.476669
| 2019-12-03T09:52:56
| 2019-12-03T09:52:56
| 127,924,523
| 0
| 0
| null | 2018-10-10T11:03:39
| 2018-04-03T14:53:28
|
R
|
UTF-8
|
R
| false
| false
| 10,722
|
r
|
AnalysisofDiscreteHeartRateStatisticalApproach.R
|
# Interactive setup: pick the source-code folder, load shared libraries and
# settings, then load the patient index CSVs and two .RData files which are
# expected to define `DataSet` and `GoodGroup`.
# NOTE(review): choose.dir()/choose.files() are Windows-only.
pathFiles <- choose.dir(caption="Select folder with source code")
pathFiles <- paste0(pathFiles, "\\")
setwd(pathFiles)
source("LibrariesAndSettings.R" , print.eval = TRUE )
# Load patient index
path_PatIndex <- choose.files(caption="Select 2017 PatientIndex.csv file")
PatIndex2017 <- read.csv(file=path_PatIndex, stringsAsFactors = FALSE)
path_PatIndex <- choose.files(caption="Select 2017 (2) PatientIndex.csv file")
PatIndex2017 <- rbind(PatIndex2017 , read.csv(file=path_PatIndex, stringsAsFactors = FALSE))
path <- choose.files( caption="choose ReducedExtractedDiscreteData.RData" )
load( path )
path <- choose.files( caption="choose GroupofNonAFPatients.RData" )
load( path )
# Keep only patients with more than 5000 heart-rate samples and a
# pre-operative sinus rhythm (the old "under 100" comment was stale).
DataSet <- DataSet[(as.matrix(lapply(DataSet , function(X){length(X[[1]]$tt)})) > 5000)]
DataSet <- DataSet[(as.matrix(lapply(DataSet , function(X){X$MetaData$Pre_OperativeHeartRhythm[[1]] == "Sinus Rhythm"})) == TRUE )]
# Per-patient indicator vectors: AFlogical = 1 when a first new-onset AF
# timestamp exists; PreOpSRLogical = 1 for pre-operative sinus rhythm
# (always 1 after the filter above -- retained for downstream use).
AFlogical <- matrix(0 , length(DataSet) , 1)
PreOpSRLogical <- matrix(0 , length(DataSet) , 1)
for( i in 1:length(DataSet) )
{
  if(!is.na(DataSet[[i]][["MetaData"]]$FirstNewAF[1])){ AFlogical[i] <- 1 }
  if( DataSet[[i]][["MetaData"]]$Pre_OperativeHeartRhythm[1] == "Sinus Rhythm" ){ PreOpSRLogical[i] <- 1 }
}
# Process Data
# Compute local/global second-order heart-rate statistics per patient
# (ComputeLocalandGlobalSecondOrderStatistics is defined in the sourced
# libraries), then build reference mean/variance curves from the non-AF
# reference cohort `GoodGroup`, padding shorter series with zeros and
# averaging only over the non-zero entries at each time index.
LocalAndGlobalComponents <- setNames(lapply( DataSet , ComputeLocalandGlobalSecondOrderStatistics ) , names(DataSet))
tslengths <- matrix(0 , length(GoodGroup) , 1)
for(i in 1:length(GoodGroup))
{
  tslengths[i] <- length(DataSet[[GoodGroup[i]]]$HeartRate$tt)
}
# Global component ($Glo): pointwise mean E_Glo and variance V_GLo across
# the reference cohort.
SecondOrderStatisticsMatrix <- matrix( 0 , max(tslengths) , length(GoodGroup) )
for( i in 1:length(GoodGroup) )
{
  SecondOrderStatisticsMatrix[1:length( LocalAndGlobalComponents[[GoodGroup[i]]]$Glo),i] <- LocalAndGlobalComponents[[GoodGroup[i]]]$Glo
}
E_Glo <- apply( SecondOrderStatisticsMatrix , 1 , sum ) / apply( SecondOrderStatisticsMatrix != 0 , 1 , sum )
V_GLo <- ((apply( SecondOrderStatisticsMatrix^2 , 1 , sum ) / apply( SecondOrderStatisticsMatrix != 0 , 1 , sum ) - E_Glo^2))
# Local component ($Lo): same pointwise mean/variance construction.
SecondOrderStatisticsMatrix <- matrix( 0 , max(tslengths) , length(GoodGroup) )
for( i in 1:length(GoodGroup) )
{
  SecondOrderStatisticsMatrix[1:length( LocalAndGlobalComponents[[GoodGroup[i]]]$Lo),i] <- LocalAndGlobalComponents[[GoodGroup[i]]]$Lo
}
E_Lo <- apply( SecondOrderStatisticsMatrix , 1 , sum ) / apply( SecondOrderStatisticsMatrix != 0 , 1 , sum )
V_Lo <- (apply( SecondOrderStatisticsMatrix^2 , 1 , sum ) / apply( SecondOrderStatisticsMatrix != 0 , 1 , sum ) - E_Lo^2)
# Local variance component ($V_L): same construction again.
SecondOrderStatisticsMatrix <- matrix( 0 , max(tslengths) , length(GoodGroup) )
for( i in 1:length(GoodGroup) )
{
  SecondOrderStatisticsMatrix[1:length( LocalAndGlobalComponents[[GoodGroup[i]]]$V_L),i] <- LocalAndGlobalComponents[[GoodGroup[i]]]$V_L
}
E_V_Lo <- apply( SecondOrderStatisticsMatrix , 1 , sum ) / apply( SecondOrderStatisticsMatrix != 0 , 1 , sum )
V_V_Lo <- (apply( SecondOrderStatisticsMatrix^2 , 1 , sum ) / apply( SecondOrderStatisticsMatrix != 0 , 1 , sum ) - E_V_Lo^2)
# Per-patient standardized residuals against the reference curves: the mean
# of the absolute z-scores of the $Lo and $V_L components (the $V_L-only
# variant is kept commented out).
stdresid <- list()
for(i in 1:length(DataSet))
{
  stdresid2 <- (E_Lo[1:length(LocalAndGlobalComponents[[i]]$Lo)] - LocalAndGlobalComponents[[i]]$Lo) / sqrt((V_Lo[1:length(LocalAndGlobalComponents[[i]]$Lo)]))
  stdresid3 <- (E_V_Lo[1:length(LocalAndGlobalComponents[[i]]$V_L)] - LocalAndGlobalComponents[[i]]$V_L) / sqrt((V_V_Lo[1:length(LocalAndGlobalComponents[[i]]$V_L)]))
  #stdresid[[i]] <- abs(stdresid3)
  stdresid[[i]] <- apply( abs(rbind(stdresid2 , stdresid3)) , 2 , mean )
}
stdresid <- setNames(stdresid , names(DataSet))
# Grid search over (z-score threshold, exceedance-count threshold): a
# patient is flagged as AF when, within a sliding window of `Window`
# samples, more than IntervalThreshold residuals exceed the z-score
# threshold. Sensitivity/specificity/accuracy are recorded per grid cell,
# along with the misclassified patients and detection times.
stdthresholdthreshold = c(2 , 3 , 4 , 5)
Window = 1000
IntervalThreshold = seq(25, 999 , 50)
Sensitivity <- matrix(0 , length(stdthresholdthreshold) , length(IntervalThreshold))
Specificity <- matrix(0 , length(stdthresholdthreshold) , length(IntervalThreshold))
Accuracy <- matrix(0 , length(stdthresholdthreshold) , length(IntervalThreshold))
Errorpatients <- list()
counter <- 1
for(ii in 1:length(stdthresholdthreshold))
{
  # Rolling exceedance count via a difference of cumulative sums.
  output <- lapply( stdresid , function(X){ cumsum(X > stdthresholdthreshold[ii]) } )
  output <- lapply(output , function(X){X[(Window+1):length(X)] - X[1:( length(X) - Window )] } )
  for(jj in 1:length(IntervalThreshold))
  {
    AFDetection <- matrix(0 , length(DataSet) , 1)
    TimeofDetection <- list()
    DetectionDifference <- matrix(NA , length(DataSet) , 1)
    for( i in 1:length(DataSet) )
    {
      if( sum( output[[i]] > IntervalThreshold[jj], na.rm = TRUE) > 0 ){
        AFDetection[i] <- 1
        if( DataSet[[i]][["MetaData"]]$Pre_OperativeHeartRhythm[1] != "Sinus Rhythm" ){ AFDetection[i] <- 0 }
        # Time of the first threshold crossing; for true-AF patients also
        # record the lead/lag (hours) relative to the clinical AF onset.
        Index <- which((output[[i]] > IntervalThreshold[jj]))
        TimeofDetection[[i]] <- LocalAndGlobalComponents[[i]]$t[Index[[1]]]
        if(AFlogical[i] == 1){
          DetectionDifference[i] <- difftime(TimeofDetection[[i]] , as.POSIXct(DataSet[[i]]$MetaData$FirstNewAF[[1]]) , units = 'hours')}
      }
    }
    # Calculate sensitivity, specificity and accuracy.
    NumberAF <- sum(AFlogical)
    N <- length(AFlogical)
    NumberCorrectAFDetections <- sum(AFDetection*AFlogical)
    NumberCorrectNonAFDetections <- sum((AFDetection == 0)*(AFlogical ==0))
    NumberInCorrectAFDetections <- NumberAF - NumberCorrectAFDetections
    Sensitivity[ii,jj] <- NumberCorrectAFDetections/NumberAF
    Specificity[ii,jj] <- NumberCorrectNonAFDetections/sum(AFlogical ==0)
    Accuracy[ii,jj] <- (NumberCorrectAFDetections + NumberCorrectNonAFDetections)/N
    # Extract false positive and negative patients
    FalsePositivePatients <- list()
    FalseNegativePatients <- list()
    counter1 <- 1
    counter2 <- 1
    for( i in 1:length(DataSet) )
    {
      if( ((AFDetection[i] == 1)*(AFlogical[i] ==0)) == 1){
        FalsePositivePatients[[counter1]] <- names(DataSet[i])
        counter1 <- counter1 + 1
      }
      if( ((AFDetection[i] == 0)*(AFlogical[i] == 1)) == 1){
        FalseNegativePatients[[counter2]] <- names(DataSet[i])
        counter2 <- counter2 + 1
      }
    }
    # NOTE(review): the name 'TimeofDetection ' below carries a trailing
    # space; later $TimeofDetection access only works via partial matching.
    Errorpatients[[counter]] <- setNames(list( FalsePositivePatients
                                               , FalseNegativePatients
                                               , c(stdthresholdthreshold[ii] , IntervalThreshold[jj])
                                               , Sensitivity[ii,jj]
                                               , Specificity[ii,jj]
                                               , Accuracy[ii,jj]
                                               , TimeofDetection
                                               , DetectionDifference) ,
                                         c('FalsePositivePatients'
                                           ,'FalseNegativePatients'
                                           , 'Parameters'
                                           , 'Sensitivity'
                                           , 'Specificity'
                                           , 'Accuracy'
                                           , 'TimeofDetection '
                                           , 'DetectionTimeDifference')
    )
    counter <- counter + 1
  }
}
# Heatmaps of sensitivity/specificity/accuracy over the parameter grid
# (image.plot is from the `fields` package, loaded by the sourced settings).
par(mfrow = c(1 , 3))
image.plot(Sensitivity*100 , col = heat.colors(100) , axes = F , xlab = 'Heart rate' , ylab = '5 Min Threshold' , zlim = c(0,100))
axis(1 , at = seq(0 , 1 , 1/3) , labels = stdthresholdthreshold)
axis(2 , at = seq(0 , 1 , 1/(length(IntervalThreshold) - 1) ) , labels = IntervalThreshold)
title('Sensitivity')
image(Specificity*100, col = heat.colors(100), axes = F , xlab = 'Heart rate' , ylab = '5 Min Threshold', zlim = c(0,100))
axis(1 , at = seq(0 , 1 , 1/3) , labels = stdthresholdthreshold)
axis(2 , at = seq(0 , 1 , 1/(length(IntervalThreshold) - 1) ) , labels = IntervalThreshold)
title('Specificity')
image(Accuracy*100, col = heat.colors(100), axes = F , xlab = 'Heart rate' , ylab = '5 Min Threshold', zlim = c(0,100))
title('Accuracy')
axis(1 , at = seq(0 , 1 , 1/3) , labels = stdthresholdthreshold)
axis(2 , at = seq(0 , 1 , 1/(length(IntervalThreshold) - 1) ) , labels = IntervalThreshold)
# Patients misclassified at EVERY grid setting (intersection across all
# Errorpatients entries).
AlwaysFalsePositive <- Errorpatients[[1]][[1]]
for( i in 2:length(Errorpatients))
{
  AlwaysFalsePositive <- intersect( as.matrix(AlwaysFalsePositive) , as.matrix(Errorpatients[[i]][[1]]) )
}
AlwaysFalseNegative <- Errorpatients[[1]][[2]]
for( i in 2:length(Errorpatients))
{
  AlwaysFalseNegative <- intersect( as.matrix(AlwaysFalseNegative) , as.matrix(Errorpatients[[i]][[2]]) )
}
# Example trace of one always-false-positive patient with the detection
# time from grid cell 7 overlaid.
# NOTE(review): data.frame(x<-..., y<-...) relies on the side-effect globals
# x and y picked up by aes(x, y) -- fragile, but left as-is here.
waveformindex <- which(names(DataSet)==AlwaysFalsePositive[10])
par(mfrow = c(1 , 1))
df<- data.frame(x<- DataSet[[waveformindex]]$HeartRate$tt , y <- DataSet[[waveformindex]]$HeartRate$HeartRate )
ggplot( df , aes(x,y)) +
  geom_point(colour="blue", alpha=0.009) +
  ggtitle(names(DataSet[waveformindex])) +
  xlab("Time") + ylab("Heart Rate") +
  geom_hline( yintercept = 130 , linetype="dashed" , color = "red" ) +
  geom_hline( yintercept = 100 , linetype="dashed" , color = "black" ) +
  geom_hline( yintercept = 60 , linetype="dashed" , color = "blue" ) +
  geom_vline( xintercept = as.numeric(Errorpatients[[7]]$TimeofDetection[[waveformindex]]) , linetype="dashed" , color = "black" )
# Example trace of one always-false-negative patient with the clinical AF
# onset time overlaid.
waveformindex <- which(names(DataSet)== AlwaysFalseNegative[1])
par(mfrow = c(1 , 1))
df <- data.frame(x <- as.POSIXct(DataSet[[waveformindex]]$HeartRate$tt) , y <- DataSet[[waveformindex]]$HeartRate$HeartRate )
ggplot(df , aes(x,y)) +
  geom_point(colour="blue", alpha=0.009) +
  ggtitle(names(DataSet[waveformindex])) +
  xlab("Time") + ylab("Heart Rate") +
  geom_hline( yintercept = 130 , linetype="dashed" , color = "red" ) +
  geom_hline( yintercept = 100 , linetype="dashed" , color = "black" ) +
  geom_hline( yintercept = 60 , linetype="dashed" , color = "blue" ) +
  geom_vline( xintercept = as.numeric(as.POSIXct(DataSet[[waveformindex]]$MetaData$FirstNewAF , tz = 'GMT')) , linetype="dashed" , color = "black" )
# Error counts per grid setting (ExtractNumberofErrors is defined in the
# sourced libraries), forecast lead/lag scatter, and final save to disk.
TotalErrorRates <- ExtractNumberofErrors(Errorpatients)
par( mfrow = c(1 , 2) )
plot( TotalErrorRates$FalsePositives$n , ylab = 'Number of Errors' , xaxt='n' )
title('False Positive')
plot( TotalErrorRates$FalseNegatives$n , ylab = 'Number of Errors' , xaxt='n')
title('False Negative')
par(mfrow = c(1 , 1))
plot( Errorpatients[[7]]$DetectionTimeDifference[!is.na( Errorpatients[[7]]$DetectionTimeDifference)] , xlab = 'Sensitivity' , ylab = 'Time Diff Hours' , ylim = c(-100 , 100))
abline(mean(Errorpatients[[7]]$DetectionTimeDifference[!is.na( Errorpatients[[7]]$DetectionTimeDifference)]) , 0)
title('Forecast Potential')
# NOTE(review): hard-coded absolute Windows output path.
save(Errorpatients , file = 'C:\\Users\\Ben\\Documents\\BHF Results\\DiscreteDataHeartRateStatisticalResults.RData')
|
3917c98f863bd070bc26660eddf49d3c49a10588
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/OasisR/examples/RCEPoly.Rd.R
|
1b3b3387ec8d5c781b57bf3376b5aa24f85cadd9
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 713
|
r
|
RCEPoly.Rd.R
|
# Extracted example code for OasisR::RCEPoly (Polycentric Relative
# Centralisation Index), computed three ways: from a spatial object, from a
# shapefile on disk, and from a precomputed distance matrix.
library(OasisR)
### Name: RCEPoly
### Title: A function to compute Polycentric Relative Centralisation Index
### Aliases: RCEPoly
### ** Examples

# Two population-count columns from the demo shapefile's attribute table.
x <- segdata@data[, 1:2]
foldername <- system.file('extdata', package = 'OasisR')
shapename <- 'segdata'
RCEPoly(x, spatobj = segdata, center = c(28, 83))
RCEPoly(x, folder = foldername, shape = shapename, center = c(28, 83))
# Distance-to-centre matrix, one column per centre. NA_real_ keeps the
# matrix numeric from the start, and seq_len() is safe for any column
# count (unlike 1:n, which misbehaves when n is 0).
center <- c(28, 83)
polydist <- matrix(data = NA_real_, nrow = nrow(x), ncol = length(center))
for (i in seq_len(ncol(polydist))) {
  polydist[, i] <- distcenter(spatobj = segdata, center = center[i])
}
RCEPoly(x, dc = polydist)
# Row-wise minimum distance to any centre; apply() replaces the manual
# loop over a logical-typed preallocated vector.
distmin <- apply(polydist, 1, min)
RCE(x, dc = distmin)
|
aa6d0a5e1edbcf1dd3dd7ec7b2593d134cf83ff7
|
29fae7bd72da57d6683c9e14f4753214bd02789b
|
/NN/fn/cd.weight.R
|
7d443ae96f18d54621a21d5d060da39f10773d89
|
[] |
no_license
|
eugene9212/Machine_Learning_with_R
|
8f7f2c8a837f073803f04cc41e95b88e3ea655a4
|
93beaa391057d1b795cbfaff91c706b8256c4645
|
refs/heads/master
| 2020-03-29T18:21:40.831846
| 2019-02-21T04:41:51
| 2019-02-21T04:41:51
| 150,207,644
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,827
|
r
|
cd.weight.R
|
# CD Algorithm
#' One epoch of contrastive-divergence (CD-1) training for a binary RBM.
#'
#' For each row of `input` (a visible-layer sample v1) this performs one
#' Gibbs step v1 -> h1 -> v2 -> h2 and applies the standard CD-1 updates
#' (Hinton):
#'   W    <- W    + learning * (v1' h1_prob - v2' h2_prob)
#'   b_v  <- b_v  + learning * (v1 - v2)
#'   b_h  <- b_h  + learning * (h1_prob - h2_prob)
#'
#' Fixes relative to the previous version:
#'  * the weight update used `-` while the bias updates used `+`; the sign
#'    is now consistent with the CD gradient (ascent on the likelihood),
#'  * the bias updates were applied once per (i, j) weight entry, scaling
#'    them by n_visible * n_hidden per sample; they now run once per sample,
#'  * the dimension check assigned a dead variable `output`; it now actually
#'    transposes `weight`,
#'  * the unused sampling of h2 (only h2_prob is used) was removed,
#'  * the result list is fully named and returned visibly.
#'
#' @param input matrix of training samples, one visible vector per row.
#' @param weight n_visible x n_hidden weight matrix.
#' @param bias_v 1 x n_visible visible-bias row vector.
#' @param bias_h 1 x n_hidden hidden-bias row vector.
#' @param learning learning rate (step size).
#' @return list(weight = ..., bias_v = ..., bias_h = ...) with the updated
#'   parameters.
cd.weight <- function(input, weight, bias_v, bias_h, learning){
  # Dimensionality check: weight must be n_visible x n_hidden. If it
  # arrives transposed, fix the orientation instead of the old dead
  # `output <- t(weight)` assignment.
  if (dim(input)[2] != dim(weight)[1]) {
    warning("Dimensions of input and weight are not compatible; transposing weight")
    weight <- t(weight)
  }
  n_visible <- dim(weight)[1]
  n_hidden <- dim(weight)[2]
  for (m in seq_len(dim(input)[1])) {
    # v1: observed visible vector as a 1 x n_visible row matrix.
    v1_value <- matrix(as.matrix(input[m, ]), ncol = n_visible)
    # Up pass: P(h1 = 1 | v1) and a Bernoulli sample of h1.
    h1_prob <- 1 / (1 + exp(-(v1_value %*% weight + bias_h)))
    h1_value <- matrix(as.numeric(h1_prob >= runif(n_hidden)), nrow = 1)
    # Down pass: P(v2 = 1 | h1) and a sampled reconstruction v2.
    v2_prob <- 1 / (1 + exp(-(h1_value %*% t(weight) + bias_v)))
    v2_value <- matrix(as.numeric(v2_prob >= runif(n_visible)), nrow = 1)
    # Second up pass: P(h2 = 1 | v2); probabilities (not samples) are used
    # in the updates, as in the original code.
    h2_prob <- 1 / (1 + exp(-(v2_value %*% weight + bias_h)))
    # CD-1 parameter updates, once per sample.
    weight <- weight + learning * (t(v1_value) %*% h1_prob - t(v2_value) %*% h2_prob)
    bias_v <- bias_v + learning * (v1_value - v2_value)
    bias_h <- bias_h + learning * (h1_prob - h2_prob)
  }
  list(weight = weight, bias_v = bias_v, bias_h = bias_h)
}
|
5d503015a3b3609c0ebff5441999cb14fcd25871
|
5ad474ec063b6dc6b62250267c0575996aee2a3a
|
/Server_e_block.R
|
b19cb10f59f09334acae8bdc40a539bb07536523
|
[] |
no_license
|
JuanGaleano/Migration-and-Mobility-Survey
|
2989e48fa75a23c7142f7e926ab2d1c51ae80e9b
|
a6d12d4a64b0f34a77ec10977d0421584ac78226
|
refs/heads/main
| 2023-04-19T11:34:50.640369
| 2021-05-14T17:33:28
| 2021-05-14T17:33:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 136,894
|
r
|
Server_e_block.R
|
#### E1 WHAT WAS YOUR LABOUR MARKET SITUATION #####
# Shiny output: grouped horizontal bar chart (highcharter) of respondents'
# labour-market situation before moving to Switzerland, one bar group per
# survey wave (2016/2018/2020).
# NOTE(review): `dmms`, `input`, `a()` and `gg_color_hue()` are defined
# elsewhere in the app. `dmms` appears to be a list of per-wave data frames
# and `a()` a number formatter -- confirm against the rest of the server.
output$E1_1_16 <-renderHighchart({
  # Per wave: weighted count (column chosen by input$magE) and percentage
  # of value == 1 answers for each of the nine E1_* items. Codes
  # -7/-8/-9 are treated as missing; value == 2 (presumably "no") is
  # excluded from the numerator only.
  mar <- lapply(dmms, function(x){
    x<-x[,c("E1_1","E1_2","E1_3","E1_4",
           "E1_5","E1_6","E1_7","E1_8","E1_9",
           input$magE,"year")]#input$magC,
    colnames(x)<-c("E1_1","E1_2","E1_3","E1_4",
                   "E1_5","E1_6","E1_7","E1_8","E1_9","pop","year")
    xlong<-x %>%gather(groupz, value,c(E1_1,E1_2,E1_3,E1_4,
                                       E1_5,E1_6,E1_7,E1_8,E1_9),
                       factor_key=FALSE)
    # Numerator: valid "yes" answers per item.
    x1<-xlong %>%
      group_by(year,groupz) %>%
      filter(value!=-7)%>%
      filter(value!=-9)%>%
      filter(value!=-8)%>%
      filter(value!=2)%>%
      summarise(pop=round(sum(pop),0))
    # Denominator: all valid answers per item.
    x2<-xlong %>%
      group_by(year,groupz) %>%
      filter(value!=-7)%>%
      filter(value!=-9)%>%
      filter(value!=-8)%>%
      summarise(pop=round(sum(pop),0))
    x1$prop<-round(x1$pop/unique(x2$pop)*100,1)
    x1
  })
  # Stack the per-wave summaries and fill in missing item/wave combinations.
  marDF<- as.data.frame(do.call("rbind", mar)) #CATS 1,2,4,5,6
  marDF<-marDF %>% complete(groupz, nesting(year))
  marDF$groupz<-as.factor(marDF$groupz)
  #levels( marDF$groupz)
  #marDF$groupz<-factor(marDF$groupz, levels=c("B27_1","B27_2","B27_3",
  #                                            "B27_4","B27_5","B27_6"))
  # Human-readable labels; order must match the alphabetical factor levels
  # E1_1 .. E1_9.
  levels(marDF$groupz)<-c("In full-time employment",
                          "In part-time employment",
                          "Working in more than one part-time job",
                          "Seeking a job",
                          "Undergoing training",
                          "Looking after home or family",
                          "Disabled or partially disabled",
                          "Retired",
                          "In another non-employed situation")
  marDF[is.na(marDF)] <- 0
  marDF<-with(marDF, marDF[order(groupz),])
  # Axis ceiling for absolute counts depends on the weighting column;
  # the y-axis label formatter appends '%' only in relative mode.
  hc_yAxis<-ifelse(input$magE=="n_nw", 5500,550000)
  formatter<- ifelse(input$magE2=="Relative","function(){ return Math.abs(this.value) + '%'; }",
                     "function(){ return Math.abs(this.value); }")
  # marDF<-as.data.frame(marDF)
  #marDF<-ifelse(input$mag3=="Relative","prop","pop")
  # Pick the percentage column (4) or the count column (3) per the toggle.
  data<-if(input$magE2=="Relative"){
    marDF[,c(1,2,4)]
  } else {
    marDF[,c(1,2,3)]}
  colnames(data)<-c("groupz","year","prop")
  data$year<-paste("Wave ",data$year, sep="")
  # Per-wave totals (valid answers to E1_1 only) for the subtitle Ns.
  mar1 <- lapply(dmms, function(x){
    x<-x[,c("E1_1",input$magE,"year")]#input$magC,
    colnames(x)<-c("E1_1","pop","year")
    xlong<-x %>%gather(groupz, value,c(E1_1),
                       factor_key=FALSE)
    x1<-xlong %>%
      group_by(year,groupz) %>%
      filter(value!=-7)%>%
      filter(value!=-9)%>%
      filter(value!=-8)%>%
      #filter(value!=2)%>%
      summarise(pop=round(sum(pop),0))
    x1
  })
  marDFtotales<- as.data.frame(do.call("rbind", mar1))
  # Build the highcharter bar chart: labels, axes, theme, export menu,
  # tooltip, ggplot2-style colours and the NCCR credit line.
  rank <-data %>%
    hchart('bar', hcaes(x = 'groupz', y = 'prop', group = 'year')) %>%
    hc_legend(enabled =TRUE ) %>%
    hc_yAxis(title = list(text = '')) %>%
    hc_xAxis(title = list(text = '')) %>%
    hc_yAxis(labels = list(formatter = JS(formatter)),
             min=ifelse(input$magE2=="Absolute",0,0),
             max=ifelse(input$magE2=="Absolute",hc_yAxis,75))%>%
    hc_plotOptions(series = list(dataLabels = list(enabled = TRUE))) %>%
    #hc_add_series(name = "Value", data = data) %>%
    hc_title(text = "Labour market situation before moving to Switzerland",align = 'left') %>%
    hc_subtitle(text =paste(
      paste("\n2016 N",a(sum(marDFtotales[marDFtotales$year==2016,"pop"])),sep=": "),
      paste("\n2018 N",a(sum(marDFtotales[marDFtotales$year==2018,"pop"])),sep=": "),
      paste("\n2020 N",a(sum(marDFtotales[marDFtotales$year==2020,"pop"])),sep=": "),
      sep=" | "))%>%
    hc_add_theme(hc_theme_smpl()) %>%
    hc_exporting(enabled = TRUE)%>%
    hc_tooltip(enabled = TRUE) %>%
    hc_colors(c(gg_color_hue(3)))%>%
    hc_credits(enabled = TRUE, text = "NCCR ON THE MOVE",href = "https://nccr-onthemove.ch/")
  rank
})
#### E30 Before moving to Switzerland in [A6], did you ever work abroad (Abroad means in another country than Switzerland)? ####
# Bar chart, waves 2018/2020 only (E30 was not asked in 2016): share/count of
# respondents with prior working experience abroad.  Same input controls as
# the other E-block charts: input$magE (magnitude column), input$magE2
# ("Absolute"/"Relative").
output$E30_18 <-renderHighchart({
# Restrict to the 2018 and 2020 wave data frames.
dmms1820 <- dmms[2:3]
mar <- lapply(dmms1820, function(x){
x<-x[,c("E30",
input$magE,"year")]#input$magC,
colnames(x)<-c("E30","pop","year")
xlong<-x %>%gather(groupz, value,c(E30),
factor_key=FALSE)
# Numerator: "yes" answers only (-7/-8/-9 presumed invalid codes; 2 = "no").
x1<-xlong %>%
group_by(year,groupz) %>%
filter(value!=-7)%>%
filter(value!=-9)%>%
filter(value!=-8)%>%
filter(value!=2)%>%
summarise(pop=round(sum(pop),0))
# Denominator: all valid answers.
x2<-xlong %>%
group_by(year,groupz) %>%
filter(value!=-7)%>%
filter(value!=-9)%>%
filter(value!=-8)%>%
summarise(pop=round(sum(pop),0))
x1$prop<-round(x1$pop/unique(x2$pop)*100,1)
x1
})
marDF<- as.data.frame(do.call("rbind", mar)) #CATS 1,2,4,5,6
marDF<-marDF %>% complete(groupz, nesting(year))
marDF$groupz<-as.factor(marDF$groupz)
#levels( marDF$groupz)
#marDF$groupz<-factor(marDF$groupz, levels=c("B27_1","B27_2","B27_3",
# "B27_4","B27_5","B27_6"))
levels(marDF$groupz)<-c("Yes, I already had a working experience abroad")
marDF[is.na(marDF)] <- 0
marDF<-with(marDF, marDF[order(groupz),])
# Absolute-mode axis ceiling, unweighted vs weighted.
hc_yAxis<-ifelse(input$magE=="n_nw", 1000, 100000)
formatter<- ifelse(input$magE2=="Relative","function(){ return Math.abs(this.value) + '%'; }",
"function(){ return Math.abs(this.value); }")
# marDF<-as.data.frame(marDF)
#marDF<-ifelse(input$mag3=="Relative","prop","pop")
# Keep prop (relative) or pop (absolute) as the plotted column.
marDF<-if(input$magE2=="Relative"){
marDF[,c(1,2,4)]
} else {
marDF[,c(1,2,3)]}
colnames(marDF)<-c("groupz","year","prop")
marDF$year<-paste("Wave ",marDF$year, sep="")
# Valid-answer totals per wave, used only for the subtitle N's.
mar1 <- lapply(dmms1820, function(x){
x<-x[,c("E30",
input$magE,"year")]#input$magC,
colnames(x)<-c("E30","pop","year")
xlong<-x %>%gather(groupz, value,c(E30),
factor_key=FALSE)
x1<-xlong %>%
group_by(year,groupz) %>%
filter(value!=-7)%>%
filter(value!=-9)%>%
filter(value!=-8)%>%
# filter(value!=2)%>%
summarise(pop=round(sum(pop),0))
x1
})
marDF1<- as.data.frame(do.call("rbind", mar1))
rank <-marDF %>%
hchart('bar', hcaes(x = 'groupz', y = 'prop', group = 'year')) %>%
hc_legend(enabled =TRUE ) %>%
hc_yAxis(title = list(text = '')) %>%
hc_xAxis(title = list(text = '')) %>%
hc_yAxis(labels = list(formatter = JS(formatter)),
min=ifelse(input$magE2=="Absolute",0,0),
max=ifelse(input$magE2=="Absolute",hc_yAxis,75))%>%
hc_plotOptions(series = list(dataLabels = list(enabled = TRUE))) %>%
#hc_add_series(name = "Value", data = data) %>%
hc_title(text = "Before moving to Switzerland, did you ever work abroad?",align = 'left') %>%
hc_subtitle(text =paste(
#paste("\n2016 N",a(sum(marDF1[marDF1$year==2016,"pop"])),sep=": "),
paste("\n2018 N",a(sum(marDF1[marDF1$year==2018,"pop"])),sep=": "),
paste("\n2020 N",a(sum(marDF1[marDF1$year==2020,"pop"])),sep=": "),
sep=" | "))%>%
hc_add_theme(hc_theme_smpl()) %>%
hc_exporting(enabled = TRUE)%>%
hc_tooltip(enabled = TRUE) %>%
# Wave colours 2 and 3 keep the palette consistent with three-wave charts.
hc_colors(c(gg_color_hue(3)[c(2,3)]))%>% hc_credits(enabled = TRUE, text = "NCCR ON THE MOVE",href = "https://nccr-onthemove.ch/")
rank
})
#### E31 Before moving to Switzerland in [A6], did you work as a cross-border worker in Switzerland? ####
# Bar chart, waves 2018/2020 only: share/count of respondents who worked as
# cross-border workers in Switzerland before immigrating.  input$magE selects
# the magnitude column ("n_nw" = unweighted counts, otherwise weighted);
# input$magE2 toggles "Absolute" vs "Relative".  Codes -7/-8/-9 are treated
# as invalid answers; code 2 ("no") is excluded from the numerator so only
# "yes" (code 1) is plotted.
output$E31_18 <-renderHighchart({
dmms1820 <- dmms[2:3]
mar <- lapply(dmms1820, function(x){
x<-x[,c("E31",
input$magE,"year")]#input$magC,
colnames(x)<-c("E31","pop","year")
xlong<-x %>%gather(groupz, value,c(E31),
factor_key=FALSE)
# Numerator: weighted count of "yes" answers.
x1<-xlong %>%
group_by(year,groupz) %>%
filter(value!=-7)%>%
filter(value!=-9)%>%
filter(value!=-8)%>%
filter(value!=2)%>%
summarise(pop=round(sum(pop),0))
# Denominator: weighted count of all valid answers.
x2<-xlong %>%
group_by(year,groupz) %>%
filter(value!=-7)%>%
filter(value!=-9)%>%
filter(value!=-8)%>%
summarise(pop=round(sum(pop),0))
x1$prop<-round(x1$pop/unique(x2$pop)*100,1)
x1
})
marDF<- as.data.frame(do.call("rbind", mar)) #CATS 1,2,4,5,6
marDF<-marDF %>% complete(groupz, nesting(year))
marDF$groupz<-as.factor(marDF$groupz)
levels(marDF$groupz)<-c("Yes, I was a cross-border worker")
marDF[is.na(marDF)] <- 0
marDF<-with(marDF, marDF[order(groupz),])
# Absolute-mode axis ceiling, unweighted vs weighted.
hc_yAxis<-ifelse(input$magE=="n_nw", 500, 50000)
formatter<- ifelse(input$magE2=="Relative","function(){ return Math.abs(this.value) + '%'; }",
"function(){ return Math.abs(this.value); }")
# Keep prop (relative) or pop (absolute) as the plotted column.
marDF<-if(input$magE2=="Relative"){
marDF[,c(1,2,4)]
} else {
marDF[,c(1,2,3)]}
colnames(marDF)<-c("groupz","year","prop")
marDF$year<-paste("Wave ",marDF$year, sep="")
# Valid-answer totals per wave for the subtitle N's.  Fixed: the temporary
# column is now labelled E31 (it was mislabelled "E30", a copy-paste from
# the E30 block; behaviour is unchanged, only the local name).
mar1 <- lapply(dmms1820, function(x){
x<-x[,c("E31",
input$magE,"year")]#input$magC,
colnames(x)<-c("E31","pop","year")
xlong<-x %>%gather(groupz, value,c(E31),
factor_key=FALSE)
x1<-xlong %>%
group_by(year,groupz) %>%
filter(value!=-7)%>%
filter(value!=-9)%>%
filter(value!=-8)%>%
summarise(pop=round(sum(pop),0))
x1
})
marDF1<- as.data.frame(do.call("rbind", mar1))
rank <-marDF %>%
hchart('bar', hcaes(x = 'groupz', y = 'prop', group = 'year')) %>%
hc_legend(enabled =TRUE ) %>%
hc_yAxis(title = list(text = '')) %>%
hc_xAxis(title = list(text = '')) %>%
hc_yAxis(labels = list(formatter = JS(formatter)),
min=ifelse(input$magE2=="Absolute",0,0),
max=ifelse(input$magE2=="Absolute",hc_yAxis,20))%>%
hc_plotOptions(series = list(dataLabels = list(enabled = TRUE))) %>%
# Fixed: the title previously duplicated the E30 question ("did you ever
# work abroad?"); it now states the E31 question actually plotted here.
hc_title(text = "Before moving to Switzerland, did you work as a cross-border worker in Switzerland?",align = 'left') %>%
hc_subtitle(text =paste(
paste("\n2018 N",a(sum(marDF1[marDF1$year==2018,"pop"])),sep=": "),
paste("\n2020 N",a(sum(marDF1[marDF1$year==2020,"pop"])),sep=": "),
sep=" | "))%>% hc_add_theme(hc_theme_smpl()) %>%
hc_exporting(enabled = TRUE)%>%
hc_tooltip(enabled = TRUE) %>%
hc_colors(c(gg_color_hue(3)[c(2,3)]))%>% hc_credits(enabled = TRUE, text = "NCCR ON THE MOVE",href = "https://nccr-onthemove.ch/")
rank
})
#### E2 OCCUPATIONAL SITUATION #####
# Bar chart, all three waves: occupational status before moving to
# Switzerland.  NOTE(review): input$BE selects the question column; the
# hard-coded E2 labels below only make sense when input$BE == "E2" --
# confirm in the UI definition.
output$E2_16 <-renderHighchart({
mar <- lapply(dmms, function(x){
x<-x[,c(input$BE,input$magE,"year")]
#x<-x[,c("E2","weight","year")]
colnames(x)<-c("E2", "pop","year")
# Drop invalid codes, then weighted count and within-wave share per answer.
x<-x %>%
filter(E2!=-7)%>%
filter(E2!=-9)%>%
filter(E2!=-8)%>%
group_by(year,E2) %>%
#replace_with_na(replace = list(A6 = -9))%>%
summarise(pop=round(sum(pop),0))%>%
mutate(prop=round(pop/sum(pop)*100,1))%>%
arrange((E2))
x$E2<-as.factor(x$E2)
x$E2<-fct_explicit_na(x$E2)
x
})
# Relabel the 8 answer codes per wave.  NOTE(review): this assumes every
# wave observed exactly these 8 codes in order; a wave missing a code would
# shift the labels silently -- verify.
levels(mar[["D16"]]$E2)<-c("Self-employed",
"A company owner",
"A relative employed in a family business",
"Employed as director or board member and/or with managerial",
"Employed without managerial responsibility",
"Employed in a protected workshop (except support staff)",
"An apprentice",
"A PhD student")
levels(mar[["D18"]]$E2)<-c("Self-employed",
"A company owner",
"A relative employed in a family business",
"Employed as director or board member and/or with managerial",
"Employed without managerial responsibility",
"Employed in a protected workshop (except support staff)",
"An apprentice",
"A PhD student")
levels(mar[["D20"]]$E2)<-c("Self-employed",
"A company owner",
"A relative employed in a family business",
"Employed as director or board member and/or with managerial",
"Employed without managerial responsibility",
"Employed in a protected workshop (except support staff)",
"An apprentice",
"A PhD student")
marDF<- as.data.frame(do.call("rbind", mar))
marDF$E2<-as.factor(marDF$E2)
# Plotted column: absolute counts or relative shares.
data<- if(input$magE2=="Absolute"){
marDF[,c("year","E2", "pop")]}else{
marDF[,c("year","E2", "prop")]}
hc_yAxis<-ifelse(input$magE=="n_nw", 5000,500000)
formatter<- ifelse(input$magE2=="Relative","function(){ return Math.abs(this.value) + '%'; }",
"function(){ return Math.abs(this.value); }")
# One series per wave.  NOTE(review): extracting column 3 by row mask
# assumes each wave's rows appear in the same category order as the x-axis
# levels -- holds only if all waves share all categories.
rank <- highchart() %>%
#hc_chart(type = "bar") %>%
hc_xAxis(categories = levels(data$E2), title = list(text = '')) %>%
hc_add_series(name= "Wave 2016",data = data[data$year==2016,3])%>%
hc_add_series(name= "Wave 2018",data = data[data$year==2018,3])%>%
hc_add_series(name= "Wave 2020",data = data[data$year==2020,3])%>%
hc_chart(type = 'bar',zoomType= 'xy') %>%
hc_legend(enabled =TRUE ) %>%
hc_yAxis(labels = list(formatter = JS(formatter)),
min=ifelse(input$magE2=="Absolute",0,0),
max=ifelse(input$magE2=="Absolute",hc_yAxis,70))%>%
hc_plotOptions(series = list(dataLabels = list(enabled = TRUE))) %>%
#hc_add_series(name = "Value", data = data) %>%
hc_title(text = "What was your occupational status before moving to Switzerland?",
align = 'left') %>%
hc_subtitle(text =paste(
paste("\n2016 N",a(sum(marDF[marDF$year==2016,"pop"])),sep=": "),
paste("\n2018 N",a(sum(marDF[marDF$year==2018,"pop"])),sep=": "),
paste("\n2020 N",a(sum(marDF[marDF$year==2020,"pop"])),sep=": "),
sep=" | "))%>%
hc_add_theme(hc_theme_smpl()) %>%
hc_exporting(enabled = TRUE)%>%
hc_tooltip(enabled = TRUE)%>%
hc_colors(c(gg_color_hue(3)))%>%
hc_credits(enabled = TRUE, text = "NCCR ON THE MOVE",href = "https://nccr-onthemove.ch/")
rank
})
#### E3 Once you arrived in Switzerland in [A6], what sector of business, or industry was your company or institution active in for the most part? ####
# Bar chart, wave 2016 only (question not asked in later waves): sector of
# business/industry of the respondent's company on arrival.  NOTE(review):
# input$BE is expected to be "E3" here for the sector labels to apply.
output$E3_16 <-renderHighchart({
#mar <- lapply(dmms, function(x){
x<-dmms[["D16"]][,c(input$BE,input$magE,"year")]
# Recode NA to -7 so it is dropped by the filter below.
x[is.na(x)] <- -7
colnames(x)<-c("E3","pop","year")
x<-x %>%
group_by(year,E3) %>%
filter(E3!=-7)%>%
filter(E3!=-9)%>%
summarise(pop=round(sum(pop),0))%>%
mutate(prop=round((pop/sum(pop)*100),1))
x$E3<-as.factor(x$E3)
x$E3<-fct_explicit_na(x$E3)
x
#})
# Relabel sector codes in observed order -- assumes all 10 codes are present.
levels(x$E3)<-c(
"Agriculture, forestry and fishing",
"Manufacturing, mining and quarrying and other industry ",
"Construction",
"Wholesale and retail trade, transportation, hotels and restaurants",
"Information and communication",
"Financial and insurance activities",
"Real estate activities",
"Professional, scientific, technical, administration and support service activities",
"Public administration, defense, education, human health and social action",
"Other activities and services")
# Plotted values: counts or shares depending on input$magE2.
data<- if(input$magE2=="Absolute"){
c(x$pop)}else{
c(x$prop)}
formatter<- ifelse(input$magE2=="Relative","function(){ return Math.abs(this.value) + '%'; }",
"function(){ return Math.abs(this.value); }")
hc_yAxis<-ifelse(input$magE=="n_nw", 1000,80000)
rank <- highchart() %>%
hc_chart(type = 'bar',zoomType= 'xy') %>%
hc_legend(enabled = TRUE) %>%
hc_xAxis(categories = as.character(x$E3), title = list(text = '')) %>%
hc_yAxis(labels = list(formatter = JS(formatter)),
min=ifelse(input$magE2=="Absolute",0,0),
max=ifelse(input$magE2=="Absolute",hc_yAxis,25))%>%
hc_plotOptions(series = list(dataLabels = list(enabled = TRUE))) %>%
hc_add_series(name = "Wave 2016", data = data) %>%
hc_title(text = "2016: Sector of business, or industry was your company",
align = 'left') %>%
hc_subtitle(text = paste("\nN",a(sum(c(x$pop))),sep=": "))%>%
hc_add_theme(hc_theme_smpl()) %>%
hc_exporting(enabled = TRUE)%>%
#hc_credits(enabled = TRUE, text = "NCCR ON THE MOVE",href = "https://nccr-onthemove.ch/")%>%
hc_tooltip(enabled = TRUE)%>%
hc_colors(c(gg_color_hue(3)[1]))%>%
hc_credits(enabled = TRUE, text = "NCCR ON THE MOVE",href = "https://nccr-onthemove.ch/")
rank
})
# Static placeholder for the 2018/2020 tabs: question E3 was asked in 2016 only.
output$E3_18 <- renderText({
  "Question not included in 2018 and 2020"
})
#### E32 Just before moving to Switzerland in [A6], were you actively looking for a new job in the labor market, either in Switzerland or abroad? ####
# Bar chart, waves 2018/2020 only: share/count of respondents actively job
# hunting just before the move.  Same controls as the other E-block charts.
output$E32_18 <-renderHighchart({
dmms1820 <- dmms[2:3]
mar <- lapply(dmms1820, function(x){
x<-x[,c("E32",
input$magE,"year")]#input$magC,
colnames(x)<-c("E32","pop","year")
xlong<-x %>%gather(groupz, value,c(E32),
factor_key=FALSE)
# Numerator: "yes" answers (-7/-8/-9 presumed invalid; 2 = "no").
x1<-xlong %>%
group_by(year,groupz) %>%
filter(value!=-7)%>%
filter(value!=-9)%>%
filter(value!=-8)%>%
filter(value!=2)%>%
summarise(pop=round(sum(pop),0))
# Denominator: all valid answers.
x2<-xlong %>%
group_by(year,groupz) %>%
filter(value!=-7)%>%
filter(value!=-9)%>%
filter(value!=-8)%>%
summarise(pop=round(sum(pop),0))
x1$prop<-round(x1$pop/unique(x2$pop)*100,1)
x1
})
marDF<- as.data.frame(do.call("rbind", mar)) #CATS 1,2,4,5,6
marDF<-marDF %>% complete(groupz, nesting(year))
marDF$groupz<-as.factor(marDF$groupz)
#levels( marDF$groupz)
#marDF$groupz<-factor(marDF$groupz, levels=c("B27_1","B27_2","B27_3",
# "B27_4","B27_5","B27_6"))
levels(marDF$groupz)<-c("Yes")
marDF[is.na(marDF)] <- 0
marDF<-with(marDF, marDF[order(groupz),])
# Absolute-mode axis ceiling, unweighted vs weighted.
hc_yAxis<-ifelse(input$magE=="n_nw", 3000, 325000)
formatter<- ifelse(input$magE2=="Relative","function(){ return Math.abs(this.value) + '%'; }",
"function(){ return Math.abs(this.value); }")
# marDF<-as.data.frame(marDF)
#marDF<-ifelse(input$mag3=="Relative","prop","pop")
# Keep prop (relative) or pop (absolute) as the plotted column.
marDF<-if(input$magE2=="Relative"){
marDF[,c(1,2,4)]
} else {
marDF[,c(1,2,3)]}
colnames(marDF)<-c("groupz","year","prop")
marDF$year<-paste("Wave ",marDF$year, sep="")
# Valid-answer totals per wave for the subtitle N's.
mar1 <- lapply(dmms1820, function(x){
x<-x[,c("E32",
input$magE,"year")]#input$magC,
colnames(x)<-c("E32","pop","year")
xlong<-x %>%gather(groupz, value,c(E32),
factor_key=FALSE)
x1<-xlong %>%
group_by(year,groupz) %>%
filter(value!=-7)%>%
filter(value!=-9)%>%
filter(value!=-8)%>%
# filter(value!=2)%>%
summarise(pop=round(sum(pop),0))
x1
})
marDF1<- as.data.frame(do.call("rbind", mar1))
rank <-marDF %>%
hchart('bar', hcaes(x = 'groupz', y = 'prop', group = 'year')) %>%
hc_legend(enabled =TRUE ) %>%
hc_yAxis(title = list(text = '')) %>%
hc_xAxis(title = list(text = '')) %>%
hc_yAxis(labels = list(formatter = JS(formatter)),
min=ifelse(input$magE2=="Absolute",0,0),
max=ifelse(input$magE2=="Absolute",hc_yAxis,50))%>%
hc_plotOptions(series = list(dataLabels = list(enabled = TRUE))) %>%
#hc_add_series(name = "Value", data = data) %>%
hc_title(text = "Just before moving to Switzerland, were you actively looking for a new job in the labor market, either in Switzerland or abroad?",align = 'left') %>%
hc_subtitle(text =paste(
# paste("\n2016 N",a(sum(marDF1[marDF1$year==2016,"pop"])),sep=": "),
paste("\n2018 N",a(sum(marDF1[marDF1$year==2018,"pop"])),sep=": "),
paste("\n2020 N",a(sum(marDF1[marDF1$year==2020,"pop"])),sep=": "),
sep=" | "))%>%
hc_add_theme(hc_theme_smpl()) %>%
hc_exporting(enabled = TRUE)%>%
hc_tooltip(enabled = TRUE) %>%
hc_colors(c(gg_color_hue(3)[c(2,3)]))%>%
hc_credits(enabled = TRUE, text = "NCCR ON THE MOVE",href = "https://nccr-onthemove.ch/")
rank
})
#### E33 Please indicate the situation that best describes your job search just before moving to Switzerland in [A6 #####
# Bar chart, waves 2018/2020: where the respondent was searching for a job
# (Switzerland only / Switzerland + other countries / other countries).
# NOTE(review): input$BE must be "E33" here for the labels to apply.
output$E33_18 <-renderHighchart({
dmms1820 <- dmms[2:3]
mar <- lapply(dmms1820, function(x){
x<-x[,c(input$BE,input$magE,"year")]
#x<-x[,c("E33","weight","year")]
colnames(x)<-c("E33", "pop","year")
# Drop invalid codes, weighted count + within-wave share per answer.
x<-x %>%
filter(E33!=-7)%>%
filter(E33!=-9)%>%
filter(E33!=-8)%>%
group_by(year,E33) %>%
#replace_with_na(replace = list(A6 = -9))%>%
summarise(pop=round(sum(pop),0))%>%
mutate(prop=round(pop/sum(pop)*100,1))%>%
arrange((E33))
x$E33<-as.factor(x$E33)
x$E33<-fct_explicit_na(x$E33)
x
})
# Relabel the three answer codes per wave (assumes all three are observed).
levels(mar[["D18"]]$E33)<-c("I was specifically looking for a job in Switzerland",
"I was looking for a job in Switzerland but also in other countries",
"I was looking for a job in another or other countries, Switzerland was not my priority")
levels(mar[["D20"]]$E33)<-c("I was specifically looking for a job in Switzerland",
"I was looking for a job in Switzerland but also in other countries",
"I was looking for a job in another or other countries, Switzerland was not my priority")
marDF<- as.data.frame(do.call("rbind", mar))
marDF$E33<-as.factor(marDF$E33)
# Plotted column: absolute counts or relative shares.
data<- if(input$magE2=="Absolute"){
marDF[,c("year","E33", "pop")]}else{
marDF[,c("year","E33", "prop")]}
hc_yAxis<-ifelse(input$magE=="n_nw", 2000,200000)
formatter<- ifelse(input$magE2=="Relative","function(){ return Math.abs(this.value) + '%'; }",
"function(){ return Math.abs(this.value); }")
# One series per wave; row-mask extraction assumes matching category order.
rank <- highchart() %>%
#hc_chart(type = "bar") %>%
hc_xAxis(categories = levels(data$E33), title = list(text = '')) %>%
#hc_add_series(name= "Wave 2016",data = data[data$year==2016,3])%>%
hc_add_series(name= "Wave 2018",data = data[data$year==2018,3])%>%
hc_add_series(name= "Wave 2020",data = data[data$year==2020,3])%>%
hc_chart(type = 'bar',zoomType= 'xy') %>%
hc_legend(enabled =TRUE ) %>%
hc_yAxis(labels = list(formatter = JS(formatter)),
min=ifelse(input$magE2=="Absolute",0,0),
max=ifelse(input$magE2=="Absolute",hc_yAxis,70))%>%
hc_plotOptions(series = list(dataLabels = list(enabled = TRUE))) %>%
#hc_add_series(name = "Value", data = data) %>%
hc_title(text = "Job search just before moving to Switzerland",
align = 'left') %>%
hc_subtitle(text =paste(
# paste("\n2016 N",a(sum(marDF[marDF$year==2016,"pop"])),sep=": "),
paste("\n2018 N",a(sum(marDF[marDF$year==2018,"pop"])),sep=": "),
paste("\n2020 N",a(sum(marDF[marDF$year==2020,"pop"])),sep=": "),
sep=" | "))%>%
hc_add_theme(hc_theme_smpl()) %>%
hc_exporting(enabled = TRUE)%>%
hc_tooltip(enabled = TRUE)%>%
hc_colors(c(gg_color_hue(3)[c(2:3)]))%>%
hc_credits(enabled = TRUE, text = "NCCR ON THE MOVE",href = "https://nccr-onthemove.ch/")
rank
})
#### E34 What were the main reasons explaining that you were looking for a job in Switzerland? ####
# Bar chart, waves 2018/2020: multi-response reasons (E34_1..E34_6) for
# looking for a job in Switzerland, each item treated as a yes/no indicator.
output$E34_1_18 <-renderHighchart({
dmms1820 <- dmms[2:3]
mar <- lapply(dmms1820, function(x){
x<-x[,c("E34_1","E34_2","E34_3",
"E34_4","E34_5","E34_6",
input$magE,"year")]#input$magC,
colnames(x)<-c("E34_1","E34_2","E34_3",
"E34_4","E34_5","E34_6",
"pop","year")
# Long format: one row per respondent x reason item.
xlong<-x %>%gather(groupz, value,c(E34_1,E34_2,E34_3,
E34_4,E34_5,E34_6),
factor_key=FALSE)
# Numerator: "yes" answers per item.
x1<-xlong %>%
group_by(year,groupz) %>%
filter(value!=-7)%>%
filter(value!=-9)%>%
filter(value!=-8)%>%
filter(value!=2)%>%
summarise(pop=round(sum(pop),0))
# Denominator: all valid answers per item.
x2<-xlong %>%
group_by(year,groupz) %>%
filter(value!=-7)%>%
filter(value!=-9)%>%
filter(value!=-8)%>%
summarise(pop=round(sum(pop),0))
# NOTE(review): same unique(x2$pop) recycling caveat as the E1 block --
# verify per-item denominators are identical.
x1$prop<-round(x1$pop/unique(x2$pop)*100,1)
x1
})
marDF<- as.data.frame(do.call("rbind", mar)) #CATS 1,2,4,5,6
marDF<-marDF %>% complete(groupz, nesting(year))
marDF$groupz<-as.factor(marDF$groupz)
#levels( marDF$groupz)
#marDF$groupz<-factor(marDF$groupz, levels=c("B27_1","B27_2","B27_3",
# "B27_4","B27_5","B27_6"))
# Relabel items in alphabetical factor order (E34_1..E34_6).
levels(marDF$groupz)<-c("Family reasons",
"The high wages in Switzerland",
"The overall working conditions in Switzerland",
"I was looking for a professional experience abroad, not specifically in Switzerland",
"The quality of life in Switzerland",
"Other reasons")
marDF[is.na(marDF)] <- 0
marDF<-with(marDF, marDF[order(groupz),])
# Absolute-mode axis ceiling, unweighted vs weighted.
hc_yAxis<-ifelse(input$magE=="n_nw", 1000, 125000)
formatter<- ifelse(input$magE2=="Relative","function(){ return Math.abs(this.value) + '%'; }",
"function(){ return Math.abs(this.value); }")
# marDF<-as.data.frame(marDF)
#marDF<-ifelse(input$mag3=="Relative","prop","pop")
# Keep prop (relative) or pop (absolute) as the plotted column.
marDF<-if(input$magE2=="Relative"){
marDF[,c(1,2,4)]
} else {
marDF[,c(1,2,3)]}
colnames(marDF)<-c("groupz","year","prop")
marDF$year<-paste("Wave ",marDF$year, sep="")
# Valid-answer totals from E34_1 only, for the subtitle N's.
mar1 <- lapply(dmms1820, function(x){
x<-x[,c("E34_1",input$magE,"year")]#input$magC,
colnames(x)<-c("E34_1","pop","year")
xlong<-x %>%gather(groupz, value,c(E34_1),
factor_key=FALSE)
x1<-xlong %>%
group_by(year,groupz) %>%
filter(value!=-7)%>%
filter(value!=-9)%>%
filter(value!=-8)%>%
#filter(value!=2)%>%
summarise(pop=round(sum(pop),0))
x1
})
marDFtotales<- as.data.frame(do.call("rbind", mar1))
rank <-marDF %>%
hchart('bar', hcaes(x = 'groupz', y = 'prop', group = 'year')) %>%
hc_legend(enabled =TRUE ) %>%
hc_yAxis(title = list(text = '')) %>%
hc_xAxis(title = list(text = '')) %>%
hc_yAxis(labels = list(formatter = JS(formatter)),
min=ifelse(input$magE2=="Absolute",0,0),
max=ifelse(input$magE2=="Absolute",hc_yAxis,75))%>%
hc_plotOptions(series = list(dataLabels = list(enabled = TRUE))) %>%
#hc_add_series(name = "Value", data = data) %>%
hc_title(text = "What were the main reasons explaining that you were looking for a job in Switzerland?",align = 'left') %>%
hc_subtitle(text =paste(
# paste("\n2016 N",a(sum(marDFtotales[marDFtotales$year==2016,"pop"])),sep=": "),
paste("\n2018 N",a(sum(marDFtotales[marDFtotales$year==2018,"pop"])),sep=": "),
paste("\n2020 N",a(sum(marDFtotales[marDFtotales$year==2020,"pop"])),sep=": "),
sep=" | ")) %>%
hc_add_theme(hc_theme_smpl()) %>%
hc_exporting(enabled = TRUE)%>%
hc_tooltip(enabled = TRUE) %>%
hc_colors(c(gg_color_hue(3)[c(2,3)]))%>%
hc_credits(enabled = TRUE, text = "NCCR ON THE MOVE",href = "https://nccr-onthemove.ch/")
rank
})
#### E4 Did you have a job contract or a job offer in Switzerland at the time you immigrated to Switzerland in [A6]? ####
# Bar chart, all three waves: share/count of respondents holding a job
# contract or offer in Switzerland at the time of immigration.
output$E4_16 <-renderHighchart({
#dmms1820 <- dmms[2:3]
mar <- lapply(dmms, function(x){
x<-x[,c("E4",
input$magE,"year")]#input$magC,
colnames(x)<-c("E4",
"pop","year")
xlong<-x %>%gather(groupz, value,c(E4),
factor_key=FALSE)
# Numerator: "yes" answers (-7/-8/-9 presumed invalid; 2 = "no").
x1<-xlong %>%
group_by(year,groupz) %>%
filter(value!=-7)%>%
filter(value!=-9)%>%
filter(value!=-8)%>%
filter(value!=2)%>%
summarise(pop=round(sum(pop),0))
# Denominator: all valid answers.
x2<-xlong %>%
group_by(year,groupz) %>%
filter(value!=-7)%>%
filter(value!=-9)%>%
filter(value!=-8)%>%
summarise(pop=round(sum(pop),0))
x1$prop<-round(x1$pop/unique(x2$pop)*100,1)
x1
})
marDF<- as.data.frame(do.call("rbind", mar)) #CATS 1,2,4,5,6
marDF<-marDF %>% complete(groupz, nesting(year))
marDF$groupz<-as.factor(marDF$groupz)
#levels( marDF$groupz)
#marDF$groupz<-factor(marDF$groupz, levels=c("B27_1","B27_2","B27_3",
# "B27_4","B27_5","B27_6"))
levels(marDF$groupz)<-c("Yes")
marDF[is.na(marDF)] <- 0
marDF<-with(marDF, marDF[order(groupz),])
# Absolute-mode axis ceiling, unweighted vs weighted.
hc_yAxis<-ifelse(input$magE=="n_nw", 4000, 400000)
formatter<- ifelse(input$magE2=="Relative","function(){ return Math.abs(this.value) + '%'; }",
"function(){ return Math.abs(this.value); }")
# marDF<-as.data.frame(marDF)
#marDF<-ifelse(input$mag3=="Relative","prop","pop")
# Keep prop (relative) or pop (absolute) as the plotted column.
marDF<-if(input$magE2=="Relative"){
marDF[,c(1,2,4)]
} else {
marDF[,c(1,2,3)]}
colnames(marDF)<-c("groupz","year","prop")
marDF$year<-paste("Wave ",marDF$year, sep="")
# Valid-answer totals per wave for the subtitle N's.
mar1 <- lapply(dmms, function(x){
x<-x[,c("E4",
input$magE,"year")]#input$magC,
colnames(x)<-c("E4","pop","year")
xlong<-x %>%gather(groupz, value,c(E4),
factor_key=FALSE)
x1<-xlong %>%
group_by(year,groupz) %>%
filter(value!=-7)%>%
filter(value!=-9)%>%
filter(value!=-8)%>%
# filter(value!=2)%>%
summarise(pop=round(sum(pop),0))
x1
})
marDF1<- as.data.frame(do.call("rbind", mar1))
rank <-marDF %>%
hchart('bar', hcaes(x = 'groupz', y = 'prop', group = 'year')) %>%
hc_legend(enabled =TRUE ) %>%
hc_yAxis(title = list(text = '')) %>%
hc_xAxis(title = list(text = '')) %>%
hc_yAxis(labels = list(formatter = JS(formatter)),
min=ifelse(input$magE2=="Absolute",0,0),
max=ifelse(input$magE2=="Absolute",hc_yAxis,75))%>%
hc_plotOptions(series = list(dataLabels = list(enabled = TRUE))) %>%
#hc_add_series(name = "Value", data = data) %>%
hc_title(text = "Did you have a job or a job offer in Switzerland before you immigrated to Switzerland?",align = 'left') %>%
hc_subtitle(text =paste(
paste("\n2016 N",a(sum(marDF1[marDF1$year==2016,"pop"])),sep=": "),
paste("\n2018 N",a(sum(marDF1[marDF1$year==2018,"pop"])),sep=": "),
paste("\n2020 N",a(sum(marDF1[marDF1$year==2020,"pop"])),sep=": "),
sep=" | "))%>%
hc_add_theme(hc_theme_smpl()) %>%
hc_exporting(enabled = TRUE)%>%
hc_tooltip(enabled = TRUE) %>%
hc_colors(c(gg_color_hue(3)))%>%
hc_credits(enabled = TRUE, text = "NCCR ON THE MOVE",href = "https://nccr-onthemove.ch/")
rank
})
#### E5 It was a transfer within the same company #####
# Bar chart, all three waves: share/count of respondents whose Swiss job was
# an intra-company transfer (E5, yes/no).
output$E5_16 <-renderHighchart({
#dmms1820 <- dmms[2:3]
mar <- lapply(dmms, function(x){
x<-x[,c("E5",
input$magE,"year")]#input$magC,
colnames(x)<-c("E5",
"pop","year")
xlong<-x %>%gather(groupz, value,c(E5),
factor_key=FALSE)
# Numerator: "yes" answers (-7/-8/-9 presumed invalid; 2 = "no").
x1<-xlong %>%
group_by(year,groupz) %>%
filter(value!=-7)%>%
filter(value!=-9)%>%
filter(value!=-8)%>%
filter(value!=2)%>%
summarise(pop=round(sum(pop),0))
# Denominator: all valid answers.
x2<-xlong %>%
group_by(year,groupz) %>%
filter(value!=-7)%>%
filter(value!=-9)%>%
filter(value!=-8)%>%
summarise(pop=round(sum(pop),0))
x1$prop<-round(x1$pop/unique(x2$pop)*100,1)
x1
})
marDF<- as.data.frame(do.call("rbind", mar)) #CATS 1,2,4,5,6
marDF<-marDF %>% complete(groupz, nesting(year))
marDF$groupz<-as.factor(marDF$groupz)
#levels( marDF$groupz)
#marDF$groupz<-factor(marDF$groupz, levels=c("B27_1","B27_2","B27_3",
# "B27_4","B27_5","B27_6"))
levels(marDF$groupz)<-c("Yes")
marDF[is.na(marDF)] <- 0
marDF<-with(marDF, marDF[order(groupz),])
# Absolute-mode axis ceiling, unweighted vs weighted.
hc_yAxis<-ifelse(input$magE=="n_nw", 1250, 100000)
formatter<- ifelse(input$magE2=="Relative","function(){ return Math.abs(this.value) + '%'; }",
"function(){ return Math.abs(this.value); }")
# marDF<-as.data.frame(marDF)
#marDF<-ifelse(input$mag3=="Relative","prop","pop")
# Keep prop (relative) or pop (absolute) as the plotted column.
marDF<-if(input$magE2=="Relative"){
marDF[,c(1,2,4)]
} else {
marDF[,c(1,2,3)]}
colnames(marDF)<-c("groupz","year","prop")
marDF$year<-paste("Wave ",marDF$year, sep="")
# Valid-answer totals per wave for the subtitle N's.
mar1 <- lapply(dmms, function(x){
x<-x[,c("E5",
input$magE,"year")]#input$magC,
colnames(x)<-c("E5","pop","year")
xlong<-x %>%gather(groupz, value,c(E5),
factor_key=FALSE)
x1<-xlong %>%
group_by(year,groupz) %>%
filter(value!=-7)%>%
filter(value!=-9)%>%
filter(value!=-8)%>%
# filter(value!=2)%>%
summarise(pop=round(sum(pop),0))
x1
})
marDF1<- as.data.frame(do.call("rbind", mar1))
rank <-marDF %>%
hchart('bar', hcaes(x = 'groupz', y = 'prop', group = 'year')) %>%
hc_legend(enabled =TRUE ) %>%
hc_yAxis(title = list(text = '')) %>%
hc_xAxis(title = list(text = '')) %>%
hc_yAxis(labels = list(formatter = JS(formatter)),
min=ifelse(input$magE2=="Absolute",0,0),
max=ifelse(input$magE2=="Absolute",hc_yAxis,50))%>%
hc_plotOptions(series = list(dataLabels = list(enabled = TRUE))) %>%
#hc_add_series(name = "Value", data = data) %>%
hc_title(text = "It was a transfer within the same company",align = 'left') %>%
hc_subtitle(text =paste(
paste("\n2016 N",a(sum(marDF1[marDF1$year==2016,"pop"])),sep=": "),
paste("\n2018 N",a(sum(marDF1[marDF1$year==2018,"pop"])),sep=": "),
paste("\n2020 N",a(sum(marDF1[marDF1$year==2020,"pop"])),sep=": "),
sep=" | "))%>%
hc_add_theme(hc_theme_smpl()) %>%
hc_exporting(enabled = TRUE)%>%
hc_tooltip(enabled = TRUE) %>%
hc_colors(c(gg_color_hue(3)))%>%
hc_credits(enabled = TRUE, text = "NCCR ON THE MOVE",href = "https://nccr-onthemove.ch/")
rank
})
### E43 Under what type of work contract did you work when arriving in Switzerland?#####
# Bar chart, wave 2020 only: type of work contract on arrival (intra-company
# transferees).  NOTE(review): input$BE must be "E43" for the labels to apply.
output$E43_20 <-renderHighchart({
#mar <- lapply(dmms, function(x){
x<-dmms[["D20"]][,c(input$BE,input$magE,"year")]
# Recode NA to -7 so it is dropped by the filter below.
x[is.na(x)] <- -7
colnames(x)<-c("E43","pop","year")
x<-x %>%
group_by(year,E43) %>%
filter(E43!=-7)%>%
filter(E43!=-9)%>%
summarise(pop=round(sum(pop),0))%>%
mutate(prop=round((pop/sum(pop))*100,1))
x$E43<-as.factor(x$E43)
x$E43<-fct_explicit_na(x$E43)
x
#})
# Relabel answer codes in observed order (assumes all four are present).
levels(x$E43)<-c(
"I retained my work contract with my company",
"I have an additional assignment contract for the duration of my stay in Switzerland",
"I have an employment contract with the Swiss branch of my company for the time of my stay in Switzerland",
"I don't know")
# Plotted values: counts or shares depending on input$magE2.
data<- if(input$magE2=="Absolute"){
c(x$pop)}else{
c(x$prop)}
formatter<- ifelse(input$magE2=="Relative","function(){ return Math.abs(this.value) + '%'; }",
"function(){ return Math.abs(this.value); }")
hc_yAxis<-ifelse(input$magE=="n_nw", 300,30000)
rank <- highchart() %>%
hc_chart(type = 'bar',zoomType= 'xy') %>%
hc_legend(enabled = TRUE) %>%
hc_xAxis(categories = as.character(x$E43), title = list(text = '')) %>%
hc_yAxis(labels = list(formatter = JS(formatter)),
min=ifelse(input$magE2=="Absolute",0,0),
max=ifelse(input$magE2=="Absolute",hc_yAxis,60))%>%
hc_plotOptions(series = list(dataLabels = list(enabled = TRUE))) %>%
hc_add_series(name = "Wave 2020", data = data) %>%
hc_title(text = "Under what type of work contract did you work when arriving in Switzerland?",
align = 'left') %>%
hc_subtitle(text = paste("\nN",a(sum(c(x$pop))),sep=": "))%>%
hc_add_theme(hc_theme_smpl()) %>%
hc_exporting(enabled = TRUE)%>%
#hc_credits(enabled = TRUE, text = "NCCR ON THE MOVE",href = "https://nccr-onthemove.ch/")%>%
hc_tooltip(enabled = TRUE)%>%
hc_colors(c(gg_color_hue(3)[3]))%>%
hc_credits(enabled = TRUE, text = "NCCR ON THE MOVE",href = "https://nccr-onthemove.ch/")
rank
})
### E44 If you had been given the choice, which type of work contract would you have preferred when coming to Switzerland? ####
# Bar chart, wave 2020 only: preferred contract type, had the respondent been
# given the choice.  NOTE(review): input$BE must be "E44" for the labels to
# apply -- confirm in the UI definition.  input$magE selects the magnitude
# column; input$magE2 toggles Absolute counts vs Relative shares.
output$E44_20 <-renderHighchart({
#mar <- lapply(dmms, function(x){
x<-dmms[["D20"]][,c(input$BE,input$magE,"year")]
# Recode NA to -7 so it is dropped by the filter below.
x[is.na(x)] <- -7
colnames(x)<-c("E44","pop","year")
x<-x %>%
group_by(year,E44) %>%
filter(E44!=-7)%>%
filter(E44!=-9)%>%
summarise(pop=round(sum(pop),0))%>%
mutate(prop=round((pop/sum(pop))*100,1))
x$E44<-as.factor(x$E44)
x$E44<-fct_explicit_na(x$E44)
x
#})
# Relabel answer codes in observed order (assumes all three are present).
levels(x$E44)<-c(
"A contract with the company in the country I came from",
"A contract with the Swiss branch of my company",
"I don't know")
# Plotted values: counts or shares depending on input$magE2.
data<- if(input$magE2=="Absolute"){
c(x$pop)}else{
c(x$prop)}
formatter<- ifelse(input$magE2=="Relative","function(){ return Math.abs(this.value) + '%'; }",
"function(){ return Math.abs(this.value); }")
hc_yAxis<-ifelse(input$magE=="n_nw", 500,30000)
rank <- highchart() %>%
hc_chart(type = 'bar',zoomType= 'xy') %>%
hc_legend(enabled = TRUE) %>%
hc_xAxis(categories = as.character(x$E44), title = list(text = '')) %>%
hc_yAxis(labels = list(formatter = JS(formatter)),
min=ifelse(input$magE2=="Absolute",0,0),
max=ifelse(input$magE2=="Absolute",hc_yAxis,75))%>%
hc_plotOptions(series = list(dataLabels = list(enabled = TRUE))) %>%
hc_add_series(name = "Wave 2020", data = data) %>%
# Fixed: the title previously duplicated the E43 question ("Under what type
# of work contract did you work..."); it now states the E44 question.
hc_title(text = "If you had been given the choice, which type of work contract would you have preferred when coming to Switzerland?",
align = 'left') %>%
hc_subtitle(text = paste("\nN",a(sum(c(x$pop))),sep=": "))%>%
hc_add_theme(hc_theme_smpl()) %>%
hc_exporting(enabled = TRUE)%>%
#hc_credits(enabled = TRUE, text = "NCCR ON THE MOVE",href = "https://nccr-onthemove.ch/")%>%
hc_tooltip(enabled = TRUE)%>%
hc_colors(c(gg_color_hue(3)[3]))%>%
hc_credits(enabled = TRUE, text = "NCCR ON THE MOVE",href = "https://nccr-onthemove.ch/")
rank
})
  #### E6 Before moving to Switzerland in [A6], in which countries did you look for a job? #####
  # Bar chart for multi-response item E6 (dummies E6_1..E6_6), 2016 wave only (dmms[1]).
  # input$magE selects the weight column; input$magE2 toggles absolute counts vs percentages.
  output$E6_1_16 <-renderHighchart({
    dmms1820 <- dmms[1]
    mar <- lapply(dmms1820, function(x){
      # keep the six E6 dummies plus the chosen weight column and the wave year
      x<-x[,c("E6_1","E6_2","E6_3",
              "E6_4","E6_5","E6_6",
              input$magE,"year")]#iinput$magE,
      colnames(x)<-c("E6_1","E6_2","E6_3",
                     "E6_4","E6_5","E6_6",
                     "pop","year")
      # wide -> long: one row per (respondent, item)
      xlong<-x %>%gather(groupz, value,c(E6_1,E6_2,E6_3,
                                         E6_4,E6_5,E6_6),
                         factor_key=FALSE)
      # recode NA to the -7 "missing" sentinel so the filters below drop it
      xlong[is.na(xlong)] <- -7
      # x1: weighted count of "yes" answers per item (value != 2 drops the "no" code)
      x1<-xlong %>%
        group_by(year,groupz) %>%
        filter(value!=-7)%>%
        filter(value!=-9)%>%
        filter(value!=-8)%>%
        filter(value!=2)%>%
        summarise(pop=round(sum(pop),0))
      # x2: weighted count of all valid answers per item (denominator for prop)
      x2<-xlong %>%
        group_by(year,groupz) %>%
        filter(value!=-7)%>%
        filter(value!=-9)%>%
        filter(value!=-8)%>%
        summarise(pop=round(sum(pop),0))
      x2
      # NOTE(review): positional division assumes x1 and x2 keep identical
      # (year, groupz) rows in the same order — confirm filter(value != 2)
      # never empties a whole group.
      x1$prop<-round(x1$pop/(x2$pop)*100,1)
      x1
    })
    marDF<- as.data.frame(do.call("rbind", mar)) #CATS 1,2,4,5,6
    # fill in any (groupz, year) combinations missing from the data
    marDF<-marDF %>% complete(groupz, nesting(year))
    marDF$groupz<-as.factor(marDF$groupz)
    #levels( marDF$groupz)
    #marDF$groupz<-factor(marDF$groupz, levels=c("B27_1","B27_2","B27_3",
    #                                            "B27_4","B27_5","B27_6"))
    # relabel E6_1..E6_6 with the questionnaire answer texts (alphabetical level order)
    levels(marDF$groupz)<-c("Switzerland",
                            "Your home country of origin",
                            "Your country of birth",
                            "The last country you lived in prior to coming to Switzerland",
                            "Other countries",
                            "You were not looking for a job")
    marDF[is.na(marDF)] <- 0
    marDF<-with(marDF, marDF[order(groupz),])
    # axis ceiling depends on whether unweighted counts (n_nw) are plotted
    hc_yAxis<-ifelse(input$magE=="n_nw", 3000, 300000)
    formatter<- ifelse(input$magE2=="Relative","function(){ return Math.abs(this.value) + '%'; }",
                       "function(){ return Math.abs(this.value); }")
    # marDF<-as.data.frame(marDF)
    #marDF<-ifelse(input$mag3=="Relative","prop","pop")
    # keep counts (col 3) or percentages (col 4) depending on input$magE2
    marDF<-if(input$magE2=="Relative"){
      marDF[,c(1,2,4)]
    } else {
      marDF[,c(1,2,3)]}
    colnames(marDF)<-c("groupz","year","prop")
    marDF$year<-paste("Wave ",marDF$year, sep="")
    # sample size for the subtitle: all valid answers to the first E6 item
    mar1 <- lapply(dmms1820, function(x){
      x<-x[,c("E6_1",
              input$magE,"year")]#iinput$magE,
      colnames(x)<-c("E6_1",
                     "pop","year")
      xlong<-x %>%gather(groupz, value,c(E6_1),
                         factor_key=FALSE)
      xlong[is.na(xlong)] <- -7
      x1<-xlong %>%
        group_by(year,groupz) %>%
        filter(value!=-7)%>%
        filter(value!=-9)%>%
        filter(value!=-8)%>%
        # filter(value!=2)%>%
        summarise(pop=round(sum(pop),0))
      x1
    })
    marDF1<- as.data.frame(do.call("rbind", mar1))
    rank <-marDF %>%
      hchart('bar', hcaes(x = 'groupz', y = 'prop', group = 'year')) %>%
      hc_legend(enabled =TRUE ) %>%
      hc_yAxis(title = list(text = '')) %>%
      hc_xAxis(title = list(text = '')) %>%
      hc_yAxis(labels = list(formatter = JS(formatter)),
               min=ifelse(input$magE2=="Absolute",0,0),
               max=ifelse(input$magE2=="Absolute",hc_yAxis,75))%>%
      hc_plotOptions(series = list(dataLabels = list(enabled = TRUE))) %>%
      #hc_add_series(name = "Value", data = data) %>%
      hc_title(text = "Before moving to Switzerland, in which countries did you look for a job?",align = 'left') %>%
      hc_subtitle(text =paste(
        paste("\n2016 N",a(sum(marDF1[marDF1$year==2016,"pop"])),sep=": "),
        #paste("\n2018 N",a(sum(marDF1[marDF1$year==2018,"pop"])),sep=": "),
        #paste("\n2020 N",a(sum(marDF1[marDF1$year==2020,"pop"])),sep=": "),
        sep=" | "))%>%
      hc_add_theme(hc_theme_smpl()) %>%
      hc_exporting(enabled = TRUE)%>%
      hc_tooltip(enabled = TRUE) %>%
      hc_colors(c(gg_color_hue(3)))%>%
      hc_credits(enabled = TRUE, text = "NCCR ON THE MOVE",href = "https://nccr-onthemove.ch/")
    rank
  })
#### E7 How did you go about looking for a job in Switzerland? ####
output$E7_1_16 <-renderHighchart({
dmms1820 <- dmms[1]
mar <- lapply(dmms1820, function(x){
x<-x[,c("E7_1","E7_2","E7_3",
"E7_4","E7_5","E7_6",
"E7_7","E7_8","E7_9","E7_10",
input$magE,"year")]#iinput$magE,
colnames(x)<-c("E7_1","E7_2","E7_3",
"E7_4","E7_5","E7_6",
"E7_7","E7_8","E7_9","E7_10",
"pop","year")
xlong<-x %>%gather(groupz, value,c(E7_1,E7_2,E7_3,
E7_4,E7_5,E7_6,
E7_7,E7_8,E7_9,E7_10),
factor_key=FALSE)
xlong[is.na(xlong)] <- -7
x1<-xlong %>%
group_by(year,groupz) %>%
filter(value!=-7)%>%
filter(value!=-9)%>%
filter(value!=-8)%>%
filter(value!=2)%>%
summarise(pop=round(sum(pop),0))
x2<-xlong %>%
group_by(year,groupz) %>%
filter(value!=-7)%>%
filter(value!=-9)%>%
filter(value!=-8)%>%
summarise(pop=round(sum(pop),0))
x2
x1$prop<-round(x1$pop/(x2$pop)*100,1)
x1
})
marDF<- as.data.frame(do.call("rbind", mar)) #CATS 1,2,4,5,6
marDF<-marDF %>% complete(groupz, nesting(year))
marDF$groupz<-as.factor(marDF$groupz)
#levels( marDF$groupz)
#marDF$groupz<-factor(marDF$groupz, levels=c("B27_1","B27_2","B27_3",
# "B27_4","B27_5","B27_6"))
levels(marDF$groupz)<-c("Contact potential employer(s) directly",
"Talk to friends or relatives",
"Place or answer newspaper, internet or other media ad(s)",
"Contact an employment agency",
"Ask for referrals from another employer",
"Contact a school, community college, university",
"Contact government agencies",
"Contact ethnic/cultural group or association",
"Participate in networking events",
"Other")
marDF[is.na(marDF)] <- 0
marDF<-with(marDF, marDF[order(groupz),])
hc_yAxis<-ifelse(input$magE=="n_nw", 1000, 100000)
formatter<- ifelse(input$magE2=="Relative","function(){ return Math.abs(this.value) + '%'; }",
"function(){ return Math.abs(this.value); }")
# marDF<-as.data.frame(marDF)
#marDF<-ifelse(input$mag3=="Relative","prop","pop")
marDF<-if(input$magE2=="Relative"){
marDF[,c(1,2,4)]
} else {
marDF[,c(1,2,3)]}
colnames(marDF)<-c("groupz","year","prop")
marDF$year<-paste("Wave ",marDF$year, sep="")
mar1 <- lapply(dmms1820, function(x){
x<-x[,c("E6_1",
input$magE,"year")]#iinput$magE,
colnames(x)<-c("E6_1","pop","year")
xlong<-x %>%gather(groupz, value,c(E6_1),
factor_key=FALSE)
xlong[is.na(xlong)] <- -7
x1<-xlong %>%
group_by(year,groupz) %>%
filter(value!=-7)%>%
filter(value!=-9)%>%
filter(value!=-8)%>%
filter(value!=2)%>%
summarise(pop=round(sum(pop),0))
x1
})
marDF1<- as.data.frame(do.call("rbind", mar1))
rank <-marDF %>%
hchart('bar', hcaes(x = 'groupz', y = 'prop', group = 'year')) %>%
hc_legend(enabled =TRUE ) %>%
hc_yAxis(title = list(text = '')) %>%
hc_xAxis(title = list(text = '')) %>%
hc_yAxis(labels = list(formatter = JS(formatter)),
min=ifelse(input$magE2=="Absolute",0,0),
max=ifelse(input$magE2=="Absolute",hc_yAxis,75))%>%
hc_plotOptions(series = list(dataLabels = list(enabled = TRUE))) %>%
#hc_add_series(name = "Value", data = data) %>%
hc_title(text = "How did you go about looking for a job in Switzerland?",align = 'left') %>%
hc_subtitle(text =paste(
# paste("\n2016 N",a(sum(marDF1[marDF1$year==2016,"pop"])),sep=": "),
# paste("\n2018 N",a(sum(marDF1[marDF1$year==2018,"pop"])),sep=": "),
paste("\n2020 N",a(sum(marDF1[marDF1$year==2020,"pop"])),sep=": "),
sep=" | "))%>%
hc_add_theme(hc_theme_smpl()) %>%
hc_exporting(enabled = TRUE)%>%
hc_tooltip(enabled = TRUE) %>%
hc_colors(c(gg_color_hue(3)))%>%
hc_credits(enabled = TRUE, text = "NCCR ON THE MOVE",href = "https://nccr-onthemove.ch/")
rank
})
  #### E8 Have you had any of the following problems or difficulties when looking for a job in Switzerland? ####
  # Bar chart for multi-response item E8 (dummies E8_1..E8_9), 2016 wave only (dmms[1]).
  # input$magE selects the weight column; input$magE2 toggles absolute counts vs percentages.
  output$E8_1_16 <-renderHighchart({
    dmms1820 <- dmms[1]
    mar <- lapply(dmms1820, function(x){
      # keep the nine E8 dummies plus the chosen weight column and the wave year
      x<-x[,c("E8_1","E8_2","E8_3",
              "E8_4","E8_5","E8_6",
              "E8_7","E8_8","E8_9",
              input$magE,"year")]#iinput$magE,
      colnames(x)<-c("E8_1","E8_2","E8_3",
                     "E8_4","E8_5","E8_6",
                     "E8_7","E8_8","E8_9",
                     "pop","year")
      # wide -> long: one row per (respondent, item)
      xlong<-x %>%gather(groupz, value,c(E8_1,E8_2,E8_3,
                                         E8_4,E8_5,E8_6,
                                         E8_7,E8_8,E8_9),
                         factor_key=FALSE)
      # recode NA to the -7 "missing" sentinel so the filters below drop it
      xlong[is.na(xlong)] <- -7
      # x1: weighted count of "yes" answers per item (value != 2 drops the "no" code)
      x1<-xlong %>%
        group_by(year,groupz) %>%
        filter(value!=-7)%>%
        filter(value!=-9)%>%
        filter(value!=-8)%>%
        filter(value!=2)%>%
        summarise(pop=round(sum(pop),0))
      # x2: weighted count of all valid answers per item (denominator for prop)
      x2<-xlong %>%
        group_by(year,groupz) %>%
        filter(value!=-7)%>%
        filter(value!=-9)%>%
        filter(value!=-8)%>%
        summarise(pop=round(sum(pop),0))
      x2
      # NOTE(review): positional division assumes x1 and x2 keep identical
      # (year, groupz) rows in the same order — confirm filter(value != 2)
      # never empties a whole group.
      x1$prop<-round(x1$pop/(x2$pop)*100,1)
      x1
    })
    marDF<- as.data.frame(do.call("rbind", mar)) #CATS 1,2,4,5,6
    marDF<-marDF %>% complete(groupz, nesting(year))
    marDF$groupz<-as.factor(marDF$groupz)
    # relabel E8_1..E8_9 with the questionnaire answer texts (alphabetical level order)
    levels(marDF$groupz)<-c("Language problems",
                            "Qualifications and job experience from outside Switzerland not accepted",
                            "Problems with the administration or need for a permit in order to work",
                            "No family or friends who could help",
                            "No connections in the job market",
                            "Employers only offered you unsatisfactory job contracts",
                            "Personal or financial constraints (time, costs, family, other responsibilities)",
                            "Discrimination",
                            "Other problems or difficulties")
    marDF[is.na(marDF)] <- 0
    marDF<-with(marDF, marDF[order(groupz),])
    # axis ceiling depends on whether unweighted counts (n_nw) are plotted
    hc_yAxis<-ifelse(input$magE=="n_nw", 750, 50000)
    formatter<- ifelse(input$magE2=="Relative","function(){ return Math.abs(this.value) + '%'; }",
                       "function(){ return Math.abs(this.value); }")
    # marDF<-as.data.frame(marDF)
    #marDF<-ifelse(input$mag3=="Relative","prop","pop")
    # keep counts (col 3) or percentages (col 4) depending on input$magE2
    marDF<-if(input$magE2=="Relative"){
      marDF[,c(1,2,4)]
    } else {
      marDF[,c(1,2,3)]}
    colnames(marDF)<-c("groupz","year","prop")
    marDF$year<-paste("Wave ",marDF$year, sep="")
    rank <-marDF %>%
      hchart('bar', hcaes(x = 'groupz', y = 'prop', group = 'year')) %>%
      hc_legend(enabled =TRUE ) %>%
      hc_yAxis(title = list(text = '')) %>%
      hc_xAxis(title = list(text = '')) %>%
      hc_yAxis(labels = list(formatter = JS(formatter)),
               min=ifelse(input$magE2=="Absolute",0,0),
               max=ifelse(input$magE2=="Absolute",hc_yAxis,50))%>%
      hc_plotOptions(series = list(dataLabels = list(enabled = TRUE))) %>%
      #hc_add_series(name = "Value", data = data) %>%
      hc_title(text = "Have you had any of the following problems or difficulties when looking for a job in Switzerland?",align = 'left') %>%
      #hc_subtitle(text = paste("\nN",a(sum(c(tem[["D16"]]$pop))),sep=": "))%>%
      hc_add_theme(hc_theme_smpl()) %>%
      hc_exporting(enabled = TRUE)%>%
      hc_tooltip(enabled = TRUE) %>%
      hc_colors(c(gg_color_hue(3)))%>%
      hc_credits(enabled = TRUE, text = "NCCR ON THE MOVE",href = "https://nccr-onthemove.ch/")
    rank
  })
  #### e8 tables ####
  # HTML table of weighted valid-answer counts (N) per E8 item, 2016 wave only.
  # Unlike the chart above, this keeps the "no" answers (filter(value != 2)
  # is commented out), so pop here is the number of valid responses per item.
  output$TE8_16<- renderText({
    dmms1820 <- dmms[1]
    mar <- lapply(dmms1820, function(x){
      # keep the nine E8 dummies plus the chosen weight column and the wave year
      x<-x[,c("E8_1","E8_2","E8_3",
              "E8_4","E8_5","E8_6",
              "E8_7","E8_8","E8_9",
              input$magE,"year")]#iinput$magE,
      colnames(x)<-c("E8_1","E8_2","E8_3",
                     "E8_4","E8_5","E8_6",
                     "E8_7","E8_8","E8_9",
                     "pop","year")
      xlong<-x %>%gather(groupz, value,c(E8_1,E8_2,E8_3,
                                         E8_4,E8_5,E8_6,
                                         E8_7,E8_8,E8_9),
                         factor_key=FALSE)
      # recode NA to the -7 "missing" sentinel so the filters below drop it
      xlong[is.na(xlong)] <- -7
      x1<-xlong %>%
        group_by(year,groupz) %>%
        filter(value!=-7)%>%
        filter(value!=-9)%>%
        filter(value!=-8)%>%
        #filter(value!=2)%>%
        summarise(pop=round(sum(pop),0))
      x1
    })
    marDF<- as.data.frame(do.call("rbind", mar)) #CATS 1,2,4,5,6
    marDF<-marDF %>% complete(groupz, nesting(year))
    marDF$groupz<-as.factor(marDF$groupz)
    # relabel E8_1..E8_9 with the questionnaire answer texts (alphabetical level order)
    levels(marDF$groupz)<-c("Language problems",
                            "Qualifications and job experience from outside Switzerland not accepted",
                            "Problems with the administration or need for a permit in order to work",
                            "No family or friends who could help",
                            "No connections in the job market",
                            "Employers only offered you unsatisfactory job contracts",
                            "Personal or financial constraints (time, costs, family, other responsibilities)",
                            "Discrimination",
                            "Other problems or difficulties")
    marDF[is.na(marDF)] <- 0
    marDF<-with(marDF, marDF[order(groupz),])
    # NOTE(review): the first column holds E8 problem categories, not areas —
    # the "Area" header below looks like a copy-paste leftover; confirm.
    kable(marDF %>%
            # arrange(desc(pop))%>%
            #ungroup %>%
            select(groupz,year,pop)%>%
            rename(Area="groupz", Year = "year", N = "pop"))%>%
      #)%>%
      kable_styling(
        font_size = 15,
        bootstrap_options = c("striped", "hover", "condensed"))
  })
#### E9 Sometimes people receive help when looking for job. From whom did you receive assistance? Was it from ####
output$E9_1_16 <-renderHighchart({
dmms1820 <- dmms[1]
mar <- lapply(dmms1820, function(x){
x<-x[,c("E9_1","E9_2","E9_3",
"E9_4","E9_5","E9_6",
"E9_7","E9_8",
input$magE,"year")]#iinput$magE,
colnames(x)<-c("E9_1","E9_2","E9_3",
"E9_4","E9_5","E9_6",
"E9_7","E9_8",
"pop","year")
xlong<-x %>%gather(groupz, value,c(E9_1,E9_2,E9_3,
E9_4,E9_5,E9_6,
E9_7,E9_8),
factor_key=FALSE)
xlong[is.na(xlong)] <- -7
x1<-xlong %>%
group_by(year,groupz) %>%
filter(value!=-7)%>%
filter(value!=-9)%>%
filter(value!=-8)%>%
filter(value!=2)%>%
summarise(pop=round(sum(pop),0))
x2<-xlong %>%
group_by(year,groupz) %>%
filter(value!=-7)%>%
filter(value!=-9)%>%
filter(value!=-8)%>%
summarise(pop=round(sum(pop),0))
x2
x1$prop<-round(x1$pop/(x2$pop)*100,1)
x1
})
marDF<- as.data.frame(do.call("rbind", mar)) #CATS 1,2,4,5,6
marDF<-marDF %>% complete(groupz, nesting(year))
marDF$groupz<-as.factor(marDF$groupz)
#levels( marDF$groupz)
#marDF$groupz<-factor(marDF$groupz, levels=c("B27_1","B27_2","B27_3",
# "B27_4","B27_5","B27_6"))
levels(marDF$groupz)<-c("Friends or relatives",
"Former business relations/colleagues",
"Your spouse's/partner's employer",
"A private institution",
"A public institution",
"Users of an online social media",
"Other persons or institutions",
"I did not receive any support")
marDF[is.na(marDF)] <- 0
marDF<-with(marDF, marDF[order(groupz),])
hc_yAxis<-ifelse(input$magE=="n_nw", 750, 75000)
formatter<- ifelse(input$magE2=="Relative","function(){ return Math.abs(this.value) + '%'; }",
"function(){ return Math.abs(this.value); }")
# marDF<-as.data.frame(marDF)
#marDF<-ifelse(input$mag3=="Relative","prop","pop")
marDF<-if(input$magE2=="Relative"){
marDF[,c(1,2,4)]
} else {
marDF[,c(1,2,3)]}
colnames(marDF)<-c("groupz","year","prop")
marDF$year<-paste("Wave ",marDF$year, sep="")
mar1 <- lapply(dmms1820, function(x){
x<-x[,c("E9_1",
input$magE,"year")]#iinput$magE,
colnames(x)<-c("E9_1",
"pop","year")
xlong<-x %>%gather(groupz, value,c(E9_1),
factor_key=FALSE)
xlong[is.na(xlong)] <- -7
x1<-xlong %>%
group_by(year,groupz) %>%
filter(value!=-7)%>%
filter(value!=-9)%>%
filter(value!=-8)%>%
# filter(value!=2)%>%
summarise(pop=round(sum(pop),0))
x1
})
marDF1<- as.data.frame(do.call("rbind", mar1))
rank <-marDF %>%
hchart('bar', hcaes(x = 'groupz', y = 'prop', group = 'year')) %>%
hc_legend(enabled =TRUE ) %>%
hc_yAxis(title = list(text = '')) %>%
hc_xAxis(title = list(text = '')) %>%
hc_yAxis(labels = list(formatter = JS(formatter)),
min=ifelse(input$magE2=="Absolute",0,0),
max=ifelse(input$magE2=="Absolute",hc_yAxis,50))%>%
hc_plotOptions(series = list(dataLabels = list(enabled = TRUE))) %>%
#hc_add_series(name = "Value", data = data) %>%
hc_title(text = "From whom did you receive assistance?",align = 'left') %>%
hc_subtitle(text =paste(
# paste("\n2016 N",a(sum(marDF1[marDF1$year==2016,"pop"])),sep=": "),
# paste("\n2018 N",a(sum(marDF1[marDF1$year==2018,"pop"])),sep=": "),
paste("\n2020 N",a(sum(marDF1[marDF1$year==2020,"pop"])),sep=": "),
sep=" | "))%>%
hc_add_theme(hc_theme_smpl()) %>%
hc_exporting(enabled = TRUE)%>%
hc_tooltip(enabled = TRUE) %>%
hc_colors(c(gg_color_hue(3)))%>%
hc_credits(enabled = TRUE, text = "NCCR ON THE MOVE",href = "https://nccr-onthemove.ch/")
rank
})
  #### E10 Once you arrived in Switzerland in [A6], how long did you spend looking for a job before finding one? ####
  # Bar chart for single-choice item E10 (job-search duration), 2016 wave only.
  # input$BE must name the E10 column; input$magE selects the weight column.
  output$E10_16 <-renderHighchart({
    #mar <- lapply(dmms, function(x){
    x<-dmms[["D16"]][,c(input$BE,input$magE,"year")]
    # recode NA to the -7 "missing" sentinel so the filter below drops it
    x[is.na(x)] <- -7
    colnames(x)<-c("E10","pop","year")
    # NOTE(review): unlike sibling blocks, the -8 code is not filtered here —
    # confirm whether E10 can carry a -8 value.
    x<-x %>%
      group_by(year,E10) %>%
      filter(E10!=-7)%>%
      filter(E10!=-9)%>%
      summarise(pop=round(sum(pop),0))%>%
      mutate(prop=round((pop/sum(pop))*100,1))
    x$E10<-as.factor(x$E10)
    x$E10<-fct_explicit_na(x$E10)
    x
    #})
    # relabel the numeric answer codes with the questionnaire categories
    levels(x$E10)<-c(
      "Less than one month",
      "Up to 6 months",
      "6 to 12 months",
      "More than 12 months")
    # plot counts or percentages depending on input$magE2
    data<- if(input$magE2=="Absolute"){
      c(x$pop)}else{
        c(x$prop)}
    formatter<- ifelse(input$magE2=="Relative","function(){ return Math.abs(this.value) + '%'; }",
                       "function(){ return Math.abs(this.value); }")
    # axis ceiling depends on whether unweighted counts (n_nw) are plotted
    hc_yAxis<-ifelse(input$magE=="n_nw", 300,30000)
    rank <- highchart() %>%
      hc_chart(type = 'bar',zoomType= 'xy') %>%
      hc_legend(enabled = TRUE) %>%
      hc_xAxis(categories = as.character(x$E10), title = list(text = '')) %>%
      hc_yAxis(labels = list(formatter = JS(formatter)),
               min=ifelse(input$magE2=="Absolute",0,0),
               max=ifelse(input$magE2=="Absolute",hc_yAxis,60))%>%
      hc_plotOptions(series = list(dataLabels = list(enabled = TRUE))) %>%
      hc_add_series(name = "Wave 2016", data = data) %>%
      hc_title(text = "How long did you spend looking for a job before finding one",
               align = 'left') %>%
      hc_subtitle(text = paste("\nN",a(sum(c(x$pop))),sep=": "))%>%
      hc_add_theme(hc_theme_smpl()) %>%
      hc_exporting(enabled = TRUE)%>%
      #hc_credits(enabled = TRUE, text = "NCCR ON THE MOVE",href = "https://nccr-onthemove.ch/")%>%
      hc_tooltip(enabled = TRUE)%>%
      hc_colors(c(gg_color_hue(3)))%>%
      hc_credits(enabled = TRUE, text = "NCCR ON THE MOVE",href = "https://nccr-onthemove.ch/")
    rank
  })
#### e11 What was your labor market situation once you arrived in Switzerland in [A6]? Were you… #####
output$E11_1_16 <-renderHighchart({
mar <- lapply(dmms, function(x){
x<-x[,c("E11_1","E11_2","E11_3","E11_4",
"E11_5","E11_6","E11_7","E11_8","E11_9",
input$magE,"year")]#input$magC,
colnames(x)<-c("E11_1","E11_2","E11_3","E11_4",
"E11_5","E11_6","E11_7","E11_8","E11_9","pop","year")
xlong<-x %>%gather(groupz, value,c(E11_1,E11_2,E11_3,E11_4,
E11_5,E11_6,E11_7,E11_8,E11_9),
factor_key=FALSE)
x1<-xlong %>%
group_by(year,groupz) %>%
filter(value!=-7)%>%
filter(value!=-9)%>%
filter(value!=-8)%>%
filter(value!=2)%>%
summarise(pop=round(sum(pop),0))
x2<-xlong %>%
group_by(year,groupz) %>%
filter(value!=-7)%>%
filter(value!=-9)%>%
filter(value!=-8)%>%
summarise(pop=round(sum(pop),0))
x1$prop<-round(x1$pop/unique(x2$pop)*100,1)
x1
})
marDF<- as.data.frame(do.call("rbind", mar)) #CATS 1,2,4,5,6
marDF<-marDF %>% complete(groupz, nesting(year))
marDF$groupz<-as.factor(marDF$groupz)
#levels( marDF$groupz)
#marDF$groupz<-factor(marDF$groupz, levels=c("B27_1","B27_2","B27_3",
# "B27_4","B27_5","B27_6"))
levels(marDF$groupz)<-c("In full-time employment",
"In part-time employment",
"Working in more than one part-time job",
"Seeking a job",
"Undergoing training",
"Looking after home or family",
"Disabled or partially disabled",
"Retired",
"In another non-employed situation")
marDF[is.na(marDF)] <- 0
marDF<-with(marDF, marDF[order(groupz),])
hc_yAxis<-ifelse(input$magE=="n_nw", 5500,550000)
formatter<- ifelse(input$magE2=="Relative","function(){ return Math.abs(this.value) + '%'; }",
"function(){ return Math.abs(this.value); }")
# marDF<-as.data.frame(marDF)
#marDF<-ifelse(input$mag3=="Relative","prop","pop")
marDF<-if(input$magE2=="Relative"){
marDF[,c(1,2,4)]
} else {
marDF[,c(1,2,3)]}
colnames(marDF)<-c("groupz","year","prop")
marDF$year<-paste("Wave ",marDF$year, sep="")
mar1 <- lapply(dmms, function(x){
x<-x[,c("E11_1",input$magE,"year")]#input$magC,
colnames(x)<-c("E11_1","pop","year")
xlong<-x %>%gather(groupz, value,c(E11_1),
factor_key=FALSE)
x1<-xlong %>%
group_by(year,groupz) %>%
filter(value!=-7)%>%
filter(value!=-9)%>%
filter(value!=-8)%>%
#filter(value!=2)%>%
summarise(pop=round(sum(pop),0))
x1
})
marDFtotales<- as.data.frame(do.call("rbind", mar1))
rank <-marDF %>%
hchart('bar', hcaes(x = 'groupz', y = 'prop', group = 'year')) %>%
hc_legend(enabled =TRUE ) %>%
hc_yAxis(title = list(text = '')) %>%
hc_xAxis(title = list(text = '')) %>%
hc_yAxis(labels = list(formatter = JS(formatter)),
min=ifelse(input$magE2=="Absolute",0,0),
max=ifelse(input$magE2=="Absolute",hc_yAxis,75))%>%
hc_plotOptions(series = list(dataLabels = list(enabled = TRUE))) %>%
#hc_add_series(name = "Value", data = data) %>%
hc_title(text = "Labour market situation once you arrived to Switzerland",align = 'left') %>%
hc_subtitle(text =paste(
paste("\n2016 N",a(sum(marDFtotales[marDFtotales$year==2016,"pop"])),sep=": "),
paste("\n2018 N",a(sum(marDFtotales[marDFtotales$year==2018,"pop"])),sep=": "),
paste("\n2020 N",a(sum(marDFtotales[marDFtotales$year==2020,"pop"])),sep=": "),
sep=" | "))%>%
hc_add_theme(hc_theme_smpl()) %>%
hc_exporting(enabled = TRUE)%>%
hc_tooltip(enabled = TRUE) %>%
hc_colors(c(gg_color_hue(3)))%>% hc_credits(enabled = TRUE, text = "NCCR ON THE MOVE",href = "https://nccr-onthemove.ch/")
rank
})
#### E12 What was your occupational status once you arrived in Switzerland in #####
output$E12_16 <-renderHighchart({
mar <- lapply(dmms, function(x){
x<-x[,c(input$BE,input$magE,"year")]
#x<-x[,c("E2","weight","year")]
colnames(x)<-c("E12", "pop","year")
x<-x %>%
filter(E12!=-7)%>%
filter(E12!=-9)%>%
filter(E12!=-8)%>%
group_by(year,E12) %>%
#replace_with_na(replace = list(A6 = -9))%>%
summarise(pop=round(sum(pop),0))%>%
mutate(prop=round(pop/sum(pop)*100,1))%>%
arrange((E12))
x$E12<-as.factor(x$E12)
x$E12<-fct_explicit_na(x$E12)
x
})
levels(mar[["D16"]]$E12)<-c("Self-employed",
"A company owner",
"A relative employed in a family business",
"Employed as director or board member and/or with managerial",
"Employed without managerial responsibility",
"Employed in a protected workshop (except support staff)",
"An apprentice",
"A PhD student")
levels(mar[["D18"]]$E12)<-c("Self-employed",
"A company owner",
"A relative employed in a family business",
"Employed as director or board member and/or with managerial",
"Employed without managerial responsibility",
"Employed in a protected workshop (except support staff)",
"An apprentice",
"A PhD student")
levels(mar[["D20"]]$E12)<-c("Self-employed",
"A company owner",
"A relative employed in a family business",
"Employed as director or board member and/or with managerial",
"Employed without managerial responsibility",
"Employed in a protected workshop (except support staff)",
"An apprentice",
"A PhD student")
marDF<- as.data.frame(do.call("rbind", mar))
marDF$E12<-as.factor(marDF$E12)
data<- if(input$magE2=="Absolute"){
marDF[,c("year","E12", "pop")]}else{
marDF[,c("year","E12", "prop")]}
hc_yAxis<-ifelse(input$magE=="n_nw", 5000,500000)
formatter<- ifelse(input$magE2=="Relative","function(){ return Math.abs(this.value) + '%'; }",
"function(){ return Math.abs(this.value); }")
rank <- highchart() %>%
#hc_chart(type = "bar") %>%
hc_xAxis(categories = levels(data$E12), title = list(text = '')) %>%
hc_add_series(name= "Wave 2016",data = data[data$year==2016,3])%>%
hc_add_series(name= "Wave 2018",data = data[data$year==2018,3])%>%
hc_add_series(name= "Wave 2020",data = data[data$year==2020,3])%>%
hc_chart(type = 'bar',zoomType= 'xy') %>%
hc_legend(enabled =TRUE ) %>%
hc_yAxis(labels = list(formatter = JS(formatter)),
min=ifelse(input$magE2=="Absolute",0,0),
max=ifelse(input$magE2=="Absolute",hc_yAxis,70))%>%
hc_plotOptions(series = list(dataLabels = list(enabled = TRUE))) %>%
#hc_add_series(name = "Value", data = data) %>%
hc_title(text = "What was your occupational status once you arrived in Switzerland?",
align = 'left') %>%
hc_subtitle(text =paste(
paste("\n2016 N",a(sum(marDF[marDF$year==2016,"pop"])),sep=": "),
paste("\n2018 N",a(sum(marDF[marDF$year==2018,"pop"])),sep=": "),
paste("\n2020 N",a(sum(marDF[marDF$year==2020,"pop"])),sep=": "),
sep=" | "))%>%
hc_add_theme(hc_theme_smpl()) %>%
hc_exporting(enabled = TRUE)%>%
hc_tooltip(enabled = TRUE)%>%
hc_colors(c(gg_color_hue(3)))%>%
hc_credits(enabled = TRUE, text = "NCCR ON THE MOVE",href = "https://nccr-onthemove.ch/")
rank
})
  #### E13 Once you arrived in Switzerland in [A6], how long did you spend looking for a job before finding one? ####
  # Bar chart for single-choice item E13 (sector of business at arrival), 2016 wave only.
  # NOTE(review): the section header above looks stale — the chart title and the
  # labels below show this is the "sector of business" question, not duration.
  # input$BE must name the E13 column; input$magE selects the weight column.
  output$E13_16 <-renderHighchart({
    #mar <- lapply(dmms, function(x){
    x<-dmms[["D16"]][,c(input$BE,input$magE,"year")]
    # recode NA to the -7 "missing" sentinel so the filter below drops it
    x[is.na(x)] <- -7
    colnames(x)<-c("E13","pop","year")
    x<-x %>%
      group_by(year,E13) %>%
      filter(E13!=-7)%>%
      filter(E13!=-9)%>%
      summarise(pop=round(sum(pop),0))%>%
      mutate(prop=round((pop/sum(pop))*100,1))
    x$E13<-as.factor(x$E13)
    x$E13<-fct_explicit_na(x$E13)
    x
    #})
    # relabel the numeric answer codes with the questionnaire sector categories
    levels(x$E13)<-c("Agriculture, forestry and fishing",
                     "Manufacturing, mining and quarrying and other industry ",
                     "Construction",
                     "Wholesale and retail trade, transportation, hotels and restaurants",
                     "Information and communication",
                     "Financial and insurance activities",
                     "Real estate activities",
                     "Professional, scientific, technical, administration and support service activities",
                     "Public administration, defense, education, human health and social action",
                     "Other activities and services")
    # plot counts or percentages depending on input$magE2
    data<- if(input$magE2=="Absolute"){
      c(x$pop)}else{
        c(x$prop)}
    formatter<- ifelse(input$magE2=="Relative","function(){ return Math.abs(this.value) + '%'; }",
                       "function(){ return Math.abs(this.value); }")
    # axis ceiling depends on whether unweighted counts (n_nw) are plotted
    hc_yAxis<-ifelse(input$magE=="n_nw", 800,80000)
    rank <- highchart() %>%
      hc_chart(type = 'bar',zoomType= 'xy') %>%
      hc_legend(enabled = TRUE) %>%
      hc_xAxis(categories = as.character(x$E13), title = list(text = '')) %>%
      hc_yAxis(labels = list(formatter = JS(formatter)),
               min=ifelse(input$magE2=="Absolute",0,0),
               max=ifelse(input$magE2=="Absolute",hc_yAxis,50))%>%
      hc_plotOptions(series = list(dataLabels = list(enabled = TRUE))) %>%
      hc_add_series(name = "Wave 2016", data = data) %>%
      hc_title(text = "Once you arrived in Switzerland, what sector of business, or industry was your company or institution active in for the most part?",
               align = 'left') %>%
      hc_subtitle(text = paste("\nN",a(sum(c(x$pop))),sep=": "))%>%
      hc_add_theme(hc_theme_smpl()) %>%
      hc_exporting(enabled = TRUE)%>%
      #hc_credits(enabled = TRUE, text = "NCCR ON THE MOVE",href = "https://nccr-onthemove.ch/")%>%
      hc_tooltip(enabled = TRUE)%>%
      hc_colors(c(gg_color_hue(3)))%>%
      hc_credits(enabled = TRUE, text = "NCCR ON THE MOVE",href = "https://nccr-onthemove.ch/")
    rank
  })
#### E14 In total, how many years have you been in paid work in your whole life? ####
output$E14_16 <-renderHighchart({
#nco <- lapply(dmms, function(x){
x<-dmms[["D16"]][,c(input$BE,input$magE,"year")]
x[is.na(x)] <- -7
x<-x%>%
filter(E14!=-7)%>%
filter(E14!=0)
x$B5CAT<- with(x, ifelse(E14<5,"Less than five year",
ifelse((E14>=5 & E14<10),"Between 5 and 9 years",
ifelse((E14>=10 & E14<20),"Between 10 and 19 years",
ifelse((E14>=20 & E14<30),"Between 19 and 29 years",
ifelse(E14 >=30,"30 or more years",0))))))
x$B5CAT<-factor(x$B5CAT, levels=c("Less than five year","Between 5 and 9 years",
"Between 10 and 19 years","Between 19 and 29 years",
"30 or more years"))
colnames(x)<-c("B5", "pop", "year","B5CAT")
x<-x %>%
group_by(year,B5CAT) %>%
summarise(pop=round(sum(pop),0))%>%
mutate(prop=round(pop/sum(pop)*100,1))%>%
arrange(desc(B5CAT))
x
#})
data<- if(input$magE2=="Absolute"){
c(x$pop)}else{
c(x$prop)}
formatter<- ifelse(input$magE2=="Relative","function(){ return Math.abs(this.value) + '%'; }",
"function(){ return Math.abs(this.value); }")
hc_yAxis<-ifelse(input$magE=="n_nw", 2500,200000)
rank <- highchart() %>%
hc_chart(type = 'bar',zoomType= 'xy') %>%
hc_legend(enabled = TRUE) %>%
hc_xAxis(categories = as.character(x$B5CAT), title = list(text = '')) %>%
hc_yAxis(labels = list(formatter = JS(formatter)),
min=ifelse(input$magE2=="Absolute",0,0),
max=ifelse(input$magE2=="Absolute",hc_yAxis,50))%>%
hc_plotOptions(series = list(dataLabels = list(enabled = TRUE))) %>%
hc_add_series(name = "Wave 2016", data = data) %>%
hc_title(text = "Number of years in paid work",
align = 'left') %>%
hc_subtitle(text = paste("\nN",a(sum(c(x$pop))),sep=": "))%>%
hc_add_theme(hc_theme_smpl()) %>%
hc_exporting(enabled = TRUE)%>%
hc_tooltip(enabled = TRUE)%>%
hc_colors(c(gg_color_hue(3)))%>%
hc_credits(enabled = TRUE, text = "NCCR ON THE MOVE",href = "https://nccr-onthemove.ch/")
rank
})
#### E15 Before moving to Switzerland in [A6], did you ever work abroad (Abroad means in another country than Switzerland)? ####
output$E15_16 <-renderHighchart({
#dmms1820 <- dmms[2:3]
mar <- lapply(dmms, function(x){
x<-x[,c("E15",
input$magE,"year")]#input$magC,
colnames(x)<-c("E15","pop","year")
xlong<-x %>%gather(groupz, value,c(E15),
factor_key=FALSE)
x1<-xlong %>%
group_by(year,groupz) %>%
filter(value!=-7)%>%
filter(value!=-9)%>%
filter(value!=-8)%>%
filter(value!=2)%>%
summarise(pop=round(sum(pop),0))
x2<-xlong %>%
group_by(year,groupz) %>%
filter(value!=-7)%>%
filter(value!=-9)%>%
filter(value!=-8)%>%
summarise(pop=round(sum(pop),0))
x1$prop<-round(x1$pop/unique(x2$pop)*100,1)
x1
})
marDF<- as.data.frame(do.call("rbind", mar)) #CATS 1,2,4,5,6
marDF<-marDF %>% complete(groupz, nesting(year))
marDF$groupz<-as.factor(marDF$groupz)
levels(marDF$groupz)<-c("Yes")
marDF[is.na(marDF)] <- 0
marDF<-with(marDF, marDF[order(groupz),])
hc_yAxis<-ifelse(input$magE=="n_nw", 2250, 200000)
formatter<- ifelse(input$magE2=="Relative","function(){ return Math.abs(this.value) + '%'; }",
"function(){ return Math.abs(this.value); }")
marDF<-if(input$magE2=="Relative"){
marDF[,c(1,2,4)]
} else {
marDF[,c(1,2,3)]}
colnames(marDF)<-c("groupz","year","prop")
marDF$year<-paste("Wave ",marDF$year, sep="")
mar1 <- lapply(dmms, function(x){
x<-x[,c("E15",
input$magE,"year")]#input$magC,
colnames(x)<-c("E15","pop","year")
xlong<-x %>%gather(groupz, value,c(E15),
factor_key=FALSE)
x1<-xlong %>%
group_by(year,groupz) %>%
filter(value!=-7)%>%
filter(value!=-9)%>%
filter(value!=-8)%>%
# filter(value!=2)%>%
summarise(pop=round(sum(pop),0))
x1
})
marDF1<- as.data.frame(do.call("rbind", mar1))
rank <-marDF %>%
hchart('bar', hcaes(x = 'groupz', y = 'prop', group = 'year')) %>%
hc_legend(enabled =TRUE ) %>%
hc_yAxis(title = list(text = '')) %>%
hc_xAxis(title = list(text = '')) %>%
hc_yAxis(labels = list(formatter = JS(formatter)),
min=ifelse(input$magE2=="Absolute",0,0),
max=ifelse(input$magE2=="Absolute",hc_yAxis,75))%>%
hc_plotOptions(series = list(dataLabels = list(enabled = TRUE))) %>%
#hc_add_series(name = "Value", data = data) %>%
hc_title(text = "Do you still have the same job and position as when you arrived in Switzerland?",align = 'left') %>%
hc_subtitle(text =paste(
paste("\n2016 N",a(sum(marDF1[marDF1$year==2016,"pop"])),sep=": "),
paste("\n2018 N",a(sum(marDF1[marDF1$year==2018,"pop"])),sep=": "),
paste("\n2020 N",a(sum(marDF1[marDF1$year==2020,"pop"])),sep=": "),
sep=" | "))%>%
hc_add_theme(hc_theme_smpl()) %>%
hc_exporting(enabled = TRUE)%>%
hc_tooltip(enabled = TRUE) %>%
hc_colors(c(gg_color_hue(3)))%>%
hc_credits(enabled = TRUE, text = "NCCR ON THE MOVE",href = "https://nccr-onthemove.ch/")
rank
})
#### E16 WHAT WAS YOUR LABOUR MARKET SITUATION #####
output$E16_1_16 <-renderHighchart({
mar <- lapply(dmms, function(x){
x<-x[,c("E16_1","E16_2","E16_3","E16_4",
"E16_5","E16_6","E16_7","E16_8","E16_9",
input$magE,"year")]#input$magC,
colnames(x)<-c("E16_1","E16_2","E16_3","E16_4",
"E16_5","E16_6","E16_7","E16_8",
"E16_9","pop","year")
xlong<-x %>%gather(groupz, value,c(E16_1,E16_2,E16_3,E16_4,
E16_5,E16_6,E16_7,E16_8,E16_9),
factor_key=FALSE)
x1<-xlong %>%
group_by(year,groupz) %>%
filter(value!=-7)%>%
filter(value!=-9)%>%
filter(value!=-8)%>%
filter(value!=2)%>%
summarise(pop=round(sum(pop),0))
x2<-xlong %>%
group_by(year,groupz) %>%
filter(value!=-7)%>%
filter(value!=-9)%>%
filter(value!=-8)%>%
summarise(pop=round(sum(pop),0))
x1$prop<-round(x1$pop/unique(x2$pop)*100,1)
x1
})
marDF<- as.data.frame(do.call("rbind", mar)) #CATS 1,2,4,5,6
marDF<-marDF %>% complete(groupz, nesting(year))
marDF$groupz<-as.factor(marDF$groupz)
#levels( marDF$groupz)
#marDF$groupz<-factor(marDF$groupz, levels=c("B27_1","B27_2","B27_3",
# "B27_4","B27_5","B27_6"))
levels(marDF$groupz)<-c("In full-time employment",
"In part-time employment",
"Working in more than one part-time job",
"Seeking a job",
"Undergoing training",
"Looking after home or family",
"Disabled or partially disabled",
"Retired",
"In another non-employed situation")
marDF[is.na(marDF)] <- 0
marDF<-with(marDF, marDF[order(groupz),])
hc_yAxis<-ifelse(input$magE=="n_nw", 5500,550000)
formatter<- ifelse(input$magE2=="Relative","function(){ return Math.abs(this.value) + '%'; }",
"function(){ return Math.abs(this.value); }")
# marDF<-as.data.frame(marDF)
#marDF<-ifelse(input$mag3=="Relative","prop","pop")
marDF<-if(input$magE2=="Relative"){
marDF[,c(1,2,4)]
} else {
marDF[,c(1,2,3)]}
colnames(marDF)<-c("groupz","year","prop")
marDF$year<-paste("Wave ",marDF$year, sep="")
mar1 <- lapply(dmms, function(x){
x<-x[,c("E16_1",input$magE,"year")]#input$magC,
colnames(x)<-c("E16_1","pop","year")
xlong<-x %>%gather(groupz, value,c(E16_1),
factor_key=FALSE)
x1<-xlong %>%
group_by(year,groupz) %>%
filter(value!=-7)%>%
filter(value!=-9)%>%
filter(value!=-8)%>%
#filter(value!=2)%>%
summarise(pop=round(sum(pop),0))
x1
})
marDFtotales<- as.data.frame(do.call("rbind", mar1))
rank <-marDF %>%
hchart('bar', hcaes(x = 'groupz', y = 'prop', group = 'year')) %>%
hc_legend(enabled =TRUE ) %>%
hc_yAxis(title = list(text = '')) %>%
hc_xAxis(title = list(text = '')) %>%
hc_yAxis(labels = list(formatter = JS(formatter)),
min=ifelse(input$magE2=="Absolute",0,0),
max=ifelse(input$magE2=="Absolute",hc_yAxis,75))%>%
hc_plotOptions(series = list(dataLabels = list(enabled = TRUE))) %>%
#hc_add_series(name = "Value", data = data) %>%
hc_title(text = "What is your current labor market situation?",align = 'left') %>%
hc_subtitle(text =paste(
paste("\n2016 N",a(sum(marDFtotales[marDFtotales$year==2016,"pop"])),sep=": "),
paste("\n2018 N",a(sum(marDFtotales[marDFtotales$year==2018,"pop"])),sep=": "),
paste("\n2020 N",a(sum(marDFtotales[marDFtotales$year==2020,"pop"])),sep=": "),
sep=" | "))%>%
hc_add_theme(hc_theme_smpl()) %>%
hc_exporting(enabled = TRUE)%>%
hc_tooltip(enabled = TRUE) %>%
hc_colors(c(gg_color_hue(3)))%>%
hc_credits(enabled = TRUE, text = "NCCR ON THE MOVE",href = "https://nccr-onthemove.ch/")
rank
})
  #### E36 Once you arrived in Switzerland in [A6], how long did you spend looking for a job before finding one? ####
  # Bar chart for single-choice item E36 (job-search duration), 2018 wave only.
  # input$BE must name the E36 column; input$magE selects the weight column.
  output$E36_18 <-renderHighchart({
    #mar <- lapply(dmms, function(x){
    x<-dmms[["D18"]][,c(input$BE,input$magE,"year")]
    # recode NA to the -7 "missing" sentinel so the filter below drops it
    x[is.na(x)] <- -7
    colnames(x)<-c("E36","pop","year")
    x<-x %>%
      group_by(year,E36) %>%
      filter(E36!=-7)%>%
      filter(E36!=-9)%>%
      summarise(pop=round(sum(pop),0))%>%
      mutate(prop=round((pop/sum(pop))*100,1))
    x$E36<-as.factor(x$E36)
    x$E36<-fct_explicit_na(x$E36)
    x
    #})
    # relabel the numeric answer codes with the 2018-wave duration categories
    levels(x$E36)<-c(
      "Less than three months",
      "3 months to 1 year",
      "More than 1 year")
    # plot counts or percentages depending on input$magE2
    data<- if(input$magE2=="Absolute"){
      c(x$pop)}else{
        c(x$prop)}
    formatter<- ifelse(input$magE2=="Relative","function(){ return Math.abs(this.value) + '%'; }",
                       "function(){ return Math.abs(this.value); }")
    # axis ceiling depends on whether unweighted counts (n_nw) are plotted
    hc_yAxis<-ifelse(input$magE=="n_nw", 300,25000)
    rank <- highchart() %>%
      hc_chart(type = 'bar',zoomType= 'xy') %>%
      hc_legend(enabled = TRUE) %>%
      hc_xAxis(categories = as.character(x$E36), title = list(text = '')) %>%
      hc_yAxis(labels = list(formatter = JS(formatter)),
               min=ifelse(input$magE2=="Absolute",0,0),
               max=ifelse(input$magE2=="Absolute",hc_yAxis,50))%>%
      hc_plotOptions(series = list(dataLabels = list(enabled = TRUE))) %>%
      hc_add_series(name = "Wave 2018", data = data) %>%
      hc_title(text = "Time looking for a job in Switzerland",
               align = 'left') %>%
      hc_subtitle(text = paste("\nN",a(sum(c(x$pop))),sep=": "))%>%
      hc_add_theme(hc_theme_smpl()) %>%
      hc_exporting(enabled = TRUE)%>%
      hc_tooltip(enabled = TRUE)%>%
      hc_colors(c(gg_color_hue(3)[2]))%>%
      hc_credits(enabled = TRUE, text = "NCCR ON THE MOVE",href = "https://nccr-onthemove.ch/")
    rank
  })
#### E37 Among the following statements, which best describes your situation regarding your job search in Switzerland? ####
# E37 (wave 2018 only): self-assessment of the ongoing job search in Switzerland.
# Same input conventions as the other E-tab charts:
# input$BE = column to tabulate, input$magE = count/weight column,
# input$magE2 = "Absolute" counts vs "Relative" percentages.
output$E37_18 <-renderHighchart({
#mar <- lapply(dmms, function(x){
x<-dmms[["D18"]][,c(input$BE,input$magE,"year")]
# Recode NA to -7 so missing answers are removed by the filters below.
x[is.na(x)] <- -7
colnames(x)<-c("E37","pop","year")
x<-x %>%
group_by(year,E37) %>%
filter(E37!=-7)%>% # -7 / -9 are missing-value codes
filter(E37!=-9)%>%
summarise(pop=round(sum(pop),0))%>%
mutate(prop=round((pop/sum(pop))*100,1))
x$E37<-as.factor(x$E37)
x$E37<-fct_explicit_na(x$E37)
x
#})
# Relabel the answer codes (assumes the codes sort in questionnaire order -- TODO confirm).
levels(x$E37)<-c(
"I recently received a job contract or a job offer",
"I will quickly find a job in Switzerland",
"I will probably find a job in Switzerland but it will take some time",
"It will be difficult for me to find a job in Switzerland",
"I will probably not find a job in Switzerland",
"I am so discouraged that I do not look for a job anymore")
# Series values: counts in absolute mode, percentages in relative mode.
data<- if(input$magE2=="Absolute"){
c(x$pop)}else{
c(x$prop)}
formatter<- ifelse(input$magE2=="Relative","function(){ return Math.abs(this.value) + '%'; }",
"function(){ return Math.abs(this.value); }")
# Hard-coded axis maximum: smaller scale for unweighted counts ("n_nw").
hc_yAxis<-ifelse(input$magE=="n_nw", 400,25000)
rank <- highchart() %>%
hc_chart(type = 'bar',zoomType= 'xy') %>%
hc_legend(enabled = TRUE) %>%
hc_xAxis(categories = as.character(x$E37), title = list(text = '')) %>%
hc_yAxis(labels = list(formatter = JS(formatter)),
min=ifelse(input$magE2=="Absolute",0,0),
max=ifelse(input$magE2=="Absolute",hc_yAxis,50))%>%
hc_plotOptions(series = list(dataLabels = list(enabled = TRUE))) %>%
hc_add_series(name = "Wave 2018", data = data) %>%
hc_title(text = "Job search in Switzerland",
align = 'left') %>%
hc_subtitle(text = paste("\nN",a(sum(c(x$pop))),sep=": "))%>%
hc_add_theme(hc_theme_smpl()) %>%
hc_exporting(enabled = TRUE)%>%
hc_tooltip(enabled = TRUE)%>%
hc_colors(c(gg_color_hue(3)[2]))%>%
hc_credits(enabled = TRUE, text = "NCCR ON THE MOVE",href = "https://nccr-onthemove.ch/")
rank
})
#### E17 In what year did you start your current job? #####
# E17 (wave 2016 only): in what year did the respondent start the current job?
# Column chart of counts/percentages per starting year.
output$E17_16 <-renderHighchart({
x<-dmms[["D16"]][,c(input$BE,input$magE,"year")]
colnames(x)<-c("E17","pop","year")
# Recode NA to -7 so missing answers are removed by the filters below.
x[is.na(x)] <- -7
x<-x %>%
group_by(year,E17) %>%
filter(E17!=-7)%>% # -7 / -9 are missing-value codes
filter(E17!=-9)%>%
#replace_with_na(replace = list(A6 = -9))%>%
summarise(pop=round(sum(pop),0))%>%
mutate(prop=round(pop/sum(pop)*100,1))%>%
arrange(desc(E17))
x$E17<-as.factor(x$E17)
x$E17<-fct_explicit_na(x$E17)
x
#})
# NOTE(review): as.factor() orders levels ascending while these labels run
# 2016 down to "Before 2006" -- confirm the underlying E17 codes are stored so
# that this descending relabeling is correct.
levels(x$E17)<-c(2016:2006,"Before 2006")
data<- if(input$magE2=="Absolute"){
c(x$pop)}else{
c(x$prop)}
# Hard-coded axis maximum: smaller scale for unweighted counts ("n_nw").
hc_yAxis<-ifelse(input$magE=="n_nw", 750,60000)
formatter<- ifelse(input$magE2=="Relative","function(){ return Math.abs(this.value) + '%'; }",
"function(){ return Math.abs(this.value); }")
rank <- highchart() %>%
hc_chart(type = 'column',zoomType= 'xy') %>%
hc_legend(enabled = TRUE) %>%
hc_xAxis(categories = as.factor(x$E17), title = list(text = '')) %>%
hc_yAxis(labels = list(formatter = JS(formatter)),
min=ifelse(input$magE2=="Absolute",0,0),
max=ifelse(input$magE2=="Absolute",hc_yAxis,25))%>%
hc_plotOptions(series = list(dataLabels = list(enabled = TRUE))) %>%
hc_add_series(name = "Wave 2016", data = data) %>%
hc_title(text = "Year starting current job",align = 'left') %>%
hc_subtitle(text = paste("\nN",a(sum(c(x$pop))),sep=": "))%>%
hc_add_theme(hc_theme_smpl()) %>%
hc_exporting(enabled = TRUE)%>%
hc_tooltip(enabled = TRUE)%>%
hc_colors(c(gg_color_hue(3)[1]))%>%
hc_credits(enabled = TRUE, text = "NCCR ON THE MOVE",href = "https://nccr-onthemove.ch/")
rank
})
#### E18 What is your current occupational status? #####
# E18 (all waves): current occupational status.
# Aggregates each wave in dmms separately, relabels the codes per wave,
# then draws one bar series per wave (2016/2018/2020).
output$E18_16 <-renderHighchart({
mar <- lapply(dmms, function(x){
x<-x[,c(input$BE,input$magE,"year")]
#x<-x[,c("E2","weight","year")]
colnames(x)<-c("E18", "pop","year")
x<-x %>%
filter(E18!=-7)%>% # -7 / -8 / -9 are missing-value codes
filter(E18!=-9)%>%
filter(E18!=-8)%>%
group_by(year,E18) %>%
#replace_with_na(replace = list(A6 = -9))%>%
summarise(pop=round(sum(pop),0))%>%
mutate(prop=round(pop/sum(pop)*100,1))%>%
arrange((E18))
x$E18<-as.factor(x$E18)
x$E18<-fct_explicit_na(x$E18)
x
})
# Same 8 status labels applied to every wave (assumes codes sort in
# questionnaire order within each wave -- TODO confirm).
levels(mar[["D16"]]$E18)<-c("Self-employed",
"A company owner",
"A relative employed in a family business",
"Employed as director or board member and/or with managerial",
"Employed without managerial responsibility",
"Employed in a protected workshop (except support staff)",
"An apprentice",
"A PhD student")
levels(mar[["D18"]]$E18)<-c("Self-employed",
"A company owner",
"A relative employed in a family business",
"Employed as director or board member and/or with managerial",
"Employed without managerial responsibility",
"Employed in a protected workshop (except support staff)",
"An apprentice",
"A PhD student")
levels(mar[["D20"]]$E18)<-c("Self-employed",
"A company owner",
"A relative employed in a family business",
"Employed as director or board member and/or with managerial",
"Employed without managerial responsibility",
"Employed in a protected workshop (except support staff)",
"An apprentice",
"A PhD student")
# Stack the per-wave summaries into one data frame.
marDF<- as.data.frame(do.call("rbind", mar))
marDF$E18<-as.factor(marDF$E18)
data<- if(input$magE2=="Absolute"){
marDF[,c("year","E18", "pop")]}else{
marDF[,c("year","E18", "prop")]}
hc_yAxis<-ifelse(input$magE=="n_nw", 5000,500000)
formatter<- ifelse(input$magE2=="Relative","function(){ return Math.abs(this.value) + '%'; }",
"function(){ return Math.abs(this.value); }")
rank <- highchart() %>%
#hc_chart(type = "bar") %>%
hc_xAxis(categories = levels(data$E18), title = list(text = '')) %>%
hc_add_series(name= "Wave 2016",data = data[data$year==2016,3])%>%
hc_add_series(name= "Wave 2018",data = data[data$year==2018,3])%>%
hc_add_series(name= "Wave 2020",data = data[data$year==2020,3])%>%
hc_chart(type = 'bar',zoomType= 'xy') %>%
hc_legend(enabled =TRUE ) %>%
hc_yAxis(labels = list(formatter = JS(formatter)),
min=ifelse(input$magE2=="Absolute",0,0),
max=ifelse(input$magE2=="Absolute",hc_yAxis,70))%>%
hc_plotOptions(series = list(dataLabels = list(enabled = TRUE))) %>%
# Subtitle shows the per-wave N of valid answers.
hc_subtitle(text =paste(
paste("\n2016 N",a(sum(marDF[marDF$year==2016,"pop"])),sep=": "),
paste("\n2018 N",a(sum(marDF[marDF$year==2018,"pop"])),sep=": "),
paste("\n2020 N",a(sum(marDF[marDF$year==2020,"pop"])),sep=": "),
sep=" | "))%>%
hc_title(text = "What is your current occupational status?",
align = 'left') %>%
#hc_subtitle(text = paste("\nN",a(sum(c(mar[["D16"]]$pop))),sep=": "))%>%
hc_add_theme(hc_theme_smpl()) %>%
hc_exporting(enabled = TRUE)%>%
hc_tooltip(enabled = TRUE)%>%
hc_colors(c(gg_color_hue(3)))%>%
hc_credits(enabled = TRUE, text = "NCCR ON THE MOVE",href = "https://nccr-onthemove.ch/")
rank
})
#### E19 What sector of business or industry is your company or institution active in for the most part? ####
# E19 (wave 2016 only): sector of business/industry of the respondent's employer.
output$E19_16 <-renderHighchart({
#mar <- lapply(dmms, function(x){
x<-dmms[["D16"]][,c(input$BE,input$magE,"year")]
# Recode NA to -7 so missing answers are removed by the filters below.
x[is.na(x)] <- -7
colnames(x)<-c("E19","pop","year")
x<-x %>%
group_by(year,E19) %>%
filter(E19!=-7)%>% # -7 / -9 are missing-value codes
filter(E19!=-9)%>%
summarise(pop=round(sum(pop),0))%>%
mutate(prop=round((pop/sum(pop))*100,1))
x$E19<-as.factor(x$E19)
x$E19<-fct_explicit_na(x$E19)
x
#})
# Sector labels (assumes the codes sort in questionnaire order -- TODO confirm).
levels(x$E19)<-c("Agriculture, forestry and fishing",
"Manufacturing, mining and quarrying and other industry ",
"Construction",
"Wholesale and retail trade, transportation, hotels and restaurants",
"Information and communication",
"Financial and insurance activities",
"Real estate activities",
"Professional, scientific, technical, administration and support service activities",
"Public administration, defense, education, human health and social action",
"Other activities and services")
data<- if(input$magE2=="Absolute"){
c(x$pop)}else{
c(x$prop)}
formatter<- ifelse(input$magE2=="Relative","function(){ return Math.abs(this.value) + '%'; }",
"function(){ return Math.abs(this.value); }")
# Hard-coded axis maximum: smaller scale for unweighted counts ("n_nw").
hc_yAxis<-ifelse(input$magE=="n_nw", 800,60000)
rank <- highchart() %>%
hc_chart(type = 'bar',zoomType= 'xy') %>%
hc_legend(enabled = TRUE) %>%
hc_xAxis(categories = as.character(x$E19), title = list(text = '')) %>%
hc_yAxis(labels = list(formatter = JS(formatter)),
min=ifelse(input$magE2=="Absolute",0,0),
max=ifelse(input$magE2=="Absolute",hc_yAxis,50))%>%
hc_plotOptions(series = list(dataLabels = list(enabled = TRUE))) %>%
hc_add_series(name = "Wave 2016", data = data) %>%
hc_title(text = "What sector of business, or industry is your company or institution active in for the most part?",
align = 'left') %>%
hc_subtitle(text = paste("\nN",a(sum(c(x$pop))),sep=": "))%>%
hc_add_theme(hc_theme_smpl()) %>%
hc_exporting(enabled = TRUE)%>%
#hc_credits(enabled = TRUE, text = "NCCR ON THE MOVE",href = "https://nccr-onthemove.ch/")%>%
hc_tooltip(enabled = TRUE)%>%
hc_colors(c(gg_color_hue(3)))%>%
hc_credits(enabled = TRUE, text = "NCCR ON THE MOVE",href = "https://nccr-onthemove.ch/")
rank
})
#### E21 Do you have a work contract of... #####
# E21 (all waves): type of work contract (unlimited / limited / none).
# One bar series per wave.
output$E21_16 <-renderHighchart({
# dmms1820 <- dmms[2:3]
mar <- lapply(dmms, function(x){
x<-x[,c(input$BE,input$magE,"year")]
#x<-x[,c("E33","weight","year")]
colnames(x)<-c("E21", "pop","year")
x<-x %>%
filter(E21!=-7)%>% # -7 / -8 / -9 are missing-value codes
filter(E21!=-9)%>%
filter(E21!=-8)%>%
group_by(year,E21) %>%
#replace_with_na(replace = list(A6 = -9))%>%
summarise(pop=round(sum(pop),0))%>%
mutate(prop=round(pop/sum(pop)*100,1))%>%
arrange((E21))
x$E21<-as.factor(x$E21)
x$E21<-fct_explicit_na(x$E21)
x
})
# NOTE(review): the third label carries a stray leading space
# (" You don't have a contract") -- displayed as-is in the chart.
levels(mar[["D16"]]$E21)<-c("unlimited duration",
"limited duration",
" You don't have a contract")
levels(mar[["D18"]]$E21)<-c("unlimited duration",
"limited duration",
" You don't have a contract")
levels(mar[["D20"]]$E21)<-c("unlimited duration",
"limited duration",
" You don't have a contract")
marDF<- as.data.frame(do.call("rbind", mar))
marDF$E21<-as.factor(marDF$E21)
data<- if(input$magE2=="Absolute"){
marDF[,c("year","E21", "pop")]}else{
marDF[,c("year","E21", "prop")]}
hc_yAxis<-ifelse(input$magE=="n_nw", 4500,500000)
formatter<- ifelse(input$magE2=="Relative","function(){ return Math.abs(this.value) + '%'; }",
"function(){ return Math.abs(this.value); }")
rank <- highchart() %>%
#hc_chart(type = "bar") %>%
hc_xAxis(categories = levels(data$E21), title = list(text = '')) %>%
hc_add_series(name= "Wave 2016",data = data[data$year==2016,3])%>%
hc_add_series(name= "Wave 2018",data = data[data$year==2018,3])%>%
hc_add_series(name= "Wave 2020",data = data[data$year==2020,3])%>%
hc_chart(type = 'bar',zoomType= 'xy') %>%
hc_legend(enabled =TRUE ) %>%
hc_yAxis(labels = list(formatter = JS(formatter)),
min=ifelse(input$magE2=="Absolute",0,0),
max=ifelse(input$magE2=="Absolute",hc_yAxis,100))%>%
hc_plotOptions(series = list(dataLabels = list(enabled = TRUE))) %>%
#hc_add_series(name = "Value", data = data) %>%
hc_title(text = "Do you have a work contract of....",
align = 'left') %>%
hc_subtitle(text =paste(
paste("\n2016 N",a(sum(marDF[marDF$year==2016,"pop"])),sep=": "),
paste("\n2018 N",a(sum(marDF[marDF$year==2018,"pop"])),sep=": "),
paste("\n2020 N",a(sum(marDF[marDF$year==2020,"pop"])),sep=": "),
sep=" | "))%>%
hc_add_theme(hc_theme_smpl()) %>%
hc_exporting(enabled = TRUE)%>%
hc_tooltip(enabled = TRUE)%>%
hc_colors(c(gg_color_hue(3)))%>%
hc_credits(enabled = TRUE, text = "NCCR ON THE MOVE",href = "https://nccr-onthemove.ch/")
rank
})
#### E22 In which locality is the company that you work for?#####
# E22 (wave 2016 only): locality of the employer relative to the respondent's
# residence (same commune / same canton / other canton / abroad).
# input$BE   : name of the survey column to tabulate (expected to carry E22 codes)
# input$magE : name of the count/weight column ("n_nw" = unweighted count)
# input$magE2: "Absolute" (counts) vs "Relative" (percentages)
# Fix vs. original: hc_credits() was chained twice (after hc_add_theme() and
# again at the end); the first call was dead code overwritten by the second,
# so the duplicate has been removed. Rendered output is unchanged.
output$E22_16 <-renderHighchart({
x<-dmms[["D16"]][,c(input$BE,input$magE,"year")]
colnames(x)<-c("E22", "pop","year")
# Recode NA to -9 so missing answers are removed by the filter below.
x[is.na(x)] <- -9
x<-x %>%
group_by(E22) %>%
filter(E22!=-9)%>% # -9 is the missing-value code
summarise(pop=round(sum(pop),0))%>%
mutate(prop=round(pop/sum(pop)*100,1))%>%
arrange((E22))
x$E22<-as.factor(x$E22)
x$E22<-fct_explicit_na(x$E22)
# Relabel the answer codes (assumes the codes sort in questionnaire order -- TODO confirm).
levels(x$E22)<-c("In the same commune as your residence",
"In the same canton but not the same commune as your residence",
"In a different canton to your residence",
"Abroad")
# Series values: counts in absolute mode, percentages in relative mode.
data<- if(input$magE2=="Absolute"){
c(x$pop)}else{
c(x$prop)}
# Hard-coded axis maximum: smaller scale for unweighted counts ("n_nw").
hc_yAxis<-ifelse(input$magE=="n_nw", 2500,200000)
# JS axis-label formatter: append '%' only in relative mode.
formatter<- ifelse(input$magE2=="Relative","function(){ return Math.abs(this.value) + '%'; }",
"function(){ return Math.abs(this.value); }")
rank <- highchart() %>%
hc_chart(type = 'bar',zoomType= 'xy') %>%
hc_legend(enabled = TRUE) %>%
hc_xAxis(categories = as.factor(x$E22), title = list(text = '')) %>%
hc_yAxis(labels = list(formatter = JS(formatter)),
min=ifelse(input$magE2=="Absolute",0,0),
max=ifelse(input$magE2=="Absolute",hc_yAxis,50))%>%
hc_plotOptions(series = list(dataLabels = list(enabled = TRUE))) %>%
hc_add_series(name = "Wave 2016", data = data) %>%
hc_title(text = "Locality of the company that you work for",
align = 'left') %>%
hc_subtitle(text = paste("\nN",a(sum(c(x$pop))),sep=": "))%>%
hc_add_theme(hc_theme_smpl()) %>%
hc_exporting(enabled = TRUE)%>%
hc_tooltip(enabled = TRUE)%>%
hc_colors(c(gg_color_hue(3)))%>%
hc_credits(enabled = TRUE, text = "NCCR ON THE MOVE",href = "https://nccr-onthemove.ch/")
rank
})
#### E38 Do you think that your current level of education (that is [D1]) is appropriate for your current job?#####
# E38 (waves 2018 and 2020): is the respondent's level of education appropriate
# for the current job? One bar series per wave.
output$E38_18 <-renderHighchart({
# Only waves 2018 and 2020 carry this question.
dmms1820 <- dmms[2:3]
mar <- lapply(dmms1820, function(x){
x<-x[,c(input$BE,input$magE,"year")]
#x<-x[,c("E33","weight","year")]
colnames(x)<-c("E38", "pop","year")
x<-x %>%
filter(E38!=-7)%>% # -7 / -8 / -9 are missing-value codes
filter(E38!=-9)%>%
filter(E38!=-8)%>%
group_by(year,E38) %>%
#replace_with_na(replace = list(A6 = -9))%>%
summarise(pop=round(sum(pop),0))%>%
mutate(prop=round(pop/sum(pop)*100,1))%>%
arrange((E38))
x$E38<-as.factor(x$E38)
x$E38<-fct_explicit_na(x$E38)
x
})
# Same 4 labels per wave (assumes codes sort in questionnaire order -- TODO confirm).
levels(mar[["D18"]]$E38)<-c("Yes, fully appropriate",
"Yes, rather appropriate",
"No, not really appropriate",
"No, absolutely not appropriate")
levels(mar[["D20"]]$E38)<-c("Yes, fully appropriate",
"Yes, rather appropriate",
"No, not really appropriate",
"No, absolutely not appropriate")
marDF<- as.data.frame(do.call("rbind", mar))
marDF$E38<-as.factor(marDF$E38)
data<- if(input$magE2=="Absolute"){
marDF[,c("year","E38", "pop")]}else{
marDF[,c("year","E38", "prop")]}
hc_yAxis<-ifelse(input$magE=="n_nw", 4000,400000)
formatter<- ifelse(input$magE2=="Relative","function(){ return Math.abs(this.value) + '%'; }",
"function(){ return Math.abs(this.value); }")
rank <- highchart() %>%
#hc_chart(type = "bar") %>%
hc_xAxis(categories = levels(data$E38), title = list(text = '')) %>%
#hc_add_series(name= "Wave 2016",data = data[data$year==2016,3])%>%
hc_add_series(name= "Wave 2018",data = data[data$year==2018,3])%>%
hc_add_series(name= "Wave 2020",data = data[data$year==2020,3])%>%
hc_chart(type = 'bar',zoomType= 'xy') %>%
hc_legend(enabled =TRUE ) %>%
hc_yAxis(labels = list(formatter = JS(formatter)),
min=ifelse(input$magE2=="Absolute",0,0),
max=ifelse(input$magE2=="Absolute",hc_yAxis,100))%>%
hc_plotOptions(series = list(dataLabels = list(enabled = TRUE))) %>%
#hc_add_series(name = "Value", data = data) %>%
hc_title(text = "Do you think that your current level of education is appropriate for your current job?",
align = 'left') %>%
hc_subtitle(text =paste(
#paste("\n2016 N",a(sum(marDF[marDF$year==2016,"pop"])),sep=": "),
paste("\n2018 N",a(sum(marDF[marDF$year==2018,"pop"])),sep=": "),
paste("\n2020 N",a(sum(marDF[marDF$year==2020,"pop"])),sep=": "),
sep=" | "))%>%
hc_add_theme(hc_theme_smpl()) %>%
hc_exporting(enabled = TRUE)%>%
hc_tooltip(enabled = TRUE)%>%
# Skip the first hue so wave colors match the 2018/2020 charts elsewhere.
hc_colors(c(gg_color_hue(3)[c(2:3)]))%>%
hc_credits(enabled = TRUE, text = "NCCR ON THE MOVE",href = "https://nccr-onthemove.ch/")
rank
})
#### E23 LEVEL OF EDUCATION FOR JOB #####
# E23 (waves 2016 and 2018): education level the respondent feels is most
# appropriate for the current job. One bar series per wave.
output$E23_16 <-renderHighchart({
# Only waves 2016 and 2018 carry this question.
dmms1618<- dmms[1:2]
mar <- lapply(dmms1618, function(x){
x<-x[,c(input$BE,input$magE,"year")]
colnames(x)<-c("E23", "pop","year")
# Recode NA to -9 so missing answers are removed by the filters below.
x[is.na(x)] <- -9
x<-x %>%
group_by(year, E23) %>%
filter(E23!=-9)%>% # -7 / -9 are missing-value codes
filter(E23!=-7)%>%
#replace_with_na(replace = list(A6 = -9))%>%
summarise(pop=round(sum(pop),0))%>%
mutate(prop=round(pop/sum(pop)*100,1))%>%
arrange(desc(E23))
x$E23<-as.factor(x$E23)
x$E23<-fct_explicit_na(x$E23)
x
})
# Same 9 labels per wave (assumes codes sort in questionnaire order -- TODO confirm).
levels(mar[["D16"]]$E23)<-c("No formal educational qualification",
"Compulsory education",
"Higher secondary education not giving access to universities",
"Vocational education and/or training",
"High school-leaving certificate giving access to universities",
"Advanced technical and professional training",
"Bachelor or equivalent",
"Master or equivalent",
"Phd Doctoral or equivalent")
levels(mar[["D18"]]$E23)<-c("No formal educational qualification",
"Compulsory education",
"Higher secondary education not giving access to universities",
"Vocational education and/or training",
"High school-leaving certificate giving access to universities",
"Advanced technical and professional training",
"Bachelor or equivalent",
"Master or equivalent",
"Phd Doctoral or equivalent")
marDF<- as.data.frame(do.call("rbind", mar))
marDF$E23<-as.factor(marDF$E23)
data<- if(input$magE2=="Absolute"){
marDF[,c("year","E23", "pop")]}else{
marDF[,c("year","E23", "prop")]}
hc_yAxis<-ifelse(input$magE=="n_nw", 2000,150000)
formatter<- ifelse(input$magE2=="Relative","function(){ return Math.abs(this.value) + '%'; }",
"function(){ return Math.abs(this.value); }")
rank <- highchart() %>%
#hc_chart(type = "bar") %>%
hc_xAxis(categories = levels(data$E23), title = list(text = '')) %>%
#hc_add_series(name= "Wave 2016",data = data[data$year==2016,3])%>%
hc_add_series(name= "Wave 2016",data = data[data$year==2016,3])%>%
hc_add_series(name= "Wave 2018",data = data[data$year==2018,3])%>%
hc_chart(type = 'bar',zoomType= 'xy') %>%
hc_legend(enabled =TRUE ) %>%
hc_yAxis(labels = list(formatter = JS(formatter)),
min=ifelse(input$magE2=="Absolute",0,0),
max=ifelse(input$magE2=="Absolute",hc_yAxis,50))%>%
hc_plotOptions(series = list(dataLabels = list(enabled = TRUE))) %>%
#hc_add_series(name = "Value", data = data) %>%
hc_title(text = "What type of education do you feel is most appropriate for your current job?",
align = 'left') %>%
hc_subtitle(text =paste(
paste("\n2016 N",a(sum(marDF[marDF$year==2016,"pop"])),sep=": "),
paste("\n2018 N",a(sum(marDF[marDF$year==2018,"pop"])),sep=": "),
# paste("\n2020 N",a(sum(marDF[marDF$year==2020,"pop"])),sep=": "),
sep=" | "))%>%
hc_add_theme(hc_theme_smpl()) %>%
hc_exporting(enabled = TRUE)%>%
hc_tooltip(enabled = TRUE)%>%
hc_colors(c(gg_color_hue(3)[c(1:2)]))%>%
hc_credits(enabled = TRUE, text = "NCCR ON THE MOVE",href = "https://nccr-onthemove.ch/")
rank
})
#### E24 What are the reasons for you currently occupying a job that does not correspond to your educational level? #####
# E24 (wave 2016, multiple answers): reasons for occupying a job that does not
# correspond to the respondent's educational level. Each E24_k is a tick-box
# item (value 1 = ticked, 2 = not ticked; -7/-8/-9 = missing codes).
# input$magE : name of the count/weight column ("n_nw" = unweighted count)
# input$magE2: "Absolute" (counts) vs "Relative" (percentages)
#
# FIX vs. original: labels were attached with levels(as.factor(groupz)) <- ...,
# but as.factor() sorts the character keys alphabetically
# ("E24_1","E24_10",...,"E24_2",...), so 12 of the 13 labels landed on the
# wrong reason. We now set the factor levels explicitly in questionnaire order
# before relabeling; this also guarantees all 13 levels exist even when a
# reason is never ticked.
output$E24_1_16 <-renderHighchart({
dmms16<- dmms[1]
mar <- lapply(dmms16, function(x){
x<-x[,c("E24_1","E24_2","E24_3","E24_4",
"E24_5","E24_6","E24_7","E24_8","E24_9",
"E24_10","E24_11","E24_12","E24_13",
input$magE,"year")]#input$magC,
colnames(x)<-c("E24_1","E24_2","E24_3","E24_4",
"E24_5","E24_6","E24_7","E24_8","E24_9",
"E24_10","E24_11","E24_12","E24_13","pop","year")
# Long format: one row per respondent x reason.
xlong<-x %>%gather(groupz, value,c(E24_1,E24_2,E24_3,E24_4,
E24_5,E24_6,E24_7,E24_8,E24_9,
E24_10,E24_11,E24_12,E24_13),
factor_key=FALSE)
# Numerator: respondents who ticked the reason (drop missing codes and "not ticked").
x1<-xlong %>%
group_by(year,groupz) %>%
filter(value!=-7)%>%
filter(value!=-9)%>%
filter(value!=-8)%>%
filter(value!=2)%>%
summarise(pop=round(sum(pop),0))
# Denominator: all valid (non-missing) answers per reason.
x2<-xlong %>%
group_by(year,groupz) %>%
filter(value!=-7)%>%
filter(value!=-9)%>%
filter(value!=-8)%>%
summarise(pop=round(sum(pop),0))
# NOTE(review): unique(x2$pop) assumes every reason has the same denominator;
# if denominators differ and x1 has fewer rows than x2, this misaligns.
x1$prop<-round(x1$pop/unique(x2$pop)*100,1)
x1
})
marDF<- as.data.frame(do.call("rbind", mar))
# Ensure every reason/year combination exists even when nobody ticked it.
marDF<-marDF %>% complete(groupz, nesting(year))
# Explicit questionnaire-order levels (see FIX note above).
marDF$groupz<-factor(marDF$groupz, levels=paste0("E24_", 1:13))
levels(marDF$groupz)<-c("Inadequate knowledge of one of the national languages",
"Qualifications obtained abroad are not recognized in Switzerland",
"A change of career",
"Lack of jobs with corresponding qualifications",
"Future salary improvements and promotional opportunities",
"To be able to study at the same time",
"To avoid unemployment",
"Origin, religion or social background",
"Family obligations",
"Health reasons",
"No interest in changing jobs",
"Other obstacle",
"No particular obstacles")
# Missing combinations become zero bars.
marDF[is.na(marDF)] <- 0
marDF<-with(marDF, marDF[order(groupz),])
# Hard-coded axis maximum: smaller scale for unweighted counts ("n_nw").
hc_yAxis<-ifelse(input$magE=="n_nw", 750,50000)
formatter<- ifelse(input$magE2=="Relative","function(){ return Math.abs(this.value) + '%'; }",
"function(){ return Math.abs(this.value); }")
# Keep count or percentage column depending on the display mode.
marDF<-if(input$magE2=="Relative"){
marDF[,c(1,2,4)]
} else {
marDF[,c(1,2,3)]}
colnames(marDF)<-c("groupz","year","prop")
marDF$year<-paste("Wave ",marDF$year, sep="")
# Recompute N from the first item only (everyone answers E24_1) for the subtitle.
mar1 <- lapply(dmms16, function(x){
x<-x[,c("E24_1",
input$magE,"year")]#input$magC,
colnames(x)<-c("E24_1","pop","year")
xlong<-x %>%gather(groupz, value,c(E24_1),
factor_key=FALSE)
x1<-xlong %>%
group_by(year,groupz) %>%
filter(value!=-7)%>%
filter(value!=-9)%>%
filter(value!=-8)%>%
summarise(pop=round(sum(pop),0))
x1
})
marDF1<- as.data.frame(do.call("rbind", mar1))
rank <-marDF %>%
hchart('bar', hcaes(x = 'groupz', y = 'prop', group = 'year')) %>%
hc_legend(enabled =TRUE ) %>%
hc_yAxis(title = list(text = '')) %>%
hc_xAxis(title = list(text = '')) %>%
hc_yAxis(labels = list(formatter = JS(formatter)),
min=ifelse(input$magE2=="Absolute",0,0),
max=ifelse(input$magE2=="Absolute",hc_yAxis,50))%>%
hc_plotOptions(series = list(dataLabels = list(enabled = TRUE))) %>%
hc_title(text = "What are the reasons for you currently occupying a job that does not correspond to your educational level?",align = 'left') %>%
hc_subtitle(text =paste(
paste("\n2016 N",a(sum(marDF1[marDF1$year==2016,"pop"])),sep=": "),
sep=" | "))%>%
hc_add_theme(hc_theme_smpl()) %>%
hc_exporting(enabled = TRUE)%>%
hc_tooltip(enabled = TRUE) %>%
hc_colors(c(gg_color_hue(3)))%>%
hc_credits(enabled = TRUE, text = "NCCR ON THE MOVE",href = "https://nccr-onthemove.ch/")
rank
})
#### e25 FROM 0 TO 7 #####
#input<-data.frame(mag1="weight",mag3="Relative")
# E25 (all waves): scale item "knowledge and overall skills utilized in your
# current work". Column chart with one series per wave; x-axis = scale value.
output$E25_16 <-renderHighchart({
#aver<-dmms[[1]]
mar <- lapply(dmms, function(x){
x<-x[,c("E25",
input$magE,"year")]#input$mag1,
colnames(x)<-c("E25","pop","year")
xlong<-x %>%gather(groupz, value,c(E25),
factor_key=FALSE)
# Recode NA to -8 so it is dropped with the other missing codes.
xlong[is.na(xlong)] <- -8
x1<-xlong %>%
group_by(year,groupz,value) %>%
filter(value!=-7)%>% # -7 / -8 / -9 are missing-value codes
filter(value!=-9)%>%
filter(value!=-8)%>%
summarise(pop=round(sum(pop),0))%>%
# summarise() drops the last grouping level, so prop is the share of
# each scale value within (year, groupz).
mutate(prop=round(pop/sum(pop)*100,0))
#x1$prop<-round(x1$pop/unique(x1$pop)*100,1)
x1
})
marDF<- as.data.frame(do.call("rbind", mar)) #CATS 1,2,4,5,6
# Ensure every value/year combination exists even when empty.
marDF<-marDF %>% complete(groupz, nesting(year))
marDF$groupz<-as.factor(marDF$groupz)
#levels( marDF$groupz)
# marDF$groupz<-factor(marDF$groupz, levels=c("B27_1","B27_2","B27_3",
#                                            "B27_4","B27_5","B27_6"))
levels(marDF$groupz)<-c("Knowledge and overall skills utilized in your current work")
marDF[is.na(marDF)] <- 0
marDF<-with(marDF, marDF[order(groupz),])
# Hard-coded axis maximum: smaller scale for unweighted counts ("n_nw").
hc_yAxis<-ifelse(input$magE=="n_nw", 3000,300000)
formatter<- ifelse(input$magE2=="Relative","function(){ return Math.abs(this.value) + '%'; }",
"function(){ return Math.abs(this.value); }")
# Keep count or percentage column depending on the display mode.
marDF1<-if(input$magE2=="Relative"){
marDF[,c(1,2,3,5)]
} else {
marDF[,c(1,2,3,4)]}
colnames(marDF1)<-c("groupz","year","value", "prop")
marDF1<-as.data.frame(marDF1)
rank <- highchart() %>%
hc_xAxis(categories = levels(as.factor(marDF1$value)), title = list(text = '')) %>%
hc_add_series(name= "Wave 2016",data = marDF1[marDF1$year==2016,4])%>%
hc_add_series(name= "Wave 2018",data = marDF1[marDF1$year==2018,4])%>%
hc_add_series(name= "Wave 2020",data = marDF1[marDF1$year==2020,4])%>%
hc_chart(type = 'column',zoomType= 'xy')%>%
hc_legend(enabled =TRUE ) %>%
hc_yAxis(title = list(text = '')) %>%
hc_xAxis(title = list(text = '')) %>%
hc_yAxis(labels = list(formatter = JS(formatter)),
min=ifelse(input$magE2=="Absolute",0,0),
max=ifelse(input$magE2=="Absolute",hc_yAxis,50))%>%
hc_plotOptions(series = list(dataLabels = list(enabled = TRUE))) %>%
#hc_add_series(name = "Value", data = data) %>%
hc_title(text = "Knowledge and overall skills utilized in your current work",align = 'left') %>%
# Subtitle Ns come from the pre-selection marDF (still carries pop).
hc_subtitle(text =paste(
paste("\n2016 N",a(sum(marDF[marDF$year==2016,"pop"])),sep=": "),
paste("\n2018 N",a(sum(marDF[marDF$year==2018,"pop"])),sep=": "),
paste("\n2020 N",a(sum(marDF[marDF$year==2020,"pop"])),sep=": "),
sep=" | "))%>%
hc_add_theme(hc_theme_smpl()) %>%
hc_exporting(enabled = TRUE)%>%
hc_tooltip(enabled = TRUE) %>%
hc_colors(c(gg_color_hue(3)))%>%
hc_credits(enabled = TRUE, text = "NCCR ON THE MOVE",href = "https://nccr-onthemove.ch/")
rank
})
#### E26 What are the reasons for you currently occupying a job that does not utilize your knowledge and overall skills? #####
# E26 (wave 2016, multiple answers): reasons for occupying a job that does not
# utilize the respondent's knowledge and overall skills. Each E26_k is a
# tick-box item (value 1 = ticked, 2 = not ticked; -7/-8/-9 = missing codes).
# input$magE : name of the count/weight column ("n_nw" = unweighted count)
# input$magE2: "Absolute" (counts) vs "Relative" (percentages)
#
# FIXES vs. original:
# 1. gather() listed E26_6 twice and omitted E26_9, producing only 12 distinct
#    reason keys for the 13 labels below.
# 2. Labels were attached via levels(as.factor(groupz)) <- ..., but as.factor()
#    sorts the character keys alphabetically ("E26_1","E26_10",...,"E26_2",...),
#    mislabeling the reasons. Levels are now set explicitly in questionnaire
#    order before relabeling.
output$E26_1_16 <-renderHighchart({
dmms16<- dmms[1]
mar <- lapply(dmms16, function(x){
x<-x[,c("E26_1","E26_2","E26_3","E26_4",
"E26_5","E26_6","E26_7","E26_8","E26_9",
"E26_10","E26_11","E26_12","E26_13",
input$magE,"year")]#input$magC,
colnames(x)<-c("E26_1","E26_2","E26_3","E26_4",
"E26_5","E26_6","E26_7","E26_8","E26_9",
"E26_10","E26_11","E26_12","E26_13","pop","year")
# Long format: one row per respondent x reason (fix #1: E26_9 included once,
# E26_6 no longer duplicated).
xlong<-x %>%gather(groupz, value,c(E26_1,E26_2,E26_3,E26_4,
E26_5,E26_6,E26_7,E26_8,E26_9,
E26_10,E26_11,E26_12,E26_13),
factor_key=FALSE)
# Numerator: respondents who ticked the reason (drop missing codes and "not ticked").
x1<-xlong %>%
group_by(year,groupz) %>%
filter(value!=-7)%>%
filter(value!=-9)%>%
filter(value!=-8)%>%
filter(value!=2)%>%
summarise(pop=round(sum(pop),0))
# Denominator: all valid (non-missing) answers per reason.
x2<-xlong %>%
group_by(year,groupz) %>%
filter(value!=-7)%>%
filter(value!=-9)%>%
filter(value!=-8)%>%
summarise(pop=round(sum(pop),0))
# NOTE(review): unique(x2$pop) assumes every reason has the same denominator;
# if denominators differ and x1 has fewer rows than x2, this misaligns.
x1$prop<-round(x1$pop/unique(x2$pop)*100,1)
x1
})
marDF<- as.data.frame(do.call("rbind", mar))
# Ensure every reason/year combination exists even when nobody ticked it.
marDF<-marDF %>% complete(groupz, nesting(year))
# Explicit questionnaire-order levels (see fix #2 above).
marDF$groupz<-factor(marDF$groupz, levels=paste0("E26_", 1:13))
levels(marDF$groupz)<-c("Inadequate knowledge of one of the national languages",
"Qualifications obtained abroad are not recognized in Switzerland",
"A change of career",
"Lack of jobs with corresponding qualifications",
"Future salary improvements and promotional opportunities",
"To be able to study at the same time",
"To avoid unemployment",
"Origin, religion or social background",
"Family obligations",
"Health reasons",
"No interest in changing jobs",
"Other obstacle",
"No particular obstacles")
# Missing combinations become zero bars.
marDF[is.na(marDF)] <- 0
marDF<-with(marDF, marDF[order(groupz),])
# Hard-coded axis maximum: smaller scale for unweighted counts ("n_nw").
hc_yAxis<-ifelse(input$magE=="n_nw", 750,50000)
formatter<- ifelse(input$magE2=="Relative","function(){ return Math.abs(this.value) + '%'; }",
"function(){ return Math.abs(this.value); }")
# Keep count or percentage column depending on the display mode.
marDF<-if(input$magE2=="Relative"){
marDF[,c(1,2,4)]
} else {
marDF[,c(1,2,3)]}
colnames(marDF)<-c("groupz","year","prop")
marDF$year<-paste("Wave ",marDF$year, sep="")
# Recompute N from the first item only (everyone answers E26_1) for the subtitle.
mar1 <- lapply(dmms16, function(x){
x<-x[,c("E26_1",
input$magE,"year")]#input$magC,
colnames(x)<-c("E26_1","pop","year")
xlong<-x %>%gather(groupz, value,c(E26_1),
factor_key=FALSE)
x1<-xlong %>%
group_by(year,groupz) %>%
filter(value!=-7)%>%
filter(value!=-9)%>%
filter(value!=-8)%>%
summarise(pop=round(sum(pop),0))
x1
})
marDF1<- as.data.frame(do.call("rbind", mar1))
rank <-marDF %>%
hchart('bar', hcaes(x = 'groupz', y = 'prop', group = 'year')) %>%
hc_legend(enabled =TRUE ) %>%
hc_yAxis(title = list(text = '')) %>%
hc_xAxis(title = list(text = '')) %>%
hc_yAxis(labels = list(formatter = JS(formatter)),
min=ifelse(input$magE2=="Absolute",0,0),
max=ifelse(input$magE2=="Absolute",hc_yAxis,50))%>%
hc_plotOptions(series = list(dataLabels = list(enabled = TRUE))) %>%
hc_title(text = "What are the reasons for you currently occupying a job that does not utilize your knowledge and overall skills?",align = 'left') %>%
hc_subtitle(text =paste(
paste("\n2016 N",a(sum(marDF1[marDF1$year==2016,"pop"])),sep=": "),
sep=" | "))%>%
hc_add_theme(hc_theme_smpl()) %>%
hc_exporting(enabled = TRUE)%>%
hc_tooltip(enabled = TRUE) %>%
hc_colors(c(gg_color_hue(3)))%>%
hc_credits(enabled = TRUE, text = "NCCR ON THE MOVE",href = "https://nccr-onthemove.ch/")
rank
})
#### e39 FROM 0 TO 7 #####
#input<-data.frame(mag1="weight",mag3="Relative")
# E39 (waves 2018 and 2020): scale item "level of satisfaction with your
# current occupation". Column chart with one series per wave; x-axis = scale value.
output$E39_16 <-renderHighchart({
#aver<-dmms[[1]]
# Only waves 2018 and 2020 carry this question.
dmms1820 <- dmms[2:3]
mar <- lapply(dmms1820, function(x){
x<-x[,c("E39",
input$magE,"year")]#input$mag1,
colnames(x)<-c("E39","pop","year")
xlong<-x %>%gather(groupz, value,c(E39),
factor_key=FALSE)
# Recode NA to -8 so it is dropped with the other missing codes.
xlong[is.na(xlong)] <- -8
x1<-xlong %>%
group_by(year,groupz,value) %>%
filter(value!=-7)%>% # -7 / -8 / -9 are missing-value codes
filter(value!=-9)%>%
filter(value!=-8)%>%
summarise(pop=round(sum(pop),0))%>%
# summarise() drops the last grouping level, so prop is the share of
# each scale value within (year, groupz).
mutate(prop=round(pop/sum(pop)*100,0))
#x1$prop<-round(x1$pop/unique(x1$pop)*100,1)
x1
})
marDF<- as.data.frame(do.call("rbind", mar)) #CATS 1,2,4,5,6
# Ensure every value/year combination exists even when empty.
marDF<-marDF %>% complete(groupz, nesting(year))
marDF$groupz<-as.factor(marDF$groupz)
#levels( marDF$groupz)
# marDF$groupz<-factor(marDF$groupz, levels=c("B27_1","B27_2","B27_3",
#                                            "B27_4","B27_5","B27_6"))
levels(marDF$groupz)<-c("Level of satisfaction with your current occupation")
marDF[is.na(marDF)] <- 0
marDF<-with(marDF, marDF[order(groupz),])
# Hard-coded axis maximum: smaller scale for unweighted counts ("n_nw").
hc_yAxis<-ifelse(input$magE=="n_nw", 2500,250000)
formatter<- ifelse(input$magE2=="Relative","function(){ return Math.abs(this.value) + '%'; }",
"function(){ return Math.abs(this.value); }")
# Keep count or percentage column depending on the display mode.
marDF1<-if(input$magE2=="Relative"){
marDF[,c(1,2,3,5)]
} else {
marDF[,c(1,2,3,4)]}
colnames(marDF1)<-c("groupz","year","value", "prop")
marDF1<-as.data.frame(marDF1)
rank <- highchart() %>%
hc_xAxis(categories = levels(as.factor(marDF1$value)), title = list(text = '')) %>%
# hc_add_series(name= "Wave 2016",data = marDF[marDF$year==2016,4])%>%
hc_add_series(name= "Wave 2018",data = marDF1[marDF1$year==2018,4])%>%
hc_add_series(name= "Wave 2020",data = marDF1[marDF1$year==2020,4])%>%
hc_chart(type = 'column',zoomType= 'xy')%>%
hc_legend(enabled =TRUE ) %>%
hc_yAxis(title = list(text = '')) %>%
hc_xAxis(title = list(text = '')) %>%
hc_yAxis(labels = list(formatter = JS(formatter)),
min=ifelse(input$magE2=="Absolute",0,0),
max=ifelse(input$magE2=="Absolute",hc_yAxis,50))%>%
hc_plotOptions(series = list(dataLabels = list(enabled = TRUE))) %>%
#hc_add_series(name = "Value", data = data) %>%
hc_title(text = "Level of satisfaction with your current occupation",align = 'left') %>%
# Subtitle Ns come from the pre-selection marDF (still carries pop).
hc_subtitle(text =paste(
#paste("\n2016 N",a(sum(marDF[marDF$year==2016,"pop"])),sep=": "),
paste("\n2018 N",a(sum(marDF[marDF$year==2018,"pop"])),sep=": "),
paste("\n2020 N",a(sum(marDF[marDF$year==2020,"pop"])),sep=": "),
sep=" | "))%>%
hc_add_theme(hc_theme_smpl()) %>%
hc_exporting(enabled = TRUE)%>%
hc_tooltip(enabled = TRUE) %>%
# Skip the first hue so wave colors match the 2018/2020 charts elsewhere.
hc_colors(c(gg_color_hue(3)[c(2:3)]))%>%
hc_credits(enabled = TRUE, text = "NCCR ON THE MOVE",href = "https://nccr-onthemove.ch/")
rank
})
#### E27 when comparing your situation today with your situation before moving to Switzerland in ...? It has ...####
  output$E27_16 <-renderHighchart({
    # Grouped bar chart for item E27: change in professional situation compared
    # with the situation before moving to Switzerland, across the survey waves
    # (2016/2018/2020) held in `dmms`.
    # input$BE selects the item column, input$magE the magnitude column
    # (weighted vs unweighted N), input$magE2 toggles Absolute/Relative display.
    mar <- lapply(dmms, function(x){
      x<-x[,c(input$BE,input$magE,"year")]
      colnames(x)<-c("E27", "pop","year")
      # NAs are recoded to -9 and then dropped together with -7.
      # NOTE(review): -7/-9 look like the survey's missing-value codes --
      # confirm against the codebook.
      x[is.na(x)] <- -9
      x<-x %>%
        group_by(year, E27) %>%
        filter(E27!=-9)%>%
        filter(E27!=-7)%>%
        #replace_with_na(replace = list(A6 = -9))%>%
        summarise(pop=round(sum(pop),0))%>%
        mutate(prop=round(pop/sum(pop)*100,1))%>%   # per-wave share in percent
        arrange(desc(E27))
      x$E27<-as.factor(x$E27)
      x$E27<-fct_explicit_na(x$E27)
      x
    })
    # Relabel the numeric answer codes with their text, separately per wave.
    # NOTE(review): this assumes every wave contains exactly five factor levels
    # in the same order; a wave missing a category would be mislabelled silently.
    levels(mar[["D16"]]$E27)<-c("Improved substantially",
                                "Improved slightly",
                                "Remained the same",
                                "Worsened slightly",
                                "Worsened substantially")
    levels(mar[["D18"]]$E27)<-c("Improved substantially",
                                "Improved slightly",
                                "Remained the same",
                                "Worsened slightly",
                                "Worsened substantially")
    levels(mar[["D20"]]$E27)<-c("Improved substantially",
                                "Improved slightly",
                                "Remained the same",
                                "Worsened slightly",
                                "Worsened substantially")
    marDF<- as.data.frame(do.call("rbind", mar))
    marDF$E27<-as.factor(marDF$E27)
    # Pick absolute counts or percentages according to the display toggle.
    data<- if(input$magE2=="Absolute"){
      marDF[,c("year","E27", "pop")]}else{
        marDF[,c("year","E27", "prop")]}
    # Axis ceiling depends on whether the unweighted (n_nw) or weighted
    # magnitude column is displayed.
    hc_yAxis<-ifelse(input$magE=="n_nw",3500,350000)
    # JS axis-label formatter: append '%' in relative mode.
    formatter<- ifelse(input$magE2=="Relative","function(){ return Math.abs(this.value) + '%'; }",
                       "function(){ return Math.abs(this.value); }")
    rank <- highchart() %>%
      #hc_chart(type = "bar") %>%
      hc_xAxis(categories = rev(levels(data$E27)), title = list(text = '')) %>%
      # One series per wave; column 3 holds the chosen magnitude (pop or prop).
      hc_add_series(name= "Wave 2016",data = data[data$year==2016,3])%>%
      hc_add_series(name= "Wave 2018",data = data[data$year==2018,3])%>%
      hc_add_series(name= "Wave 2020",data = data[data$year==2020,3])%>%
      hc_chart(type = 'bar',zoomType= 'xy') %>%
      hc_legend(enabled =TRUE ) %>%
      hc_yAxis(labels = list(formatter = JS(formatter)),
               min=ifelse(input$magE2=="Absolute",0,0),
               max=ifelse(input$magE2=="Absolute",hc_yAxis,75))%>%
      hc_plotOptions(series = list(dataLabels = list(enabled = TRUE))) %>%
      #hc_add_series(name = "Value", data = data) %>%
      hc_title(text = "Improvement in professional situation",
               align = 'left') %>%
      # Subtitle reports per-wave totals; a() is a helper defined elsewhere in
      # this app -- presumably a number formatter, confirm at its definition.
      hc_subtitle(text =paste(
        paste("\n2016 N",a(sum(c(mar[[1]]$pop))),sep=": "),
        paste("\n2018 N",a(sum(c(mar[[2]]$pop))),sep=": "),
        paste("\n2020 N",a(sum(c(mar[[3]]$pop))),sep=": "),
        sep=" | "))%>%
      hc_add_theme(hc_theme_smpl()) %>%
      hc_exporting(enabled = TRUE)%>%
      hc_tooltip(enabled = TRUE)%>%
      hc_colors(c(gg_color_hue(3)))%>%
      hc_credits(enabled = TRUE, text = "NCCR ON THE MOVE",href = "https://nccr-onthemove.ch/")
    rank
  })
#### E40 Have you had business activities in the past 12 months with people or institutions in countries other than Switzerland?####
output$E40_16 <-renderText({
x<-"Question not included in 2016 and 2020"
})
  output$E40_18 <-renderHighchart({
    # Pie chart for item E40 (2018 wave only): whether the respondent had
    # business activities with people/institutions outside Switzerland.
    # input$magE selects the magnitude column (weighted vs unweighted N).
    #fst <- lapply(dmms, function(x){
    x<-dmms[["D18"]][,c("E40",input$magE,"year")]
    colnames(x)<-c("E40","pop","year")
    # Recode NA to -7 and drop all missing codes (-7/-8/-9) before aggregating.
    x[is.na(x)] <- -7
    x<-x %>%
      group_by(year,E40) %>%
      filter(E40!=-7)%>%
      filter(E40!=-9)%>%
      filter(E40!=-8)%>%
      summarise(pop=round(sum(pop),0))
    # })
    pie_single<- highchart() %>%
      hc_chart(type = 'pie') %>%
      hc_legend(enabled = TRUE) %>%
      hc_plotOptions(column = list(stacking = "normal"),
                     series = list(dataLabels = list(enabled = TRUE,
                                                     format = '<b>{point.name}</b>: {point.percentage:.1f} %'))) %>%
      # NOTE(review): this assumes the grouped result has exactly two rows with
      # row 1 = "Yes" and row 2 = "No" (i.e. answer codes sort that way) --
      # confirm against the codebook.
      hc_add_series(data = list(
        list(y = round(x[1,3][[1]],2), name = "Yes"),
        list(y = round(x[2,3][[1]],2), name = "No")))%>%
      hc_title(text = "2018:Business activities in countries other than Switzerland", align = 'left') %>%
      # Subtitle shows the total N over both answer categories.
      hc_subtitle(text = paste("\nN",a(sum(c(x[1,3][[1]],
                                             x[2,3][[1]]))),
                               sep=": "))%>%
      hc_add_theme(hc_theme_smpl()) %>%
      hc_exporting(enabled = TRUE)%>%
      hc_credits(enabled = TRUE, text = "NCCR ON THE MOVE",href = "https://nccr-onthemove.ch/")%>%
      #
      hc_tooltip(pointFormat = "N: {point.y}", enabled = TRUE)
    pie_single})
  #### E41: Countries of business activities ####
output$E41_1_16 <-renderText({
x<-"Question not included in 2016"
})
output$E41_1_18 <-renderHighchart({
#hom <- lapply(dmms, function(x){
x<-dmms[["D18"]][,c("E41_1",input$magE,"year")]
colnames(x)<-c("E41_1","pop","year")
x<-as.data.frame(x)
x = data.frame(x, countries[match(x[,"E41_1"],
countries[,"A3"]),c("official_title_en")])
colnames(x)[length(names(x))]<-paste("E41_1","B",sep="")
x$E41_1B<-as.factor(x$E41_1B)
x$E41_1B<-fct_explicit_na(x$E41_1B)
colnames(x)<-c("B1","pop","year","B1B")
x<-x %>%
group_by(year,B1B) %>%
filter(B1B!="(Missing)")%>%
summarise(pop=round(sum(pop),0))%>%
mutate(prop=round(pop/sum(pop)*100,1))%>%
arrange(desc(pop))%>%
head(20)
x
# })
data<- if(input$magE2=="Absolute"){
c(x$pop)}else{
c(x$prop)}
formatter<- ifelse(input$magE2=="Relative","function(){ return Math.abs(this.value) + '%'; }",
"function(){ return Math.abs(this.value); }")
hc_yAxis<-ifelse(input$magD=="n_nw", 200,20000)
rank <- highchart() %>%
hc_chart(type = 'bar',zoomType= 'xy') %>%
hc_legend(enabled = FALSE) %>%
hc_xAxis(categories = as.character(x$B1B), title = list(text = '')) %>%
hc_yAxis(title = list(text = 'N')) %>%
hc_plotOptions(series = list(dataLabels = list(enabled = TRUE))) %>%
hc_add_series(name = "Value",
data = data) %>%
hc_title(text = "2018: Top 20 countries business activities (1st choice)",
align = 'left') %>%
hc_subtitle(text = paste("\nN",a(sum(c(x$pop))),sep=": "))%>%
hc_add_theme(hc_theme_smpl()) %>%
hc_yAxis(labels = list(formatter = JS(formatter)),
min=ifelse(input$magE2=="Absolute",0,0),
max=ifelse(input$magE2=="Absolute",hc_yAxis,25))%>%
hc_exporting(enabled = TRUE)%>%
hc_credits(enabled = TRUE, text = "NCCR ON THE MOVE",href = "https://nccr-onthemove.ch/")%>%
hc_tooltip(enabled = TRUE)
rank
})
output$E41_2_18 <-renderHighchart({
#hom <- lapply(dmms, function(x){
x<-dmms[["D18"]][,c("E41_2",input$magE,"year")]
colnames(x)<-c("E41_2","pop","year")
x<-as.data.frame(x)
x = data.frame(x, countries[match(x[,"E41_2"],
countries[,"A3"]),c("official_title_en")])
colnames(x)[length(names(x))]<-paste("E41_2","B",sep="")
x$E41_2B<-as.factor(x$E41_2B)
x$E41_2B<-fct_explicit_na(x$E41_2B)
colnames(x)<-c("B1","pop","year","B1B")
x<-x %>%
group_by(year,B1B) %>%
filter(B1B!="(Missing)")%>%
summarise(pop=round(sum(pop),0))%>%
mutate(prop=round(pop/sum(pop)*100,1))%>%
arrange(desc(pop))%>%
head(20)
x
# })
data<- if(input$magE2=="Absolute"){
c(x$pop)}else{
c(x$prop)}
formatter<- ifelse(input$magE2=="Relative","function(){ return Math.abs(this.value) + '%'; }",
"function(){ return Math.abs(this.value); }")
hc_yAxis<-ifelse(input$magD=="n_nw", 200,20000)
rank <- highchart() %>%
hc_chart(type = 'bar',zoomType= 'xy') %>%
hc_legend(enabled = FALSE) %>%
hc_xAxis(categories = as.character(x$B1B), title = list(text = '')) %>%
hc_yAxis(title = list(text = 'N')) %>%
hc_plotOptions(series = list(dataLabels = list(enabled = TRUE))) %>%
hc_add_series(name = "Value",
data = data) %>%
hc_title(text = "2018: Top 20 countries business activities (2nd choice)",
align = 'left') %>%
hc_subtitle(text = paste("\nN",a(sum(c(x$pop))),sep=": "))%>%
hc_add_theme(hc_theme_smpl()) %>%
hc_yAxis(labels = list(formatter = JS(formatter)),
min=ifelse(input$magE2=="Absolute",0,0),
max=ifelse(input$magE2=="Absolute",hc_yAxis,25))%>%
hc_exporting(enabled = TRUE)%>%
hc_credits(enabled = TRUE, text = "NCCR ON THE MOVE",href = "https://nccr-onthemove.ch/")%>%
hc_tooltip(enabled = TRUE)
rank
})
#### E42 What kinds of business activities have you had in the past 12 months with people and institutions in countries other than Switzerland? #####
  output$E42_1_18 <-renderHighchart({
    # Bar chart for multi-response item E42 (kinds of business activities
    # abroad in the past 12 months), 2018 wave only.
    # NOTE(review): the name `dmms16` is misleading -- dmms[2] is the second
    # wave data set ("D18"/2018), consistent with the "2018 N" subtitle below.
    dmms16<- dmms[2]
    mar <- lapply(dmms16, function(x){
      x<-x[,c("E42_1","E42_2","E42_3","E42_4",
              "E42_5","E42_6",
              input$magE,"year")]#input$magC,
      colnames(x)<-c("E42_1","E42_2","E42_3","E42_4",
                     "E42_5","E42_6","pop","year")
      # Reshape the six yes/no sub-items into long format (one row per
      # respondent x sub-item).
      xlong<-x %>%gather(groupz, value,c(E42_1,E42_2,E42_3,E42_4,
                                         E42_5,E42_6),
                         factor_key=FALSE)
      # x1: counts of "selected" answers per sub-item. Missing codes
      # (-7/-8/-9) are dropped; value 2 is dropped too, so x1 keeps only
      # value 1 responses -- presumably 1 = yes, 2 = no; confirm codebook.
      x1<-xlong %>%
        group_by(year,groupz) %>%
        filter(value!=-7)%>%
        filter(value!=-9)%>%
        filter(value!=-8)%>%
        filter(value!=2)%>%
        summarise(pop=round(sum(pop),0))
      # x2: denominator -- all valid (non-missing) answers per sub-item.
      x2<-xlong %>%
        group_by(year,groupz) %>%
        filter(value!=-7)%>%
        filter(value!=-9)%>%
        filter(value!=-8)%>%
        summarise(pop=round(sum(pop),0))
      # NOTE(review): unique(x2$pop) relies on x1 and x2 having matching row
      # order; it would misalign if any sub-item had no "yes" answers.
      x1$prop<-round(x1$pop/unique(x2$pop)*100,1)
      x1
    })
    marDF<- as.data.frame(do.call("rbind", mar)) #CATS 1,2,4,5,6
    # Ensure every groupz/year combination is present (fills gaps with NA).
    marDF<-marDF %>% complete(groupz, nesting(year))
    marDF$groupz<-as.factor(marDF$groupz)
    #levels( marDF$groupz)
    #marDF$groupz<-factor(marDF$groupz, levels=c("B27_1","B27_2","B27_3",
    # "B27_4","B27_5","B27_6"))
    # Relabel E42_1..E42_6 with their answer texts; relies on the default
    # alphabetical factor-level order matching the item order.
    levels(marDF$groupz)<-c("I import goods produced abroad",
                            "I have people located abroad who do jobs for me (paid or unpaid)",
                            "I have financial support from abroad for my business activities",
                            "I sell goods / services abroad (myself and / or via internet)",
                            "I receive information, training, or counseling services from abroad, including via the internet",
                            "Other")
    marDF[is.na(marDF)] <- 0
    marDF<-with(marDF, marDF[order(groupz),])
    # Axis ceiling depends on the unweighted (n_nw) vs weighted magnitude.
    hc_yAxis<-ifelse(input$magE=="n_nw", 500,50000)
    # JS axis-label formatter: append '%' in relative mode.
    formatter<- ifelse(input$magE2=="Relative","function(){ return Math.abs(this.value) + '%'; }",
                       "function(){ return Math.abs(this.value); }")
    # marDF<-as.data.frame(marDF)
    #marDF<-ifelse(input$mag3=="Relative","prop","pop")
    # Keep either the percentage (col 4) or the count (col 3) column.
    marDF<-if(input$magE2=="Relative"){
      marDF[,c(1,2,4)]
    } else {
      marDF[,c(1,2,3)]}
    colnames(marDF)<-c("groupz","year","prop")
    marDF$year<-paste("Wave ",marDF$year, sep="")
    rank <-marDF %>%
      hchart('bar', hcaes(x = 'groupz', y = 'prop', group = 'year')) %>%
      hc_legend(enabled =TRUE ) %>%
      hc_yAxis(title = list(text = '')) %>%
      hc_xAxis(title = list(text = '')) %>%
      hc_yAxis(labels = list(formatter = JS(formatter)),
               min=ifelse(input$magE2=="Absolute",0,0),
               max=ifelse(input$magE2=="Absolute",hc_yAxis,50))%>%
      hc_plotOptions(series = list(dataLabels = list(enabled = TRUE))) %>%
      #hc_add_series(name = "Value", data = data) %>%
      hc_title(text = "What kinds of business activities have you had in the past 12 months with people and institutions in countries other than Switzerland?",align = 'left') %>%
      hc_subtitle(text = paste("\n2018 N",a(sum(c(mar[[1]]$pop))),sep=": "))%>%
      hc_add_theme(hc_theme_smpl()) %>%
      hc_exporting(enabled = TRUE)%>%
      hc_tooltip(enabled = TRUE) %>%
      hc_colors(c(gg_color_hue(3)[2]))%>%
      hc_credits(enabled = TRUE, text = "NCCR ON THE MOVE",href = "https://nccr-onthemove.ch/")
    rank
  })
|
4901fa00eb8cfcf3558222071ed0350420b26aef
|
04a7e4899d9aac6d1dbb0c37a4c45e5edb4f1612
|
/man/conv_type_1_table_lines.Rd
|
6c943b0b31ba7aa76382f8fd8b3042312b11b625
|
[
"MIT"
] |
permissive
|
pbs-assess/csasdown
|
796ac3b6d30396a10ba482dfd67ec157d7deadba
|
85cc4dda03d6513c11350f7f607cce1cacb6bf6a
|
refs/heads/main
| 2023-08-16T17:22:18.050497
| 2023-08-16T00:35:31
| 2023-08-16T00:35:31
| 136,674,837
| 47
| 18
|
NOASSERTION
| 2023-06-20T01:45:07
| 2018-06-08T23:31:16
|
R
|
UTF-8
|
R
| false
| true
| 2,425
|
rd
|
conv_type_1_table_lines.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/conv-type-1-table-lines.R
\name{conv_type_1_table_lines}
\alias{conv_type_1_table_lines}
\title{Convert an Rmarkdown type 1 table into a simplified form with WYSIWYG
newlines}
\usage{
conv_type_1_table_lines(chunk)
}
\arguments{
\item{chunk}{A vector of character strings representing lines for RMD code}
}
\value{
A list of two elements, which are the modified type 1 table and the
rest of the chunk starting with the line after the end of the table
}
\description{
Convert an Rmarkdown type 1 table into a simplified form with WYSIWYG
newlines. If there is no table detected, NULL will be returned as the
converted chunk, or an error will be thrown depending on the situation.
}
\details{
A type 1 table is defined as a manually-entered Rmarkdown table with a
minimum of five lines with this format:
\if{html}{\out{<div class="sourceCode">}}\preformatted{----- ----- -----
a b c
----- ----- -----
abc def ghi
----- ----- -----
}\if{html}{\out{</div>}}
There is no whitespace in between any of these lines. The first text row is
the column headers for the table and the second text row is the table data.
The second row can be multiple rows separated by an arbitrary number of
blank lines, but there cannot be blank lines before the table data or after.
Here is an example with three table data rows in acceptable format, along
with the optional table caption text which must start with 'Table:'
\if{html}{\out{<div class="sourceCode">}}\preformatted{----- ----- -----
a b c
----- ----- -----
abc def ghi
jkl mno pqr
stu vwx yza
----- ----- -----
Table: Table caption (0 or more blank lines between table and this caption)
A second line of table caption here (no blank lines in between)
}\if{html}{\out{</div>}}
}
\examples{
\dontrun{
chunk <- c("---------- -----------", " Parameter Value",
"---------- -----------", " x 1.0",
" y 2.2", "---------- -----------")
tmp <- csasdown:::conv_type_1_table_lines(chunk)
the_rest <- tmp[[2]]
}
}
\seealso{
Other rmd_conversion_functions:
\code{\link{conv_blank_lines}()},
\code{\link{conv_header_lines}()},
\code{\link{conv_list_lines}()},
\code{\link{conv_paragraph_lines}()},
\code{\link{conv_type_2_table_lines}()},
\code{\link{convert_newlines_rmd}()}
}
\concept{rmd_conversion_functions}
\keyword{internal}
|
b4caa56b693969d007bff291a98b76cfbdadd012
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/lcyanalysis/examples/up_function.Rd.R
|
8b9373de06769ac927b0ec6cd0d16ac574614148
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 224
|
r
|
up_function.Rd.R
|
# Auto-extracted example script for lcyanalysis::up_function (from its Rd file).
library(lcyanalysis)
### Name: up_function
### Title: up trend line up function
### Aliases: up_function
### ** Examples
library(quantmod)
# NOTE(review): fetches AAPL quotes from Yahoo Finance at run time -- the
# example requires network access and may fail offline.
aapl<-getSymbols("AAPL",src="yahoo",auto.assign=FALSE)
up_function(aapl,20,1)
|
fea0bbbb769d69129c3ae995b19ee0183f816ccf
|
2717925e436d4740eb5aef564b037f4cac45e64a
|
/tests/testthat/test_tmux.R
|
c03416703127b72701b10dc3938b89ee3552b4ce
|
[
"BSD-2-Clause"
] |
permissive
|
mllg/gtfo
|
5415faca17469cb5ede6dbbe86f4ee89d44b24f0
|
2541e68b6f76f94f37b55870029779a274452801
|
refs/heads/master
| 2020-03-18T13:17:35.653943
| 2018-05-31T11:40:02
| 2018-05-31T11:40:02
| 134,774,000
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 209
|
r
|
test_tmux.R
|
# Tests for tmux detection: find_tmux() should yield a command object when the
# TMUX environment variable is set, and NULL when it is empty.
context("tmux")
test_that("tmux is detected", {
  # With TMUX set, detection succeeds and returns a "cmd" object.
  withr::with_envvar(list(TMUX = "foo"), {
    expect_is(find_tmux(), "cmd")
  })
  # With TMUX empty, detection fails gracefully and returns NULL.
  withr::with_envvar(list(TMUX = ""), {
    expect_null(find_tmux())
  })
})
|
a080471b0936242dc5c21d6ad8cd754f64047f11
|
a188192e32330117438c7bb2b4681ff515b4e775
|
/man/cleanQMJ10QMonthly.Rd
|
721c5c79d443ef014620e505c23299e982081db0
|
[] |
no_license
|
jrs9/FFAQR
|
6e5e80efced864c9f3f1ad0bbf0882878d8b1def
|
fd4f45a4a83899112ee8676113990c5e92c2c131
|
refs/heads/master
| 2016-09-05T20:20:01.386675
| 2015-01-28T15:17:18
| 2015-01-28T15:17:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 372
|
rd
|
cleanQMJ10QMonthly.Rd
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/cleanQMJ10QMonthly.R
\name{cleanQMJ10QMonthly}
\alias{cleanQMJ10QMonthly}
\title{Reads in, cleans, and subdivides daily QMJ10QMonthly data set in data folder.}
\usage{
cleanQMJ10QMonthly()
}
\description{
Reads in, cleans, and subdivides daily QMJ10QMonthly data set in data folder.
}
|
0ce122499fc5fcbfba23cad93dd1688ae689a39e
|
2dcdabb1e68f1068000ddc3bfcbeec4cc7386e21
|
/man/signal_func.Rd
|
c06bf6b77522675a9859cc664c7b7669e61b1aca
|
[
"MIT"
] |
permissive
|
dozmorovlab/preciseTAD
|
026a98aab76f46cd8df3301020c996677a224d01
|
4b71866911061eb3bf2f79584bc7ff219c7bae16
|
refs/heads/master
| 2023-05-23T21:24:02.386063
| 2022-04-22T01:47:12
| 2022-04-22T01:47:12
| 278,137,960
| 6
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 501
|
rd
|
signal_func.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/signal_func.R
\name{signal_func}
\alias{signal_func}
\title{Helper function used to create signal type feature space}
\usage{
signal_func(binned_data_gr, annot_data_gr)
}
\arguments{
\item{binned_data_gr}{A GRanges object}
\item{annot_data_gr}{A GRanges object}
}
\value{
A vector of intensities indicating the signal strength within each
overlap
}
\description{
Helper function used to create signal type feature space
}
|
eae536ec6c8b4ffd625133731699aa262c3fe2a8
|
4201e9b754760dc35fc0aeef9df5a8b9d801c47f
|
/bin/R-3.5.1/src/library/tools/man/undoc.Rd
|
6ad76ec7bf4707c89e8f576d3e862101baeee870
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"GPL-2.0-only"
] |
permissive
|
lifebit-ai/exomedepth
|
cbe59cb7fcf2f9183d187f8d466c6620fb1a0c2e
|
5a775ae5e2a247aeadc5208a34e8717c7855d080
|
refs/heads/master
| 2020-03-27T12:55:56.400581
| 2018-10-11T10:00:07
| 2018-10-11T10:00:07
| 146,578,924
| 0
| 0
|
MIT
| 2018-08-29T09:43:52
| 2018-08-29T09:43:51
| null |
UTF-8
|
R
| false
| false
| 2,143
|
rd
|
undoc.Rd
|
% File src/library/tools/man/undoc.Rd
% Part of the R package, https://www.R-project.org
% Copyright 1995-2007 R Core Team
% Distributed under GPL 2 or later
\name{undoc}
\alias{undoc}
\alias{print.undoc}
\title{Find Undocumented Objects}
\usage{
undoc(package, dir, lib.loc = NULL)
}
\description{
Finds the objects in a package which are undocumented, in the sense
that they are visible to the user (or data objects or S4 classes
provided by the package), but no documentation entry exists.
}
\arguments{
\item{package}{a character string naming an installed package.}
\item{dir}{a character string specifying the path to a package's root
source directory. This must contain the subdirectory \file{man}
with \R documentation sources (in Rd format), and at least one of
the \file{R} or \file{data} subdirectories with \R code or data
objects, respectively.}
\item{lib.loc}{a character vector of directory names of \R libraries,
or \code{NULL}. The default value of \code{NULL} corresponds to all
libraries currently known. The specified library trees are used to
search for \code{package}.}
}
\details{
This function is useful for package maintainers mostly. In principle,
\emph{all} user-level \R objects should be documented.
The \pkg{base} package is special as it contains the primitives and
these do not have definitions available at code level. We provide
equivalent closures in environments \code{.ArgsEnv} and
\code{.GenericArgsEnv} in the \pkg{base} package that are used
for various purposes: \code{undoc("base")} checks that all the
primitives that are not language constructs are prototyped in those
environments and no others are.
}
\value{
An object of class \code{"undoc"} which is a list of character vectors
containing the names of the undocumented objects split according to
documentation type.
There is a \code{print} method for nicely displaying the information
contained in such objects.
}
\seealso{
\code{\link{codoc}}, \code{\link{QC}}
}
\examples{
undoc("tools") # Undocumented objects in 'tools'
}
\keyword{documentation}
|
01ea3a22e222be0e0170b5f8598ca1a115bfff5d
|
94cfb0deb1dad7940a3ba4c5d7c7d60e293a0bc5
|
/TestFunctions/test_ggplot2.R
|
71e51ae6f065cfbed67773973ed4e9d26c5f90eb
|
[] |
no_license
|
fredcommo/CGHObjectsSources
|
13334364b625d43e965700edca16f450f3cd9284
|
613143d5ebd30ff290dc0b31d3136bd032925b8c
|
refs/heads/master
| 2020-03-29T09:35:34.126976
| 2013-10-19T18:52:18
| 2013-10-19T18:52:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,774
|
r
|
test_ggplot2.R
|
locateChr <- function(y){
  # Intended to add chromosome locator guides (baseline + per-chromosome
  # boundary lines/labels at cumulative hg19 positions) to a genomic plot.
  # NOTE(review): this function looks unfinished/broken:
  #   - the argument `y` and the colText/colLines constants are never used
  #     (the label-drawing code that used them is commented out below);
  #   - the two free-standing `+ geom_*()` lines are unary-plus expressions
  #     not attached to any ggplot object, so they draw nothing; the function
  #     simply returns the last layer object.
  colText = 'grey40'
  colLines = 'grey80'
  # Hard-coded, machine-specific path to the hg19 chromosome-length table.
  arrayInfoPath = '/Users/fredcommo/Documents/Projet Safir/Arrays Infos/'
  hg19 <- read.csv(paste(arrayInfoPath, 'human.chrom.info.hg19.FC.txt', sep = ''), header = TRUE, sep = '\t')
  # Cumulative chromosome end positions and per-chromosome centre positions.
  cumLen = cumsum(as.numeric(hg19$length))
  cumCentr <- 1/2*cumLen[1]
  for(chr in 2:length(cumLen)) cumCentr = c(cumCentr, cumLen[chr-1] + 1/2*hg19$length[chr])
  + geom_hline(yintercept = 0)
  + geom_vline(xintercept = cumLen[1:23], color = 'grey30', linetype = 2, size = 0.1)
  # annotate(0, y, labels = "chr", cex = 0.75, col = colText)
  # ltext(cumLen[1]/2, y, labels = 1, cex = 0.75, col = colText)
  # for(i in 2:length(cumCentr)){
  # x <- (hg19$length[i]/2 + cumLen[i-1])
  # ltext(x, y, labels = i, cex = 0.75, col = colText)
  # }
}
createProfile <- function(object, gain = log2(2.25/2), loss = log2(1.80/2)){
  # Builds the genome-wide copy-number profile plot (Log2 ratios along the
  # cumulative hg19 genomic position) for a CGH object and stores it in the
  # object's @gProfile slot.
  #
  # object: S4 object providing getCNset()/getSegTable()/getInfo() accessors
  #         and a @gProfile slot.
  # gain/loss: Log2-ratio thresholds used to colour segments (blue = gain,
  #            red = loss).
  # Returns the object with @gProfile set.
  require(ggplot2, quietly = TRUE)
  # NOTE(review): `wrapper` is defined but never used in this function.
  wrapper <- function(x, ...) paste(strwrap(x, ...), collapse = "\n")
  # NOTE(review): `arrayInfoPath` is not defined in this function -- it relies
  # on a global of that name existing (it is only local in locateChr above);
  # calling this without a global arrayInfoPath will fail.
  hg19 <- read.csv(paste0(arrayInfoPath, 'human.chrom.info.hg19.FC.txt'), header = TRUE, sep = '\t')
  # Cumulative chromosome end positions and per-chromosome centre positions
  # used for the boundary lines and the "Chr" axis labels.
  cumLen = cumsum(as.numeric(hg19$length))
  cumCentr <- 1/2*cumLen[1]
  for(chr in 2:length(cumLen)) cumCentr = c(cumCentr, cumLen[chr-1] + 1/2*hg19$length[chr])
  cnSet = getCNset(object)
  segTable = getSegTable(object)
  Title = paste(getInfo(object, 'sampleId'), '-', getInfo(object, 'analyseDate'),
                '\nGain threshold: ', round(gain, 3), ' Loss threshold:', round(loss, 3))
  # Drop any row containing an NA in any column.
  if(any(is.na(cnSet))){
    NAs <- c()
    for(i in 1:ncol(cnSet))
      NAs <- c(NAs, which(is.na(cnSet[,i])))
    NAs <- unique(NAs)
    cnSet <- cnSet[-NAs,]
  }
  # Downsample to ~15k probes for plotting speed, then add a running median.
  Samp = seq(1, nrow(cnSet), len = 15e3)
  cnSet = cnSet[Samp,]
  cnSet <- cbind.data.frame(cnSet, rMed = runmed(cnSet$Log2Ratio, k=71))
  gPlot <- ggplot(data = cnSet, aes(x = genomicPos, y = Log2Ratio)) +
    geom_point(pch = 19, cex = 0.25, col = 'grey80') +
    geom_hline(yintercept = 0) +
    geom_vline(xintercept = cumLen[1:23], color = 'grey30', linetype = 2, size = 0.1) +
    annotate('text', x = c(0, cumCentr), y = rep(1.4, 25), label = c("Chr", seq(1, 24)), size = 2.5, colour = 'grey30') +
    geom_point(aes(x = genomicPos, y = rMed), cex = 0.25, type = 'l') +
    # Segment values coloured by the gain/loss thresholds.
    geom_point(aes(x = genomicPos, y = Segm), cex = 1,
               col = ifelse(cnSet$Segm<= loss, 'red3', ifelse(cnSet$Segm>= gain, 'dodgerblue2', 'black'))) +
    coord_cartesian(ylim = range(-1.5, 1.5))+
    ggtitle(Title) +
    theme_bw() +
    theme( panel.grid.major = element_blank(),
           panel.grid.minor = element_blank(),
           plot.title = element_text(lineheight=.8, face="bold"))
  object@gProfile <- gPlot
  return(object)
}
tagGene <- function(object, gain = log2(2.25/2), loss = log2(1.80/2), tag = NULL){
  # Annotate a copy-number profile plot with the segment Log2-ratio of a
  # single gene.
  #
  # object: CGH object providing getSegTable()/getProfile() accessors.
  # gain/loss: Log2-ratio thresholds used to colour the annotation
  #            (blue = gain, red = loss, black = neutral).
  # tag: gene symbol to highlight; when NULL the unannotated profile is
  #      returned unchanged.
  #
  # BUG FIX: the guard used to be `!'geneDB' %in% ls()`. Inside a function,
  # ls() lists only the *local* frame, which never contains `geneDB` at this
  # point, so the condition was always TRUE and the database was re-read from
  # disk on every call. exists() also searches the enclosing/global
  # environments, so a previously loaded `geneDB` is now actually reused.
  if(!exists("geneDB")){
    # Hard-coded, machine-specific path to the serialized gene database.
    arrayInfoPath = '/Users/fredcommo/Documents/Projet Safir/Arrays Infos/'
    geneDB <- readRDS(paste0(arrayInfoPath, 'myGeneDB.rds'))
  }
  if(!is.null(tag)){
    segTable = getSegTable(object)
    # Locate the tagged gene and the segment fully containing it.
    tmp <- geneDB[which(geneDB$NomenclatureSymbol == tag),]
    LR <- segTable$seg.med[which(segTable$loc.start<= tmp$genomicStart & segTable$loc.end >= tmp$genomicStop)]
    Col <- ifelse(LR<= loss, 'red3', ifelse(LR>= gain, 'dodgerblue2', 'black'))
    # Two segments forming an elbow from the text label down to the gene's
    # segment value.
    dat <- data.frame(xstart = c(tmp$genomicStart-5e7, tmp$genomicStart),
                      xend = c(tmp$genomicStart, tmp$genomicStart),
                      ystart = c(1, 1),
                      yend = c(1, LR),
                      Col = rep(Col, 2))
    getProfile(object) +
      annotate("text", x = tmp$genomicStart-1.5e8, y = 1, label = paste0(tmp$NomenclatureSymbol, '\n(Log2R = ', round(LR, 3), ')'), cex = 5) +
      geom_segment(data = dat, aes(x = xstart, y = ystart, xend = xend, yend = yend), colour = Col)
  }
  else getProfile(object)
}
# NOTE(review): ad-hoc development calls left at top level. They reference
# objects (`object6`, `myPlot`) that are not created in this file -- presumably
# from an interactive session -- and the final annotate() call is detached
# from any plot, so it has no effect on its own. TODO: move to an example or
# remove.
tagGene(object6, tag = 'EGFR')
myPlot
annotate("text", 2e9, 1, label = 'another gene', cex = 5)
|
f893074243fc061f4454f5620caa4557b9765aa3
|
aa7dd0af20f44056cff5da956eb6911c232a77ce
|
/man/plot.FDAimage.Rd
|
ff063abbc4512377c99d0aed8cf2dcb308b98bc7
|
[] |
no_license
|
FIRST-Data-Lab/FDAimage
|
b5f92040c90c90ed8afb45af06471cb204e7b7c1
|
82def691e3089d0a056d1dd1f869385b68aacd8b
|
refs/heads/master
| 2022-02-27T16:40:13.924022
| 2019-10-11T01:43:08
| 2019-10-11T01:43:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 656
|
rd
|
plot.FDAimage.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot.FDAimage.R
\name{plot.FDAimage}
\alias{plot.FDAimage}
\title{Produces coefficient function plots for a fitted "FDAimage" object.}
\usage{
\method{plot}{FDAimage}(mfit)
}
\arguments{
\item{mfit}{Fitted ``FDAimage" object.
\cr}
}
\value{
None
}
\description{
This function produces the images of the estimated coefficient function for image-on-scalar regression.
}
\details{
This R package is the implementation program for manuscript entitled "Multivariate Spline Estimation and Inference for Image-on-Scalar Regression" by Shan Yu, Guannan Wang, Li Wang and Lijian Yang.
}
|
ed0679cabe9fc6e20c5bd6b7a002d75a6e422dea
|
7e2366414e85d27c7487944dc0ac64a6a87e216b
|
/Predicted_observed.R
|
b8568f1968e9254cbb9153238cecf790495cf6a0
|
[] |
no_license
|
JanBaert/Per-capita-interactions-and-stress-tolerance-drive-stress-induced-changes-in-biodiversity-effects
|
f54738078ad971e650c885da4e4ead5d897558dc
|
eefc27602c8dbcbbbb9ac4ef33c134c32781c695
|
refs/heads/master
| 2021-01-09T20:53:24.371084
| 2016-06-30T11:57:55
| 2016-06-30T11:57:55
| 58,455,402
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,561
|
r
|
Predicted_observed.R
|
# Script: compare model-predicted vs observed (relative) species yields for
# all community compositions, and draw Supplementary Figure 2 (predicted vs
# observed density and relative abundance, at 0 and 250 ug/L).
# NOTE(review): uses an absolute machine-specific working directory and
# require() for loading -- not portable beyond the original analysis machine.
# set working directory
setwd("/Users/Jan/Documents/UGent/Data/Experiment 1/Supporting information")
# load packages and data
require(deSolve)
load("data_biov.RData")
# load vector with the order in which parameters were estimated
load("compositions_list.RData")
# load model predictions
load("MC_out.RData")
load("M_out.RData")
# calculate predicted and observed relative species yields
# Sampling times (days) and result arrays: per row one species x time x
# concentration observation; columns are (observed, predicted, species
# richness, concentration); the third dimension indexes scenarios 1..5.
time <- c(7,14,21,28)
Y_pred_obs <- array(dim=c(length(compositions)*8*2*4,4,5))
RY_pred_obs <- array(dim=c(length(compositions)*8*2*4,4,5))
for(scen in 1:5){
  for(comp in compositions[1:31]){
    # Species present in this composition (names separated by "/").
    spec <- strsplit(toString(comp),"/",fixed=TRUE)[[1]]
    n_spec <- length(spec)
    # observed yields: mean per-time biovolume of each present species, at
    # control (0) and 250 ug/L; columns 13:20 hold the 8 species columns and
    # zero columns (absent species) are dropped.
    data_0 <- data_biov[which(data_biov$composition==comp&data_biov$concentration==0),c(2,13:20)]
    ind <- which(data_0[1,2:9]!=0)
    data_0 <- data_0[,c(1,1+which(data_0[1,c(2:9)]!=0))]
    data_0 <- t(sapply(time,function(x) apply(data_0[which(data_0[,1]==x),2:(n_spec+1)],2,mean)))
    data_250 <- data_biov[which(data_biov$composition==comp&data_biov$concentration==250),c(2,13:20)]
    data_250 <- data_250[,c(1,1+which(data_250[1,c(2:9)]!=0))]
    data_250 <- t(sapply(time,function(x) apply(data_250[which(data_250[,1]==x),2:(n_spec+1)],2,mean)))
    Y_obs_0 <- data_0
    Y_obs_250 <- data_250
    # Relative yields: each species' share of the community total per time.
    RY_obs_0 <- t(sapply(1:nrow(data_0),function(x) data_0[x,]/sum(data_0[x,])))
    RY_obs_250 <- t(sapply(1:nrow(data_250),function(x) data_250[x,]/sum(data_250[x,])))
    # predicted relative yields: median over Monte-Carlo draws, matching
    # species (ind) and concentration slices of MC_out.
    pred_0 <- MC_out[which(compositions==comp),ind,,which(c(0,25,250)==0),,scen]
    pred_0 <- t(apply(pred_0,c(1,2),median))
    pred_250 <- MC_out[which(compositions==comp),ind,,which(c(0,25,250)==250),,scen]
    pred_250 <- t(apply(pred_250,c(1,2),median))
    Y_pred_0 <- t(sapply(1:length(time),function(x) pred_0[x,]))
    Y_pred_250 <- t(sapply(1:length(time),function(x) pred_250[x,]))
    RY_pred_0 <- t(sapply(1:length(time),function(x) pred_0[x,]/sum(pred_0[x,])))
    RY_pred_250 <- t(sapply(1:length(time),function(x) pred_250[x,]/sum(pred_250[x,])))
    # Bray-Curtis-style dissimilarity between observed and predicted yields
    # per time point. NOTE(review): BC and resp_pred below are computed but
    # never used later in this script.
    BC_0 <- sapply(1:nrow(Y_pred_0),function(x) sum(abs(Y_obs_0[x,]-Y_pred_0[x,]))/(sum(Y_obs_0[x,]+Y_pred_0[x,])))
    BC_250 <- sapply(1:nrow(Y_pred_0),function(x) sum(abs(Y_obs_250[x,]-Y_pred_250[x,]))/(sum(Y_obs_250[x,]+Y_pred_250[x,])))
    BC <- rbind(sum(BC_0)/4,sum(BC_250)/4)
    pred_0 <- rowSums(pred_0)
    pred_250 <- rowSums(pred_250)
    resp_pred <- ((pred_0-pred_250)/pred_0)
    # Append this composition's rows into the first still-empty slots of the
    # result arrays (located via the first NA in column 1).
    Y_pred_obs[(which(is.na(Y_pred_obs[,1,scen]))[1]+c(0:(2*length(spec)*length(time)-1))),,scen] <- cbind(as.vector(c(Y_obs_0,Y_obs_250)),as.vector(c(Y_pred_0,Y_pred_250)),rep(length(spec),2*length(spec)*length(time)),c(rep(0,length(spec)*length(time)),rep(250,length(spec)*length(time))))
    RY_pred_obs[(which(is.na(RY_pred_obs[,1,scen]))[1]+c(0:(2*length(spec)*length(time)-1))),,scen] <- cbind(as.vector(c(RY_obs_0,RY_obs_250)),as.vector(c(RY_pred_0,RY_pred_250)),rep(length(spec),2*length(spec)*length(time)),c(rep(0,length(spec)*length(time)),rep(250,length(spec)*length(time))))
  }}
# Supplementary Figure 2
# NOTE(review): quartz() is the macOS-only on-screen device; this will fail on
# Windows/Linux.
colours=c("black","grey")
quartz(width=13,height=6)
# 2 rows of 5 scenario panels plus a bottom strip for the shared legend.
layout(matrix(c(1:10,11,11,11,11,11),byrow=T,nrow=3),height=c(.45,.45,.06))
par(oma=rep(1,4),mai=c(.55,.75,.5,.0))
# Top row: predicted vs observed absolute density (log10 scale), per scenario.
for(i in 1:5){
  plot(0,0,xlim=c(1,10),ylim=c(1,10),xlab=expression(paste("Observed density (mm"^3,paste(" L"^-1,")"))),ylab=expression(paste("Predicted density (mm"^3,paste(" L"^-1,")"))),cex.lab=1.1,cex.axis=1.2,main=paste("Scenario ",i),cex.main=1.5)
  for(c in c(0,250)){
    points(log10(Y_pred_obs[which(Y_pred_obs[,4,i]==c),1,i]),log10(Y_pred_obs[which(Y_pred_obs[,4,i]==c),2,i]),col=colours[which(c(0,250)==c)],cex=.3)}
  abline(0,1,lty=2)}
par(mai=c(.55,.75,.5,.0))
# Bottom row: predicted vs observed relative abundance, per scenario.
for(i in 1:5){
  plot(0,0,xlim=c(0,1),ylim=c(0,1),main="",xlab="Observed relative abundance (-)",ylab="Predicted relative abundance (-)",cex.lab=1.1,cex.axis=1.2)
  for(c in c(0,250)){
    points(RY_pred_obs[which(RY_pred_obs[,4,i]==c),1,i],RY_pred_obs[which(RY_pred_obs[,4,i]==c),2,i],col=colours[which(c(0,250)==c)],cex=.3)}
  abline(0,1,lty=2)}
# Shared legend strip for the two concentrations.
par(mai=rep(0,4))
plot.new()
legend("bottom",c(expression(paste("0 ",paste(mu,"g L"^-1))),expression(paste("250 ",paste(mu,"g L"^-1)))),pch=16,col=colours,horiz=T,bty="n",cex=1.2)
|
1aae58d897723711509c48f3d00cb31d0e077089
|
3be81a994b1ba51fb5fad7ef9bf2bde7ef039e44
|
/man/allCounty.Rd
|
6dcb8d209099c2497c1d8f62b205fab5f27c1f50
|
[] |
permissive
|
mthomas-ketchbrook/tidyUSDA
|
73e8b67f06b64725105f3aacf407a7b3273e9374
|
bcf918743d3b726337576791a297498ba1468b04
|
refs/heads/master
| 2020-07-11T09:22:18.022960
| 2019-09-11T16:21:53
| 2019-09-11T16:21:53
| 204,502,010
| 0
| 0
|
MIT
| 2019-09-11T16:21:54
| 2019-08-26T15:10:29
|
R
|
UTF-8
|
R
| false
| true
| 378
|
rd
|
allCounty.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data-allcounty.R
\docType{data}
\name{allCounty}
\alias{allCounty}
\title{All possible values from the COUNTY field.}
\format{A vector with 1 variable}
\source{
\url{https://quickstats.nass.usda.gov}
}
\usage{
allCounty
}
\description{
All possible values from the COUNTY field.
}
\keyword{datasets}
|
223b151edff0870ede66dcd528be978cd7dbbe51
|
198f171a9d02f67e8e6cbf58303583dc2b5f656c
|
/plot4.R
|
3b95becde8276b1725ebc686b9356090ce6848b1
|
[] |
no_license
|
sorenlind/ExData_Plotting1
|
653d4f429ca9d14624b358606f114ab29da7a7e6
|
5f622551cfbb1502d967f8cfab1db0bb6377e718
|
refs/heads/master
| 2020-12-25T08:37:41.486341
| 2015-08-09T17:01:53
| 2015-08-09T17:01:53
| 40,360,954
| 0
| 0
| null | 2015-08-07T13:07:37
| 2015-08-07T13:07:36
| null |
UTF-8
|
R
| false
| false
| 1,020
|
r
|
plot4.R
|
# plot4.R: draws a 2x2 panel of household power-consumption time series and
# writes it to plot4.png (480x480, transparent background).
source('./common.R')
# Read the data used for plotting (readData() comes from common.R; the data
# is expected to provide DateTime plus the power/voltage/sub-metering columns
# used below).
plotData <- readData()
# Open PNG device
png(file = "plot4.png", height = 480, width = 480)
# Make space for 4 plots in one and set transparent background color
par(mfrow = c(2, 2), bg = NA)
# Create the plot
with(plotData, {
  # Top left plot
  plot(Global_active_power ~ DateTime, type = "l", xlab = "", ylab = "Global Active Power")
  # Top right plot
  plot(Voltage ~ DateTime, type = "l", xlab = "datetime", ylab = "Voltage")
  # Bottom left plot: three sub-metering series overlaid, with a legend
  plot(Sub_metering_1 ~ DateTime, type = "l", xlab = "", ylab = "Energy sub metering")
  lines(Sub_metering_2 ~ DateTime, col = "red")
  lines(Sub_metering_3 ~ DateTime, col = "blue")
  legend("topright",
         col=c("black", "red", "blue"),
         lty = 1,
         legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
  # Bottom right plot
  plot(Global_reactive_power~DateTime, type = "l", xlab = "datetime", ylab = "Global_reactive_power")
})
#Shut down PNG device
dev.off()
|
6f87d079ee0a18600a9377f842648ba847b1c96f
|
4d449c0a0aba578bd843499661325a5b95c6c745
|
/master_scripts/air_vars.R
|
c390e9828a691a10acc6a014eaf1661492b5a28e
|
[] |
no_license
|
CourtneyCampany/selaginella
|
b90df01b1fb52d7352d91cea7a2376125e6cb54a
|
f68c72c99e8a97afa961764599cd496605fe8a5e
|
refs/heads/master
| 2021-06-30T11:05:27.493146
| 2019-05-13T17:25:53
| 2019-05-13T17:25:53
| 104,884,411
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,981
|
r
|
air_vars.R
|
# Microclimate comparison across Selaginella habitats: reads logged
# temperature/RH data, derives vapour pressure deficit, and produces the
# 3-panel Figure 1 (temperature, dew point, VPD over one day at three sites).
# NOTE(review): assumes plot_objects.R defines the axis labels templab,
# dewlab and vpdlab -- confirm before running this script standalone.
source("master_scripts/plot_objects.R")
##micro climate data for site conditions for selaginella
clim <- read.csv("raw_data/microclimate.csv", stringsAsFactors = FALSE)
library(lubridate)
# Parse "m/d/y h:m" timestamps; logger times are treated as UTC
clim$datetime <- mdy_hm(clim$datetime, tz='UTC')
clim$site <- as.factor(clim$site)
library(plantecophys)
# Vapour pressure deficit from relative humidity and air temperature
# (atmospheric pressure fixed at 101 kPa)
clim$vpd <- RHtoVPD(RH=clim$rh_perc, TdegC=clim$temp_C, Pa=101)
#max diff for discussion among habitats
# open_max_t <- max(clim[clim$site == "sel_umb", 4])
# open_max_vpd <- max(clim[clim$site == "sel_umb", 7])
#
# open_cc_t <- max(clim[clim$site == "sel_anc", 4])
# open_cc_vpd <- max(clim[clim$site == "sel_anc", 7])
#
# open_swamp_t <- max(clim[clim$site == "sel_oxa", 4])
# open_swamp_vpd <- max(clim[clim$site == "sel_oxa", 7])
#
# tdiff <- (open_max_t - (open_cc_t+open_swamp_t)/2) / open_max_t
# vdiff <- (open_max_vpd - (open_cc_vpd+open_swamp_vpd)/2) / open_max_vpd
# plot objects ------------------------------------------------------------
# Axis ranges, tick positions, colours and legend labels shared by the panels
startday <- min(clim$datetime)
enddate <- max(clim$datetime)
enddate2 <- as.POSIXct("2011-07-01 14:00:00", tz="UTC")
# xAT <- seq(from=startday,to=enddate, by="hour", tz="UTC")
xAT2 <- seq(from=startday,to=enddate2, length.out = 10, tz="UTC")
cols <- c("red3","gray15","cornflowerblue","forestgreen")
cols2 <- c("red3","cornflowerblue","forestgreen")
sites <- c("Open Canopy/Full Sun", "Closed Canopy/Low Light","Swamp/Low Light")
# plot all data----------------------------------------------------------------
# (retained for reference: full-record version of the 3-panel figure)
# windows(7,10)
# par(mfrow=c(3,1), las=1, cex.axis=1.21, cex.lab=1.51, mgp=c(2.5,1,0),oma=c(4, 0, 1,0),
# lwd=2)
#
# #1: temp
# par(mar=c(0,5,0,1))
# par(lwd=2)
# plot(temp_C ~ datetime, data=clim[clim$site=="sel_anc",], ylim=c(20, 32),
# type='l',col="cornflowerblue",lwd=2, lty=1, xlab="", ylab=templab)
# points(temp_C ~ datetime, data=clim[clim$site=="sel_eur",], col="gray15", type='l')
# points(temp_C ~ datetime, data=clim[clim$site=="sel_oxa",], col="forestgreen", type='l')
# points(temp_C ~ datetime, data=clim[clim$site=="sel_umb",], col="red3", type='l')
# axis.POSIXct(1, at=xAT2, format = "%H:%M:%S", labels=FALSE)
# legend("topright",col=cols,lty=1,legend=sites2,inset=.01, bty='n',cex=1.1)
#
# #2. dewpoint
# par(mar=c(0,5,0,1))
# plot(dewpoint_C ~ datetime, data=clim[clim$site=="sel_anc",], ylim=c(10, 30),
# type='l',col="cornflowerblue",lwd=2, lty=1, xlab="", ylab=dewlab)
# points(dewpoint_C ~ datetime, data=clim[clim$site=="sel_eur",], col="gray15", type='l')
# points(dewpoint_C ~ datetime, data=clim[clim$site=="sel_oxa",], col="forestgreen", type='l')
# points(dewpoint_C ~ datetime, data=clim[clim$site=="sel_umb",], col="red3", type='l')
# axis.POSIXct(1, at=xAT2, format = "%H:%M:%S", labels=FALSE)
#
# #3. vpd
# par(mar=c(4,5,0,1))
# plot(vpd ~ datetime, data=clim[clim$site=="sel_anc",], ylim=c(0, 1.5),
# type='l',col="cornflowerblue",lwd=2, lty=1, xlab="", ylab=vpdlab, xaxt='n')
# points(vpd ~ datetime, data=clim[clim$site=="sel_eur",], col="gray15", type='l')
# points(vpd ~ datetime, data=clim[clim$site=="sel_oxa",], col="forestgreen", type='l')
# points(vpd ~ datetime, data=clim[clim$site=="sel_umb",], col="red3", type='l')
# axis.POSIXct(1, at=xAT2, format = "%H:%M:%S", las=3)
# plot one day ------------------------------------------------------------
# Subset to a single representative day: 2011-06-30, 02:00-18:00 UTC
friday <- clim[clim$datetime >= "2011-06-30 02:00:00" & clim$datetime <= "2011-06-30 18:00:00",]
startfri <- min(friday$datetime)
endfri <- max(friday$datetime)
# Anchor position for the A/B/C panel letters
startcex <- strptime("2011-06-30 05:45:00", tz= "UTC", format= "%Y-%m-%d %H:%M:%S")
# enddate2 <- as.POSIXct("2011-07-01 14:00:00", tz="UTC")
xAT_fri <- seq(from=startfri,to=endfri, by="hour", tz="UTC")
#omit closed canopy-open (messed up humiity values)
#png(filename = "output/airvars.png", width = 11, height = 8.5, units = "in", res= 400)
# Open the JPEG device for the manuscript figure (3 stacked panels)
jpeg(filename = "output/manuscript_figures/Figure_1.jpeg",
width = 6.8, height = 9, units = "in", res= 300)
# setEPS()
# postscript("output/manuscript_figures/Figure_1..eps")
# windows(7,10)
par(mfrow=c(3,1), las=1, cex.axis=1.21, cex.lab=1.51, mgp=c(2.5,1,0),oma=c(4, 0, 1,0),
lwd=2)
#1: temp
# Panel A: air temperature; one line per site, x axis suppressed (shared below)
par(mar=c(0,5,0,1))
par(lwd=2)
plot(temp_C ~ datetime, data=friday[friday$site=="sel_anc",], ylim=c(22, 32),
type='l',col="cornflowerblue",lwd=2, lty=1, xlab="", ylab=templab, xaxt='n')
#points(temp_C ~ datetime, data=friday[friday$site=="sel_eur",], col="gray15", type='l')
points(temp_C ~ datetime, data=friday[friday$site=="sel_oxa",], col="forestgreen", type='l')
points(temp_C ~ datetime, data=friday[friday$site=="sel_umb",], col="red3", type='l')
axis.POSIXct(1, at=xAT_fri, format = "%H:%M:%S", labels=FALSE)
legend("topright",col=cols2,lty=1,legend=sites,inset=.01, bty='n',cex=1.25)
text("A", x=startcex, y= 31.5, cex=2)
#2. dewpoint
# Panel B: dew point temperature
par(mar=c(0,5,0,1))
plot(dewpoint_C ~ datetime, data=friday[friday$site=="sel_anc",], ylim=c(20, 30),
type='l',col="cornflowerblue",lwd=2, lty=1, xlab="", ylab=dewlab,xaxt='n')
#points(dewpoint_C ~ datetime, data=friday[friday$site=="sel_eur",], col="gray15", type='l')
points(dewpoint_C ~ datetime, data=friday[friday$site=="sel_oxa",], col="forestgreen", type='l')
points(dewpoint_C ~ datetime, data=friday[friday$site=="sel_umb",], col="red3", type='l')
axis.POSIXct(1, at=xAT_fri, format = "%H:%M:%S", labels=FALSE)
text("B", x=startcex, y= 29.5, cex=2)
#3. vpd
# Panel C: vapour pressure deficit; only this panel draws time-axis labels
par(mar=c(4,5,0,1))
plot(vpd ~ datetime, data=friday[friday$site=="sel_anc",], ylim=c(0, 0.5),
type='l',col="cornflowerblue",lwd=2, lty=1, xlab="", ylab=vpdlab, xaxt='n')
#points(vpd ~ datetime, data=friday[friday$site=="sel_eur",], col="gray15", type='l')
points(vpd ~ datetime, data=friday[friday$site=="sel_oxa",], col="forestgreen", type='l')
points(vpd ~ datetime, data=friday[friday$site=="sel_umb",], col="red3", type='l')
axis.POSIXct(1, at=xAT_fri, format = "%H:%M:%S", las=3)
text("C", x=startcex, y= 0.46, cex=2)
#text was 1.25 and legend was 1.1
# dev.copy2pdf(file= "output/airvars.pdf")
dev.off()
|
6928f5553b051864c4784a5b9f1057d7268cdbde
|
27771fb1e6ede18171011e29c028f955238c3bd9
|
/R/FWI_ERA5_grab.R
|
21f0a457096da6eddec3026a0b08a9ab9a300f6f
|
[] |
no_license
|
xynpocari/DCCW
|
1eb84de7673c55134a8e6686b6982558a0a8dfb7
|
775ebf992f192a849ac43622e6c3ba44f1f20ea4
|
refs/heads/main
| 2023-08-07T09:19:25.677117
| 2021-10-04T22:39:07
| 2021-10-04T22:39:07
| 356,772,041
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 13,180
|
r
|
FWI_ERA5_grab.R
|
#' Grab FWI metrics
#'
#' Extract daily rasters for Fire Weather Indices calculated using ERA5 Reanalysis data to a user-defined date range and spatial extent.
#'
#' @param variable A string or vector of FWI metrics. E.g. \code{c('FFMC','DMC','DC','ISI','BUI','FWI')}.
#' @param overwinter_DC Logical. Should FWI metrics using overwintered DC be extracted? Default is \code{TRUE}. If \code{FALSE}, FWI metrics using default DC startup is extracted (see details).
#' @param start_date Date object or character. Start date of data extraction, using UTC. E.g."2018-05-01".
#' @param end_date Date object or character. Start date of data extraction, using UTC. E.g."2018-05-01". Must have the same year as start_date.
#' @param extent An sp or sf object used to define the extraction extent.
#' @param buff_width Numeric. Width (in meters) to buffer the object defined in extent, thereby increasing the extraction extent. Defaults to 0.
#' @param match_crs Logical. Should the extracted rasters be projected to the CRS of the reference extent object? Defaults to \code{FALSE}. If \code{FALSE}, the raster is returned in the native projection and resolution (WGS 84, 0.25 arc degrees).
#' @param input_directory Directory to where FWI NetCDFs are saved on the local device. If \code{NULL}, FWI NetCDFs will be downloaded to tempfiles. See details for recommendations.
#' @param output_directory Optional. Output directory where NetCDFs will be exported. If \code{NULL}, the NetCDFs will not be exported to the local device.
#' @param filename_prefix Optional. String that will be appended to the start of the exported raster file name.
#'
#' @details
#' This function extracts daily FWI rasters (from \url{https://zenodo.org/record/3626193}) to a user-defined date range and spatial extent.
#' Currently, this function can only temporally subset to dates within the same year.
#'
#' It is recommended to download NetCDFs for the variables and years of interest from \url{https://zenodo.org/record/3626193}, then define the \code{input_directory} to reduce download times.
#' If the \code{input_directory} is defined, this function will search for the proper NetCDF using both year and variable name within the \code{input_directory.} The user should not change the default name of files downloaded from the Zenodo page.
#'
#' @return List of named RasterBricks
#' @export
#'
#' @references Global Fire Weather Indices: \url{https://zenodo.org/record/3626193}
#'
#' @import raster
#' @import sf
#' @import lubridate
#' @import dplyr
#' @import ncdf4
#' @import curl
#'
#' @examples
#' \dontrun{
#' library(raster)
#' library(sf)
#' library(lubridate)
#' library(dplyr)
#' library(ncdf4)
#' library(curl)
#'
#' # Create extent polygon
#' e <- extent(-1198869,-1131697,901665.8,1004528) %>%
#'   st_bbox() %>% st_as_sfc() %>% st_sf()
#' st_crs(e) <- 3978 # assign CRS as EPSG 3978, Canada Atlas Lambert
#'
#' FWI <- FWI_ERA5_grab(variable = 'FWI',# string or vector of variables, e.g. c('FWI','FFMC')
#'                      overwinter_DC = TRUE,
#'                      start_date = '2018-05-20',
#'                      end_date='2018-05-30', # must be same year as start date
#'                      extent = e, # sp or sf object with extent and crs info
#'                      buff_width = 0,
#'                      match_crs = TRUE, # match crs of input extent object?
#'                      input_directory = NULL, # folder where ncdfs are saved
#'                      output_directory = NULL)
#'
#'plot(FWI[[1]][[1:4]])
#' }
FWI_ERA5_grab <- function(variable = NULL,
                          overwinter_DC = TRUE,
                          start_date,
                          end_date,
                          extent,
                          buff_width = 0,
                          match_crs = FALSE,
                          input_directory = NULL,
                          output_directory = NULL,
                          filename_prefix = NULL){

  # Only same-year extraction is supported (source NetCDFs are per-year)
  if(!year(as.Date(start_date)) == year(as.Date(end_date))){stop('start_date must be from the same year as end_date.')}

  pull_year <- as.Date(start_date) %>% year() %>% as.character()

  # make sure requested variables exist
  if(is.null(variable)){
    stop("variable argument empty. Valid variable inputs include: 'FFMC','DMC','DC','ISI','BUI','FWI','DSR', or a vector including a combination of these.")
  }

  allowed_vars <- c('FFMC','DMC','DC','ISI','BUI','FWI','DSR')

  if(all(variable %in% allowed_vars) == FALSE){
    notallowed <- variable[which(variable %in% allowed_vars == FALSE)]
    stop("Invalid variable input used. Valid variable inputs include: 'FFMC','DMC','DC','ISI','BUI','FWI','DSR'.")
  }

  # extent must be a spatial object with a defined CRS
  if(!grepl('sf|character|SpatialPolygons|SpatialPolygonsDataFrame', class(extent)[1])){
    stop("extent must an object of class sf, SpatialPolygons, or SpatialPolygonsDataFrame.")
  }

  if(is.na(crs(extent))){
    stop('extent object must have a valid crs.')
  }

  if(is.null(input_directory)){ # if no user input, download from online
    # FWI metrics from https://zenodo.org/record/3626193
    # only available from 1979-2018 (inclusive).
    if(!as.numeric(pull_year) %in% 1979:2018){
      stop('FWI metrics are only available online from 1979-2018 (inclusive; see https://zenodo.org/record/3626193). Specify an input_directory to use NetCDFs stored on your local device.')
    }

    if(overwinter_DC == TRUE){
      download_vars <- paste0(variable, '_owDC')
    } else {
      download_vars <- paste0(variable, '_dDC')
    }

    URL_lookup <- tibble(var = c("FFMC_dDC","DMC_dDC","DC_dDC","ISI_dDC","BUI_dDC","FWI_dDC","DSR_dDC","FFMC_owDC","DMC_owDC","DC_owDC","ISI_owDC","BUI_owDC","FWI_owDC","DSR_owDC"),
                         partialURL = c('https://zenodo.org/record/3540950/files/no_overwintering_fine_fuel_moisture_code_',
                                        'https://zenodo.org/record/3540954/files/no_overwintering_duff_moisture_code_',
                                        'https://zenodo.org/record/3540959/files/no_overwintering_drought_code_',
                                        'https://zenodo.org/record/3540946/files/no_overwintering_initial_spread_index_',
                                        'https://zenodo.org/record/3540942/files/no_overwintering_build_up_index_',
                                        'https://zenodo.org/record/3540938/files/no_overwintering_fire_weather_index_',
                                        'https://zenodo.org/record/3540962/files/no_overwintering_daily_severity_rating_',
                                        'https://zenodo.org/record/3540922/files/fine_fuel_moisture_code_',
                                        'https://zenodo.org/record/3540924/files/duff_moisture_code_',
                                        'https://zenodo.org/record/3540926/files/drought_code_',
                                        'https://zenodo.org/record/3540920/files/initial_spread_index_',
                                        'https://zenodo.org/record/3540918/files/build_up_index_',
                                        'https://zenodo.org/record/3539654/files/fire_weather_index_',
                                        'https://zenodo.org/record/3540928/files/daily_severity_rating_'))

    # BUG FIX: filter() keeps rows in lookup-table order, not in the order the
    # user supplied `variable`. The progress messages (variable[url]) and the
    # final names(rasts_subset) <- variable both assume user order, so the
    # filtered rows must be reordered with match() -- exactly as the
    # input_directory branch below already does.
    URL_lookup_sub <- URL_lookup %>% filter(var %in% download_vars)
    URL_lookup_sub <- URL_lookup_sub[match(download_vars, URL_lookup_sub$var),]
    partialURLs <- URL_lookup_sub %>% pull(partialURL)

    URLs <- paste0(partialURLs,pull_year,'.nc?download=1')

    # Download ncs
    tempfile_list <- vector(length = length(URLs))

    for(url in seq_along(URLs)){
      message(paste0('Downloading file ',url,' of ', length(URLs), ' (',variable[url],')...'))
      tempfile_list[url] <- tempfile(fileext = '.nc')
      curl_download(url = URLs[url], destfile = tempfile_list[url])
      message(paste0('Downloaded file ',url,' of ', length(URLs), ' (',variable[url],')'))
    }

    # import nc as RasterBrick
    rasts <- vector(mode = 'list', length=length(tempfile_list))

    for(nc in seq_along(rasts)){
      rasts[[nc]] <- brick(tempfile_list[nc])
    }

  }

  if(!is.null(input_directory)){ # user supplied input directory containing the FWI nc files
    message('Reading in files from user defined input directory...')

    # search for the files within the folder
    filename_lookup <- tibble(var = c('FFMC','DMC','DC','ISI','BUI','FWI','DSR'),
                              partialFilename = c('fine_fuel_moisture_code_','duff_moisture_code_','drought_code_',
                                                  'initial_spread_index_','build_up_index_','fire_weather_index_','daily_severity_rating_'))

    partialFilenames <- filename_lookup %>% filter(var %in% variable)
    partialFilenames <- partialFilenames[match(variable, partialFilenames$var),] %>%
      pull(partialFilename) # reorder filenames to match user input

    Filenames <- paste0(partialFilenames,pull_year, '.nc')
    nc_paths <- file.path(input_directory, Filenames)

    if(all(file.exists(nc_paths))==FALSE){
      missingvars <- variable[which(file.exists(nc_paths)==FALSE)]
      missingfile <- Filenames[which(file.exists(nc_paths)==FALSE)]
      stop(paste0(missingvars, ' file could not be located in specified input directory. File should be named ',missingfile, collapse = '\n'))
    } else {
      message('Files for all specified variables located within input directory.')
    }

    # import nc as RasterBrick
    rasts <- vector(mode = 'list', length=length(nc_paths))

    for(nc in seq_along(rasts)){
      rasts[[nc]] <- brick(nc_paths[nc])
    }

  }

  # parse to and from julian dates
  from_julian <- yday(start_date)
  to_julian <- yday(end_date)

  # get spatial extent (including buffer, if provided)
  if(!buff_width == 0){
    pull_extent <- st_buffer(extent, dist = buff_width)
    pull_extent <- st_transform(pull_extent, crs = 4326)
  } else {
    pull_extent <- st_transform(extent, crs = 4326)
  }

  # temporal subset to dates and area of interest for each brick
  message('Subsetting RasterBrick(s)...')

  rasts_subset <- vector(mode = 'list', length=length(rasts))

  for(rast in seq_along(rasts)){
    rast_tmp <- rasts[[rast]]
    rast_tmp <- raster::subset(rast_tmp, from_julian:to_julian) # temporal subset
    rast_tmp <- raster::rotate(rast_tmp) # rotate from 0-360 to 0-180
    rast_tmp <- raster::crop(rast_tmp, pull_extent)

    # set layer names (one layer per julian day)
    layer_names <- paste0(as.numeric(pull_year), from_julian:to_julian) %>% as.Date(format = '%Y%j')
    names(rast_tmp) <- layer_names

    # project to extent crs if match_crs is TRUE
    if(match_crs == TRUE){
      # get upper and lower limits, for clamping later (projection resampling
      # can otherwise produce values outside the original data range)
      lower <- raster::cellStats(rast_tmp, min) %>% min(na.rm = TRUE)
      upper <- raster::cellStats(rast_tmp, max) %>% max(na.rm = TRUE)

      rast_tmp <- projectRaster(from = rast_tmp, crs = crs(extent))
      rast_tmp <- raster::clamp(rast_tmp, lower = lower, upper = upper)
    }

    # setZ. this has to be performed after projecting, bc projectRaster drops z info.
    rast_tmp <- setZ(rast_tmp, from_julian:to_julian, name = 'Time (Day)')

    rasts_subset[[rast]] <- rast_tmp
    message(paste0('Processed variable ',variable[rast],'...'))
  }

  # name the list of RasterBricks (rasts order now matches user variable order
  # in both branches above)
  names(rasts_subset) <- variable

  if(is.null(input_directory)){
    unlink(tempfile_list, recursive = T) # remove tempfiles, if they were created
  }

  if(!is.null(output_directory)){

    filename_lookup <- tibble(var = c('FFMC','DMC','DC','ISI','BUI','FWI','DSR'),
                              partialFilename = c('fine_fuel_moisture_code_','duff_moisture_code_','drought_code_',
                                                  'initial_spread_index_','build_up_index_','fire_weather_index_','daily_severity_rating_'))

    for(brick in seq_along(variable)){
      varname <- variable[brick]
      partialFilename <- filename_lookup %>% filter(var %in% varname) %>% pull(partialFilename)

      if(!is.null(filename_prefix)){
        outname <- paste0(filename_prefix,'_FireIndices_' ,partialFilename, gsub('-','',start_date),'_',gsub('-','',end_date),'.nc')
      } else {
        outname <- paste0('FireIndices_' ,partialFilename, gsub('-','',start_date),'_',gsub('-','',end_date),'.nc')
      }

      simpleCap <- function(x) { # function to capitalize first letter of each word in string
        s <- strsplit(x, " ")[[1]]
        paste(toupper(substring(s, 1,1)), substring(s, 2),
              sep="", collapse=" ")
      }

      longname <- gsub('_',' ',partialFilename) %>% simpleCap()

      writeRaster(x = rasts_subset[[brick]],
                  filename = file.path(output_directory, outname),
                  varname = varname,
                  longname = longname,
                  zname = 'Time',
                  zunit = 'Day',
                  overwrite = T)
    }
    message(paste0('FWI metrics NetCDFs saved locally in ',output_directory))
  }

  message(paste0(c('Returning subsetted RasterBrick(s) for variable(s)',variable), collapse=" "))
  return(rasts_subset)

}
|
d841ac68ce77ba51302a4a452e364075aec4eff3
|
a285601f8addbee198781ccb560e217b40601c31
|
/altura_solar_resultados.R
|
99a8e8ec7a555cfdd807be93405b5d6f6c7fadf4
|
[] |
no_license
|
Cicconella/Mestrado
|
20de71b2337f2660552eae0ebebf8891598ee12d
|
9a414141c03452c7611db6f0aabe51cf9eafb138
|
refs/heads/master
| 2021-01-21T13:07:29.911278
| 2018-01-10T09:49:35
| 2018-01-10T09:49:35
| 91,807,251
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,384
|
r
|
altura_solar_resultados.R
|
# Collect per-chromosome SOLAR association results for height ("altura"):
# parses p-values and heritabilities out of text output, joins them to CNV
# coordinates, and draws Manhattan-style plots with qqman.
library(qqman)

i=1
altura = c()

# Loop over the 22 autosomes, parsing one results file pair per chromosome
for(i in 1:22){

cnv = read.table(paste("/media/cicconella/01D2FE834EA51BE0/Documents and Settings/Nina/Google Drive/Mestrado/Cromossomos/cromo", i, sep=""), header = T)
cnv[1:10,1:10]

# sep="?" makes each whole line a single field; parsing is done by strsplit below
a = read.table(paste("/home/cicconella/solar831/Chr", i, "/altura/resultados/p-values",sep=""), sep="?")
head(a)

# Split lines of the form "CN<id>: ... p = <value>(..." into id/p-value pairs
b = unlist(strsplit(as.character(a[,1]), split=":"))
b = unlist(strsplit(b, split="p = "))
b = unlist(strsplit(b, split="(", fixed=T))
b = b[seq(1,length(b),by=2)]

cns = b[seq(1,length(b),by=2)]
pvalue = b[seq(2,length(b),by=2)]

cns = unlist(strsplit(cns, split="CN"))
cns = cns[seq(2,length(cns),by=2)]

final = as.data.frame(cbind(cns, pvalue))
head(final)

# NOTE(review): cbind() of characters + as.data.frame makes factor columns
# under pre-4.0 defaults, so as.numeric(final$cns) would return level codes,
# not the values (compare the as.numeric(as.character(...)) used further
# down) -- verify. Also order(cns) sorts the CHARACTER ids ("10" < "2").
final$cns = as.numeric(final$cns)
final = final[order(cns),]
head(final)

plot(final)

# Heritability file: lines of the form "... is <h2> ..." parsed positionally
a = read.table(paste("/home/cicconella/solar831/Chr",i,"/altura/resultados/herdabilidades",sep=""), sep="?")
head(a)

b = unlist(strsplit(as.character(a[,1]), split=":"))
b = unlist(strsplit(b, split="p = "))
b = unlist(strsplit(b, split="is "))

cns = b[seq(1,length(b),by=4)]
herd = b[seq(3,length(b),by=4)]
herd = as.numeric(herd)

plot(herd)

head(final)

# Attach CNV coordinates (chr/start/end) and heritability, then accumulate
final = cbind(cnv[,1:3], final, herd)
head(final)

altura = rbind(altura, final)
}

dim(altura)
head(altura)

colnames(altura) = c("Chr", "Start", "End","CNV", "P-value", "Herdabilidade")

class(altura)
head(altura)

plot(altura$Herdabilidade, pch = 20, col="red")
altura[which(altura$Herdabilidade<0.8244920),]

# Convert factor p-values to numeric via character (correct factor handling)
altura$`P-value`= as.numeric(as.character(altura$`P-value`))

# Manhattan plot of -log10(p)
manAltura = altura[,c(4,1,2,5)]
colnames(manAltura) = c("SNP","CHR", "BP", "P")
head(manAltura)
manAltura$P = as.numeric(as.character(manAltura$P))
dim(manAltura)

manAltura$SNP = c(1:nrow(manAltura))

manhattan(manAltura, col = c("blue", "black"), main="Height",
cex.axis=0.9)

summary(manAltura)

# Most significant / least heritable CNVs
altura[which((altura$`P-value`)==min(altura$`P-value`)),]
altura[which((altura$Herdabilidade)==min(altura$Herdabilidade)),]

head(altura)

# Second Manhattan-style plot showing heritability on the y axis (logp = F)
manAltura = altura[,c(4,1,2,6)]
colnames(manAltura) = c("SNP","CHR", "BP", "P")
head(manAltura)
manAltura$P = as.numeric(as.character(manAltura$P))
dim(manAltura)

min(manAltura$P)

manhattan(manAltura, col = c("blue", "black"), main="Height", logp = F,
ylim=c(0.82,0.87), ylab="Heritability", cex.axis=0.85)

?manhattan
|
d80d0ec05c41eac6e82522acebf7481becf6d352
|
eb4667b178e418d936c35569383e5cb0663f93ad
|
/R/ind.contrib.R
|
e7b63a30f7cca7596258f262313e0651cf2e23c2
|
[] |
no_license
|
cran/RVAideMemoire
|
21081d49de9999a7438c40de05ab67a145336a02
|
6a48aaa7facac606e954b06a9cc1ea46b387d575
|
refs/heads/master
| 2023-08-31T00:44:09.327145
| 2023-08-23T07:30:05
| 2023-08-23T09:30:39
| 17,692,998
| 7
| 7
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,061
|
r
|
ind.contrib.R
|
ind.contrib <-
function(model,print.diff=FALSE,graph=TRUE,warning=25) {
  # Leave-one-out influence of each observation on the regression
  # coefficients, expressed as the percentage change in each coefficient when
  # that observation is removed.
  #
  # model      : an 'lm' or 'least.rect' fit
  # print.diff : print the matrix of percentage differences?
  # graph      : draw the per-individual influence plot, labelling
  #              individuals whose influence exceeds `warning` percent?
  # warning    : labelling threshold (in %)
  #
  # Returns a list with the fitted coefficients, the absolute leave-one-out
  # differences and the percentage differences.
  if ("lm" %in% class(model)) {
    coeffs <- model$coefficients
    # lm.influence() gives the change in each coefficient when the
    # corresponding observation is dropped (n x p matrix)
    coeffs.diff <- lm.influence(model)$coefficients
  } else if ("least.rect" %in% class(model)) {
    coeffs <- model$coefficients
    coeffs.mat <- matrix(ncol=2,nrow=nrow(model$model),dimnames=list(1:nrow(model$model),c("(Intercept)",colnames(model$model)[2])))
    x <- model$model[,2]
    y <- model$model[,1]
    # No influence shortcut for least.rect: refit explicitly without each point
    for (i in 1:nrow(model$model)) {
      x2 <- x[-i]
      y2 <- y[-i]
      coeffs2 <- least.rect(y2~x2)$coefficients
      coeffs.mat[i,1] <- coeffs2[1]
      coeffs.mat[i,2] <- coeffs2[2]
    }
    coeffs.diff <- data.frame(coeffs[1]-coeffs.mat[,1],coeffs[2]-coeffs.mat[,2])
    colnames(coeffs.diff)[1] <- "(Intercept)"
    colnames(coeffs.diff)[2] <- colnames(model$model)[2]
  } else {
    stop("model not recognized")
  }
  # BUG FIX: the former '100*coeffs.diff/coeffs' recycled the length-p
  # coefficient vector down the rows (column-major), so alternate rows were
  # divided by the wrong coefficient. sweep() divides each COLUMN by its own
  # coefficient.
  coeffs.prop <- 100*sweep(as.matrix(coeffs.diff),2,as.numeric(coeffs),"/")
  if (is.data.frame(coeffs.diff)) {coeffs.prop <- as.data.frame(coeffs.prop)}
  if (graph) {
    plot(coeffs.prop[,1],ylim=c(1.1*min(coeffs.prop),1.1*max(coeffs.prop)),type="o",pch=16,cex=0.5,xlab="Individual",ylab="Difference in parameters (%)")
    abline(h=0,col="grey",lty=3)
    abline(h=-100,col="grey",lty=3)
    abline(h=warning,col="grey",lty=3)
    abline(h=-warning,col="grey",lty=3)
    abline(h=100,col="grey",lty=3)
    for (i in 2:ncol(coeffs.prop)) {
      lines(coeffs.prop[,i],col=i,type="o",pch=16,cex=0.5)
    }
    legend(0.75*nrow(model$model),1.05*max(coeffs.prop),colnames(coeffs.prop),col=1:ncol(coeffs.prop),lty=1)
    # Label individuals whose influence exceeds the warning threshold
    lignes <- which(abs(coeffs.prop)>warning)
    if (length(lignes)>0) {
      # BUG FIX: a linear index k maps to column ((k-1) %/% n)+1, not
      # (k %/% n)+1; the old formula mis-placed the last individual of each
      # column (row 0, next column).
      colonnes <- (lignes-1)%/%nrow(coeffs.prop)+1
      lignes <- lignes-(colonnes-1)*nrow(coeffs.prop)
      ecart <- abs(1.1*max(coeffs.prop)-1.1*min(coeffs.prop))*4/100
      for (i in 1:length(lignes)) {
        text(lignes[i],coeffs.prop[lignes[i],colonnes[i]]+ecart*sign(coeffs.prop[lignes[i],colonnes[i]]),lignes[i],cex=0.5)
      }
    }
  }
  if (print.diff) {print(coeffs.prop,digits=5)}
  result <- list(coefficients=coeffs,coefficients.diff=coeffs.diff,coefficients.prop=coeffs.prop)
  return(result)
}
|
c1a418cb9c21cc0c635eb23512d6cdbf2d07bdfc
|
95be3f083f504afb5898a8cec46397f5e01e15bb
|
/download.site.filesFn.R
|
9b36460789f38953573286c445186c7515f4c77d
|
[] |
no_license
|
Kevin-M-Smith/nwis-simple
|
906ecf11be27ba73ca9d042618905c65f0e19034
|
da3cb3d6a25134ed9f5f74309bc851cc778c91a5
|
refs/heads/master
| 2021-01-13T14:13:11.416348
| 2015-01-05T16:51:26
| 2015-01-05T16:51:26
| 28,007,018
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,211
|
r
|
download.site.filesFn.R
|
# Fetch the expanded NWIS site file for every site in `state` that was active
# (instantaneous or daily values) during the last 5 weeks, and append the rows
# to the "activesites" table.
# NOTE(review): relies on globals defined elsewhere -- importRDB1() and the
# open database connection `con2`.
download.site.file <- function(state){
  site_url <- paste0(
    "http://waterservices.usgs.gov/nwis/site/?format=rdb&stateCd=", state,
    "&period=P5W&siteOutput=expanded&hasDataTypeCd=iv,id"
  )
  active <- importRDB1(site_url)
  dbWriteTable(con2, "activesites", active,
               append = TRUE, row.names = FALSE, overwrite = FALSE)
}
# Fetch the instantaneous-values series catalog for one NWIS `site`, derive
# series/family identifier columns, and append any rows to the "assets" table.
# NOTE(review): relies on globals defined elsewhere -- importRDB1() and the
# open database connection `con2`.
download.site.inventory <- function(site){
  inventory_url <- paste0(
    "http://waterservices.usgs.gov/nwis/site/?format=rdb,1.0&sites=", site,
    "&seriesCatalogOutput=true&outputDataTypeCd=iv"
  )
  active <- importRDB1(inventory_url)
  # Zero-pad the data-descriptor number to 5 digits for the composite ids
  dd <- formatC(active$dd_nu, width = 5, format = "d", flag = "0")
  active$seriesid <- paste0(active$agency_cd, ":", active$site_no, ":",
                            active$parm_cd, ":00011:", dd)
  active$familyid <- paste0(active$agency_cd, ":", active$site_no,
                            ":00011:", dd)
  if(nrow(active) > 0){
    dbWriteTable(con2, "assets", active,
                 append = TRUE, row.names = FALSE, overwrite = FALSE)
  }
}
|
6fdf42249c697f92460b137128072eb2c9a64887
|
d9559311f50a4fccc915a4a5cb4516aa014ee24f
|
/maxandmin.R
|
b4a502009b55282bd41ee1e3dfae4b9546e1b101
|
[] |
no_license
|
a168828/R_SCRIPT_CODES
|
fb0b27143bd6a584cac35eb7157d96abf4a69aeb
|
09ef0347fad1577022eccbf99825d949b5e5ff68
|
refs/heads/main
| 2023-06-01T01:03:20.445962
| 2021-06-13T18:34:25
| 2021-06-13T18:34:25
| 376,614,631
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 43
|
r
|
maxandmin.R
|
# Demonstrate max() and min() on a small numeric vector.
fe <- c(20, 30, 77, 1, 23, 6, 10)
max(fe)  # largest value: 77
min(fe)  # smallest value: 1
|
68bbe285fd16f6e350138ada1a4662f287f7d2de
|
18fb990a4c3c45a72f816be5eeb146c5c1162983
|
/man/odds.ratio.Rd
|
457fd4f412de187243f7b11fb1962025d7bd8afe
|
[] |
no_license
|
yijaeseung/samplesizeCMH
|
576573fb07238ed3f64f92568f79412104a5d12b
|
53c30dd1803a7f5e1e138d81da84eb28cf249b52
|
refs/heads/master
| 2023-03-17T19:23:46.242510
| 2019-12-03T06:28:27
| 2019-12-03T06:28:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 967
|
rd
|
odds.ratio.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/odds.ratio.r
\name{odds.ratio}
\alias{odds.ratio}
\title{Create an odds ratio estimate from a 2-by-2 table of frequencies or proportions}
\usage{
odds.ratio(x)
}
\arguments{
\item{x}{A two-dimensional matrix or table containing frequencies or proportions.}
}
\value{
A numeric vector.
}
\description{
Create an odds ratio estimate from a 2-by-2 table of frequencies or proportions
}
\examples{
# Load in Titanic data from datasets package
data(Titanic, package = "datasets")
# Get marginal table of survival by sex
marginal_table <- margin.table(Titanic, c(2,4))
marginal_table
# Compute odds ratio of marginal table
odds.ratio(marginal_table)
# Get partial tables of survival by sex, stratified by class
partial_tables <- margin.table(Titanic, c(2,4,1))
partial_tables
# Compute odds ratio of each partial table
apply(partial_tables, 3, odds.ratio)
}
\author{
Paul W. Egeler, M.S.
}
|
8096ed21b787b95a31c1091497c3a89898fe69fb
|
d5a9254953c538b7162c5b77fa663a5a11b47986
|
/training/sample.r
|
dc44b3c998b514b49751293bf1ccdc13f9d12bb5
|
[] |
no_license
|
mcelevan12/picture_dimensionality
|
dba8ba9a90ec5b732ddea730bafbf84feea9811b
|
33c1e1518759f9970d3d28e016055a4c37744768
|
refs/heads/master
| 2020-04-23T17:58:16.148343
| 2019-03-19T19:41:28
| 2019-03-19T19:41:28
| 171,350,827
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,298
|
r
|
sample.r
|
# Sample script: run an ALCOVE model (via slpALCOVE, defined elsewhere --
# presumably the catlearn package, TODO confirm) on the PCA08 and PCA02
# training files. `st` holds the model state: association weights w,
# hidden-unit coordinates h (one column per exemplar), attention weights
# alpha, and learning parameters c/phi/lw/la/r/q.
# BUG FIX: one literal in the 8x18 h matrix was corrupted into
# "-1.59785171917 91315" (an embedded space -> parse error); restored to
# -1.5978517191791315, the value that appears intact in the PCA02 matrix.
st = list(c = 2.3, phi = 4.4, lw = 0.2, la = 0.1, r = 1, q = 1, w = matrix(0, 2, 18), h = matrix(c(1.246941135940084, 3.8921262091647004, -0.18739697773108563, -0.8740265465228294, 1.512747971612059, 0.6069902130842613, 0.12495299159147684, 0.5523350257852532, 0.6288818382489025,5.194534790099107, 1.9972514014430376, 0.40050487836843807, 1.4912289270615922, 0.29042900397896765, 1.1395396402203735, 2.4351874111492724, 0.7828446779575104, 3.5112506998272726, -1.539436158792307, -1.1193435240879241, -1.6300907723247842, -0.9156920361189461, -1.6523661694820913, -2.458666392890401, -0.7897691916334053, 2.029158304615232, 0.18050709819193927, -2.56460940920617, -0.5775191422946859, -0.027777156794864137, -2.3051037114210216, -2.334744549959926, 1.6404130280218108, 3.7513317872997733, 0.5054849981632412, -1.195898732505985, -1.2027416241769116, -2.0312050578888745, -0.36492127242531475, 0.7746111786075088, -0.41085173404063163, 4.122367840870528, 1.3318543275160661, -0.8928581809573074, 1.040821588789485, -0.9247061393751572, -1.1650361415400972, 2.080635830482442, 2.6913819968444823, 1.9946872923253605, 0.2634832920905385, 2.0709416829624616, 0.6824109660595236, -1.5051150829454654, 2.7589089920962326, -1.6177762493578711, 2.3404956186598462, 2.364477505690643, 2.808005610308732, 2.525663643382217, -3.468427647708265, -0.5936882378759407, 0.08152605507926691, 1.8446848844069, 0.6890692077578144, 1.0572570667314596, -0.698196333087968, -1.8228796380538048, 1.0494131094902668, 0.6672415705408417, 4.337373161152902, -1.7383153997778433, 0.025296988568839416, -2.252334868574457, 4.4914707411630985, -0.8979430140466168, 2.638737558978532, -0.3869836909533588, -0.5963575937215, -0.18352197027111847, 0.2724814000118901, -0.6791003912959886, 3.821021329272511, -1.5851728996263974, 0.21197871743205424, 2.1142275690814323, -1.3210146855223828, -0.8969150465417786, 1.4596277603163381, 0.40407028037178966, 2.8243425396075974, 2.1917863236891444, -0.0889307360957284, 
3.194772664477745, -0.44791077034922044, -0.10361423163733155, -1.5830918890979475, -1.392763223207289, 2.0605103274670564, -3.867959768149254, -1.6315539546246811, 0.9034578659493129, -0.6952518298980733, -0.8117754631840285, 1.976033568324301, -3.621532254145691, 1.3148334861234556, -3.26514473039267, -0.2203719006684618, -0.8965860147899537, -0.1607251628899496, 0.8251004986119442, -0.939677471351701, 0.49701952117941756, 3.626310248776555, -3.0962695638292885, -0.8389284727617528, 0.08291160926639825, 0.7753017361840048, -2.0024563932876336, 4.730143147513038, -2.824098322506962, 0.2730627385275171, -1.4778985177882114, 1.5008391132008163, 0.34090419330488914, 1.3522345224599024, 1.3548192922379585, 3.4644257806227783, -2.181476207608314, 2.970637458126881, 0.21128554122673643, 1.9380351258950181, 0.8702578372919477, -0.489476598644884, 0.1662478055713134, -1.5978517191791315, -0.10553317170453562, 1.7098715048083128, -2.0619989267041565, -1.45435688081991, 2.5740849288420975, 2.330810521953837, -0.7898159592181805), 8, 18), alpha = c(1, 1, 1, 1, 1, 1, 1, 1), colskip = 3)
# Load the PCA08 training matrix and run the model
tr = data.matrix(read.csv("~/Documents/picture_dimensionality/training/training_data/near/PCA08.csv"))
slpALCOVE(st, tr)

# Sample script for running a model of ALCOVE on PCA02 (2 hidden units)
st = list(c = 2.3, phi = 4.4, lw = 0.2, la = 0.1, r = 1, q = 1, w = matrix(0, 2, 18), h = matrix(c(1.246941135940084, 3.8921262091647004, 0.6288818382489025, 5.194534790099107, 0.7828446779575104, 3.5112506998272726, -0.7897691916334053, 2.029158304615232, 1.6404130280218108, 3.7513317872997733, -0.41085173404063163, 4.122367840870528, 2.6913819968444823, 1.9946872923253605, 2.3404956186598462, 2.364477505690643, 0.6890692077578144, 1.0572570667314596, 0.025296988568839416, -2.252334868574457, 0.2724814000118901, -0.6791003912959886, 1.4596277603163381, 0.40407028037178966, -1.5830918890979475, -1.392763223207289, 1.976033568324301, -3.621532254145691, -0.939677471351701, 0.49701952117941756, 4.730143147513038, -2.824098322506962, 3.4644257806227783, -2.181476207608314, -1.5978517191791315, -0.10553317170453562), 2, 18), alpha = c(1, 1), colskip = 3)
tr = data.matrix(read.csv("~/Documents/picture_dimensionality/training/training_data/near/PCA02.csv"))
slpALCOVE(st, tr)
|
9e2c73d8f3a092025ff627dcf3944ff98206e3c8
|
d3f20ed2a13eb9ca153094f15c2e351e2642cb19
|
/ds/assignment.R
|
274d02474f41fb55c20649beb1182166806053ee
|
[] |
no_license
|
apoorvakarn/R-in-Action
|
b7d0003d9d0be0755b7710903fb56984a59dda9b
|
923dfe1f12cecfdd1511d2d55e4a4796499f9c82
|
refs/heads/master
| 2021-09-11T19:49:45.127777
| 2018-04-11T18:01:48
| 2018-04-11T18:01:48
| 100,679,882
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 706
|
r
|
assignment.R
|
# Assignment: arithmetic mean for raw, discrete (frequency) and continuous
# (grouped) data.

## (a) Raw data: plain arithmetic mean
marks2a <- c(10,12,13,14,17)
marks2a
sum(marks2a)/length(marks2a)   # 13.2

## (b) Discrete frequency distribution: weighted mean
marks2b <- c(10,12,13,14,17)   # distinct mark values
marks2b
student2b <- c(5,7,9,8,4)      # number of students per mark (frequencies)
student2b
sum(student2b)                 # total number of students: 33
(markstotal <- marks2b *student2b)   # f*x per value
# BUG FIX: 'markstotal12b' was undefined; the weighted mean uses 'markstotal'
sum(markstotal/sum(student2b))#13.06061
# Cross-check: expand to one mark per student and average
x <- rep(marks2b, student2b)
table(x)
mean(x)

## (c) Continuous (grouped) frequency distribution: mean via class midpoints
marks2c <- c("10-20","20-30","30-40","40-50")
student2c <- c(5,7,9,8)
(df2c <- data.frame(marks2c,student2c))
midpt2c <- c(15,25,35,45)      # class midpoints
str(df2c)
markstotal2c <- student2c*midpt2c
df2c$midpt2c <- midpt2c
df2c$midpt2c
df2c$total <- df2c$student2c*df2c$midpt2c
df2c$total
(df2c <- cbind(df2c,markstotal2c))
# BUG FIX: 'mean2c1' was referenced before ever being assigned; compute the
# grouped mean and display it
mean2c1 <- sum(df2c$total)/sum(student2c)
mean2c1                        # ~31.897
x <- rep(df2c$midpt2c,df2c$student2c)
x

## (3a) Expand a frequency vector into individual observations
marks <- c(5,7,9,8,4)
# BUG FIX: 'student' was undefined; 'student2b' is the only frequency vector
# in scope with a matching length -- TODO confirm the intended weights
x3a <- rep(marks,student2b)
x3a   # (replaces the stray undefined symbol 'st')
|
c2e7dba3515f2ef4b60c25c4de757188e901e206
|
7489894195a361f5b66b3b2e75c86270e83fa9bc
|
/R Assignments/Logistic.R
|
cd9967948a5fb9a3cdc2837a49d624ca35c6710d
|
[] |
no_license
|
nityanandkore/6211-Advanced-BI
|
daabfc2dfe25d26a74a201be98ed4a65be3f1561
|
105ce19871ae7298183ab87ac7297a09dc4919da
|
refs/heads/master
| 2022-11-28T09:53:35.916492
| 2020-08-05T13:53:23
| 2020-08-05T13:53:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,152
|
r
|
Logistic.R
|
# Logistic regression workflow for the HMEQ loan-default data:
# load -> impute -> transform -> 70/30 split -> glm via caret -> ROC/AUC.
#install.packages('caret')
#install.packages('car')
#install.packages('e1071', dependencies=TRUE)
#install.packages('pROC')
#install.packages('dplyr')
#install.packages('Hmisc')
library(caret)
library(car)
library(pROC)
library(dplyr)
library(Hmisc)
getwd()
# NOTE(review): hard-coded user path; adjust before running elsewhere.
setwd("C:\\Users\\P2190101\\Desktop\\NK Personal\\NK Study\\UNCC\\6211 - Advanced BI\\Dataset")
getwd()
# Treat both the literal string 'NA' and empty cells as missing.
df <- read.csv('HEMQ.csv',na.strings = c('NA',''))
summary(df)
str(df)
# Target: BAD (default indicator) must be a factor for classification.
df$BAD <- factor(df$BAD)
str(df)
# Multicollinearity check: variance inflation factors on a full logit model.
vif(glm(formula = BAD~.,family=binomial(link='logit'),data=df))
# Impute missing values: mean for numeric columns; Hmisc::impute with `max`
# for the categorical ones (presumably intended as a mode-like fill --
# TODO confirm that max picks the desired level).
df$VALUE <-with(df,impute(VALUE,mean))
df$REASON <-with(df,impute(REASON,max))
df$JOB <- with(df,impute(JOB,max))
df$CLAGE <-with(df,impute(CLAGE,mean))
df$CLNO <-with(df,impute(CLNO,mean))
pl <-ggplot(df,aes(x=YOJ))+geom_histogram()
# Reduce right skew in years-on-job; the +1 guards against log10(0).
df$YOJ <- log10(df$YOJ+1)
# NOTE(review): `pl` captured df before the transform above, so the printed
# histogram shows the untransformed YOJ values.
pl
pl <- ggplot(df,aes(x=NINQ))+geom_histogram()
pl
# Collapse number-of-recent-inquiries into a binary High/Low indicator,
# propagating NA.
combine.NINQ <- function(x){
if(is.na(x)){
return(NA)
}else if (x>0){
return('High')
}else{
return('Low')
}
}
df$NINQ <-sapply(df$NINQ,combine.NINQ)
table(df$NINQ)
summary(df)
# Reproducible 70/30 split, stratified on the target.
set.seed(101)
trainIndex <- createDataPartition(df$BAD,
p=0.7,
list=FALSE,
times = 1)
df.train<-df[trainIndex,]
df.valid <- df[-trainIndex,]
# Baseline logistic regression; na.action=na.pass defers NA handling to caret.
baseline.model <- train(BAD~.,
data=df.train,
method='glm',
family='binomial',
na.action=na.pass)
summary(baseline.model)
prediction <- predict(baseline.model,
newdata=df.valid)
# predict() drops rows containing NAs, so score against complete cases only.
df.valid.nonmissing <- na.omit(df.valid)
confusionMatrix(prediction,df.valid.nonmissing$BAD)
pred.probabilities <- predict(baseline.model,
newdata=df.valid,
type='prob')
# ROC curve built from the predicted probability of class '1'.
regression.ROC <- roc(predictor=pred.probabilities$'1',
response = df.valid.nonmissing$BAD,
levels=levels(df.valid.nonmissing$BAD))
plot(regression.ROC)
regression.ROC$auc
|
27271d6612a6524e8d536ce139a8042e1ee9d4e3
|
cba1565eb4da432bfed9251b135dc36448db91d4
|
/Intro to the Tidyverse/Intro to the Tidyverse worked example.R
|
8ecf63aa501fcb71bc63c8a0fa1fdb8099f4c34a
|
[] |
no_license
|
NeuroShepherd/R_Coding_Presentations
|
b92118385d9befc9e3827d6453800a781a75c9e9
|
b19afd0ac417a4c14efe2e19c5d337ee7f546bc2
|
refs/heads/master
| 2022-12-13T02:38:31.200054
| 2020-09-02T00:43:00
| 2020-09-02T00:43:00
| 260,514,647
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,325
|
r
|
Intro to the Tidyverse worked example.R
|
# Worked example for an "Intro to the Tidyverse" session: the same mtcars
# pipeline written three ways -- piped, fully nested, and piped into a plot.
# Turn mtcars into a .csv to later demonstrate read-in of it
# mtcars %>%
#   tibble::rownames_to_column(var = "car_name") %>%
#   write_csv("./Intro to the Tidyverse/mtcars.csv", na="")
install.packages("tidyverse") # Run 1 time
library(tidyverse)
# Pipe
# Read, drop two columns, filter, bin horsepower into 3 levels, round mpg,
# sort by descending weight, then summarise mpg by horsepower level.
read_csv("./Intro to the Tidyverse/mtcars.csv", na="") %>%
select(-c(qsec,drat)) %>%
dplyr::filter(mpg > 15 | am == 0) %>%
mutate(hp_lvl = cut(hp, 3, labels=c("low","medium","high"))) %>%
mutate_at(vars(mpg), ~round(.)) %>%
arrange(desc(wt)) %>%
group_by(hp_lvl) %>%
summarize(mean_mpg = mean(mpg), sd_mpg = sd(mpg))
# No pipe
# The identical pipeline as one nested call -- shown to motivate the pipe.
summarize(group_by(arrange(mutate_at(mutate(dplyr::filter(select(
read_csv("./Intro to the Tidyverse/mtcars.csv", na=""), -c(qsec,drat)), mpg > 15 | am == 0),
hp_lvl = cut(hp, 3, labels=c("low","medium","high"))), vars(mpg), ~round(.)), desc(wt)), hp_lvl),
mean_mpg = mean(mpg), sd_mpg = sd(mpg))
# Pipe to plot
# Same preparation (note the looser mpg > 5 filter) piped into ggplot:
# weight vs mpg, coloured by cylinder count, linear fit, faceted by hp level.
read_csv("./Intro to the Tidyverse/mtcars.csv", na="") %>%
select(-c(qsec,drat)) %>%
dplyr::filter(mpg > 5 | am == 0) %>%
mutate(hp_lvl = cut(hp, 3, labels=c("low","medium","high"))) %>%
mutate_at(vars(mpg), ~round(.)) %>%
ggplot(aes(wt, mpg)) +
geom_point(aes(color= factor(cyl))) +
geom_smooth(method="lm", se=F, color="black") +
facet_wrap(~hp_lvl) +
theme_dark()
|
4381d880532e490583c4b259ddab61a84c384505
|
1fff2d2af75f6912effd3a9751fa8a8025f17c34
|
/run_analysis.R
|
4cd7d0706172d71ce8412047f06ea9b1d2b61495
|
[] |
no_license
|
harishsmenon/getting-and-cleaning-data-project
|
8e127d9af5634c6e3b0d64f5899e38c6755e0f14
|
15d4a8856246c02d3b982016c2a8a9584249af7e
|
refs/heads/master
| 2021-01-10T11:04:00.356431
| 2015-10-25T22:04:36
| 2015-10-25T22:04:36
| 44,931,701
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,205
|
r
|
run_analysis.R
|
##Coursera Getting and Cleaning Data Course Project
##Data: https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip
##1) Merges the training and the test sets to create one data set.
##2) Extracts only the measurements on the mean and standard deviation for each measurement.
##3) Uses descriptive activity names to name the activities in the data set
##4) Appropriately labels the data set with descriptive variable names.
##5) From the data set in step 4, creates a second, independent tidy data set with the average of
##each variable for each activity and each subject.
##1) Merges the training and the test sets to create one data set.
##Download data
library(RCurl)
# Download and unzip the UCI HAR dataset into ./data (created if absent).
if(!file.exists("./data")){dir.create("./data")}
fileUrl <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
download.file(fileUrl,destfile="./data/Dataset.zip",method="libcurl")
unzip(zipfile="./data/Dataset.zip",exdir="./data")
pathData <- file.path("./data" , "UCI HAR Dataset")
## Reads datafiles
# features: measurement names; activityType: activityId -> label mapping.
features = read.table(file.path(pathData, "features.txt"), head=FALSE)
activityType = read.table(file.path(pathData, "activity_labels.txt"), head=FALSE)
subjectTrain = read.table(file.path(pathData, "train", "subject_train.txt"), header=FALSE)
xTrain = read.table(file.path(pathData, "train", "x_train.txt"), header=FALSE)
yTrain = read.table(file.path(pathData, "train", "y_train.txt"), header=FALSE)
subjectTest = read.table(file.path(pathData, "test", "subject_test.txt"), header=FALSE)
xTest = read.table(file.path(pathData, "test", "x_test.txt"), header=FALSE)
yTest = read.table(file.path(pathData, "test", "y_test.txt"), header=FALSE)
## Assign column names to the data imported above
colnames(activityType) = c('activityId','activityType')
colnames(subjectTrain) = "subjectId"
colnames(xTrain) = features[,2]
colnames(yTrain) = "activityId"
colnames(subjectTest) = "subjectId"
colnames(xTest) = features[,2]
colnames(yTest) = "activityId"
## Merges subjectTrain, xTrain, yTrain; then stacks train and test rows
trainingData = cbind(subjectTrain, xTrain, yTrain)
testData = cbind(subjectTest, xTest, yTest)
finalData = rbind(trainingData,testData);
##2) Extracts only the measurements on the mean and standard deviation for each measurement.
##Extracts column names from merged dataset
colNames = colnames(finalData)
##Create a logicalVector that contains TRUE values for the ID, mean() & stddev() columns and FALSE for others
##logicalVector = (grepl("activity..",colNames) | grepl("subject..",colNames) | grepl("-mean..",colNames) & !grepl("-meanFreq..",colNames) & !grepl("mean..-",colNames) | grepl("-std..",colNames) & !grepl("-std()..-",colNames))
# Note: "." is a regex wildcard here, so "activity.." also matches
# "activityId"; mean\(\)/std\(\) match the literal "()" suffixes only.
logicalVector = (grepl("activity..",colNames) | grepl("subject..",colNames) | grepl("mean\\(\\)|std\\(\\)",colNames))
##Subset finalData table based on the logicalVector to keep only desired columns
finalData = finalData[logicalVector==TRUE]
##3) Uses descriptive activity names to name the activities in the data set
##Merge finalData with activityType to include descriptive activity names
finalData = merge(finalData,activityType,by='activityId',all.x=TRUE)
##Update colNames to include the new column names after merge
colNames = colnames(finalData)
##4) Appropriately labels the data set with descriptive variable names.
# Expand the terse Samsung prefixes/abbreviations into readable words.
names(finalData)<-gsub("^t", "time", names(finalData))
names(finalData)<-gsub("^f", "frequency", names(finalData))
names(finalData)<-gsub("Acc", "Accelerometer", names(finalData))
names(finalData)<-gsub("Gyro", "Gyroscope", names(finalData))
names(finalData)<-gsub("Mag", "Magnitude", names(finalData))
names(finalData)<-gsub("BodyBody", "Body", names(finalData))
##Update colNames to include the new column names after labeling with descriptive variable names
colNames = colnames(finalData)
##5) From the data set in step 4, creates a second, independent tidy data set with the average of
##each variable for each activity and each subject.
library(plyr);
# Mean of every measurement column per (subject, activity) pair.
tidyData <- aggregate(. ~subjectId + activityId, finalData, mean)
tidyData <- tidyData[order(tidyData$subjectId,tidyData$activityId),]
write.table(tidyData, file = "tidydata.txt",row.name=FALSE)
|
89b827cdcda0b504c655d42d36e88c5e4d69c93d
|
6a28ba69be875841ddc9e71ca6af5956110efcb2
|
/Thermodynamics_And_Heat_Power_by_I_Granet_And_M_Bluestein/CH6/EX6.6/Ex6_6.R
|
e6d142deb98f1dc2fedf03e1749ae5944cc4f1d7
|
[] |
permissive
|
FOSSEE/R_TBC_Uploads
|
1ea929010b46babb1842b3efe0ed34be0deea3c0
|
8ab94daf80307aee399c246682cb79ccf6e9c282
|
refs/heads/master
| 2023-04-15T04:36:13.331525
| 2023-03-15T18:39:42
| 2023-03-15T18:39:42
| 212,745,783
| 0
| 3
|
MIT
| 2019-10-04T06:57:33
| 2019-10-04T05:57:19
| null |
UTF-8
|
R
| false
| false
| 144
|
r
|
Ex6_6.R
|
# page no. 245
# Ideal-gas calculation (English engineering units): specific volume and
# mass from pV = mRT.
p = (200+14.7)*(144)   # absolute pressure: (gauge psi + atmospheric) -> lb/ft^2
# Absolute temperature in degrees Rankine (460 + deg F).
# Renamed from `T`, which masks R's built-in alias for TRUE.
temp = (460+73)
V = 120/1728           # volume: cubic inches -> cubic feet
R = 1545/28            # specific gas constant: universal constant / molar mass
v = (R*temp)/p         # specific volume, ft^3/lb
print(v)
print(V/v)             # mass as volume / specific volume
m = (p*V)/(R*temp)     # mass directly from pV = mRT (should equal V/v)
print(m)
|
856ffc4ea72089385133421975c764ad415f1f88
|
0458269a55d2b0fcf5fd8994b20a326e607f3951
|
/tests/testthat/test-4-guess-hdTypes.R
|
a6a51483c9165d5205b8d0d238944e0033ac13f9
|
[
"MIT"
] |
permissive
|
brazadas/homodatum
|
9b403b1addc243838f8c1d3b0d0a3a6a7c8ed312
|
6f939b13cc5851a18331cff9d5d09edb54dc935e
|
refs/heads/master
| 2023-01-02T15:24:00.094165
| 2020-10-28T19:04:05
| 2020-10-28T19:04:05
| 298,450,580
| 0
| 0
|
NOASSERTION
| 2020-09-25T02:48:14
| 2020-09-25T02:48:13
| null |
UTF-8
|
R
| false
| false
| 2,097
|
r
|
test-4-guess-hdTypes.R
|
# Unit tests for hdType guessing: checks that guess_hdType() classifies raw
# vectors as Cat / Num / Pct / Dat, and that the detected date format is
# stored on fringe columns.
test_that("Test guess", {
# Guess Cat
# Mixed alphanumeric codes must stay categorical, not dates or numbers.
v <- c("MSPS-CD-166-2020", "003-2020", "0811 - 2020")
expect_false(isDate(v)) # Test if there are letters
expect_equal(guess_hdType(v), hdType("Cat"))
# Long ID-like digit strings should also be treated as categorical.
v <- c("10306908", "900935265", "9010385043", "9010385043", "9010385043",
NA, "901046823", "830035101", "900417425-2")
expect_false(isDate(v)) # Test if many are parsed as NA. 60% failed to parse
expect_equal(guess_hdType(v), hdType("Cat"))
# Guess Num
expect_equal(guess_hdType(c("1",NA,"2")), hdType("Num"))
expect_equal(guess_hdType(c(0.3, 2, NA)), hdType("Num"))
# Comma decimal separators ("4,59") should still be recognised as numeric.
v <- c("4,59", "5,38", "10,78", "123",NA)
expect_equal(guess_hdType(v), hdType("Num"))
# European thousands/decimal formatting ("343.755,08").
v <- c("343.755,08", "5.380,00", NA, "21.555,11", "1.550.000")
maybeNum(v)
expect_equal(guess_hdType(v), hdType("Num"))
# Guess Pct
expect_equal(guess_hdType(c(0.3, 0.4, 1)), hdType("Pct"))
expect_equal(guess_hdType(c("30%", "200%", NA)), hdType("Pct"))
# Guess Dat
v <- c("2020-04-04", NA)
isDate(v)
expect_equal(guess_hdType(v), hdType("Dat"))
v <- "24/06/2020"
expect_true(isDate(v))
expect_equal(guess_hdType(v), hdType("Dat"))
# Single-digit day/month must also be recognised as a date.
v <- "24/6/2020"
expect_true(isDate(v))
expect_equal(guess_hdType(v), hdType("Dat"))
h <- Dat(v)
attributes(h)$format
v <- c("25/03/2020","31/03/2020","06/04/2020","17/04/2020")
expect_true(isDate(v))
# Each column's detected date format must be preserved on the fringe data.
d <- tibble::tibble(x = "24/06/2020",
y = "2020/12/31",
z = "2020:05:20")
expect_equal(as.character(guess_frType(d)),"Dat-Dat-Dat")
f <- fringe(d)
expect_equal(attributes(f$data[[1]])$format, "%d/%m/%Y")
expect_equal(attributes(f$data[[2]])$format, "%Y/%m/%d")
expect_equal(attributes(f$data[[3]])$format, "%Y:%m:%d")
# Exploratory datetime/time checks kept for future work:
# v <- c("2014-03-01","2043-04-04","2014-04-04")
# isDate(v)
# isDatetime(v)
# isTime(v)
# whichDTH(v)
# v <- c("2014-03-01","2043-04-04","20140404")
# parseDatetime(v, "D")
# v <- c("2014-03-01 5:04:00","2043-04-04 5:04:00","2014-04-04 5:04:00")
# parseDatetime(v, "T")
# v <- c("04:00","13:05:00","5:04:00")
# parseDatetime(v, "H")
})
|
1f0ef022e999e3dbba43329e7823d909b1d651a3
|
cf2cb09f424281adde2c9411a6a2cea8011b01e6
|
/R/addRepeats.R
|
a520009acfd9795788c7ac2b5388bcfc8c3a5ab4
|
[
"MIT"
] |
permissive
|
crisprVerse/crisprDesign
|
b58eb93beaf1699b1eb585875939d0249d0dce4c
|
98724dbafe87863723bf829f9300cafa5852c130
|
refs/heads/master
| 2023-07-22T06:35:25.615889
| 2023-07-19T18:09:09
| 2023-07-19T18:09:09
| 523,800,050
| 11
| 3
|
MIT
| 2023-07-19T18:09:11
| 2022-08-11T16:48:52
|
R
|
UTF-8
|
R
| false
| false
| 4,786
|
r
|
addRepeats.R
|
#' @title Annotate a \linkS4class{GuideSet} object with repeat elements
#' @description Add an annotation column to a \linkS4class{GuideSet} object
#' that identifies spacer sequences overlapping repeat elements.
#'
#' @param object A \linkS4class{GuideSet} object or a
#' \linkS4class{PairedGuideSet} object.
#' @param gr.repeats A \linkS4class{GRanges} object containing repeat
#' elements regions.
#' @param ignore.strand Should gene strand be ignored when annotating?
#' TRUE by default.
#' @param ... Additional arguments, currently ignored.
#'
#' @return \code{guideSet} with an \code{inRepeats} column appended in
#' \code{mcols(guideSet)} that signifies whether the spacer sequence
#' overlaps a repeat element.
#'
#' @author Jean-Philippe Fortin, Luke Hoberecht
#'
#' @seealso \code{link{removeRepeats}}.
#'
#' @examples
#' data(guideSetExample, package="crisprDesign")
#' data(grRepeatsExample, package="crisprDesign")
#' guideSet <- addRepeats(guideSetExample,
#' gr.repeats=grRepeatsExample)
#'
#' @export
#' @importFrom GenomicRanges findOverlaps
#' @importFrom S4Vectors queryHits
#' @rdname addRepeats
setMethod("addRepeats", "GuideSet", function(object,
                                             gr.repeats=NULL,
                                             ignore.strand=TRUE
){
    # Validate inputs before annotating.
    object <- .validateGuideSet(object)
    stopifnot("gr.repeats must be a GRanges object" = {
        is(gr.repeats, "GRanges")
    })
    # A guide is "in repeats" when it overlaps at least one repeat region.
    hits <- GenomicRanges::findOverlaps(object,
                                        gr.repeats,
                                        ignore.strand=ignore.strand)
    overlapping <- S4Vectors::queryHits(hits)
    object$inRepeats <- seq_along(object) %in% overlapping
    object
})
#' @export
#' @rdname addRepeats
setMethod("addRepeats", "PairedGuideSet", function(object,
                                                   gr.repeats=NULL,
                                                   ignore.strand=TRUE
){
    # Flatten the pairs into one GuideSet, annotate once, then copy the
    # resulting columns back onto the paired object.
    object <- .validatePairedGuideSet(object)
    flattened <- .pairedGuideSet2GuideSet(object)
    flattened <- addRepeats(flattened,
                            gr.repeats=gr.repeats,
                            ignore.strand=ignore.strand)
    .addColumnsFromUnifiedGuideSet(object, flattened)
})
#' @rdname addRepeats
#' @export
setMethod("addRepeats", "NULL", function(object){
    # NULL input passes through unchanged.
    NULL
})
#' @title Remove \linkS4class{GuideSet} gRNAs that overlap repeat elements
#' @description Remove \linkS4class{GuideSet} gRNAs that overlap repeat
#' elements.
#'
#' @param object A \linkS4class{GuideSet} object or a
#' \linkS4class{PairedGuideSet} object.
#' @param gr.repeats A \linkS4class{GRanges} object containing
#' repeat elements regions.
#' @param ignore.strand Should gene strand be ignored when annotating?
#' TRUE by default.
#' @param ... Additional arguments, currently ignored.
#'
#' @return \code{object} filtered for spacer sequences not overlapping
#' any repeat elements. An \code{inRepeats} column is also appended in
#' \code{mcols(object)}.
#'
#' @author Jean-Philippe Fortin, Luke Hoberecht
#'
#' @seealso \code{link{addRepeats}}.
#'
#' @examples
#' data(guideSetExample, package="crisprDesign")
#' data(grRepeatsExample, package="crisprDesign")
#' guideSet <- removeRepeats(guideSetExample,
#' gr.repeats=grRepeatsExample)
#'
#' @export
#' @rdname removeRepeats
setMethod("removeRepeats", "GuideSet", function(object,
                                                gr.repeats=NULL,
                                                ignore.strand=TRUE
){
    # Annotate, then keep only guides that fall outside repeat regions.
    annotated <- addRepeats(object,
                            gr.repeats=gr.repeats,
                            ignore.strand=ignore.strand)
    annotated[!annotated$inRepeats]
})
#' @export
#' @rdname removeRepeats
setMethod("removeRepeats", "PairedGuideSet", function(object,
                                                      gr.repeats=NULL,
                                                      ignore.strand=TRUE
){
    object <- .validatePairedGuideSet(object)
    # Annotate each half of every pair independently ...
    firstHalf <- addRepeats(first(object),
                            gr.repeats=gr.repeats,
                            ignore.strand=ignore.strand)
    secondHalf <- addRepeats(second(object),
                             gr.repeats=gr.repeats,
                             ignore.strand=ignore.strand)
    # ... and keep a pair only when neither guide touches a repeat.
    object[!firstHalf$inRepeats & !secondHalf$inRepeats]
})
#' @rdname removeRepeats
#' @export
setMethod("removeRepeats", "NULL", function(object){
    # NULL input passes through unchanged.
    NULL
})
|
b2985fa0a07330afa6e208a00ae62557d509de65
|
ce0a89ee1244e995c160d3202ebedb3ff860c3f3
|
/Scripts/Others/3GO-KEGG 2.R
|
20c56764c423fd34da4dfeeb947c204dec5e4ede
|
[] |
no_license
|
Iceylee/NGS-Pacbio
|
1dc8d9f7e6327414dffe8500921fa2cf1e56fe72
|
4d4f6052eff9ae13b170e8f8431e79ed5b39d6ca
|
refs/heads/master
| 2020-04-05T06:32:17.031168
| 2019-09-04T03:34:24
| 2019-09-04T03:34:24
| 156,641,487
| 9
| 5
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,238
|
r
|
3GO-KEGG 2.R
|
# GO (BP/CC/MF) and KEGG enrichment of significant genes for one contrast,
# writing result tables and bar/dot plots under `path3`.
# NOTE(review): expects globals set by a driver script before sourcing:
# dbname, exp_group, base_group, count_table, pSet, GO_KEY, KEGG_NEED_KEY,
# kegg_org, path3 -- confirm against the pipeline entry point.
library(clusterProfiler)
library(DOSE)
library(dplyr)
require(AnnotationHub)
library(tidyr)
# Fetch the organism annotation database (OrgDb) from AnnotationHub.
hub <- AnnotationHub()
db <- hub[[dbname]]
# Contrast label (e.g. "AvsB") used in all output file names.
groups = paste(exp_group,base_group,sep="vs")
sig_path = paste("result/2sigGene-volcano/",groups,"_sig_genes_exprData.txt",sep="")
gene_list <- read.csv(sig_path, sep = '\t',header = T,stringsAsFactors=F)
countData = read.table(count_table, header=TRUE, sep=",")
# Map the significant genes' ids onto the annotation keys used by enrichGO.
gene_table = countData[,c(1,2)]
colnames(gene_table) = c("key","id")
gene_list2 = left_join(gene_list,gene_table,by="id")
gene_id = gene_list2$key
########################################################
#####################6.GO-result########################
########################################################
##enrichGO
# One enrichment per ontology (BP/CC/MF), BH-adjusted, cut at pSet.
info_go_BP <- enrichGO(gene = gene_id,
OrgDb = db,
keyType = GO_KEY,
ont = "BP",
pAdjustMethod = "BH",
pvalueCutoff = pSet,
qvalueCutoff = pSet)
info_go_CC <- enrichGO(gene = gene_id,
OrgDb = db,
keyType = GO_KEY,
ont = "CC",
pAdjustMethod = "BH",
pvalueCutoff = pSet,
qvalueCutoff = pSet)
info_go_MF <- enrichGO(gene = gene_id,
OrgDb = db,
keyType = GO_KEY,
ont = "MF",
pAdjustMethod = "BH",
pvalueCutoff = pSet,
qvalueCutoff = pSet)
#output table
fileBP = paste(groups,"_GO_BP_out.txt",sep="")
fileCC = paste(groups,"_GO_CC_out.txt",sep="")
fileMF = paste(groups,"_GO_MF_out.txt",sep="")
write.table(as.data.frame(info_go_BP@result), file=paste(path3,fileBP,sep="/"),quote=F,row.names = F,sep = "\t")
write.table(as.data.frame(info_go_CC@result), file=paste(path3,fileCC,sep="/"),quote=F,row.names = F,sep = "\t")
write.table(as.data.frame(info_go_MF@result), file=paste(path3,fileMF,sep="/"),quote=F,row.names = F,sep = "\t")
########################################################
#####################7.GO-plot##########################
########################################################
# Re-read the tables just written and tag each row with its ontology.
ego_BP<-read.table(file=paste(path3,fileBP,sep="/"),header=T,sep = "\t",stringsAsFactors = F,quote="")
ego_CC<-read.table(file=paste(path3,fileCC,sep="/"),header=T,sep = "\t",stringsAsFactors = F,quote="")
ego_MF <-read.table(file=paste(path3,fileMF,sep="/"),header=T,sep = "\t",stringsAsFactors = F,quote="")
ego_MF_df <- ego_MF %>%
mutate(onco="Molecular function")
ego_BP_df <- ego_BP %>%
mutate(onco="Biological process")
ego_CC_df <- ego_CC %>%
mutate(onco="Cellular component")
ego_three <- rbind(ego_BP_df, ego_CC_df, ego_MF_df)
#top 15 select
# Keep the 15 most significant terms per ontology, then order by gene count.
# (arrange_/group_by_ are deprecated standard-evaluation dplyr verbs.)
ego_three <- ego_three %>%
arrange_(~ pvalue) %>%
group_by_(~ onco) %>%
do(head(., n = 15)) %>%
arrange(onco,Count)
# Fix factor level order so the bars appear in the computed order.
ego_three$Description<- factor(ego_three$Description, order=TRUE, levels=ego_three$Description)
ego_three$onco<- factor(ego_three$onco, order=TRUE)
levels(ego_three$onco) <- c("BP","CC","MF")
##plot bar
library(ggplot2)
lable_name <- ego_three$onco[!duplicated(ego_three$onco)]
# Drop any stale plot object from a previous run (warns if none exists).
rm(p)
if (dim(ego_three)[1] != 0) {
# Horizontal bar chart of gene counts per term, faceted by ontology.
p <- ggplot(ego_three, aes(y = Count, x = Description)) +
geom_bar(stat = "identity", aes(fill = onco), alpha = 1) +
facet_grid(onco ~ ., scales = "free", space = "free",margins = F) +
coord_flip() +
#scale_y_continuous(limits = c(0, 70))+
scale_fill_discrete(name = "Ontology", labels = lable_name) +
theme_light() +
theme(axis.text = element_text(size = 9), legend.text = element_text(size = 8)) +
labs(y = "Number of Genes", x = "Term")+
theme(panel.grid.major = element_blank(),panel.grid.minor = element_blank()) +
scale_y_continuous(labels = function (Count) floor(Count))
# pdf(file=paste(groups,"_GO_barplot.pdf",sep=""))
# p
# dev.off()
filename = paste(groups,"_GO_plot_data.txt",sep="")
write.table(ego_three, file=paste(path3,filename,sep="/"),quote=F,row.names = F,sep = "\t")
filename = paste(groups,"_GO_barplot.pdf",sep="")
ggsave(file=paste(path3,filename,sep="/"),p, width=10, height=10, units="in")
}
########################################################
#####################8.KEGG-plot########################
########################################################
#change ID type
# KEGG may need a different key type; translate ids with bitr() if required.
if (GO_KEY != KEGG_NEED_KEY){
ids <- bitr(gene_id, fromType=GO_KEY, toType=KEGG_NEED_KEY, OrgDb=db)
id_kegg <- ids[,2]
filename = paste(groups,"_ID_type.csv",sep="")
write.csv(ids, paste(path3,filename,sep="/"),row.names = F,quote = F)
}else
id_kegg <- gene_id
#kegg
kk <- enrichKEGG(gene = id_kegg, organism = kegg_org, keyType = "kegg", pvalueCutoff = pSet)
kk_df <- as.data.frame(kk) %>%
dplyr::select(-ID)
filename=paste(groups,"_KEGG_out.txt",sep="")
write.table(kk_df, file=paste(path3,filename,sep="/"),quote = F,sep = "\t")
##plot
rm(p)
p = dotplot(kk) + guides(
color = guide_colorbar(order = 1),
fill = guide_legend(order = 0))
filename=paste(groups,"_KEGG_dotplot.pdf",sep="")
ggsave(file=paste(path3,filename,sep="/"),p, width=10, height=10, units="in")
|
310d368e70bea4e6c3e07cba41d69d8e09b4d963
|
2e088636e49553ac6840b71ce44b16d2840fea4e
|
/cachematrix.R
|
16c0bef6ba7b0c36ac98ac63cf009e9175b78499
|
[] |
no_license
|
marco2508/ProgrammingAssignment2
|
55caae2712a3b30114e2341bb81d8b60594ff533
|
0b3e2eaec326ea24c8e5d3092007ad3745c81114
|
refs/heads/master
| 2021-05-23T08:38:38.365390
| 2020-04-05T10:45:27
| 2020-04-05T10:45:27
| 253,203,125
| 0
| 0
| null | 2020-04-05T09:56:50
| 2020-04-05T09:56:49
| null |
UTF-8
|
R
| false
| false
| 3,714
|
r
|
cachematrix.R
|
## Put comments here that give an overall description of what your
## functions do
## Write a short comment describing this function
makeCacheMatrix <- function(x = matrix()) {
  ## Wrap an invertible matrix together with a cache for its inverse.
  ## Returns a list of accessor closures:
  ##   set(y)          -- replace the matrix and invalidate the cache
  ##   get()           -- return the stored matrix
  ##   setInverse(inv) -- store a computed inverse in the cache
  ##   getInverse()    -- return the cached inverse, or NULL if not cached
  cached_inverse <- NULL

  set <- function(y) {
    x <<- y
    cached_inverse <<- NULL  # old inverse is no longer valid
  }
  get <- function() {
    x
  }
  setInverse <- function(inverse) {
    cached_inverse <<- inverse
  }
  getInverse <- function() {
    cached_inverse
  }

  list(set = set,
       get = get,
       setInverse = setInverse,
       getInverse = getInverse)
}
## Write a short comment describing this function
cacheSolve <- function(x, ...) {
  ## Return the inverse of the matrix wrapped by makeCacheMatrix().
  ## Serves the cached inverse when one exists; otherwise computes it with
  ## solve(), stores it in the cache, and returns it.
  ## @x: the accessor list produced by makeCacheMatrix()
  cached <- x$getInverse()
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  inverse <- solve(x$get(), ...)
  x$setInverse(inverse)
  inverse
}
#this is the description of the assignment copied from the website
#Matrix inversion is usually a costly computation and there may be some
#benefit to caching the inverse of a matrix rather than compute it repeatedly
#(there are also alternatives to matrix inversion that we will not discuss here). Your assignment is to write a pair of functions that cache the inverse of a matrix.
#Write the following functions:
#makeCacheMatrix: This function creates a special "matrix" object that can cache its inverse.
#cacheSolve: This function computes the inverse of the special "matrix" returned
#by makeCacheMatrix above. If the inverse has already been calculated (and the matrix has not changed),
#then the cachesolve should retrieve the inverse from the cache.
#Computing the inverse of a square matrix can be done with the solve function in R.
#For example, if X is a square invertible matrix, then solve(X) returns its inverse.
#For this assignment, assume that the matrix supplied is always invertible.
inv_mat_test_frame = function(mat){
  ## Time cacheSolve() twice on the same wrapped matrix: the first call
  ## computes the inverse, the second should be a fast cache hit.
  ## @mat: an invertible matrix
  cached <- makeCacheMatrix(mat)
  for (attempt in 1:2) {
    started <- Sys.time()
    cacheSolve(cached)
    print(Sys.time() - started)
  }
}
|
9238a9f2b1b61e0311c7f49e0613bece07e644d7
|
c52178f855f814c05e75ab1559a5c7de129a4ef8
|
/R/problem.R
|
3555b8795d72e4d7af371a8a64fa2da9d2e1685d
|
[] |
no_license
|
prioritizr/prioritizrutils
|
36471915f8334ec53d954cd7c7676075fee5e0b7
|
fa5c2d446e1fef5eba6000f4377396fa99fb60fc
|
refs/heads/main
| 2021-11-25T01:01:03.703861
| 2017-03-08T01:59:51
| 2017-03-08T01:59:51
| 82,646,326
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,422
|
r
|
problem.R
|
#' @include internal.R ConservationProblem-proto.R
NULL
#' Conservation planning problem
#'
#' Create a systematic conservation planning problem. This function is used to
#' specify the basic data used in a spatial prioritization problem: the
#' spatial distribution of the planning units and their costs, as well as
#' the features (eg. species, ecosystems) that need to be conserved. After
#' constructing this object, it can be customized to meet specific
#' objectives using targets (see \code{\link{targets}}) and constraints
#' (see \code{\link{constraints}}).
#'
#' @param x \code{\link[raster]{Raster-class}},
#' \code{\link[sp]{SpatialPolygonsDataFrame-class}}, or
#' \code{\link[sp]{SpatialLinesDataFrame-class}} object specifying the
#' planning units to use in the reserve design exercise and their
#' corresponding cost. It may be desirable to exlcude some planning units
#' from the analysis, for example those outside the study area. To exclude
#' planning units, set the cost for those raster cells to \code{NA}.
#'
#' @param features The correct argument for \code{features} depends on the
#' input to \code{x}.
#' \describe{
#' \item{\code{\link[raster]{Raster-class}},
#' \code{\link[sp]{Spatial-class}}}{\code{\link[raster]{Raster-class}}
#' object showing the distribution of conservation features. Missing
#' values (i.e. \code{NA} values) can be used to indicate the absence of
#' a feature in a particular cell instead of explicitly setting these
#' cells to zero.}
#' \item{\code{data.frame}}{\code{data.frame} object containing information
#' on the features. The argument to \code{feature_data} must follow the
#' conventions used by Marxan. Each row corresponds to a different
#' feature. It must also contain the following columns:
#' \describe{
#' \item{\code{"id"}}{\code{integer} unique identifier for each feature
#' These identifiers are used in the argument to \code{rij}.}
#' \item{\code{"name"}}{\code{character} name for each feature.}
#' \item{\code{"prop"}}{\code{numeric} relative target for each feature
#' (optional).}
#' \item{\code{"amount"}}{\code{numeric} absolute target for each
#' feature (optional).}
#' }
#' }
#' }
#'
#' @param cost_column \code{character} name or \code{integer} indicating the
#' column in the attribute table of a \code{\link[sp]{Spatial-class}} object
#' with the cost data.
#'
#' @param rij \code{data.frame} containing information on the amount of
#' each feature in each planning unit. This argument is only used argument to
#' \code{x} is a \code{data.frame}. Similar to \code{features}, the
#' argument to \code{rij} must follow the conventions used by
#' Marxan. It must contain the following columns:
#' \describe{
#' \item{\code{"pu"}}{\code{integer} planning unit identifier.}
#' \item{\code{"species"}}{\code{integer} feature identifier.}
#' \item{\code{"amount"}}{\code{numeric} amount of the feature in the
#' planning unit.}
#' }
#'
#' @param ... not used.
#'
#' @return A \code{\link{ConservationProblem-class}} object containing the
#' basic data used to build a prioritization problem.
#'
#' @seealso \code{\link{constraints}}, \code{\link{objectives}},
#' \code{\link{targets}}.
#'
#' @examples
#' # create problem using raster planning unit data
#' p1 <- problem(sim_pu_raster, sim_features) %>%
#' add_min_set_objective() %>%
#' add_relative_targets(0.2) %>%
#' add_binary_decision()
#'
#' # create problem using polygon planning unit data
#' p2 <- problem(sim_pu_polygons, sim_features) %>%
#' add_min_set_objective() %>%
#' add_relative_targets(0.2) %>%
#' add_binary_decision()
#'
#' # create problem using line planning unit data
#' p3 <- problem(sim_pu_lines, sim_features) %>%
#' add_min_set_objective() %>%
#' add_relative_targets(0.2) %>%
#' add_binary_decision()
#'
#' # create problem using point planning unit data
#' p4 <- problem(sim_pu_points, sim_features) %>%
#' add_min_set_objective() %>%
#' add_relative_targets(0.2) %>%
#' add_binary_decision()
#'
#' \donttest{
#' # solve problems
#' s <- list(solve(p1), solve(p2), solve(p3), solve(p4))
#'
#' # plot solutions
#' par(mfrow=c(2,2))
#' plot(s[[1]], main = "raster data")
#'
#' plot(s[[2]], main = "polygon data")
#' plot(s[[2]][s[[2]]$solution == 1, ], col = "darkgreen", add = TRUE)
#'
#' plot(s[[3]], main = "line data")
#' lines(s[[3]][s[[3]]$solution == 1, ], col = "darkgreen", lwd = 2)
#'
#' plot(s[[4]], main = "point data", pch = 19)
#' points(s[[4]][s[[4]]$solution == 1, ], col = "darkgreen", cex = 2, pch = 19)
#' }
#'
#' @export
problem <- function(x, features, ...) UseMethod("problem")
#' @rdname problem
#' @method problem Raster
#' @export
problem.Raster <- function(x, features, ...) {
# Raster planning units: a single cost layer plus one layer per feature.
assertthat::assert_that(inherits(x, "Raster"), inherits(features, "Raster"))
# Costs must be strictly positive, every feature must occur somewhere,
# and cost/feature rasters must share the same grid (resolution checked).
assertthat::assert_that(isTRUE(raster::cellStats(x, "min") > 0),
isTRUE(all(raster::cellStats(features, "max") > 0)),
raster::nlayers(x) == 1, raster::nlayers(features) >= 1,
raster::compareRaster(x, features, res = TRUE, tolerance = 1e-5,
stopiffalse = FALSE))
# A single-layer stack/brick is collapsed to a plain RasterLayer.
if (inherits(x, c("RasterStack", "RasterBrick")))
x <- x[[1]]
# Assemble the problem object with an rij (feature x planning unit) matrix.
pproto(NULL, ConservationProblem,
constraints = pproto(NULL, Collection), penalties = pproto(NULL, Collection),
data = list(cost = x, features = features,
rij_matrix = rij_matrix(x, features)))
}
#' @rdname problem
#' @method problem Spatial
#' @export
problem.Spatial <- function(x, features, cost_column = names(x)[1], ...) {
  # Vector planning units (polygons/lines/points) with a cost attribute.
  assertthat::assert_that(inherits(x, c("SpatialPolygonsDataFrame",
    "SpatialLinesDataFrame", "SpatialPointsDataFrame")))
  cost_column <- match.arg(cost_column, names(x))
  # Drop planning units with missing or non-finite costs.
  x <- x[is.finite(x[[cost_column]]), ]
  assertthat::assert_that(
    # BUG FIX: the original tested x[[1]] (the first column) even when
    # cost_column names a different column; validate the cost column itself.
    isTRUE(all(x[[cost_column]] > 0)),
    isTRUE(all(raster::cellStats(features, "max", na.rm = TRUE) > 0)),
    raster::nlayers(features) >= 1,
    raster::compareCRS(x@proj4string, features@crs),
    # Planning units and features must at least overlap in extent.
    isTRUE(rgeos::gIntersects(methods::as(raster::extent(x), "SpatialPolygons"),
      methods::as(raster::extent(features), "SpatialPolygons"))))
  # Assemble the problem object with an rij (feature x planning unit) matrix.
  pproto(NULL, ConservationProblem,
    constraints = pproto(NULL, Collection),
    penalties = pproto(NULL, Collection),
    data = list(cost = x, features = features, cost_column = cost_column,
                rij_matrix = rij_matrix(x[, cost_column], features)))
}
#' @rdname problem
#' @method problem data.frame
#' @export
problem.data.frame <- function(x, features, rij, ...) {
  # Marxan-style tabular input: planning units (x), features, and an rij
  # table giving the amount of each feature in each planning unit.
  # assert that arguments are valid
  assertthat::assert_that(
    # inputs are data.frames
    inherits(x, "data.frame"), inherits(features, "data.frame"),
    inherits(rij, "data.frame"),
    # x$cost: finite numeric cost per planning unit
    assertthat::has_name(x, "cost"), is.numeric(x$cost), all(is.finite(x$cost)),
    # x$id: unique, finite planning unit identifiers
    assertthat::has_name(x, "id"), is.numeric(x$id), all(is.finite(x$id)),
    anyDuplicated(x$id) == 0,
    # features$id: unique, finite feature identifiers
    assertthat::has_name(features, "id"), is.numeric(features$id),
    all(is.finite(features$id)), anyDuplicated(features$id) == 0,
    # features$name: unique, non-missing names
    assertthat::has_name(features, "name"),
    is.character(features$name) || is.factor(features$name),
    all(!is.na(features$name)), anyDuplicated(features$name) == 0,
    # rij$species: must reference known features
    assertthat::has_name(rij, "species"), is.numeric(rij$species),
    all(is.finite(rij$species)),
    all(rij$species %in% features$id),
    # rij$pu: must reference known planning units
    # BUG FIX: the original checked all(is.finite(rij$x)); rij has no "x"
    # column, so the check reduced to all(logical(0)) == TRUE and silently
    # passed. Validate rij$pu instead.
    assertthat::has_name(rij, "pu"), is.numeric(rij$pu),
    all(is.finite(rij$pu)), all(rij$pu %in% x$id),
    # rij$amount: finite numeric amounts
    assertthat::has_name(rij, "amount"), is.numeric(rij$amount),
    all(is.finite(rij$amount)))
  # standardize ids to row positions so the sparse matrix is densely indexed
  rij$pu <- match(rij$pu, x$id)
  rij$species <- match(rij$species, features$id)
  # create rij matrix (rows = features, columns = planning units)
  rij_mat <- Matrix::sparseMatrix(i = rij$species, j = rij$pu,
                                  x = rij$amount, giveCsparse = TRUE,
                                  index1 = TRUE, use.last.ij = FALSE)
  # create new problem object
  p <- pproto(NULL, ConservationProblem,
    constraints = pproto(NULL, Collection),
    penalties = pproto(NULL, Collection),
    data = list(cost = x, features = features, cost_column = "cost",
                rij_matrix = rij_mat))
  # return problem
  return(p)
}
|
d2b3172fe7fd79e6c31a8031f509d61c1d339935
|
4e1488f653357a617512d0576d36596397f41ed5
|
/scripts/pronostico/drafts/comportamiento_horario/creacion_automatica.R
|
1f62b13d152d1e44619f2c4f9fa2ccba5b12b22c
|
[] |
no_license
|
DanielRZapataS/general_forecast_engine
|
3e66f0f79d831c04fb234ac45523ae7023afb480
|
ac8ab701b6028d85156a95ea35751b9f7393a7a8
|
refs/heads/master
| 2020-11-25T14:09:05.396161
| 2019-12-17T22:25:11
| 2019-12-17T22:25:11
| 228,707,456
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,887
|
r
|
creacion_automatica.R
|
#' #clean a process original bases to create month staging table
#' #' @param original_path : path field where original base_list[[i]] places
#' #' @param staging_path : path field where staging base_list[[i]] places
#' #' @return : staging table
original_path <- "Y:/V2.0/data/staging"
staging_path<- "Y:/V2.0/data/comportamiento_horarios"
compare_maker <-
  function(original_path,
           staging_path,
           month_to_create = NULL) {
    ## Compare the files present in `original_path` against `staging_path`
    ## and (re)create any monthly staging tables that are missing.
    ##
    ## @param original_path   directory holding the original/staging bases
    ## @param staging_path    directory holding the hourly-behaviour tables
    ## @param month_to_create currently unused -- kept for interface
    ##   compatibility (TODO: honour it or remove it)
    ##
    ## NOTE(review): relies on stringr (`str_extract_all`) and magrittr
    ## (`%>%`) being attached by the caller -- confirm before reuse.
    ## Side effects: sources helper scripts and may write staging tables.

    # Change into the script directory to source helpers; restore the
    # caller's working directory on exit (the original leaked the setwd()).
    old_wd <- setwd("Y:/V2.0/scripts/pronostico/drafts/comportamiento_horario")
    on.exit(setwd(old_wd), add = TRUE)
    source("extraer_numeros.R")
    '%!in%' <- function(x,y)!('%in%'(x,y))
    #### original: month number extracted from each file name ####
    files_original <- list.files(original_path)
    position_original <-
      as.vector(sapply(files_original, extraer_numeros))
    files_original <-
      data.frame(files = files_original , position = position_original)
    #### staging: first run of digits in each file name ####
    files_staging <- list.files(staging_path)
    position_staging <-
      sapply(str_extract_all(files_staging, "[0-9]+"), "[[", 1) %>% as.numeric
    files_staging <-
      data.frame(files = files_staging , position = position_staging)
    #### compare: months present in original but missing from staging ####
    compare <-
      files_original$position[(which(files_original$position %!in% files_staging$position))]
    if (length(compare) == 0) {
      stop("Files Complete")
    }
    compare <- as.list(compare)
    # Rebuild missing months; the builder depends on where the files come from.
    if (original_path == "Y:/V2.0/data/staging") {
      source("comportamiento_horario.R")
      staging <- "Y:/V2.0/data/staging"
      for (i in compare) {
        print(paste0("Creando staging mes ausente ", i))
        comportamiento_horario(staging, i)
      }
      print("Archivos completos")
    }
    # (Removed a leftover debug print("xd").)
  }
|
a7ded974f1d181e5342d6917d6750f75d6414c75
|
2646dea4810b1fcf9b0c6e198a3a994f22ddaada
|
/cachematrix.R
|
4fb4763c3f2952c8b94b73a56566b82cb9fc11fd
|
[] |
no_license
|
sangitadas118/ProgrammingAssignment2
|
aac1e357d08ec52917c558332695888cc0e0d12e
|
ce025016dcbe0511465654cc8e34eff5a4c40db1
|
refs/heads/master
| 2022-12-16T22:48:56.251576
| 2020-09-19T13:15:17
| 2020-09-19T13:15:17
| 296,857,446
| 0
| 0
| null | 2020-09-19T11:51:17
| 2020-09-19T11:51:16
| null |
UTF-8
|
R
| false
| false
| 731
|
r
|
cachematrix.R
|
## makeCacheMatrix creates a special "matrix" object that can cache its
## inverse; cacheSolve computes (or retrieves from that cache) the inverse.
## makeCacheMatrix returns a list of getter/setter closures over the stored
## matrix and its cached inverse, which is invalidated whenever set() is
## called with a new matrix.
makeCacheMatrix <- function(x = matrix()) {
  # Cached inverse; NULL means "not computed yet" (or invalidated by set()).
  inve <- NULL

  # Replace the stored matrix and discard any previously cached inverse.
  set <- function(j) {
    x <<- j
    inve <<- NULL
  }

  # Return the stored matrix.
  get <- function() {
    x
  }

  # Store a computed inverse in the cache.
  setInverse <- function(solveMatrix) {
    inve <<- solveMatrix
  }

  # Return the cached inverse (NULL when none has been stored).
  getInverse <- function() {
    inve
  }

  list(
    set = set,
    get = get,
    setInverse = setInverse,
    getInverse = getInverse
  )
}
## Compute the inverse of the special "matrix" created by makeCacheMatrix,
## returning the cached inverse when one is already available.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'.
  ## On a cache hit the stored inverse is returned (with a message);
  ## otherwise the inverse is computed, cached via x$setInverse(), and
  ## returned.
  inve <- x$getInverse()
  if (!is.null(inve)) {
    message("cached data available")
    return(inve)
  }
  data <- x$get()
  ## Forward ... to solve() so callers can pass solver options such as a
  ## tolerance (the original dropped them silently).
  ## NOTE(review): extra arguments are not part of the cache key, so a
  ## later call with different ... still returns the first cached result.
  inve <- solve(data, ...)
  x$setInverse(inve)
  inve
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.