blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 327 | content_id stringlengths 40 40 | detected_licenses listlengths 0 91 | license_type stringclasses 2 values | repo_name stringlengths 5 134 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 46 values | visit_date timestamp[us]date 2016-08-02 22:44:29 2023-09-06 08:39:28 | revision_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | committer_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | github_id int64 19.4k 671M ⌀ | star_events_count int64 0 40k | fork_events_count int64 0 32.4k | gha_license_id stringclasses 14 values | gha_event_created_at timestamp[us]date 2012-06-21 16:39:19 2023-09-14 21:52:42 ⌀ | gha_created_at timestamp[us]date 2008-05-25 01:21:32 2023-06-28 13:19:12 ⌀ | gha_language stringclasses 60 values | src_encoding stringclasses 24 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 7 9.18M | extension stringclasses 20 values | filename stringlengths 1 141 | content stringlengths 7 9.18M |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
dd10acd84d900539097bc0d96b6bd60531f05bfc | 603d3f3d0be5baf9e458686f3d8e90d1a95f7161 | /man/gardasil.Rd | d0983603d09c7d2996314f8ebeae619ec8795b55 | [
"CC0-1.0"
] | permissive | cgpu/ProjectAsPackage | 27f1b79269622b238eb67193afe1ddc78c2ed838 | 1db6c34597a3081f36b9b1dc356faa454aa5fe41 | refs/heads/master | 2021-03-03T02:24:54.400135 | 2020-03-08T18:32:35 | 2020-03-08T18:32:35 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,256 | rd | gardasil.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gardasil.R
\docType{data}
\name{gardasil}
\alias{gardasil}
\title{The Gardasil dataset in R data.frame format}
\format{A data.frame with 1413 rows and 11 variables.
\describe{
\item{Age}{Age in years}
\item{AgeGroup}{Categorical age: 18-26 or 17-27}
\item{Race}{white/black/hispanic/"other/unknown"}
\item{Shots}{}
\item{Completed}{yes/no}
\item{InsuranceType}{medical assistance / private payer / hospital based / military}
\item{MedAssist}{yes/no}
\item{Location}{Four locations}
\item{LocationType}{urban/suburban}
\item{PracticeType}{pediatric / family practice / OB-GYN}
\item{RaceSummary}{white/minority/"other/unknown"}
}}
\source{
Chou B, Krill LS, Horton BB, Barat CE, Trimble CL:
Disparities in human papillomavirus vaccine completion among
vaccine initiators. Obstet. Gynecol. 2011, 118:14–20.
}
\usage{
gardasil
}
\description{
The Gardasil dataset in R data.frame format
}
\details{
Note that when a cleaned dataset like this is provided, it is recommended
to place the code that produces it in data-raw/. You can then use
usethis::use_data_raw() to update the cleaned dataset in data/. See
http://r-pkgs.had.co.nz/data.html.
}
\keyword{datasets}
|
ae4c45e13a6e5f1ef2c333a9cd5b16e681778b74 | b78104f893c4a6c2e1bb531e4d1a03949df51396 | /inst/PhyloProfile/global.R | ba11798cf0ababe6781a4a50dc150dae48afa218 | [
"MIT"
] | permissive | ermali7/PhyloProfile | 3de81f4538edfacb3134683d3b888c03bc0dd4a4 | e9ff67a00586cb61ae9a8d3c0dcc1b22c6fcdc4b | refs/heads/master | 2020-05-16T15:27:57.915585 | 2019-03-26T18:28:27 | 2019-03-26T18:28:27 | 183,132,241 | 0 | 0 | MIT | 2019-04-24T02:32:23 | 2019-04-24T02:32:22 | null | UTF-8 | R | false | false | 1,963 | r | global.R | #' Startup script for PhyloProfile
#' 1) install and load packages
#' 2) start the PhyloProfile app
# Load project helpers; installPackages() used below is defined in this file.
source("R/functions.R")
# List of dependent packages --------------------------------------------------
packages <- c("shiny", "shinyBS", "shinyjs", "colourpicker", "DT",
"devtools", "ggplot2", "reshape2",
"plyr", "dplyr", "tidyr", "scales", "grid",
"gridExtra", "ape", "stringr", "gtable",
"dendextend", "ggdendro", "gplots", "data.table",
"taxize", "zoo", "RCurl", "energy",
"RColorBrewer")
# Set path for install packages while deploy into shiny server ----------------
# (from https://gist.github.com/wch/c3653fb39a00c63b33cf)
# Find & install missing packages ---------------------------------------------
# installPackages() is a project helper (see R/functions.R), not a base function.
installPackages(packages)
# Load packages
# Attach every dependency; library() errors if a package is still missing.
lapply(packages, library, character.only = TRUE)
# Check version and install ggplot2 (require v >= 2.2.0) ----------------------
# Compare the installed version of `pkg` against the version string `than`.
# Returns utils::compareVersion()'s code: 1 if the installed version is
# newer than `than`, 0 if equal, and -1 if older.
version_above <- function(pkg, than) {
  installed_version <- as.character(packageVersion(pkg))
  compareVersion(installed_version, than)
}
if ("ggplot2" %in% rownames(installed.packages())) {
# NOTE(review): despite the section header above, version_above() is never
# consulted here -- ggplot2 is re-installed whenever it is present,
# regardless of its version. Confirm whether a version gate was intended.
installPackages("ggplot2")
library(ggplot2)
}
# Install packages from bioconductor ------------------------------------------
bioconductor_pkgs <- c("Biostrings", "bioDist")
installPackagesBioconductor(bioconductor_pkgs)
lapply(bioconductor_pkgs, library, character.only = TRUE)
# Install OmaDB and its dependencies
oma_pkgs <- c("GO.db", "GenomeInfoDbData")
installPackagesBioconductor(oma_pkgs)
lapply(oma_pkgs, library, character.only = TRUE)
# Install OmaDB from GitHub only when it is not installed yet.
if (!("OmaDB" %in% rownames(installed.packages()))) {
devtools::install_github("trvinh/OmaDB", force = TRUE)
}
library(OmaDB)
# Install shinycssloaders from github -----------------------------------------
# NOTE(review): shinycssloaders is only attached inside the fresh-install
# branch; when the package is already installed, library(shinycssloaders)
# never runs. Confirm whether the library() call should be outside the if.
if (!("shinycssloaders" %in% rownames(installed.packages()))) {
devtools::install_github("andrewsali/shinycssloaders", force = TRUE)
library(shinycssloaders)
}
|
754caac8cc9fdbb1397f15586d676c99a4b9e6d1 | 0f8a97baf9c9373ea62476abbce05bf8f89a9363 | /alloy.R | 87f647febc81b65aff3d1c7a78b6fc8c8a3ced74 | [] | no_license | tastyCanOfMalk/historical.MAL | 642d5b3fcef9a020f48d92d0593f6a72cf9dceaf | 7f34f2496e4ef70182415a85df1a7c614f73d268 | refs/heads/master | 2020-04-20T09:35:06.681740 | 2019-07-24T13:59:06 | 2019-07-24T13:59:06 | 168,768,666 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,744 | r | alloy.R | unique(x$alloy)
length(unique(x$alloy)) # 60
# remove double spaces, commas, periods, caps
# filter for unique sounds
# NOTE(review): the pattern '\\ ' matches a single space, so str_replace_all
# strips ALL spaces (not just doubled ones), despite the comment above.
xx <- x %>%
mutate(alloy = str_replace_all(alloy, '\\ ', '')) %>%
mutate(alloy = str_replace_all(alloy, '\\,', '')) %>%
mutate(alloy = str_replace_all(alloy, '\\.', '')) %>%
mutate(alloy = str_to_lower( alloy))
# select(request, alloy)
xx$alloy
unique(xx$alloy)
length(unique(xx$alloy)) # 47
# search aluminums
# NOTE(review): grepl('al', ...) matches "al" anywhere in the string, so any
# alloy label containing the letters "al" is caught, not only aluminum
# spellings -- verify no false positives in the 60 raw labels.
y <- dplyr::filter(xx, grepl('al', alloy))
unique(y$alloy)
# convert aluminums
y <- xx %>%
mutate(alloy.new = alloy) %>%
mutate(alloy.new = ifelse(grepl('al',alloy), "aluminum", alloy.new))
# confirm
unique(y$alloy.new)
length(unique(y$alloy.new)) # 41
# search
y <- dplyr::filter(xx, grepl('ductile', alloy))
unique(y$alloy)
# convert
y <- xx %>%
mutate(alloy.new = alloy) %>%
mutate(alloy.new = ifelse(grepl('al',alloy), "aluminum", alloy.new)) %>%
mutate(alloy.new = ifelse(grepl('ductile',alloy), "ductile iron", alloy.new))
# confirm
unique(y$alloy.new)
length(unique(y$alloy.new)) # 36
# search
y <- dplyr::filter(xx, grepl('gray', alloy))
unique(y$alloy)
# convert
y <- xx %>%
mutate(alloy.new = alloy) %>%
mutate(alloy.new = ifelse(grepl('al',alloy), "aluminum", alloy.new)) %>%
mutate(alloy.new = ifelse(grepl('ductile',alloy), "ductile iron", alloy.new)) %>%
mutate(alloy.new = ifelse(grepl('di',alloy), "ductile iron", alloy.new)) %>%
mutate(alloy.new = ifelse(grepl('gray',alloy), "grey iron", alloy.new))
# confirm
unique(y$alloy.new)
length(unique(y$alloy.new)) # 29
# search
y <- dplyr::filter(xx, grepl('cg', alloy))
unique(y$alloy)
# convert
y <- xx %>%
mutate(alloy.new = alloy) %>%
mutate(alloy.new = ifelse(grepl('al',alloy), "aluminum", alloy.new)) %>%
mutate(alloy.new = ifelse(grepl('ductile',alloy), "ductile iron", alloy.new)) %>%
mutate(alloy.new = ifelse(grepl('di',alloy), "ductile iron", alloy.new)) %>%
mutate(alloy.new = ifelse(grepl('gray',alloy), "grey iron", alloy.new)) %>%
mutate(alloy.new = ifelse(grepl('cg',alloy), "cgi", alloy.new))
# confirm
unique(y$alloy.new)
length(unique(y$alloy.new)) # 28
# search
y <- dplyr::filter(xx, grepl("ss", alloy))
unique(y$alloy)
# convert
y <- xx %>%
mutate(alloy.new = alloy) %>%
mutate(alloy.new = str_replace_all(alloy.new, "[:punct:]","none")) %>%
mutate(alloy.new = ifelse(grepl('al',alloy), "aluminum", alloy.new)) %>%
mutate(alloy.new = ifelse(grepl('di',alloy), "ductile iron", alloy.new)) %>%
mutate(alloy.new = ifelse(grepl('ductile',alloy), "ductile iron", alloy.new)) %>%
mutate(alloy.new = ifelse(grepl('le iron',alloy), "ductile iron", alloy.new)) %>%
mutate(alloy.new = ifelse(grepl('gray',alloy), "grey iron", alloy.new)) %>%
mutate(alloy.new = ifelse(grepl('y iron',alloy), "grey iron", alloy.new)) %>%
mutate(alloy.new = ifelse(grepl('cg',alloy), "cgi", alloy.new)) %>%
mutate(alloy.new = ifelse(grepl('brass',alloy), "bras", alloy.new)) %>%
mutate(alloy.new = ifelse(grepl('s steel',alloy), "stainless", alloy.new)) %>%
mutate(alloy.new = ifelse(grepl('44',alloy), "stainless", alloy.new)) %>%
mutate(alloy.new = ifelse(grepl('ss',alloy), "stainless", alloy.new)) %>%
mutate(alloy.new = ifelse(grepl('teel',alloy), "lc steel", alloy.new)) %>%
mutate(alloy.new = ifelse(grepl('bras',alloy), "brass", alloy.new)) %>%
mutate(alloy.new = ifelse(alloy.new == "0" |
alloy.new == "none" |
alloy.new == "unknown", NA, alloy.new))
# confirm
unique(y$alloy.new)
length(unique(y$alloy.new)) # 11
# yy <- dplyr::filter(y, grepl("ss", alloy.new))
# unique(yy$alloy.new)
yy <- y %>%
filter(is.na(alloy.new))
yy <- y[2915:2920,]
|
b1c0b734c80b5aea3b9ff9fab516d0b77488577e | df96d28aba4caa3dcfb0b7afe5aadd8fb2a6526c | /poisson_models.R | 7e7e65c134260994eedd0c8a2eec38fbba8d4a2b | [] | no_license | mikemiller442/TCP_Connection_Analysis | 87d43002a6758ca75afd969bc73423ecec64dbd8 | 1132c7d938ca4648e40ca447d72bf6c5d68c6f45 | refs/heads/master | 2020-12-02T22:01:20.689456 | 2020-01-06T16:56:41 | 2020-01-06T16:56:41 | 231,139,048 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,651 | r | poisson_models.R | library(pscl)
library(tidyverse)
tcp_data <- read.csv(file="TCP_dataset3.csv", header=TRUE, sep=",")
nrow(tcp_data)
tcp_data <- tcp_data %>%
mutate(zero_RST_post_fin = post_fin_resets == 0) # makes an indicator variable of whether or not
# there were zero TCP resets post FIN/ACK
# The code below fits a logistic regression model. GLM stands for generalized linear model, so in
# a nutshell this means that the log odds of zero TCP resets post FIN/ACK is a linear function of
# these predictors.
model1 <- glm(zero_RST_post_fin ~ avg_TCP_delta + avg_TCP_delta:avg_DupAcks +
avg_TCP_delta:avg_KAs + avg_TCP_delta:avg_iRTT + avg_ack_RTT + avg_TCP_delta:avg_iRTT +
avg_iRTT + avg_Retransmissions + avg_WindowUpdates + avg_ack_RTT +
avg_KAs + avg_DupAcks, family = "binomial", data = tcp_data)
print(summary(model1))
# The code below fits a zero inflated poisson model. There are two parts: the first part accounts
# for the probability that there are zero TCP resets post FIN/ACK, and the second part predicts
# the number of poisson counts of TCP resets post FIN/ACK after accounting for the probability
# that there are zero.
model2 <- zeroinfl(formula = post_fin_resets ~ avg_TCP_delta + avg_TCP_delta:avg_DupAcks +
avg_TCP_delta:avg_KAs + avg_TCP_delta:avg_iRTT + avg_TCP_delta:avg_iRTT +
avg_iRTT + avg_WindowUpdates + avg_ack_RTT +
avg_KAs + avg_DupAcks | avg_TCP_delta + avg_TCP_delta:avg_DupAcks +
avg_TCP_delta:avg_KAs + avg_TCP_delta:avg_iRTT + avg_ack_RTT + avg_TCP_delta:avg_iRTT +
avg_iRTT + avg_Retransmissions + avg_WindowUpdates + avg_ack_RTT +
avg_KAs + avg_DupAcks, data = tcp_data)
print(summary(model2))
# The code below fits a zero inflated model exactly like the model above, but it uses a different
# probability called the negative binomial distribution. This allows the variance of the response
# to be larger than its mean, which would otherwise violate the assumptions of the poisson model.
# Standard errors are higher, so the model must be pruned down to reflect that we probably overfit
# before.
model3 <- zeroinfl(formula = post_fin_resets ~ avg_TCP_delta + avg_TCP_delta:avg_iRTT +
avg_iRTT + avg_WindowUpdates + avg_DupAcks +
avg_Retransmissions + avg_ack_RTT | avg_TCP_delta + avg_ack_RTT + avg_ack_RTT +
avg_KAs + avg_TCP_delta:avg_KAs + avg_DupAcks, dist = "negbin", data = tcp_data)
print(summary(model3))
|
9273260447523400b1cfcbdd12cf024a6d6abbd2 | 1c81330dcc0ce4982319859ffc0147fc4c8d504f | /loadTheme.R | b0fca2875c9060a7e16b27e68a8809b1721afe43 | [] | no_license | eioe/0303_INCASI | b2babc50d39e9251d1d299fd40e0f1f7d875544b | 689a4f15e29d240a435e4f9676550f6b6ab27044 | refs/heads/master | 2021-04-03T08:33:50.274672 | 2018-03-08T18:42:01 | 2018-03-08T18:42:01 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,030 | r | loadTheme.R | # Define mytheme
# Base font size (points) applied to every text element of mytheme.
fontsize <- 11

# mytheme: a minimal ggplot2 theme built on theme_bw() -- no legend, no grid
# lines or panel decoration, thin black axis lines/ticks, and one uniform
# font size for all text elements.
mytheme <- theme_bw() + theme(legend.position="none") +
  # Set information about ticks
  theme(axis.ticks=element_line(size=0.2358491)) +
  theme(axis.ticks.length=unit(0.05,"cm")) +
  # Remove all pre-defined lines
  theme(panel.grid.major=element_blank()) +
  theme(panel.grid.minor=element_blank()) +
  theme(panel.background=element_blank()) +
  theme(panel.border=element_blank()) +
  theme(plot.background=element_blank()) +
  # Determine style of box
  theme(axis.line = element_line(color= "black",size=0.2358491)) + #results in 0.5pt
  # Determine font size of axes
  theme(text = element_text(size=fontsize)) +
  theme(axis.title.y=element_text(vjust=0.3,size=fontsize)) +
  theme(axis.title.x=element_text(vjust=0.3,size=fontsize)) +
  theme(axis.text.x = element_text(size= fontsize)) +
  theme(axis.text.y = element_text(size= fontsize)) +
  theme(strip.text.x = element_text(size= fontsize)) +
  theme(strip.text.y = element_text(size= fontsize)) +
  # BUG FIX: this call was previously a free-standing statement (the chain
  # above ended without "+"), so strip.background was silently never applied.
  theme(strip.background=element_blank())
|
f81e0bfcb442606e9835cb2c5ca86fea2b8e94d5 | 6249a9849904a3d584ffd4b4b0fd602d842a8d57 | /shinylayouts/shinythemes demo/server.R | aead9c6c1bfedd728f014ef1dde2f10f995ce09e | [] | no_license | kumar-sameer/R-Shinyapp-Tutorial | 737da46ffeb48db27326ed4876f58340e595d7c0 | 2d30b20212c1f1bf19f96ba8b319b110344fb816 | refs/heads/master | 2020-03-18T10:29:30.453843 | 2018-05-22T08:24:53 | 2018-05-22T08:24:53 | 134,615,528 | 0 | 1 | null | 2018-05-23T19:19:48 | 2018-05-23T19:19:48 | null | UTF-8 | R | false | false | 301 | r | server.R | library(shiny)
# Server logic: resolve the dataset chosen in the UI reactively, then render
# a head() preview table and a summary() printout of it.
shinyServer(function(input, output, session) {
  # Reactive accessor for the selected dataset ("m" -> mtcars, "p" -> pressure).
  selected_data <- reactive({
    switch(input$dataset,
           m = mtcars,
           p = pressure)
  })

  # First input$n rows of the chosen dataset.
  output$table <- renderTable({
    n_rows <- input$n
    head(selected_data(), n_rows)
  })

  # Printed summary() of the chosen dataset.
  output$summary <- renderPrint({
    summary(selected_data())
  })
})
|
40782fed2057379d2ed532436090af196234c3ac | 5aa95a422c7af9d310bdf3230f92c73968ea8119 | /man/revpairs.Rd | 2782be57539a11d4ce1bbdf1b068a2af8227d4f2 | [] | no_license | robert19960424/IndGOterm | d28d07eb4a46015f29a00e635c95eb13ed9cb0f4 | 4ceb1a6dd44c3cbe2f803421702d095251fd7bd2 | refs/heads/master | 2023-03-06T02:03:01.140347 | 2021-02-04T15:30:45 | 2021-02-04T15:30:45 | 335,992,001 | 2 | 1 | null | null | null | null | UTF-8 | R | false | true | 2,006 | rd | revpairs.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/revpairs.R
\name{revpairs}
\alias{revpairs}
\title{get reverse pairs from all the tumor samples}
\usage{
revpairs(
stable.pair,
patients,
threshold = 0.05,
spairs_threshold = 0.99,
threads = 1L,
capacity = 300000L
)
}
\arguments{
\item{stable.pair}{a matrix or data.frame of stable pairs from normal samples with two columns, the expression of the genes in the first column is higher than that in the second column.}
\item{patients}{a matrix or data.frame of tumor samples; the first column must be the gene ID, and the tumor samples start at the second column.}
\item{threshold}{a numeric value used to control the false discovery rate based on the p-value of the chi-square test or Fisher's exact test; default is 0.05.}
\item{spairs_threshold}{a threshold same with the "threshold" in function "spairs".}
\item{threads}{an integer giving the number of threads to use for the computation}

\item{capacity}{an integer controlling the computation batch size, i.e. how many rows of stable pairs are processed in one pass; the default is 300000}
}
\value{
a matrix containing four columns respectively represent higher expression gene , lower expression gene , p_value under binomial distribution , false discovery rate under p.adjust
}
\description{
This function extracts reverse gene pairs from a set of disease samples.
It supports parallel computation; set the number of threads to control how many are used.
}
\examples{
stable.pair<-t(combn(sample(1:10,10),2));
geneid<-1:10;
samples<-runif(100,min = 0,max = 50);
patients<-matrix(c(geneid,samples),nrow = 10,byrow=F);
reverse_pairs<-revpairs(stable.pair,patients,threshold=0.05,spairs_threshold=0.99,threads=1L,capacity=300000L)
#compute with parallel
reverse_pairs<-revpairs(stable.pair,patients,threshold=0.05,spairs_threshold=0.99,threads=10L,capacity=300000L)
}
|
5b2c90bd6a966f3f185a389b92e0f61e4d9551f0 | 81c5ec43a8b8712dc540edb3eafdc1b0efbc42f4 | /Rproject/esem_analysis_ver2.R | 8a8467fe4205ee45a788b5eaf18b5fdc640e27e9 | [] | no_license | bellmit/dependency-replication-package | 26157f5518bed1416a7f09205eaf620001486609 | 6969ab06fb379431a468697854324eed19e21e92 | refs/heads/master | 2023-08-28T08:36:19.500705 | 2021-11-04T22:40:01 | 2021-11-04T22:40:01 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,165 | r | esem_analysis_ver2.R | ##--- Load packages ---##
# Declared dependencies for this analysis script.
list.of.packages <-
  c("ggplot2", "lubridate", "dplyr", "corrplot", "tidyverse", "flipTime", "remotes")
# list any missing packages
new.packages <-
  list.of.packages[!(list.of.packages %in% installed.packages()[, "Package"])]
# if packages missing --> install
if (length(new.packages) > 0) {
  install.packages(new.packages, dependencies = TRUE)
}
# flipTime lives on GitHub, so install it via remotes.
# (TRUE spelled out instead of T: T is a reassignable variable, not a keyword.)
remotes::install_github("Displayr/flipTime", force = TRUE)
# load all packages; library() (unlike require()) fails loudly if one is missing
lapply(list.of.packages, library, character.only = TRUE)

##--- Load data ---##
# Keep only the first 3419 rows of the dependency table.
dataset <- read.csv("Data/repos_dep_0630.csv", header = TRUE)[1:3419, ]
type <- read.csv("Data/repos_type_new.csv", header = TRUE)

##--- Transform date columns ---##
# Columns may have been read as factors; coerce to character before parsing
# with flipTime::AsDate (US month/day ordering).
dataset$created_at <- as.character(dataset$created_at)
dataset$created_at <- AsDate(dataset$created_at, us.format = TRUE)
dataset$updated_at <- as.character(dataset$updated_at)
dataset$updated_at <- AsDate(dataset$updated_at, us.format = TRUE)
dataset$last_commit_date <- as.character(dataset$last_commit_date)
dataset$last_commit_date <- AsDate(dataset$last_commit_date, us.format = TRUE)

##--- Transform dependencies column ---##
dataset$dependencies <- as.character(dataset$dependencies)
|
2572e9dbb3870263df00dc723464e7b7d2c4aa62 | b961b2734d9c1ddea03be0d9d3d4c2be909cd76f | /Ford_Fukelson.R | 06a4f170a491b62cd098850860cff95704ef9b67 | [] | no_license | vobon/Implementation-sous-R-Algos-Recherche-Operationelle | bbaddc8fc3b165c3a8de07b37975e7c5736b57bf | 871a63a4cc74c133c3686a7af2e80966a6d975b1 | refs/heads/main | 2023-04-30T14:00:53.074480 | 2021-05-10T18:03:43 | 2021-05-10T18:03:43 | 366,132,812 | 0 | 0 | null | null | null | null | IBM852 | R | false | false | 2,147 | r | Ford_Fukelson.R |
#Tableau des sommets
X=1:7
#Matrice d'adjacence du reseau avec ses capacitÚs
A=rbind(c(0,5,8,0,0,0,0),c(0,0,0,4,2,0,0),c(0,0,0,0,5,2,0),
c(0,0,0,0,0,0,7),c(0,0,0,0,0,0,3),c(0,0,0,0,0,0,3),c(0,0,0,0,0,0,0))
#Matrice des flots realisables
P=matrix(0,nrow=length(X),ncol=length(X))
#Matrice de la marque m
m= matrix(NA,nrow =length(X),ncol =3)
#definition Flotmax
# Ford-Fulkerson maximum-flow (labelling method).
#   X : vector of node ids (source hard-coded as node 1, sink as node 7)
#   A : adjacency/capacity matrix of the network
#   P : current feasible flow matrix (same shape as A)
#   m : label matrix, one row per node: (predecessor, residual amount alpha,
#       direction: +1 forward arc, -1 backward arc)
# Returns the value of the maximum flow; when no augmenting path remains it
# prints the final flow matrix P and the labels m before returning.
Ford_Fukelson = function(X,A,P,m)
{
# running value of the flow found by this call
Flotmax = 0
# residual amount alpha_j carried along the current path
alphaj = 0
# "infinity" sentinel used as the source's label
inf = 50000
# S: set of labelled (reached) nodes
S = vector()
s = 1
p = 7
# label the source: no predecessor, infinite residual, forward direction
m[s,] = c(NA,inf,1)
S = append(S,s)
# positions of arcs whose flow is strictly below capacity (forward residual)
R1=A-P>0
# positions of arcs carrying positive flow from j to i (backward residual)
R2=t(P)>0
# union of R1 and R2: all arcs usable by an augmenting path
C=R1|R2
# S-bar: the nodes not yet labelled
Sb=setdiff(X,S)
# (row, col) positions of usable arcs from a labelled to an unlabelled node
Cnd=which(matrix(C[S,Sb]==TRUE,nrow=length(S),ncol=length(Sb)),arr.ind=TRUE)
# grow the labelled set until the sink is reached or no arc qualifies
while(length(Cnd)>0)
{
x = S[Cnd[1,1]]
y = Sb[Cnd[1,2]]
if(R1[x,y]==TRUE)
{
# forward arc: residual is the remaining capacity
V = A[x,y]-P[x,y]
alphaj = min(c(m[x,2],V))
m[y,] = c(x,alphaj,1)
}
else if(R2[x,y]==TRUE){
# backward arc: residual is the flow that can be cancelled
V = P[y,x]
alphaj = min(c(m[x,2],V))
m[y,] = c(x,alphaj,-1)
}
S = append(S,y)
if(y == p){
# sink reached: an augmenting path of value alphaj exists
Flotmax = Flotmax + alphaj
break
}
Sb=setdiff(X,S)
Cnd=which(matrix(C[S,Sb]==TRUE,nrow=length(S),ncol=length(Sb)),arr.ind=TRUE)
}
if(is.element(p,S))
{
# walk the labels back from the sink and augment the flow along the path
y = p
x = m[y,1]
while(y != s)
{
if(m[y,3]==1)
{
# forward arc: push m[p,2] more units
P[x,y] = P[x,y] + m[p,2]
}
else if(m[y,3]==-1)
{
# backward arc: cancel m[p,2] units
P[x,y] = P[x,y] - m[p,2]
}
y = m[y,1]
x = m[y,1]
}
# recurse on the augmented flow; total is this path plus what follows
Flotmax = Flotmax + Ford_Fukelson(X,A,P,m)
}
else
{
# no augmenting path left: P is maximal; show it and return the flow value
print(P)
print(m)
return(Flotmax)
}
}
valflotMax = Ford_Fukelson(X,A,P,m)
print(paste("Le flot de valeur max obtenu par l'algo de Ford Fukelson est : ",valflotMax))
|
963dc08632f3bd673536c9acd4fa10774d7c0346 | 983faccc7fd0d4184fc8657cb6b0d0e5c4100e84 | /scripts/4_model/regressionMultivariateLassoByGroup.R | fd0ed33ada59ffe0aabace5f59c1e707912243f1 | [
"LicenseRef-scancode-warranty-disclaimer",
"CC0-1.0",
"LicenseRef-scancode-public-domain-disclaimer"
] | permissive | ldecicco-USGS/GLPF | 668137679a9765d562312515ca32eba037ea8bb2 | 3a36dc344a92205ce65e620806e97191722bf72e | refs/heads/master | 2020-12-25T16:57:37.364703 | 2019-06-05T19:20:38 | 2019-06-05T19:20:38 | 59,784,854 | 0 | 1 | null | 2017-05-23T14:17:31 | 2016-05-26T21:37:49 | R | UTF-8 | R | false | false | 11,637 | r | regressionMultivariateLassoByGroup.R | # Next steps:
# consider refitting with lm() and examining slopes by event
# consider automated way to keep track of slopes for individual events within group regressions
# -maybe use median regression for this.
# Heuristic overlap analysis: decision tree
library(glmnet)
library(dplyr)
library(RColorBrewer)
library(parallel)
library(doParallel)
#set up parallel cores for cv.glmnet
# Calculate the number of cores
no_cores <- detectCores() - 1
# Register parallel backend
registerDoParallel(no_cores)
#setwd("D:/SRCData/Git/GLPF")
source("na.info.R")
df.orig <- readRDS("./cached_data/8_process_new_categories/rds/summary_noWW_noQA.rds")
#df.orig <- summaryDF
df <- df.orig
response <- c("lachno","bacHum")
df <- df[-which(is.na(df$lachno)),]
beginIV <- "Sag240_255"
endIV <- "rBS44_S45_BF"
begin <- which(names(df)==beginIV)
end <- which(names(df)==endIV)
IVs <- names(df)[begin:end]
na.info.list <- na.info(df[,-dim(df)[2]],first.col = beginIV)
rmRows <- unique(c(which(df$CAGRnumber %in% na.info.list$na.rows),
na.info.list$nan.rows,
na.info.list$inf.rows))
rmCols <- unique(which(names(df) %in% c(na.info.list$na.cols.partial,
na.info.list$nan.cols,
na.info.list$inf.cols)))
dfrmCols <- df[,-rmCols]
dfRmRows <- df[rmRows,]
df <- df[,-rmCols]
beginIV <- "Sag240_255"
endIV <- "rBS44_S45_BF"
begin <- which(names(df)==beginIV)
end <- which(names(df)==endIV)
IVs <- names(df)[begin:end]
groupFreq <- table(df$eventGroup2)
groups <- names(groupFreq)[which(groupFreq>21)]
mg.List <- list()
mg.cv.List <- list()
lambdaType <- "lambda.min"
filenm <- "MVLassoByGroupLminFull2.pdf"
pdf(filenm)
modelCoefList <- list()
for(i in 1:length(groups)){
subdf <- df[which(df$eventGroup2==groups[i]),]
IVs <- names(df)[begin:end]
#subdf <- df
foldID <- as.numeric(as.factor(subdf$eventNum))
events <- unique(subdf$eventNum)
# # Add events as separate dichotomous IVs
# if(length(events)>1){
# # eventDF <- data.frame(E1 = ifelse(subdf$eventNum == events[2],1,0))
# for(j in 2:length(events)){
# if(j==2)eventDF <- as.data.frame(ifelse(subdf$eventNum == events[j],1,0))
# else eventDF <- cbind(eventDF,ifelse(subdf$eventNum == events[j],1,0))
# }
# names(eventDF) <- events[-1]
# subdf <- cbind(subdf,eventDF)
# IVs <- c(IVs,names(eventDF))
# }
y <- log10(as.matrix(subdf[,response]))
x <- as.matrix(subdf[IVs])
#If more than 2 events included in group, use event as fold ID, otherwise, use 5-fold XV
if(length(unique(foldID))>2){
mg.cv <- cv.glmnet(x=x, y=y,family="mgaussian",alpha=1,foldid = foldID,parallel = TRUE)
mg <- glmnet(x=x, y=y,family="mgaussian", alpha=1)
}else{
mg.cv <- cv.glmnet(x=x, y=y,family="mgaussian",alpha=1,nfolds=5,parallel = TRUE)
mg <- glmnet(x=x, y=y,family="mgaussian", alpha=1)
}
#Extract Coefficients from cv-determined model using lambda.1se
if(lambdaType == "lambda.1se"){
Coefficients <- coef(mg, s = mg.cv$lambda.1se)
Active.Index <- which(Coefficients[[1]] != 0)
Active.Coefficients <- Coefficients[[1]][Active.Index];Active.Coefficients
Active.Coef.names <- row.names(Coefficients[[1]])[Active.Index];Active.Coef.names
}
#Extract Coefficients from cv-determined model using lambda.min
if(lambdaType == "lambda.min"){
Coefficients <- coef(mg, s = mg.cv$lambda.min)
Active.Index <- which(Coefficients[[1]] != 0)
Active.Coefficients <- Coefficients[[1]][Active.Index];Active.Coefficients
Active.Coef.names <- row.names(Coefficients[[1]])[Active.Index];Active.Coef.names
}
modelCoefList[[i]] <- Active.Coef.names[-1]
#Plot cross validated errors and other model results
plot(mg.cv)
predictions <- predict(mg.cv,newx=as.matrix(subdf[,IVs]),s=lambdaType,type = "response")
plotpch <- 20
colorOptions <- brewer.pal(9, "Set1")
plotCol <- colorOptions[1:length(events)]
names(plotCol) <- events
plotcolors <- plotCol[subdf$eventNum]
par(mfcol=c(2,1),mar=c(3,4,3,1),oma=c(0,2,0,4))
#Plot Lachno
plot(subdf[,response[1]],predictions[,1,1],col=plotcolors,pch=plotpch,log='x',xlab="",ylab="")
mtext(response[1],line=1)
mtext(paste(Active.Coef.names[-1],collapse=' + '),cex=0.7)
mtext(groups[i],line=2,font=2)
#Plot bacHum
plot(subdf[,response[2]],predictions[,2,1],col=plotcolors,pch=plotpch,log='x',xlab="",ylab="")
mtext("Predicted",side=2,line=-2,font=2,xpd=NA,outer=TRUE)
mtext("Observed",side=1,line=2,font=2)
mtext(response[2],line=1)
legendNames <- names(plotCol)
legend('bottomright',legend = legendNames,col=plotCol,pch=plotpch,inset = c(-0.15,0),bty = "n",xpd=NA)
# calibrate Tobit regression and plot
library(survival)
IVs <- Active.Coef.names[-1]
response <- response
LOQ <- 225
## Compute survival coefficients for Lachno regression ##
if(length(IVs) > 0){
y <- Surv(log10(subdf[,response[1]]), subdf[,response[1]]>LOQ, type="left")
#dfPredStd <- as.data.frame(scale(dfPred[,IVs]))
form <- formula(paste('y ~',paste(IVs,collapse=' + ')))
msurvStd <- survreg(form,data=subdf,dist='weibull')
summary(msurvStd)
predictions <- predict(msurvStd,newdata = subdf)
par(mfcol=c(2,1),mar=c(3,4,3,1),oma=c(0,2,0,4))
#Plot Lachno
plot(subdf[,response[1]],predictions,col=plotcolors,pch=plotpch,log='x',xlab="",ylab="")
mtext(paste(response[1],"Survival"),line=1)
mtext(paste(Active.Coef.names[-1],collapse=' + '),cex=0.7)
mtext(groups[i],line=2,font=2)
abline(h=4,v=10000,col="blue",lty=2)
## Compute survival coefficients for Lachno regression ##
y <- Surv(log10(subdf[,response[2]]), subdf[,response[2]]>LOQ, type="left")
#dfPredStd <- as.data.frame(scale(dfPred[,IVs]))
form <- formula(paste('y ~',paste(IVs,collapse=' + ')))
msurvStd <- survreg(form,data=subdf,dist='weibull')
summary(msurvStd)
predictions <- predict(msurvStd,newdata = subdf)
#Plot BacHum
plot(subdf[,response[2]],predictions,col=plotcolors,pch=plotpch,log='x',xlab="",ylab="")
mtext(paste(response[2],"Survival"),line=1)
mtext(groups[i],line=2,font=2)
abline(h=4,v=10000,col="blue",lty=2)
}else{
par(mfrow=c(1,1))
plot(1:10,1:10,xaxt="n",yaxt="n",ylab="",xlab="",pch="")
text(5,5,"No Lasso Variables")
}
mg.List[[i]] <- mg
mg.cv.List[[i]] <- mg.cv
#----------------------------------------------------------------------------------
# calibrate model with all data from individual group model
subdf <- df
y <- log10(as.matrix(subdf[,response]))
x <- as.matrix(subdf[Active.Coef.names[-1]])
foldIDs <- as.numeric(as.factor(subdf$eventNum))
which(table(foldIDs)<10)
if(length(modelCoefList[[i]]) > 1) {
mg.cv <- cv.glmnet(x=x, y=y,family="mgaussian",alpha=1,foldid = foldIDs,parallel = TRUE)
mg <- glmnet(x=x, y=y,family="mgaussian", alpha=1)
#Extract Coefficients from cv-determined model using lambda.1se
if(lambdaType == "lambda.1se"){
Coefficients <- coef(mg, s = mg.cv$lambda.1se)
Active.Index <- which(Coefficients[[1]] != 0)
Active.Coefficients <- Coefficients[[1]][Active.Index];Active.Coefficients
Active.Coef.names <- row.names(Coefficients[[1]])[Active.Index];Active.Coef.names
}
#Extract Coefficients from cv-determined model using lambda.min
if(lambdaType == "lambda.min"){
Coefficients <- coef(mg, s = mg.cv$lambda.min)
Active.Index <- which(Coefficients[[1]] != 0)
Active.Coefficients <- Coefficients[[1]][Active.Index];Active.Coefficients
Active.Coef.names <- row.names(Coefficients[[1]])[Active.Index];Active.Coef.names
}
#Plot cross validated errors and other model results
plot(mg.cv)
predictions <- predict(mg.cv,newx=x,s=lambdaType,type = "response")
plotpch <- 20
colorOptions <- brewer.pal(9, "Set1")
plotCol <- colorOptions[1:length(events)]
names(plotCol) <- events
plotcolors <- "grey"
# plotCol[subdf$eventNum]
par(mfcol=c(2,1),mar=c(3,4,3,1),oma=c(0,2,0,4))
#Plot Lachno
plot(subdf[,response[1]],predictions[,1,1],col=plotcolors,pch=plotpch,log='x',xlab="",ylab="")
mtext(response[1],line=1)
mtext(paste(Active.Coef.names[-1],collapse=' + '),cex=0.7)
mtext(groups[i],line=2,font=2)
#Plot bacHum
plot(subdf[,response[2]],predictions[,2,1],col=plotcolors,pch=plotpch,log='x',xlab="",ylab="")
mtext("Predicted",side=2,line=-2,font=2,xpd=NA,outer=TRUE)
mtext("Observed",side=1,line=2,font=2)
mtext(response[2],line=1)
legendNames <- names(plotCol)
legend('bottomright',legend = legendNames,col=plotCol,pch=plotpch,inset = c(-0.15,0),bty = "n",xpd=NA)
}
}
dev.off()
shell.exec(filenm)
###------------------------------------------------------------------------
# Observed-vs-predicted plots for every (group, event) pair: one PDF page per
# combination, first response on the top panel, second response below, with
# the current event's points overplotted in blue on a grey background.
# NOTE(review): this section assumes df, response, IVs, groups, mg.List and
# mg.cv.List already exist in the workspace (created earlier in this file).
#Plot with all data by group and then by event
plotAll <- TRUE
# Which cross-validated penalty to use when extracting coefficients:
# "lambda.min" (minimum CV error) or "lambda.1se" (one-standard-error rule).
lambdaType <- "lambda.min"
if(plotAll) {
  filenm <- "GroupLassoByEvent.pdf"
  pdf(filenm)
  subdf <- df
  # Model matrices: log10-transformed responses, untransformed predictors.
  y <- log10(as.matrix(subdf[,response]))
  x <- as.matrix(subdf[,IVs])
  events <- unique(subdf$eventNum)
  for(i in 1:length(groups)){
    mg <- mg.List[[i]]
    mg.cv <- mg.cv.List[[i]]
    #Extract Coefficients from cv-determined model using lambda.1se
    if(lambdaType == "lambda.1se"){
      Coefficients <- coef(mg, s = mg.cv$lambda.1se)
      Active.Index <- which(Coefficients[[1]] != 0)
      Active.Coefficients <- Coefficients[[1]][Active.Index];Active.Coefficients
      Active.Coef.names <- row.names(Coefficients[[1]])[Active.Index];Active.Coef.names
    }
    #Extract Coefficients from cv-determined model using lambda.min
    if(lambdaType == "lambda.min"){
      Coefficients <- coef(mg, s = mg.cv$lambda.min)
      Active.Index <- which(Coefficients[[1]] != 0)
      Active.Coefficients <- Coefficients[[1]][Active.Index];Active.Coefficients
      Active.Coef.names <- row.names(Coefficients[[1]])[Active.Index];Active.Coef.names
    }
    predictions <- predict(mg.cv,newx=x,s=lambdaType,type = "response")
    for(j in 1:length(events)){
      event <- events[j]
      plotpch <- 20
      # colorOptions <- brewer.pal(9, "Set1")
      #
      # plotCol <- colorOptions[1:length(events)]
      # names(plotCol) <- events
      # All points grey; the current event's points overplotted in blue.
      plotcolors <- "grey"
      eventcolor <- ifelse(subdf$eventNum==event,"blue",NA)
      # plotCol[subdf$eventNum]
      # Clamp the prediction axis to the [0, 8] range.
      ylim <- range(predictions[,1,1])
      ylim[1] <- ifelse(ylim[1] < 0,0,ylim[1])
      ylim[2] <- ifelse(ylim[2] > 8,8,ylim[2])
      par(mfcol=c(2,1),mar=c(3,4,3,1),oma=c(0,2,0,4))
      #Plot Lachno
      plot(subdf[,response[1]],predictions[,1,1],col=plotcolors,pch=plotpch,log='x',
           xlab="",ylab="",ylim=ylim)
      points(subdf[,response[1]],predictions[,1,1],col=eventcolor,pch=plotpch)
      mtext(response[1],line=1)
      mtext(paste(Active.Coef.names[-1],collapse=' + '),cex=0.7)
      mtext(paste(groups[i],";",event),line=2,font=2)
      #Plot bacHum
      ylim <- range(predictions[,2,1])
      ylim[1] <- ifelse(ylim[1] < 0,0,ylim[1])
      ylim[2] <- ifelse(ylim[2] > 8,8,ylim[2])
      plot(subdf[,response[2]],predictions[,2,1],col=plotcolors,pch=plotpch,log='x',
           xlab="",ylab="",ylim = ylim)
      points(subdf[,response[2]],predictions[,2,1],col=eventcolor,pch=plotpch)
      mtext("Predicted",side=2,line=-2,font=2,xpd=NA,outer=TRUE)
      mtext("Observed",side=1,line=2,font=2)
      mtext(response[2],line=1)
      # NOTE(review): plotCol is only defined in the commented-out block above;
      # this legend relies on a plotCol left over from earlier code -- verify.
      legendNames <- names(plotCol)
      legend('bottomright',legend = legendNames,col=plotCol,pch=plotpch,inset = c(-0.15,0),bty = "n",xpd=NA)
    }
  }
}
dev.off()
shell.exec(filenm)
|
8a5515587717f661d137f28fe9e6b4dc9c439bda | f2643256c6611d7de0db96d162f594388c2c2c50 | /analyses/Field_Monitoring/duplicates.R | 09dd8d3ab6756d94382d410c797b169f383c6236 | [] | no_license | raubreywhite/trial_dofiles | e06a5b3b39e9195eda79dd33856d67c918ec4053 | eface3b83b107cf7e621b3c654e65b5cbd45b711 | refs/heads/master | 2022-06-14T03:26:17.492945 | 2022-06-02T07:27:04 | 2022-06-02T07:27:04 | 114,857,557 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 10,123 | r | duplicates.R |
###### SETUP STARTS ######
# Project bootstrap: load every helper script in r_code/ (this is where
# Setup() and LoadDataFileFromNetwork() come from), then initialise.
setwd("C:/data processing/trial_dofiles")
fileSources = file.path("r_code", list.files("r_code", pattern = "*.[rR]$"))
fileSources=file.path(getwd(),fileSources)
sapply(fileSources, debugSource)
Setup(IS_GAZA=FALSE)
###### SETUP ENDS ######
###### LOAD DATA ######
d <- LoadDataFileFromNetwork()
# duplicate bookings or files
# time difference between bookdates for multiple bookings to see differences
# Order by booking number so shift() yields each mother's previous booking.
setorder(d,booknum)
d[,bookdateprevious:=shift(bookdate,n=1L),by=motheridno]
d[,diftimebook:=difftime(bookdate,
                         bookdateprevious,
                         units="days")]
# Distribution of gaps (days) between consecutive bookings.
xtabs(~d[ident_dhis2_booking==T]$diftimebook, addNA=T)
xtabs(~d[booknum==1]$diftimebook, addNA=T)
# Build the full name by chaining str_c(); str_c() propagates NA, so any
# missing name component leaves fullname NA for that record.
d[,fullname:=stringr::str_c(firstname,
                            ' ',
                            fathersname)]
d[,fullname:=stringr::str_c(fullname,
                            ' ',
                            middlename)]
d[,fullname:=stringr::str_c(fullname,
                            ' ',
                            familyname1)]
d[,fullname:=stringr::str_c(fullname,
                            ' ',
                            familyname2)]
# NOTE(review): str_replace(" *", " ") replaces only the FIRST (possibly
# empty) match, so this does not collapse repeated spaces throughout the
# string; presumably str_replace_all(" +", " ") was intended -- verify.
d[, fullname:=stringr::str_replace(fullname," *"," ")]
# Candidate-duplicate listing for manual review.
duplicates <- d[,c("motheridno",
                   "fullname",
                   "familyname1",
                   "familyname2",
                   "ident_dhis2_booking",
                   "ident_dhis2_ppc",
                   "bookdate",
                   "booknum",
                   "diftimebook",
                   "bookorgname",
                   "bookorgdistrict")]
xtabs(~duplicates$diftimebook)
setorder(duplicates, bookorgdistrict,bookorgname,fullname, motheridno)
# Running count of rows within each full name (1, 2, ...).
duplicates[,fullnamenum:=1:.N, by=fullname]
xtabs(~duplicates$fullnamenum)
xtabs(~duplicates$booknum, addNA=TRUE)
# duplicates[,halfnamenum:=1:.N, by=halfname]
# xtabs(~duplicates$halfnamenum)
# xtabs(~duplicates$halfnamenum, addNA=TRUE)
# Export the de-duplicated review sheet, date-stamped.
openxlsx::write.xlsx(unique(duplicates),file.path(FOLDER_DATA_RESULTS,
                                                  "quality_control",
                                                  sprintf("duplicates_%s.xlsx", lubridate::today())))
################
# demographics #
################
#
# demo <- fread(fs::path(FOLDER_DATA_RAW,
#                        "e.reg-intervention",
#                        "2021-08-12",
#                        "Clinical Demographics.csv"))
#
#
# Load the clinical demographics export and normalise its column names.
demo <- fread("C:/data processing/data_raw/e.reg-intervention/2021-08-12/Clinical Demographics.csv",
              encoding="UTF-8")
nrow(demo)
for (i in names(demo)) setnames(demo, i, ExtractOnlyEnglishLettersAndNumbers(i)[[1]])
setnames(demo,"instance","uniqueid")
setnames(demo,"created","datecreated")
setnames(demo,"lastupdated","dateupdated")
setnames(demo,"organisationunit","demoorgunit")
setnames(demo,"organisationunitname","demoorgname")
# if(badname %in% names(demo)) setnames(demo, badname, goodname)
# BUG FIX: the guard originally tested names(d) (the booking data loaded
# earlier) instead of names(demo), so the rename of demo's column could be
# skipped or attempted on a column that does not exist.
if("trackedentitytype" %in% names(demo)) setnames(demo, "trackedentitytype", "trackedentity")
# if(!goodname %in% names(demo)) ERROR!!
if(!"trackedentity" %in% names(demo)) stop("cant find trackedentity")
setnames(demo,"inactive","dummy")
setnames(demo,"identificationdocumenttype","idtype")
if(!"identificationdocumentnumber" %in% names(demo)){
  warning("no identification document number -- we create one")
  demo[,identificationdocumentnumber:=1:.N]
}
setnames(demo,"identificationdocumentnumber","demoidnumber")
#setnames(demo,"areyouwillingtoreceivesmstextmessagesandremindersaboutyourvisits", "doyouwanttoreceivesms")
# BUG FIX: the column was renamed to demoidnumber just above, so tabulating
# demo$identificationdocumentnumber would fail; use the new name.
xtabs(~demo$demoidnumber)
# Keep only the date part (first 10 characters) of the creation timestamp.
demo[,datecreated:=stringr::str_extract(datecreated, "^.{10}")]
xtabs(~demo$datecreated)
demo[, datecreated:=as.Date(datecreated)]
# (a duplicated xtabs(~demo$datecreated) check was removed here)
xtabs(~demo$datecreated)
str(demo$datecreated)
# Number of records sharing each ID number.
demo[,idnonum:=.N, by=demoidnumber]
xtabs(~demo[idnonum>1]$demoidnumber, addNA=T)
# number of digits (9, 8, 10)
demo[,idnumdigits:=nchar(as.integer(demoidnumber))]
xtabs(~demo$idnumdigits, addNA=T)
demo[,yearcreated:=lubridate::year(datecreated)]
xtabs(~demo$yearcreated)
demo[,demoorgname:=ExtractOnlyEnglishLetters(demoorgname)]
xtabs(~demo$demoorgname)
# Drop test facilities.
demo <- demo[!demoorgname %in% c("test", "testfacility")]
#############
# full name #
#############
# Build fullname by chaining str_c(); str_c() propagates NA, so a missing
# name component leaves fullname NA for that record.
demo[,fullname:=stringr::str_c(firstname,
                               ' ',
                               fathersname)]
demo[,fullname:=stringr::str_c(fullname,
                               ' ',
                               middlename)]
demo[,fullname:=stringr::str_c(fullname,
                               ' ',
                               womanfamilyname)]
demo[,fullname:=stringr::str_c(fullname,
                               ' ',
                               husbandsfamilyname)]
# NOTE(review): str_replace(" *", " ") replaces only the first (possibly
# empty) match; str_replace_all(" +", " ") was presumably intended -- verify.
demo[, fullname:=stringr::str_replace(fullname," *"," ")]
#############
# half name #
#############
# Half name = first + father's + middle name (family names excluded).
demo[,halfname:=stringr::str_c(firstname,
                               ' ',
                               fathersname)]
demo[,halfname:=stringr::str_c(halfname,
                               ' ',
                               middlename)]
demo[, halfname:=stringr::str_replace(halfname," *"," ")]
# Count records sharing a half name within each facility.
demo[,numhalfname:=.N, by=c("halfname","demoorgname")]
demo[numhalfname==7, c("fullname","datecreated")]
# merge bookordistrict stuff #
# Facility -> district lookup table. NOTE: setnames() renames demo itself
# (data.table modifies by reference), so the column demoorgname no longer
# exists anywhere after this point -- later code must use bookorgname.
sData <- as.data.table(readxl::read_excel("../data_raw/structural_data/bookorgname.xlsx"))
setnames(demo, "demoorgname","bookorgname")
dData <- merge(
  demo,
  sData,
  by="bookorgname",
  all.x=T)
# names in english and numbers
# Flag any record whose full name contains a digit.
dData[, numinname:=FALSE]
dData[stringr::str_detect(fullname,"[0-9]"), numinname:=TRUE]
xtabs(~dData$numinname, addNA=T)
# ID-quality summary per facility and year of record creation.
iddata <- dData[,c("bookorgname",
                   "yearcreated",
                   "idnumdigits",
                   "numinname",
                   "idnonum")]
# NOTE(review): denom is computed here but never used below -- verify.
iddata[,denom:=.N, by=c("bookorgname","yearcreated")]
ag <- iddata[,.(N=.N,
                Num_digits_less_than_9=sum(idnumdigits<9, na.rm = T),
                Num_id_digits_9=sum(idnumdigits==9, na.rm=T),
                Num_id_digits_more_than_9=sum(idnumdigits>9, na.rm=T),
                Numinname=sum(numinname==T, na.rm=T),
                Numinnameandmultiple=sum(numinname==T &
                                           idnonum>1, na.rm=T)),
             keyby=.(yearcreated,bookorgname)]
ag[,prop_less_than_9:=round(Num_digits_less_than_9/N, digits=3)]
ag[,prop_id_digits_9:=round(Num_id_digits_9/N, digits=3)]
ag[,prop_id_digits_more_than_9:=round(Num_id_digits_more_than_9/N, digits=5)]
ag[,prop_numinname:=round(Numinname/N, digits=5)]
ag[,prop_numinnamemultipleidno:=round(Numinnameandmultiple/N, digits=5)]
# NOTE(review): tokeep is never used afterwards -- verify.
tokeep <- ag[prop_id_digits_9<1.0]
openxlsx::write.xlsx(ag,file.path(FOLDER_DATA_RESULTS,
                                  "quality_control",
                                  "duplicates",
                                  sprintf("id_quality_digits_%s.xlsx", lubridate::today())))
# Records whose half name occurs more than once: review columns for export.
toanalyze <- dData[numhalfname>1,c("uniqueid",
                                   "bookorgname",
                                   "demoorgunit",
                                   "datecreated",
                                   "dateupdated",
                                   "idtype",
                                   "demoidnumber",
                                   "firstname",
                                   "fathersname",
                                   "middlename",
                                   "womanfamilyname",
                                   "husbandsfamilyname",
                                   "dateofbirth",
                                   "ageatmarriage",
                                   "ageatfirstpregnancy",
                                   "consanguinity",
                                   "educationinyears",
                                   "yearcreated",
                                   "idnonum",
                                   "idnumdigits",
                                   "fullname",
                                   "halfname",
                                   "numhalfname")]
setorder(toanalyze, halfname, yearcreated, demoorgunit)
openxlsx::write.xlsx(toanalyze,
                     file.path(FOLDER_DATA_RESULTS,
                               "quality_control",
                               "duplicates",
                               sprintf("halfnamedups_%s.xlsx",CLINIC_INTERVENTION_DATE)))
# either they have the same id number or they have a previous half name and name repeated but need to include # even if prevfile name is missing
#############
# analyses  #
#############
# shift last names
toanalyze[,womanfamilynameprev:=shift(womanfamilyname,n=1L),by=c("halfname","bookorgname")]
toanalyze[,husbandsfamilynameprev:=shift(husbandsfamilyname,n=1L),by=c("halfname","bookorgname")]
toanalyze[,newhalfname:=halfname]
toanalyze[,halfnameprev:=shift(newhalfname,n=1L),by=c("halfname","bookorgname")]
toanalyze[,newhalfname:=NULL]
# prevfilesame codes: 1 = same half name as the previous record, 2 = also the
# same woman's family name, 3 = also the same husband's family name,
# 4 = all three match.
toanalyze[halfnameprev==halfname,prevfilesame:=1]
xtabs(~toanalyze$prevfilesame, addNA=T)
# BUG FIX: the original compared womanfamilynameprev to itself (always TRUE
# apart from NA); compare the current name to the shifted one instead.
toanalyze[halfnameprev==halfname &
            womanfamilyname==womanfamilynameprev,prevfilesame:=2]
xtabs(~toanalyze$prevfilesame, addNA=T)
toanalyze[halfnameprev==halfname &
            husbandsfamilynameprev==husbandsfamilyname,prevfilesame:=3]
xtabs(~toanalyze$prevfilesame, addNA=T)
# BUG FIX: same self-comparison fixed in the all-three-match condition.
toanalyze[halfnameprev==halfname &
            womanfamilyname==womanfamilynameprev &
            husbandsfamilynameprev==husbandsfamilyname,prevfilesame:=4]
xtabs(~toanalyze$prevfilesame, addNA=T)
# differences in id numbers for these cases
# BUG FIX: toanalyze has no demoorgname column (demo's facility column was
# renamed to bookorgname before the merge), so ordering by it would error.
setorder(toanalyze,bookorgname,halfname,yearcreated)
toanalyze[!is.na(prevfilesame),yearcreatedbefore:=shift(yearcreated,n=1L),by=c("halfname","bookorgname")]
# shift year created to get difference
toanalyze[,yearDif:=as.numeric(yearcreated-yearcreatedbefore)]
xtabs(~toanalyze$yearDif, addNA=T)
# possible multiple names #
##################
# multiple fields #
##################
# BUG FIX: demo's demoorgname column was renamed to bookorgname earlier, so
# both the ordering and the grouping below must use bookorgname.
setorder(demo,bookorgname, halfname, womanfamilyname,husbandsfamilyname,demoidnumber, yearcreated)
demo[,numhalfname:=.N, by=c("halfname","bookorgname")]
xtabs(~demo$numhalfname)
|
379be78cab28e2d97315642dd9c2af99a7f7aad6 | 9c226a1557a08165e08bc660150a4e20868efbf7 | /R/print.R | c48936c3c44463c7170d5cf078cf76f7b1f70101 | [] | no_license | cran/ExhaustiveSearch | 55dfc7c515a98079400fabfcd79e1937717103c8 | 0215018c8769856fd9fb6aa4e30716f026bf7622 | refs/heads/master | 2023-02-24T13:47:10.245395 | 2021-01-18T16:00:11 | 2021-01-18T16:00:11 | 334,106,119 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,106 | r | print.R |
# Format a duration given in seconds as a "DDd HHh MMm SSs" string, with each
# component zero-padded to two digits.
formatSecTime <- function(sec) {
  # Decompose the duration into whole days / hours / minutes / seconds.
  parts <- floor(c(
    sec / 60 / 60 / 24,
    (sec / 60 / 60) %% 24,
    (sec / 60) %% 60,
    sec %% 60
  ))
  # Zero-pad each component, glue on its unit suffix, and join everything.
  paste0(sprintf("%02d", parts), c("d ", "h ", "m ", "s"), collapse = "")
}
#' Print ExhaustiveSearch
#'
#' Prints a compact summary of the results of an ExhaustiveSearch object.
#'
#' @param x Object of class 'ExhaustiveSearch'.
#' @param ... Further arguments passed to or from other methods.
#'
#' @return No return value. The function is only called to print results to the
#'   console.
#'
#' @author Rudolf Jagdhuber
#'
#' @seealso [ExhaustiveSearch()]
#'
#' @importFrom utils capture.output
#' @export
print.ExhaustiveSearch = function(x, ...) {

  # Where the models were evaluated: the test set when one was used
  # (nTest > 0), otherwise the training set.
  evalOn = ifelse(x$setup$nTest == 0,
    paste0("training set (n = ", format(x$setup$nTrain, big.mark = ","), ")\n"),
    paste0("test set (n = ", format(x$setup$nTest, big.mark = ","), ")\n"))

  # Header box followed by one aligned "label: value" line per setting.
  cat("\n+-------------------------------------------------+")
  cat("\n|             Exhaustive Search Results           |")
  cat("\n+-------------------------------------------------+\n")
  cat("Model family:        ", x$setup$family, "\n")
  cat("Intercept:           ", x$setup$intercept, "\n")
  cat("Performance measure: ", x$setup$performanceMeasure, "\n")
  cat("Models fitted on:    ", " training set (n = ", x$setup$nTrain, ")\n",
      sep = "")
  cat("Models evaluated on: ", evalOn)
  # Flag an incomplete search (fewer models evaluated than planned).
  cat("Models evaluated:    ", format(x$nModels, big.mark = ","),
      ifelse(x$evaluatedModels != x$nModels, " (Incomplete!)", ""), "\n")
  cat("Models saved:        ", format(x$setup$nResults, big.mark = ","), "\n")
  cat("Total runtime:       ", formatSecTime(x$runtimeSec), "\n")
  cat("Number of threads:   ", x$batchInfo$nBatches, "\n")
  # Show the five best feature sets via resultTable() (defined in this
  # package), captured so the table prints as a single block.
  cat("\n+-------------------------------------------------+")
  cat("\n|                 Top Feature Sets                |")
  cat("\n+-------------------------------------------------+\n")
  cat(paste(capture.output(resultTable(x, 5, "  ")), collapse = "\n"), "\n\n")
}
|
4c2aed07feddc067e97d25e68cda7c2aa12c8dfd | b471305a84799b631091683dda7b2a8f4ab2fe4f | /R/TwilightFree.R | 290f1e048b8fa088cfd509cd4f3377ca1deab79b | [] | no_license | ABindoff/TwilightFree | 3c168f099b8119cb05aafcf63ce7820339951415 | d65928df5367077c4abbb89f37f10c950f8d9bc0 | refs/heads/master | 2021-05-15T06:33:46.454401 | 2021-02-24T10:26:09 | 2021-02-24T10:26:09 | 113,830,244 | 9 | 1 | null | 2021-01-14T23:57:10 | 2017-12-11T08:05:52 | HTML | UTF-8 | R | false | false | 4,838 | r | TwilightFree.R | require(SGAT)
require(raster)
#' Specify a model for forwards-backwards estimation
#'
#' @details When an \code{sst} raster is supplied, this function assigns the
#'   slice-to-raster-layer lookup \code{indices} into the global environment
#'   (via \code{<<-}) because \code{logp0()} reads \code{indices} and
#'   \code{sst} as globals rather than receiving them as arguments.
#'
#' @param df data.frame containing `Light`, `Date`, and optionally `Temp` data
#' @param alpha hyperparameters for the noise (shading) assumption
#' @param beta hyperparameters for the movement assumption
#' @param dt optional parameter specifying the number of seconds in a segment (day)
#' @param threshold tag-specific value for luminance at twilight (obtained by calibration)
#' @param zenith solar zenith angle at twilight
#' @param deployed.at deployment location c(lon, lat) for first day of observation
#' @param retrieved.at retrieval location c(lon, lat) for last day of observation
#' @param fixd optional data.frame of fixed (known) locations containing `Date`, `Lon`, `Lat` (will overwrite deployed.at and retrieved.at locations if != NULL)
#' @param sst raster of SST data from NOAA OI SST
#' @importFrom stats dgamma dnorm median
#' @importFrom raster extract getZ
#' @export
#' @return a TwilightFree model object which can be fitted using SGAT::essie()
TwilightFree <- function(df,
                         alpha = c(1, 1/10),
                         beta = c(1, 1/4),
                         dt = NULL,
                         threshold = 5,
                         zenith = 96,
                         deployed.at = F,
                         retrieved.at = F,
                         fixd = NULL,
                         sst = NULL){
  if(is.null(df$Temp)){ ## fix bug, needs a $Temp column even if it's NA
    df$Temp <- NA
  }

  # Define segment by date
  seg <- floor((as.numeric(df$Date)- as.numeric(min(df$Date)))/(24*60*60))
  # Split into `slices`, dropping the (partial) first and last days.
  slices <- split(df,seg)
  slices <- slices[-c(1,length(slices))]

  # Earliest timestamp (numeric seconds since epoch) of each slice.
  # Replaces the original grow-in-a-loop over 1:length(slices[]), which
  # appended one element per iteration; vapply preallocates and is also safe
  # should slices ever be empty.
  dmin <- unname(vapply(slices, function(sl) as.numeric(min(sl$Date)), numeric(1)))
  dmin <- strptime(as.POSIXct(dmin, "GMT", origin = "1970-01-01"),
                   "%Y-%m-%d",
                   "GMT")

  ## sst raster from ncdf file at
  # https://www.esrl.noaa.gov/psd/repository/
  # (NOAA OI SST -> Weekly and Monthly -> sst.wkmean.*)
  indices <- NA
  if(!is.null(sst)){
    # NOTE: `<<-` deliberately publishes the lookup to the global environment
    # because logp0() (defined elsewhere in this file) reads `indices` and
    # `sst` as globals rather than as arguments. Do not localise this
    # assignment without also changing logp0().
    indices <<- .bincode(as.POSIXct(dmin), as.POSIXct(strptime(raster::getZ(sst), "%Y-%m-%d", "GMT"), "GMT"),
                         right = FALSE)
  }

  # fixed locations, if retrieved.at and deployed.at are supplied these will
  # be used unless fixd != NULL
  x0 <- matrix(0, length(slices), 2)
  x0[1,] <- deployed.at
  x0[length(slices),] <- retrieved.at
  fixed <- rep_len(c(as.logical(deployed.at[1L]),
                     logical(length(slices)-2),
                     as.logical(retrieved.at[1L])),
                   length.out = length(slices))

  # if a data.frame containing `Date` in %Y-%m-%d format, `Lon` and `Lat` is
  # supplied these will be utilised here (overriding the endpoints above)
  if(!is.null(fixd)) {
    slice_date <- lapply(slices, function(x) min(x$Date))
    slice_date <- as.vector(unlist(lapply(slice_date, function(x)
      as.character(strptime(x, format = "%Y-%m-%d")))))
    indx <- which(slice_date %in% fixd$Date)
    locs <- matrix(0, length(slices), 3)
    for (i in seq_along(indx)) {
      locs[indx[i], ] <- c(fixd$Lon[i], fixd$Lat[i], 1)
    }
    x0 <- locs[, 1:2]
    fixed <- as.logical(locs[, 3])
  }

  ## Times (hours) between observations
  time <- .POSIXct(sapply(slices,
                          function(d) mean(d$Date)), "GMT")
  if (is.null(dt))
    dt <- diff(as.numeric(time) / 3600)

  ## Contribution to log posterior from each x location
  logpk <- function(k, x) {
    n <- nrow(x)
    logl <- double(n)
    ss <- SGAT::solar(slices[[k]]$Date)
    obsDay <- (slices[[k]]$Light) >= threshold

    ## Loop over location
    for (i in seq_len(n)) {
      ## Compute for each x the time series of zeniths
      expDay <- SGAT::zenith(ss, x[i, 1], x[i, 2]) <= zenith
      ## comparison to the observed light -> is L=0 (ie logl=-Inf)
      if (any(obsDay & !expDay)) {
        logl[i] <- -Inf
      } else {
        count <- sum(expDay & !obsDay)
        logl[i] <- dgamma(count, alpha[1], alpha[2], log = TRUE)
      }
    }
    ## Return sum of likelihood + prior
    logl + logp0(k, x, slices)
  }

  ## Behavioural (movement) contribution to the log posterior
  logbk <- function(k, x1, x2) {
    spd <- pmax.int(SGAT::gcDist(x1, x2), 1e-06) / dt[k]
    dgamma(spd, beta[1L], beta[2L], log = TRUE)
  }

  list(
    logpk = logpk,
    logbk = logbk,
    fixed = fixed,
    x0 = x0,
    time = time,
    alpha = alpha,
    beta = beta,
    sst = sst
  )
}
#' Calculate the SST component of the log-posterior in a TwilightFree model.
#'
#' NOTE(review): this function resolves \code{sst} and \code{indices} from
#' the global environment (they are created via \code{<<-} inside
#' \code{TwilightFree()}), not from its arguments -- it only works after
#' \code{TwilightFree()} has been called with an \code{sst} raster. Confirm
#' this coupling is intended before refactoring.
#'
#' @param k Index of the day/segment being evaluated.
#' @param x Two-column matrix of candidate locations (lon, lat).
#' @param slices List of per-day data.frames (each must contain Temp).
#' @return Vector of log-densities, one per row of x; 0 when the day has no
#'   usable tag temperature.
logp0 <- function(k, x, slices) {
  # Wrap longitudes into [0, 360) -- presumably to match the NOAA OI SST
  # grid's coordinate convention; verify against the raster in use.
  x[, 1] <- x[, 1] %% 360
  # Daily tag temperature: median of the segment's Temp records.
  tt <- median(slices[[k]]$Temp, na.rm = TRUE)
  if (is.na(tt)) {
    0
  } else {
    # Gaussian log-likelihood of the tag temperature given the satellite SST
    # extracted at each candidate location (sd = 2).
    dnorm(tt, raster::extract(sst[[indices[k]]], x), 2, log = T)
  }
}
|
f84e927da0d7eb35060efb87483227c0030a2f39 | 37d46d358f9289b3099aad8c5066b3e87dc9de6e | /Black Friday Sales/Bivariate.R | d3c0ab56ae7358e9df73c1fa72c49ed147b3479a | [
"MIT"
] | permissive | sachinshubhams/Black-Friday-Sales | 297a0cd8398f0f599bb1eceb534484887be9628c | eb8dcf357dd2758e55ed615235090aa7a87f8e0a | refs/heads/main | 2023-03-08T10:40:32.994828 | 2021-02-19T23:56:55 | 2021-02-19T23:56:55 | 340,280,848 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,280 | r | Bivariate.R | frow_f <- fluidRow(
  # Left box: the rendered bivariate bar chart.
  box(
    title = "Bivariate Chart"
    ,status = "primary"
    ,solidHeader = TRUE,background = 'purple'
    ,collapsible = TRUE
    ,plotOutput("bar1", height = "500px",width = "500px")
  ),
  # Right box: variable pickers. NOTE(review): `data` must already exist in
  # the workspace when the UI is built; this script does not load it.
  box(title = "Variables in the data",status = "primary"
      ,solidHeader = TRUE,background = 'aqua'
      ,collapsible = TRUE,selectInput("x_axis","Select the value for x-axis",colnames(data),selected = ""),
      selectInput("yaxis","Select the value for y-axis",colnames(data),selected = "Purchase")
  ))
# Dashboard layout: a single sidebar entry leading to the bivariate tab.
ui<-shinyUI(
  dashboardPage(
    dashboardHeader(title = "BLACK FRIDAY SALES",titleWidth = 300),
    dashboardSidebar(
      sidebarMenu(id = 'sidebarmenu',
                  menuItem("Bivariate Plots",
                           icon = icon('bar-chart'),
                           tabName = 'chart1'
                  ))),
    dashboardBody(
      tabItems(
        tabItem("chart1",frow_f)
      )
    ),skin = 'red'
  )
)
# Server: mean of the chosen y variable per level of the chosen x variable,
# drawn as a bar chart.
server <- function(input, output,session){
  output$bar1<-renderPlot({
    bar1<-tapply(data[,input$yaxis], list(data[,input$x_axis]), mean)
    barplot(bar1,col = 'red',ylab = "Purchase Amount")
  })
}
shinyApp(ui, server)
0b6a42317df83fee5b6aabb075b84ca5a20ee223 | ecde3684aba9a09fb61bedf6b646e656a2ff2a13 | /Unit 4/qplot.R | d635d48588b400301f778234c66d66b0590417b4 | [] | no_license | AdarshMundra/R-Programing-Udemy | a565b2f2f1ab7037dc9da600614476cecba90936 | 2f1aa056e8fe75493fdaf5db618e07da52ee8794 | refs/heads/master | 2020-12-14T20:26:35.539351 | 2020-01-19T07:29:36 | 2020-01-19T07:29:36 | 234,858,674 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,159 | r | qplot.R | #code with Adarsh Mundra
# Interactive demo: read demographic data, explore it, then build qplots.
a<-read.csv(file.choose())
a
getwd()
rm(a)
# Re-read the same data by file name from the working directory.
a<-read.csv("DemographicData.csv")
#-------------------------------------------------------------#
a
nrow(a)# number of rows
ncol(a)# number of col
head(a)# head file
head(a, n=10)# head file of 10 dataset
tail(a)# tail file
tail(a,n=10)# tail file of 10 dataset
str(a) #structrue of dataset
summary(a) #summary of a
#------------------------------------------------------ Using the $ sign
a[3,3]
a[3,"Birth.rate"]
a$Internet.users #extract whole column
a$Internet.users[2]*a$Birth.rate[2]
a$Birth.rate[2]#extract specific cell in col
#=====================QPLOT
library("ggplot2")
qplot(data = a,x=Internet.users)
qplot(data = a,x=Income.Group,y=Birth.rate)
qplot(data = a,x=Income.Group,y=Birth.rate,size =10)
# I() inhibits interpretation: size becomes a literal point size instead of
# an aesthetic mapped to the constant 10.
qplot(data = a,x=Income.Group,y=Birth.rate,size=I(10))
qplot(data = a,x=Income.Group,y=Birth.rate,size= I(3)
      ,colour =I("RED "))
qplot(data = a,x=Income.Group,y=Birth.rate,geom = "boxplot")
qplot(data = a,x=Income.Group,y=Birth.rate)
qplot(data = a,x=Internet.users,y=Birth.rate,size=I(4))
qplot(data = a,y=Internet.users,x=Birth.rate,size=I(6),colour=Income.Group)
#-------------------------------------Create DataFrame
# NOTE(review): Countries_2012_Dataset, Codes_2012_Dataset and
# Regions_2012_Dataset must already exist in the workspace (e.g. loaded
# from an .RData file); this script does not create them.
mydf<- data.frame(Countries_2012_Dataset,Codes_2012_Dataset,Regions_2012_Dataset)
mydf
head(mydf)
colnames(mydf)<-c("Country","Code","Region")
#-------------------------------------Mergging DataFrame
head(a)
merg<- merge(a,mydf,by.x = "Country.Name",by.y = "Country")
head(merg)
# Second merge keyed on country code instead of name; the redundant Country
# column from the lookup is then dropped.
merg<- merge(a,mydf,by.x = "Country.Code",by.y = "Code")
merg$Country<-NULL
head(merg)
#-------------------------------------QQPLOT
qplot(data = merg,x=Internet.users,y=Birth.rate)
qplot(data = merg,x=Internet.users,y=Birth.rate,colour=Region)
#-------------------------------------Shape
#Shape
qplot(data = merg,x=Internet.users,y=Birth.rate,colour=Region,
      shape = I(18),size=I(6))
#TRanperemcy
qplot(data = merg,x=Internet.users,y=Birth.rate,colour=Region,size=I(5)
      , shape = I(18),alpha=.7)
#TITTLe
qplot(data = merg,x=Internet.users,y=Birth.rate,colour=Region,size=I(5)
      , shape = I(18),alpha=.06,main="BirthRate vs InternetUser")
|
137448ef32487a664d5d33c5020a5c312f836ae9 | bca63e5a36852745b285c801f0f1d66d79b63933 | /R Scripts/Scripts Hisam and I Worked on/EWMA Volatility.R | ae49f82efa768274a7e58c9889ef12bfbd0f7ac8 | [] | no_license | arkagogoldey/Finance_R_Files | 17201641c1ef05715bca8889dbfe7ff64cafe357 | 3b8b1fc5dd32448c7db637fc7306a7be50453710 | refs/heads/master | 2020-04-18T02:53:16.520896 | 2018-09-25T16:43:05 | 2018-09-25T16:43:05 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 540 | r | EWMA Volatility.R | #EWMA (Exponentially Weighted Moving Average Vol)
library(quantmod)
# Download Union Pacific (UNP) prices; getSymbols() creates UNP in the
# workspace.
getSymbols("UNP", from='2010-01-01')
# BUG FIX: head(x) originally appeared BEFORE x was defined and would error
# in a fresh session; define x first, then inspect it.
x<-UNP$UNP.Adjusted
head(x)
# EWMA (RiskMetrics-style) variance and volatility of log returns.
#
# x      : price series (e.g. an xts of adjusted closes)
# lambda : decay factor in (0, 1); 0.94 is the classic RiskMetrics daily value
#
# Returns a 1x2 matrix holding the EWMA Variance and Volatility.
EWMA<-function(x,lambda)
{
  returns<-Delt(x,type="log")   # log returns; the first element is NA
  return_sq<-returns^2
  n_obs <- nrow(as.matrix(x))
  # Weights (1-lambda)*lambda^k for k = 0..n-1, sorted ascending so the most
  # recent observation receives the largest weight.
  weights <- sort((1-lambda)*lambda^(seq_len(n_obs)-1), decreasing=FALSE)
  # NOTE(review): the leading NA return consumes the first (smallest) weight
  # before na.omit() drops that row, so the weights are shifted by one
  # observation relative to the returns -- confirm this is intended.
  product <- na.omit(as.matrix(weights*return_sq))
  Variance<-colSums(product)
  Volatility<-sqrt(Variance)
  # BUG FIX: the original ended on an assignment (final<-cbind(...)), which
  # returns its value only invisibly; return the result explicitly so a bare
  # call to EWMA() prints it.
  cbind(Variance,Volatility)
}
a<-EWMA(x,.94)
a
|
3fd20fc1a3a3485d9f1ca5cf53e5e16142767409 | a804ccc49e485d31f1432261678618da956df6e9 | /Ad-hocs/wordcloud_comparison.R | 49dd30a42a186f591417b8ecad189e5e4b718e8b | [] | no_license | mark-me/lyric_mining | b63d45a81f3a15332980e20050038dfc2e0d267e | 9932eff4ab7abe684b48ce1765272a04133abd3c | refs/heads/master | 2020-05-21T05:43:27.929771 | 2019-05-20T14:11:54 | 2019-05-20T14:11:54 | 185,927,171 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,396 | r | wordcloud_comparison.R | df_artist_lyrics <- df_lyrics %>%
filter(!is.na(lyric)) %>%
group_by(artist) %>%
summarise(lyric = paste0(lyric, collapse = " ")) %>%
ungroup() %>%
mutate(doc_id = row_number()) %>%
dplyr::select(doc_id, text = lyric, everything())
# Make factor of artists for modelling
levels_artists <- unique(df_artist_lyrics$artist)
df_artist_lyrics$artist <- factor(df_artist_lyrics$artist, levels = levels_artists)
# Convert df_source to a corpus: df_corpus
corpus_artist_lyrics <- Corpus(VectorSource(df_artist_lyrics$text))
# Clean corpus
corpus_artist_lyrics %<>%
tm_map(removeWords, c("-", "—","“", "‘","…", "NA", "character")) %>%
tm_map(content_transformer(tolower)) %>%
tm_map(content_transformer(removeNumbers)) %>%
tm_map(content_transformer(removePunctuation)) %>%
tm_map(removeWords, stopwords("english")) %>%
tm_map(content_transformer(stripWhitespace))
tdm_lyrics <- TermDocumentMatrix(corpus_lyrics)
tdm_lyrics = as.matrix(tdm_lyrics)
colnames(tdm_lyrics) <- levels_artists
dev.new(width = 1000, height = 1000, unit = "px")
comparison.cloud(tdm_lyrics, random.order=FALSE,
colors = c("aquamarine","darkgoldenrod","tomato", "aquamarine","darkgoldenrod","tomato"),
title.colors = c("aquamarine","darkgoldenrod","tomato", "aquamarine","darkgoldenrod","tomato"),
title.size=1, max.words=300)
|
c0ded228f470e8e39aff3d14f03b5b932c0090e4 | 044970cb5b2b45ba98de18d9e3b105dc9ce79849 | /man/MLeffort.Rd | f597ecce96f578dad8b0c2e9b08df2522978cace | [] | no_license | quang-huynh/MLZ | f1d1a8f293e09e1b6b813814ca4f27ad10a4e34d | 604045a7f5e8629ad7f79e2c7b2649916864af72 | refs/heads/master | 2022-05-07T23:03:56.165755 | 2022-03-31T06:15:26 | 2022-03-31T06:16:03 | 83,587,314 | 3 | 3 | null | 2018-05-14T18:11:50 | 2017-03-01T18:29:51 | R | UTF-8 | R | false | true | 2,048 | rd | MLeffort.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/estimation.R
\name{MLeffort}
\alias{MLeffort}
\title{Mean length with effort mortality estimator}
\usage{
MLeffort(MLZ_data, start, n_age, estimate.M = TRUE, log.par = FALSE,
eff_init = 0, n_season = 1L, obs_season = 1L, timing = 0,
figure = TRUE)
}
\arguments{
\item{MLZ_data}{An object of class \code{\linkS4class{MLZ_data}} containing mean lengths and
life history data of stock.}
\item{start}{A list of starting values. Names of start list must contain \code{q} and \code{M}.}
\item{n_age}{The number of ages above age tc in the model.}
\item{estimate.M}{If \code{TRUE}, natural mortality (M) will be estimated. Otherwise, the value of M
is obtained from slot \code{MLZ_data@M}.}
\item{log.par}{Whether parameters are estimated in logspace (\code{TRUE}) or untransformed space (\code{FALSE}).}
\item{eff_init}{The assumed equilibrium effort prior to the first year of the model (0 = virgin conditions).}
\item{n_season}{The number of seasons modeled in a year.}
\item{obs_season}{The season corresponding to the observed mean lengths.}
\item{timing}{The fraction of time (i.e., between 0 - 1) within \code{obs_season} that mean lengths are observed.}
\item{figure}{If \code{TRUE}, a call to \code{plot} of observed and predicted mean lengths will be produced.}
}
\value{
An object of class \code{\linkS4class{MLZ_model}}.
}
\description{
Estimator of fishing and natural mortality from a time series of mean length and effort data.
}
\examples{
\dontrun{
data(Nephrops)
Nephrops <- calc_ML(Nephrops, sample.size = FALSE)
res <- MLeffort(Nephrops, start = list(q = 0.1, M = 0.2),
n_age = 24, eff_init = Nephrops@Effort[1])
}
}
\references{
Then, A.Y, Hoenig, J.M, and Huynh, Q.C. In revision. Estimating fishing and natural
mortality rates, and catchability coefficient, from a series of observations on mean length and
fishing effort. ICES Journal of Marine Science.
}
|
bf69b60d8a100f1f72c84c22fbc8764d071738eb | 02db52e1ab4453e85f03c4d7dd19274626033dbd | /man/new_report.Rd | 2e87ed9ffdd31456729f53e29ffc1d006e8ac764 | [] | no_license | riverlee/reports | def177c335b880bc0345de4a6d4a2984ea07f3fa | 26ea8b52f2a5d4f92c70fa9101eac648aa432038 | refs/heads/master | 2021-01-15T21:03:15.405664 | 2013-08-30T18:07:03 | 2013-08-30T18:07:03 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,888 | rd | new_report.Rd | \name{new_report}
\alias{new_report}
\title{Report Template}
\usage{
new_report(report = "report",
template = getOption("temp.reports"),
bib.loc = getOption("bib.loc"),
name = getOption("name.reports"),
github.user = getOption("github.user"),
sources = getOption("sources.reports"), path = getwd(),
AN.xlsx = TRUE,
slidify = getOption("slidify.template"), open = FALSE,
...)
}
\arguments{
\item{report}{A character vector of length two or one:
(1) the main directory name and (2) sub directory names
(i.e., all the file contents will be imprinted with this
name). If the length of \code{report} is one this name
will be used as the main directory name and all sub
directories and files.}
\item{template}{A character string of the internal
reports template or an external path to a template in the
reports package style. This argument allows the user to
change the contents of the report directory that is
generated. See \code{templates} for more.}
\item{bib.loc}{Optional path to a .bib resource.}
\item{path}{The path to where the project should be
created. Default is the current working directory.}
\item{name}{A character string of the user's name to be
used on the report.}
\item{github.user}{GitHub user name (character string).}
\item{sources}{A vector of path(s) to other scripts to be
sourced in the report project upon startup (adds this
location to the report project's \code{.Rprofile}).}
\item{AN.xlsx}{logical. If \code{TRUE} the article notes
(AN) will be in .xlsx format. If \code{FALSE} the
document will be a .csv file.}
\item{slidify}{The template to be used in the
PRESENTATION .Rmd. This can be one of the types from
\code{slidify_templates} or a path to an .Rmd file. This
argument will be overrode if a custom reports template is
supplied with an .Rmd file in the inst directory named
slidify.Rmd (\code{/inst/slidify.Rmd}).}
\item{open}{logical. If \code{TRUE} the project will be
opened in RStudio.}
\item{\ldots}{Other arguments passed to
\code{\link[slidify]{author}}.}
}
\value{
Creates a report template.
}
\description{
Generate a report/paper template to increase efficiency.
}
\section{Suggestion}{
The user may want to set \code{\link[base]{options}} for
\code{bib.loc}, \code{github.user}, \code{name.reports}
\code{sources.reports},\code{slidify.template} and
\code{reveraljs.loc} in the user's primary
\code{.Rprofile}: \enumerate{ \item{\bold{bib.loc} - The
path to the users primary bibliography}
\item{\bold{name.reports} - The name to use on reports}
\item{\bold{temp.reports} - The primary template to use
to generate reports (see \code{template})}
\item{\bold{github.user} - GitHub user name}
\item{\bold{speed.temp} - A speed dial like interface
that allows the template argument to take a numeric
arguement. Setting this option takes the form of:\cr
\code{options(speed.temp=list(`1`="wordpress_rmd",
`2`="basic_rmd"))}} \item{\bold{sources.reports} -
Path(s) to additional files/scripts that should be
included to be sourced in the project startup}
\item{\bold{slidify.template} - Path to, or defualt, .Rmd
file tempalte for use in as the .Rmd used in the slidify
presentations (see \code{slidify_templates} for possible
non-path arguments)} }
}
\section{Additional Guide}{
Introductory video
\url{http://www.youtube.com/watch?v=ArHQjQyIS70}
}
\examples{
## new_report()
}
\references{
\href{https://github.com/ramnathv/slidifyExamples/tree/gh-pages/examples}{slidify
examples}
}
\seealso{
\code{\link[reports]{doc_temp}},
\code{\link[reports]{presentation}},
\code{\link[reports]{templates}},
\code{\link[reports]{slidify_templates}},
\code{\link[slidify]{author}}
\href{https://github.com/hakimel/reveal.js/}{Installation
section of reveal.js GitHub}
}
|
be3df5cb9f85cc9a89c6a18eda4f29c7531647bd | bf46cf1cee1c44300bccb82c19d4e0599c60583f | /conditions.R | 92fc6fa4106b613720810b3d9bbdeeecb0839c0a | [] | no_license | juhnowski/r_lesson | e13183a8f72fce14db0e139eb22b72890ed00983 | 5f59b07a9dfcb0d553d8cb950e055d8b2951a330 | refs/heads/master | 2020-04-21T23:17:35.585155 | 2019-02-11T00:40:56 | 2019-02-11T00:40:56 | 169,942,137 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 329 | r | conditions.R | var1 = 5
var2 = 35
# var1 + var2 is 40 here, so no threshold branch matches and "less" prints.
if((var1+var2)>100){
  print(">100")
} else if ((var1+var2)>75){
  print(">75")
} else if ((var1+var2)>50){
  print(">50")
} else {
  print("less")
}
# A numeric selector makes switch() pick an alternative by POSITION, so
# switch(1, ...) evaluates the first alternative and prints "one".
switch(1,
       "1" = print("one"),
       "2" = print("two")
)
# A character selector matching no name falls through to the single unnamed
# alternative (the default), so this prints "default".
switch("%",
       "1" = print("one"),
       "2" = print("two"),
       print("default")
)
b94ba171e2279c6fc79afaf351e30a59fcddb4c4 | 8d1180a293cf536c4e4aa8ea5bd765ebebdd7f48 | /tests/testthat/empdist.R | 0c455b73c7689b587eab66839ee0cc267aea2589 | [] | no_license | TaoYang-dev/mTDR | 6ce029223029372fb39e08fb760f942d1f057a3d | 5edad2fad3c31724c63ff011c0c9513ced3651d6 | refs/heads/master | 2021-09-06T05:08:24.761722 | 2018-02-02T16:21:56 | 2018-02-02T16:21:56 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 326 | r | empdist.R | library(mTDR)
context("empdist")

test_that("empdist() returns cumulative density.", {
  data("chipseq")
  u <- empdist(chipseq$R1, chipseq$R2)
  expect_is(u, "matrix")
  # BUG FIX: the original also asserted expect_equal(length(pd), nrow(chipseq)),
  # but `pd` is never defined in this file and the expectation would error at
  # run time; the undefined reference has been removed.
  expect_equal(nrow(u), nrow(chipseq))
  expect_equal(ncol(u), 2)
  expect_true(all(u >= 0 & u <= 1))
})
|
d03618cd843eae337131bdf597dcdf18eeefbd2a | 463b2d717b0548bd969b236014e843372db5a795 | /BusulfanPackages.R | a422b74c595ed77964366a1f7278359f0ab345a3 | [] | no_license | Zylatis/BusulfanOriginal | dafc4a27f33c428a20db82c7c9650cffbf3db198 | a10c952e316f085d49d056d5e9c938d7294cd527 | refs/heads/master | 2016-08-12T08:52:55.923169 | 2016-04-04T16:38:35 | 2016-04-04T16:38:35 | 55,422,896 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 201 | r | BusulfanPackages.R | require(GenSA) #library for simualted annealing
# Attach the packages used by the Busulfan scripts. library() (rather than
# require()) is used so a missing package fails loudly at load time instead
# of returning FALSE and surfacing later as an obscure error.
library(foreach)
library(xlsx)        # for exporting
library(iterators)
library(parallel)
library(doParallel)
library(ggplot2)
library(gridExtra)
library(gdata)
|
8d38325b2bc42e8a506538b990c26faa9528c02e | 30b4837c2e6954c0e589f8cbf48aacdd24204361 | /config.R | e508d51d63f4f94fc47d9c78d02b655ed3355ae6 | [] | no_license | svsgvarma/RGDB | 4cbe6ae2d7e42610a4061efc802436e3e6011512 | ceed1d88bb35b9862c878c5a74506d25eb477236 | refs/heads/main | 2023-01-01T03:37:55.592375 | 2020-10-25T15:31:15 | 2020-10-25T15:31:15 | 306,975,624 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 283 | r | config.R | print("Run config.R")
#install relied packages
source("global.R")
#init input and library
# NOTE(review): rm(list = ls()) runs *after* source("global.R"), so anything
# global.R defined in the global environment is discarded here — confirm
# this ordering is intentional.
rm(list=ls())
library(shiny)
library(dplyr)
library(data.table)
library(DT)
library(readr)
#change if necessary
# Location and file suffix of the app's input tables.
dataDir="./InData/"
dataSuffix=".tsv"
#global variables if any
#maxRect=2500
ab87cac000a38d2533deb11959699f240712da5d | ec2e6a32bdf14f9d0cf19b34429e41eeeecf6328 | /Spatial Interaction Models /Spatial Interaction Modelling for Dummies.R | 979a3b6cbdd69f615bcc45e4d493a245cb9a5134 | [] | no_license | YX577/Urban-Simulation | 199efa70985dea1f7ef7b9987dbe4719664d9811 | 8ffcf2d7deb035b880d41d00ed1c7e2f442cf554 | refs/heads/master | 2022-11-20T01:36:37.629944 | 2020-07-07T21:45:15 | 2020-07-07T21:45:15 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 10,461 | r | Spatial Interaction Modelling for Dummies.R | # http://rpubs.com/adam_dennett/257231
library(sp)
library(MASS)
library(reshape2)
library(geojsonio)
library(rgdal)
library(downloader)
library(maptools)
library(dplyr)
library(broom)
library(stplanr)
library(ggplot2)
library(leaflet)
#Fetch a GeoJson of some district-level boundaries from the ONS Geoportal. First add the URL to an object
EW <- geojson_read("http://geoportal.statistics.gov.uk/datasets/8edafbe3276d4b56aec60991cbddda50_2.geojson",
what = "sp")
#have a quick look at the top of the data file
head(EW@data)
#pull out london using grep and the regex wildcard for'start of the string' (^) to to look for the bit of
#the district code that relates to London (E09) from the 'lad15cd' column in the data slot of our spatial polygons dataframe
London <- EW[grep("^E09",EW@data$lad15cd),]
#plot it
plot(London)
#and have a look under the bonnet
summary(London)
#CALCULATING A DISTANCE MATRIX
#boundaries we have are not in British National Grid - the bit that says proj4string tells me
#that we are in WGS84 or using latitude and longitude coordinates. We need to change this to
#British National Grid so our distances are in metres and not decimal degrees, then do everything
#we need to do to generate a distance matrix.
#first transfrom to BNG - this will be important for calculating distances using spTransform
BNG = "+init=epsg:27700"
LondonBNG <- spTransform(London, BNG)
#now, order by borough code - *this step will be imporant later on*
LondonBNG <- LondonBNG[order(LondonBNG$lad15cd),]
#now use spDists to generate a big distance matrix of all distances between boroughs in London
dist <- spDists(LondonBNG)
#melt this matrix into a list of origin/destination pairs using melt. Melt in in the reshape2 package. Reshape2, dplyr and ggplot, together, are some of the best packages in R, so if you are not familiar with them, get googling and your life will be much better!
distPair <- melt(dist)
# FLOW DATA
#read in your London Commuting Data
cdata <- read.csv("https://www.dropbox.com/s/7c1fi1txbvhdqby/LondonCommuting2001.csv?raw=1")
#read in a lookup table for translating between old borough codes and new borough codes
CodeLookup <- read.csv("https://www.dropbox.com/s/h8mpvnepdkwa1ac/CodeLookup.csv?raw=1")
#read in some population and income data
popincome <- read.csv("https://www.dropbox.com/s/84z22a4wo3x2p86/popincome.csv?raw=1")
#now merge these supplimentary data into your flow data dataframe
cdata$OrigCodeNew <- CodeLookup$NewCode[match(cdata$OrigCode, CodeLookup$OldCode)]
cdata$DestCodeNew <- CodeLookup$NewCode[match(cdata$DestCode, CodeLookup$OldCode)]
cdata$vi1_origpop <- popincome$pop[match(cdata$OrigCodeNew, popincome$code)]
cdata$vi2_origsal <- popincome$med_income[match(cdata$OrigCodeNew, popincome$code)]
cdata$wj1_destpop <- popincome$pop[match(cdata$DestCodeNew, popincome$code)]
cdata$wj2_destsal <- popincome$med_income[match(cdata$DestCodeNew, popincome$code)]
#Data needs to be ordered by borough code, if it's not, we will run into problems when
#we try to merge our distance data back in later, so to make sure, we can arrange by orign
#and then destination using dplyr's 'arrange' function
cdata <- arrange(cdata, OrigCodeNew, DestCodeNew)
#First create a new total column which excludes intra-borough flow totals (well sets them to a very very small number for reasons you will see later...)
cdata$TotalNoIntra <- ifelse(cdata$OrigCode == cdata$DestCode,0,cdata$Total)
cdata$offset <- ifelse(cdata$OrigCode == cdata$DestCode,0.0000000001,1)
# now add the distance column into the dataframe
cdata$dist <- distPair$value
head(cdata)
# to make this demonstration even easier, let’s just select a small subset of these
# flows (we can come back to the whole dataset later on
#We'll just use the first 7 boroughs by code, so first, create a vector of these 7 to match with our data
toMatch<-c("00AA", "00AB", "00AC", "00AD", "00AE", "00AF", "00AG")
#subset the data by the 7 sample boroughs
#first the origins
cdatasub <- cdata[grep(paste(toMatch,collapse = "|"), cdata$OrigCode),]
#then the destinations
cdatasub <- cdatasub[grep(paste(toMatch,collapse = "|"), cdata$DestCode),]
#now chop out the intra-borough flows
cdatasub <- cdatasub[cdatasub$OrigCode!=cdatasub$DestCode,]
#now unfortunately if you look at the file, for some reason the grep process has left a lot of empty data cells in the dataframe, so let's just chop out everything after the 7*7 - 7 (42) pairs we are interested in...
cdatasub <- cdatasub[1:42,]
#now re-order so that OrigCodeNew, DestCodeNew and TotalNoIntra are the first three columns *note that you have to be explicit about the select function in the dplyr package as MASS also has a 'select' function and R will try and use this by default. We can be explict by using the syntax package::function
cdatasub <- dplyr::select(cdatasub, OrigCodeNew, DestCodeNew, Total, everything())
# re-order so that 'lad15cd' is the first column in LondonBNG
# HUSSEIN
library(sf)
LondonBNG_sf <- st_as_sf(LondonBNG)
# re-order so that 'lad15cd' is the first column in LondonBNG - OTHERWISE od2line WON'T WORK
LondonBNG_sf <- dplyr::select(LondonBNG_sf, lad15cd, everything())
# convert back to sp
LondonBNG <- as(LondonBNG_sf, 'Spatial')
# End HUSSEIN
#use the od2line function from Robin Lovelace's excellent stplanr package
travel_network <- od2line(flow = cdatasub, zones = LondonBNG)
#and set the line widths to some sensible value according to the flow
w <- cdatasub$Total / max(cdatasub$Total) * 10
#now plot it...
plot(travel_network, lwd = w)
plot(LondonBNG, add=T)
# leaflet map
#transform to wgs84
travel_networkwgs <- spTransform(travel_network, "+init=epsg:4326")
#plot in leaflet
leaflet() %>% addTiles() %>% addPolylines(data = travel_networkwgs)
#now we can create pivot table to turn paired list into matrix (and compute the margins as well)
cdatasubmat <- dcast(cdatasub, Orig ~ Dest, sum, value.var = "Total", margins=c("Orig", "Dest"))
cdatasubmat
# MODELLING
#First plot the commuter flows against distance and then fit a model line with a ^-2 parameter
# -2 is the parameter used for Newton's Gravity Model. We are just using it as a starting point
plot1 <- qplot(cdata$dist, cdata$Total)
#and now the model fit...
plot1 + stat_function(fun=function(x)x^-2, geom="line", aes(colour="^-2"))
#now, what about the origin and destination data...
plot2 <- qplot(cdata$vi1_origpop, cdata$Total)
plot2 + stat_function(fun=function(x)x^1, geom="line", aes(colour="^1"))
plot3 <- qplot(cdata$wj2_destsal, cdata$Total)
plot3 + stat_function(fun=function(x)x^1, geom="line", aes(colour="^1"))
#OK, so it looks like we’re not far off (well, destination salary doesn’t look too promising as a predictor,
#but we’ll see how we get on…), so let’s see what flow estimates with these starting parameters look like.
#set up some variables to hold our parameter values in:
mu <- 1
alpha <- 1
beta <- -2
k <- 1
T2 <- sum(cdatasub$Total)
#Now let’s create some flow estimates using Equation 2 above… Begin by applying the parameters to the variables:
vi1_mu <- cdatasub$vi1_origpop^mu
wj2_alpha <- cdatasub$wj2_destsal^alpha
dist_beta <- cdatasub$dist^beta
T1 <- vi1_mu*wj2_alpha*dist_beta
k <- T2/sum(T1) #balancing parameter, ensures total flow matches reality
#run the model and store all of the new flow estimates in a new column in the dataframe
cdatasub$unconstrainedEst1 <- round(k*vi1_mu*wj2_alpha*dist_beta,0)
#check that the sum of these estimates makes sense
sum(cdatasub$unconstrainedEst1)
#turn it into a little matrix and have a look at your handy work
cdatasubmat1 <- dcast(cdatasub, Orig ~ Dest, sum, value.var = "unconstrainedEst1", margins=c("Orig", "Dest"))
cdatasubmat1
# How do the flow estimates compare with the actual flows? Eyeballing works, but we need something more mathy
# TESTING THE “GOODNESS-OF-FIT”.
# METHOD 1: R-Squared
# Goodness-of-fit: squared Pearson correlation between observed and
# estimated flows (1 = perfect linear fit).
CalcRSquared <- function(observed, estimated) {
  cor(observed, estimated)^2
}
CalcRSquared(cdatasub$Total,cdatasub$unconstrainedEst1)
# our model accounts for about 51% of the variation of flows in the system. Not bad, but not brilliant either.
# METHOD 2: RMSE
# Root-mean-square error of the flow estimates, rounded to 3 decimals
# (0 = perfect fit; same units as the flows themselves).
CalcRMSE <- function(observed, estimated) {
  mse <- mean((observed - estimated)^2)
  round(sqrt(mse), 3)
}
CalcRMSE(cdatasub$Total,cdatasub$unconstrainedEst1)
# The closer to 0 the RMSE value, the better the model. (Now it is 2503...let's try to improve it)
# POISSON REGRESSION - To Calibrate
# Flow distribution
qplot(cdata$Total) + geom_histogram() # If it looks like Poisson, and it quacks like Poisson...
qplot(log(dist), log(Total), data=cdata) + geom_smooth(method = lm)
#run the unconstrained model
uncosim <- glm(Total ~ log(vi1_origpop)+log(wj2_destsal)+log(dist), na.action = na.exclude, family = poisson(link = "log"), data = cdatasub)
summary(uncosim)
# Calibrated values
# k (intercept) = -15.631802
# μ = 1.747997
# α = 1.642331
# β = -1.411889
# Calculate Flow Estimates Using Calibrated Coefficients
#first asign the parameter values from the model to the appropriate variables
k <- uncosim$coefficients[1]
mu <- uncosim$coefficients[2]
alpha <- uncosim$coefficients[3]
beta <- -uncosim$coefficients[4]
#now plug everything back into the Equation 6 model... (be careful with the positive and negative signing of the parameters as the beta parameter may not have been saved as negative so will need to force negative)
cdatasub$unconstrainedEst2 <- exp(k+(mu*log(cdatasub$vi1_origpop))+(alpha*log(cdatasub$wj2_destsal))-(beta*log(cdatasub$dist)))
#which is exactly the same as this...
cdatasub$unconstrainedEst2 <- (exp(k)*exp(mu*log(cdatasub$vi1_origpop))*exp(alpha*log(cdatasub$wj2_destsal))*exp(-beta*log(cdatasub$dist)))
#and of course, being R, there is an even easier way of doing this...
cdatasub$fitted <- fitted(uncosim)
#run the model and store all of the new flow estimates in a new column in the dataframe
cdatasub$unconstrainedEst2 <- round(cdatasub$unconstrainedEst2,0)
sum(cdatasub$unconstrainedEst2)
#turn it into a little matrix and have a look at your handy work
cdatasubmat2 <- dcast(cdatasub, Orig ~ Dest, sum, value.var = "unconstrainedEst2", margins=c("Orig", "Dest"))
cdatasubmat2
# And the $1,000,000 question - has calibrating the parameters improved the model…?
CalcRSquared(cdatasub$Total,cdatasub$unconstrainedEst2)
CalcRMSE(cdatasub$Total,cdatasub$unconstrainedEst2)
|
ae544cd1dc03b7d4ca537a9ef047db2ad83a8708 | b695598eca6723476811d8898be2a50bc6d9250c | /NLPSENTfiles/R_files/AMTD & AmeriTrade Term.R | d719a24a90db2a8c247ba5791cc9701c36960e46 | [] | no_license | ATCUWgithub/GoogleTrends | 0f127363a4159f14d0deb2349cdf9352ebb4d4d6 | 878955d9af0bf7c74f415e4cfc6778115fb23278 | refs/heads/master | 2020-12-27T10:39:37.409911 | 2020-03-05T04:32:48 | 2020-03-05T04:32:48 | 237,873,265 | 1 | 1 | null | 2020-02-21T00:22:22 | 2020-02-03T02:53:48 | null | UTF-8 | R | false | false | 5,815 | r | AMTD & AmeriTrade Term.R | library('quantmod')
library('corrplot')
getSymbols(c("AMTD"), from="2015-01-19", to="2020-01-17", src="yahoo", periodicity = 'weekly')
data = read.csv('AMTD Trend.csv', header = T)
AMTDT = data[,c("Week", "TDAmeritrade")]
##Earnings
AMTD[53]
AMTDT[223,]
x = c(2, 15, 28, 41, 54, 67, 80, 93, 106, 119, 132, 145, 158, 171, 184, 197, 210, 223, 236, 249, 261)
AMTDC = AMTD$AMTD.Close[x] ##Quarterly Dates
AMTDO = AMTD$AMTD.Open[x]
AMTDD = (AMTDC - AMTDO)/(AMTDO)
AMTDD #Move one week after earnings result
AMTDTE = c() #Trend Sums
AMTDTE[1] = sum(AMTDT$TDAmeritrade[1:x[1]])
AMTDTE[2] = sum(AMTDT$TDAmeritrade[x[1]:x[2]])
AMTDTE[3] = sum(AMTDT$TDAmeritrade[x[2]:x[3]])
AMTDTE[4] = sum(AMTDT$TDAmeritrade[x[3]:x[4]])
AMTDTE[5] = sum(AMTDT$TDAmeritrade[x[4]:x[5]])
AMTDTE[6] = sum(AMTDT$TDAmeritrade[x[5]:x[6]])
AMTDTE[7] = sum(AMTDT$TDAmeritrade[x[6]:x[7]])
AMTDTE[8] = sum(AMTDT$TDAmeritrade[x[7]:x[8]])
AMTDTE[9] = sum(AMTDT$TDAmeritrade[x[8]:x[9]])
AMTDTE[10] = sum(AMTDT$TDAmeritrade[x[9]:x[10]])
AMTDTE[11] = sum(AMTDT$TDAmeritrade[x[10]:x[11]])
AMTDTE[12] = sum(AMTDT$TDAmeritrade[x[11]:x[12]])
AMTDTE[13] = sum(AMTDT$TDAmeritrade[x[12]:x[13]])
AMTDTE[14] = sum(AMTDT$TDAmeritrade[x[13]:x[14]])
AMTDTE[15] = sum(AMTDT$TDAmeritrade[x[14]:x[15]])
AMTDTE[16] = sum(AMTDT$TDAmeritrade[x[15]:x[16]])
AMTDTE[17] = sum(AMTDT$TDAmeritrade[x[16]:x[17]])
AMTDTE[18] = sum(AMTDT$TDAmeritrade[x[17]:x[18]])
AMTDTE[19] = sum(AMTDT$TDAmeritrade[x[18]:x[19]])
AMTDTE[20] = sum(AMTDT$TDAmeritrade[x[19]:x[20]])
AMTDTE[21] = sum(AMTDT$TDAmeritrade[x[20]:x[21]])
##Breaking down by quarter Q1
y = c(1, 5, 9, 13, 17, 21)
AMTDQ1D = AMTDD[y]
AMTDTQ1 = AMTDTE[y]
AMTDTQ1
AMTDQ1D2 = AMTDQ1D[2:5]
AMTDTQ10.log = diff(as.vector(log(AMTDTQ1)))
length(AMTDTQ10.log)
AMTDTQ1.log = AMTDTQ10.log[1:4]
AMTDQ1D2[1] ##price change on 1st event,
AMTDTQ1.log[1] ##Trends change for second event - from 1st event
fitQ1L <- lm(AMTDQ1D2 ~ AMTDTQ1.log)
summary(fitQ1L)
ratesALQ1 <- data.frame(AMTDQ1D2, AMTDTQ1.log)
corrplot.mixed(cor(ratesALQ1), upper = "ellipse")
fitQ1L
# Linear Q1 response: hard-codes the slope/intercept printed by fitQ1L
# above — expected price move for a given log change in trend volume.
Q1FuncL <- function(x) {
  -0.04138 * x - 0.02034
}
AMTDTE[21]
AMTDTE[17]
AMTDTEV = c(AMTDTE[21], AMTDTE[17])
AMTDTENOW = diff(as.vector(log(AMTDTEV)))
AMTDTENOW
val = AMTDTENOW
val
Q1FuncL(val)
AMTDQ1D2
AMTDTQ10.log
##Breaking down by quarter Q2
y = c(4, 8, 12, 16, 20)
AMTDQ1D = AMTDD[y]
AMTDTQ2 = AMTDTE[y]
AMTDQ2D2 = AMTDQ1D[2:5]
# Was diff(log(AMTDTQ1)): AMTDTQ1 is the Q1 trend vector (length 6), whose
# diff has 5 elements and cannot be paired with the 4 Q2 returns (lm would
# error on "variable lengths differ"). Use the Q2 trend sums computed above.
AMTDTQ2.log = diff(as.vector(log(AMTDTQ2)))
fitQ2L <- lm(AMTDQ2D2 ~ AMTDTQ2.log)
summary(fitQ2L)
ratesALQ2 <- data.frame(AMTDQ2D2, AMTDTQ2.log)
corrplot.mixed(cor(ratesALQ2), upper = "ellipse")
fitQ2L
# Linear model read off fitQ2L's coefficients: expected Q2 price move for a
# given log change in search-trend volume.
Q2FuncL <- function(x) {
  -0.003427 * x + 0.049751
}
AMTDTE[20]
AMTDTE[16]
AMTDTEV = c(AMTDTE[20], AMTDTE[16])
AMTDTENOW = diff(as.vector(log(AMTDTEV)))
AMTDTENOW
Q2FuncL(AMTDTENOW)
AMTDQ2D2[4]
##2014 - 2019 Q1 Analysis Data Monthly
##0 = earnings 1/21/2014
## sum = 1, 2, 3, 4 months
##1 = earnings 4/23/2014
## sum = 4,5,6,7 months
getSymbols(c("AMTD"), from="2014-01-19", to="2019-01-18", src="yahoo", periodicity = 'weekly')
data2 = read.csv('AMTD Trends 2014-2019.csv', header = T)
AMTDT2 = data2[,c("Week", "TDAmeritrade")]
length(AMTDT2$Week)
AMTDT2$Week[41]
x = c(1, 14, 27, 40, 53, 66, 79, 92, 105, 118, 131, 144, 157, 170, 183, 196, 209, 222, 235, 248, 260)
## 13
length(x)
xE = c(1, 14, 27, 41, 53, 66, 79, 93, 105, 118, 131, 145, 157, 170, 183, 197, 210, 223, 236, 249, 261)
trendSum = c()
trendSum[1] = sum(AMTDT2$TDAmeritrade[x[1]:x[2]])
trendSum[2] = sum(AMTDT2$TDAmeritrade[x[2]:x[3]])
trendSum[3] = sum(AMTDT2$TDAmeritrade[x[3]:x[4]])
trendSum[4] = sum(AMTDT2$TDAmeritrade[x[4]:x[5]])
trendSum[5] = sum(AMTDT2$TDAmeritrade[x[5]:x[6]])
trendSum[6] = sum(AMTDT2$TDAmeritrade[x[6]:x[7]])
trendSum[7] = sum(AMTDT2$TDAmeritrade[x[7]:x[8]])
trendSum[8] = sum(AMTDT2$TDAmeritrade[x[8]:x[9]])
trendSum[9] = sum(AMTDT2$TDAmeritrade[x[9]:x[10]])
trendSum[10] = sum(AMTDT2$TDAmeritrade[x[10]:x[11]])
trendSum[11] = sum(AMTDT2$TDAmeritrade[x[11]:x[12]])
trendSum[12] = sum(AMTDT2$TDAmeritrade[x[12]:x[13]])
trendSum[13] = sum(AMTDT2$TDAmeritrade[x[13]:x[14]])
trendSum[14] = sum(AMTDT2$TDAmeritrade[x[14]:x[15]])
trendSum[15] = sum(AMTDT2$TDAmeritrade[x[15]:x[16]])
trendSum[16] = sum(AMTDT2$TDAmeritrade[x[16]:x[17]])
trendSum[17] = sum(AMTDT2$TDAmeritrade[x[17]:x[18]])
trendSum[18] = sum(AMTDT2$TDAmeritrade[x[18]:x[19]])
trendSum[19] = sum(AMTDT2$TDAmeritrade[x[19]:x[20]])
trendSum[20] = sum(AMTDT2$TDAmeritrade[x[20]:x[21]])
AMTDC2 = AMTD$AMTD.Close[xE] ##Quarterly Dates
AMTDO2 = AMTD$AMTD.Open[xE]
AMTDD2 = (AMTDC2 - AMTDO2)/(AMTDO2)
##Breaking down by quarter Q1
y = c(4,8,12,16,20)
yE = c(5,9,13,17,21)
AMTDQ1D2 = AMTDD2[yE]
AMTDTrendQ1 = trendSum[y]
AMTDTrendQ1
AMTDQ1D2 = AMTDQ1D2[2:4]
AMTDQ1D2
AMTDTQ10.log = diff(as.vector(log(AMTDTrendQ1)))
AMTDTQ10.log
AMTDTQ10.log = AMTDTQ10.log[1:3]
fitQ1LY <- lm(AMTDQ1D2$AMTD.Close ~ AMTDTQ10.log)
summary(fitQ1LY)
ratesALQ2 <- data.frame(AMTDQ1D2, AMTDTQ10.log)
corrplot.mixed(cor(ratesALQ2), upper = "ellipse")
##lfit=loess(AMTDQ1D2$AMTD.Close~AMTDTQ10.log + AMTDTQ10.log, control = loess.control(surface = "direct"))
##Yh = predict(lfit, 0.08712293)
fitQ1LY
Q1FuncL <- function(x){
y = 0.02612*x -0.01037
y
}
Q1FuncL(0.08712293)
Q1FuncL(0.297)
## Breaking Down by Q2
y = c(1,5,9,13,17)
yE = c(2,6,10,14,18)
AMTDQ2D2 = AMTDD2[yE]
AMTDTrendQ2 = trendSum[y]
AMTDTrendQ2
# Was AMTDQ2D[2:4]: AMTDQ2D is never defined in this section, so the script
# stopped here. Mirror the parallel Q1 block above and drop the first
# observation of the Q2 price moves.
AMTDQ2D2 = AMTDQ2D2[2:4]
AMTDQ2D2
# Was log(AMTDTrendQ1): use the Q2 trend sums computed above (copy-paste
# fix, matching the Q1 section).
AMTDTQ10.log = diff(as.vector(log(AMTDTrendQ2)))
AMTDTQ10.log
AMTDTQ10.log = AMTDTQ10.log[1:3]
# Regress the Q2 price moves (was AMTDQ1D2) on the Q2 log-trend changes.
fitQ2L <- lm(AMTDQ2D2$AMTD.Close ~ AMTDTQ10.log)
summary(fitQ2L)
ratesALQ2 <- data.frame(AMTDQ2D2, AMTDTQ10.log)
corrplot.mixed(cor(ratesALQ2), upper = "ellipse")
fitQ1LY
Q1FuncL <- function(x){
y = 0.0678*x -0.01183
y
}
Q1FuncL(0.08712293)
# Quadratic fit of the Q1 price response to the log trend change
# (coefficients hard-coded from the fit above).
Q1FuncPoly <- function(x) {
  -0.1442 * x^2 + 0.0382 * x - 0.0037
}
Q1FuncPoly(0.08712293)
|
499b9a1cb2c35261fc3b72cc3db8436db86af371 | 6daeb33a35fd354502e1c23e977355295eef6f6c | /man/approx_dt.Rd | fe702eefbc104c8b8127b1b83e277e9fdaf3e998 | [] | no_license | pik-piam/rmndt | 2642f3b2703b148f37bd942b3b96ae7a8a0bbbbc | f7b0704d78f2058c690885726247c703d9677277 | refs/heads/master | 2023-07-10T13:14:27.388585 | 2023-07-10T09:32:59 | 2023-07-10T09:32:59 | 243,305,595 | 0 | 3 | null | 2023-07-10T09:33:00 | 2020-02-26T16:07:29 | R | UTF-8 | R | false | true | 2,197 | rd | approx_dt.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/approx.R
\name{approx_dt}
\alias{approx_dt}
\title{Approximate missing values in a data.table.}
\usage{
approx_dt(
dt,
xdata,
xcol,
ycol,
idxcols = NULL,
keepna = FALSE,
extrapolate = FALSE
)
}
\arguments{
\item{dt}{a data.table.}
\item{xdata}{the range to interpolate to. This is the range the result will have along the dimension `xcol`.}
\item{xcol}{name of the column for interpolation.}
\item{ycol}{name of the column that contains the value to be interpolated.}
\item{idxcols}{columns that identify a row (besides xcol), i.e., the remaining index dimensions.}
\item{keepna}{keep NA values for rows that can not be interpolated (since they are outside of [min(xcol), max(xcol)]), default is FALSE.}
\item{extrapolate}{use the closest values to fill `ycol` outside of the interpolation domain, default is FALSE. This will also work if there is only one value along `ycol`, i.e., no interpolation is taking place.}
}
\value{
a data.table with the range given by `xdata` along `xcol`. Columns not given in `idxcols` will be kept but NAs will appear on extrapolated and interpolated rows.
}
\description{
Similar to, but not quite like, `stats::approx`.
Does only support constant extrapolation and linear interpolation.
The resulting `data.table` only contains the range provided by `xdata` along `xcol`.
Without extrapolation, `xcol` in the resulting `data.table` may not
cover the range given by `xdata`.
}
\examples{
dt <- as.data.table(ChickWeight)
## delete all values but 1
dt[Chick == 1 & Time > 0, weight := NA]
## delete all values but 2
dt[Chick == 2 & Time > 2, weight := NA]
## extrapolation from 1 value
approx_dt(dt, 0:21, "Time", "weight", idxcols=c("Chick", "Diet"), extrapolate = TRUE)[Chick == 1]
## extrapolation and interpolation
approx_dt(dt, 0:21, "Time", "weight", idxcols=c("Chick", "Diet"), extrapolate = TRUE)[Chick == 2]
## column not in idxcols
approx_dt(dt, 0:21, "Time", "weight", idxcols="Chick", extrapolate = TRUE)[Chick == 2]
dt <- as.data.table(ChickWeight)
## interpolation only
approx_dt(dt, 0:21, "Time", "weight", idxcols=c("Chick", "Diet"))[Chick == 2]
}
|
69a23072ea6b9643f039c8d6ef6028acd46a9e01 | 2defda9def564e710c2af56daf8f84819f03bcfe | /R/functions.R | fdf50c1e1d77731182cb2a37eaff686fa89c1f8c | [
"MIT"
] | permissive | fdrennan/dockerflow | 6337febd256242c6717d99a77f6c67bbee06627d | 85b0eb38de48b1e323ac95cadf9d8af88f903a97 | refs/heads/master | 2022-12-22T02:26:36.432464 | 2020-10-01T03:23:05 | 2020-10-01T03:23:05 | 300,122,502 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,803 | r | functions.R | #' drf_base
#' Container metadata block: base image, output Dockerfile name and WORKDIR.
#' @export drf_base
drf_base <- function(name = 'Dockerfile', version = 'r-base:4.0.2', workdir = '/') {
  # glue() interpolation keeps the base-image field the same class (glue)
  # that the sibling drf_* builders produce.
  list(base = glue('{version}'), name = name, workdir = workdir)
}
#' drf_apt_get
#' Apt package block for the generated Dockerfile.
#'
#' @param apt_get 'everything' (the default) selects the built-in list of
#'   system packages below; any other value means "use `packages` as given".
#' @param packages Optional character vector of apt package names; when
#'   supplied it is returned unchanged (overriding the default list).
#' @return A one-element list: `apt_get`, the package names to install.
#' @export drf_apt_get
drf_apt_get <- function(apt_get = 'everything', packages = NULL) {
  # Scalar condition, so use && (short-circuits; the elementwise `&` used
  # before was a lint). The default list applies only when no explicit
  # packages were passed.
  if (apt_get == 'everything' && is.null(packages)) {
    packages <- c(
      "sudo", "gdebi-core", "pandoc", "pandoc-citeproc",
      "libcurl4-gnutls-dev", "libcairo2-dev", "libxt-dev",
      "xtail", "wget", "libssl-dev", "libxml2-dev",
      "python3-venv", "libpq-dev", "libsodium-dev",
      "libudunits2-dev", "libgdal-dev", "systemctl",
      "git", "libssh2-1", "libssh2-1-dev",
      "unzip", "curl"
    )
  }
  list(apt_get = packages)
}
#' drf_packages
#' R-package block for the Dockerfile: which packages to install, plus an
#' optional renv location (currently only announced via message()).
#' @export drf_packages
drf_packages <- function(renv_location = NULL, packages = 'devtools') {
  uses_renv <- !is.null(renv_location)
  if (uses_renv) {
    message('In renv_location')
  }
  list(packages = packages, renv = renv_location)
}
#' drf_copy
#' COPY block for the Dockerfile: the local paths to copy into the image.
#' Pure pass-through; the vector is serialized as-is into the config JSON
#' by build_me_docker().
#' @export drf_copy
drf_copy <- function(localpath_vector = NULL) {
  localpath_vector
}
#' build_container
#' Read a .dockerflow JSON config (as written by build_me_docker()) and
#' render it into a Dockerfile on disk; optionally kick off `docker build`.
#'
#' @param dockerflow_path Path to the JSON config produced by
#'   build_me_docker().
#' @param build If TRUE, run the generated `docker build` command via
#'   system(); otherwise only print it.
#' @return Called for its side effects: writes the Dockerfile and messages
#'   the build command.
#' @export build_container
build_container <- function(dockerflow_path = '.dockerflow.DockerPlumber.json', build = FALSE) {
  config_file <- read_json(dockerflow_path)
  # NOTE(review): this toJSON() result is discarded — looks like a leftover
  # debugging statement; confirm before removing.
  toJSON(config_file, pretty = TRUE)
  # The config is a 4-element list in the order written by build_me_docker():
  # metadata, apt packages, R packages, files to COPY.
  meta_data <- map(config_file[[1]], ~ unlist(.))
  apt_get <- map(config_file[[2]], ~ unlist(.))
  r_packages <- map(config_file[[3]], ~ unlist(.))
  copy_in <- map(config_file[[4]], ~ unlist(.))
  # Dockerfile header lines.
  title <- glue('# {meta_data$name}')
  base <- glue('FROM {meta_data$base}')
  workdir <- glue('WORKDIR {meta_data$workdir}')
  base_apt <- 'RUN apt-get update --allow-releaseinfo-change -qq && apt-get install -y '
  # One RUN line with each apt package on its own backslash-continued line.
  apt_get_query <- paste(c(base_apt, apt_get$apt_get), collapse = ' \\\n\t')
  # install_renv <- 'RUN R -e "install.packages(\'renv\');renv::consent(provided=TRUE);renv::init()"'
  # install_renv <- 'RUN R -e "install.packages(\'renv\')"'
  # One RUN install.packages() line per requested R package.
  preferred_packages <- install_packages <- map_chr(
    r_packages$packages,
    function(pkg, dependencies = TRUE) {
      glue('RUN R -e "install.packages(\'{pkg}\', dependencies = {dependencies})"')
    }
  )
  # One COPY line per local file.
  copy_in <- map_chr(copy_in, function(file) {
    glue('COPY ./{file} ./{file}')
  })
  # The Dockerfile is named after the config's `name`; start from scratch.
  dockername <- meta_data$name
  if(file_exists(dockername))
    file_delete(dockername)
  # Echo each generated line and append it to the Dockerfile.
  walk(
    list(title, base, workdir, apt_get_query,
         # install_renv,
         preferred_packages, copy_in),
    function(file_line) {
      file_line <- unlist(file_line)
      file_line <- paste(file_line, '\n')
      map(file_line, message)
      map(file_line, ~ write_file(x = ., path = dockername, append = TRUE))
    }
  )
  # write_file(x = title, path = dockername, append = TRUE)
  # docker build -t productor_api --file ./DockerfileApi .
  # Build command redirects its output to .dockerfiles.txt so the user can
  # tail the log (see the message below).
  command_to_run <- glue('docker build -t {tolower(dockername)} --file ./{dockername} . >> .dockerfiles.txt')
  message('please run tail -f .dockerfiles.txt to follow the installation')
  message(command_to_run)
  if (build) {
    system(command_to_run)
  }
}
#' build_me_docker
#' Assemble a dockerflow config (metadata, apt packages, R packages, COPY
#' paths), write it to .dockerflow.<name>.json, and return both the path
#' and the pretty-printed JSON. Feed the returned path to build_container()
#' to render the actual Dockerfile.
#'
#' @param name Container/Dockerfile name (also used in the JSON file name).
#' @param version Base image for the FROM line.
#' @param packages R packages to install in the image.
#' @param localpath_vector Local files to COPY into the image.
#' @return A list with `json_path` (where the config was written) and
#'   `container` (the prettified JSON string).
#' @export build_me_docker
build_me_docker <- function(name = 'DockerPlumber',
                            version = 'rocker/shiny:4.0.1',
                            packages = c('shiny'),
                            localpath_vector = 'app.R') {
  # Order matters: build_container() reads these four blocks by position.
  container <- list(
    drf_base(name = name, version = version ),
    drf_apt_get(),
    drf_packages(packages = packages),
    drf_copy(localpath_vector = localpath_vector)
  )
  meta <- container[[1]]
  json_path <- glue('.dockerflow.{meta$name}.json')
  container <- prettify(toJSON(container))
  write_file(x = container, path = json_path)
  list(json_path = json_path, container = container)
}
|
a382c499c5c51e33db2b0db94bed83207c7dfb96 | b613548fe00caefe8aee62712535b3b70222f9d6 | /EJ6b.Convergence.R | 118153636637879bd34d3bb5bf4f3235360010e9 | [] | no_license | xyshell/MF793 | efc4ea711a894480f435081f7251377afe2cef19 | 3b6a0d82ac715bae3699fde69ca9f31adbdc1d64 | refs/heads/master | 2020-03-31T11:22:00.090080 | 2019-02-27T19:48:55 | 2019-02-27T19:48:55 | 152,173,829 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,074 | r | EJ6b.Convergence.R | #
#
# Illustrating the Inverse Transform method.
#
# 1) Simulate Uniforms
uu <- runif(10000)
# 2) Compute F^-1(U) for all these uniforms: qnorm is the normal quantile
# function, so xx is a standard-normal sample.
xx <- qnorm(uu)
hist(xx, nclass = 100, prob = TRUE)
# Overlay the true N(0, 1) density. Sort by x first: lines() connects the
# points in input order, so an unsorted xx draws a scribble (which is why
# the points() version had been commented out as a workaround).
ord <- order(xx)
lines(xx[ord], dnorm(xx[ord]), col = "red", lwd = 2)
# Some Commands to illustrate convergence of Estimators
# CLT with various Student-t Data
# Convergence of other estimators (stdev, kurtosis)
# to a normal distribution?
par(mfrow=c(2,1))
###################################################
# Simulating the sample mean
# Is is asymptotically normal as per the CLT ?
#
nobs<-2000; nsimul<-5000
# Sampling-distribution demo: each column of `returns` is one simulated sample
# of t(df=2) draws (nobs and nsimul are set earlier in the script).
returns<-matrix(rt(nsimul*nobs,df=2),nrow=nobs)
qqnorm(returns[1,],main="The data"); qqline(returns[1,])
# Column (per-sample) means: by the CLT these look far more normal than the raw data.
means<-apply(returns,2,mean)
qqnorm(means,main="The sample means"); qqline(means)
####################################################
# Simulating the sample standard deviation
# In small sample:
# 1) Is it biased ?
# 2) Is its Std.Dev. equal to the theoretical value (sig/sqrt(2T))
# 3) Is it normally distributed?
nobs<-1000; nsimul<-10000
sdtrue<-1
rets<-matrix(rnorm(nsimul*nobs,sd=sdtrue),ncol=nsimul)
# NOTE(review): the next three lines overwrite the normal draws with t(df=30)
# draws; comment them out to study the normal case instead.
dof<-30
rets<-matrix(rt(nsimul*nobs,df=dof),ncol=nsimul)
sdtrue<-sqrt(dof/(dof-2))      # true SD of a t(dof) variate
stds <-apply(rets,2,sd)        # one estimated SD per simulated sample
mean(stds); sdtrue             # bias check: simulated mean vs truth
sd(stds); sdtrue/sqrt(2*nobs)  # spread check vs asymptotic standard error
# Theoretical (asymptotic-normal) vs simulated 95% interval for the SD estimator.
# NOTE(review): the theoretical interval is centred at 1; with t draws the
# estimator targets sdtrue (about 1.035 here) -- confirm which is intended.
theory<-qnorm(c(0.025,0.975),1,sdtrue/sqrt(2*nobs))
actual<-quantile(stds,c(0.025,0.975))
round(matrix(c(theory,actual),ncol=2,byrow=T,
             dimnames=list(c("theory","actual"),c("2.5%","97.5%")))
      ,3)
qqnorm(stds);qqline(stds)
hist(stds,nclass=100,prob=T);box()
abline(v=theory)
abline(v=actual,col="red")
# Is the asymptotic Confidence Intervals "correct"?
# Do we reject 5% of the time if we use them?
# (This is the empirical coverage; ~0.95 means the asymptotics hold.)
round(
sum(stds>theory[1]&stds<theory[2])/nsimul
,3)
########################################################
# Estimator of Skewness
# Is it consistent?
# Does its variance equal the asymptotic approximation?
# Is it normal?
# Do we reject 5% of the time under H0?
library(moments)
nobs<-4000; nsimul<-10000; dof<-1000
returns<-matrix(rt(nsimul*nobs,dof),ncol=nsimul)
skews <-apply(returns,2,skewness)
mean(skews)                  # should be near 0 (the t is symmetric)
sd(skews); sqrt(6/nobs)      # simulated vs asymptotic SE of the skewness estimator
hist(skews,prob=T,nclass=100)
qqnorm(skews);qqline(skews)
theo<-qnorm(c(0.025,0.975),0,sqrt(6/nobs))
actu<-quantile(skews,c(0.025,0.975))
round(matrix(c(theo,actu),ncol=2,byrow=T,
             dimnames=list(c("theory","actual"),c("2.5%","97.5%"))),3)
# Should reject 5% of the time under the null
sum(skews>theo[1]&skews<theo[2])/nsimul
########################################################
# Estimator of the kurtosis
# Is it consistent? Is it normal?
#
# Do we reject 5% of the time under H0?
library(moments)
nobs<-1000; nsimul<-10000; dof<-60
#returns<-matrix(rnorm(nsimul*nobs),ncol=nsimul)
returns<-matrix(rt(nsimul*nobs,dof),ncol=nsimul)
kurts <-apply(returns,2,kurtosis)
mean(kurts)                  # t(60) kurtosis is slightly above the normal value 3
sd(kurts); sqrt(24/nobs)     # simulated vs asymptotic SE of the kurtosis estimator
hist(kurts,prob=T,nclass=50)
qqnorm(kurts);qqline(kurts)
# One-sided 5% test against H0: kurtosis = 3 (normality)
theo<-qnorm(0.95,3,sqrt(24/nobs))
actu<-quantile(kurts,0.95)
round(c(theo,actu),3)
sum(kurts>theo)/nsimul # Should reject 5% of the time
                       # under the null
|
142419aa7de14f4b2b43e091224a3f29263823d5 | 75d59c8da36f2f6a9df2ca6acbf1dda4c1cee4ae | /man/graph_data.Rd | 943c5cbfa210e347fb27b0658ac40bbfe5edef5e | [] | no_license | BowenNCSU/RGraphM | 9039cd120d51ecf687bf4191264df711dd12bd63 | 5c57eaa2aae809b49e2a1a6a466adf175a3ee0ec | refs/heads/master | 2020-04-10T07:56:28.256678 | 2018-05-15T21:40:21 | 2018-05-15T21:40:21 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 599 | rd | graph_data.Rd | \name{graph_data}
\alias{a}
\alias{b}
\docType{data}
\title{
Two randomly shuffled isomorphic graphs
}
\description{
A and B are randomly generated adjacency matrices as shown in the example of run_graph_match function.
a and b are adjacency matrices from the test_simple example in graphm library.
}
\format{
\describe{
 \item{\code{a}}{a 10x10 adjacency matrix representing graph G to be matched}
 \item{\code{b}}{a 10x10 adjacency matrix representing graph H to be matched}
}
}
\examples{
print (a)
print (b)
## maybe str(graph_data) ; plot(graph_data) ...
}
\keyword{datasets}
|
612fcdfc7bc103285e7c6c0079cf5ff577ded144 | d6e3f8759f52fee91d1ee7dcd813751593aa906e | /Code/PLSC503-2021-WeekTwelve.R | 3e4cc8f8a87314cb7863a123f1e93e2efbb8fe35 | [] | no_license | nsflemming/PLSC503-2021-git | cf822efac01886f9294305b84c3085bd2023de1e | 55ad1a8efd901fd040724029e3d1e3f7293b161a | refs/heads/master | 2023-02-26T09:24:51.821351 | 2021-04-28T12:21:11 | 2021-04-28T12:21:11 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,358 | r | PLSC503-2021-WeekTwelve.R | ##########################################
# Code for PLSC 503 - Spring 2021
#
# Regression models for ordinal- and
# nominal-level outcomes...
#
##########################################
# Packages, etc.:
require(RCurl)
require(MASS)
require(mlogit)
require(VGAM)
require(aod)
require(car)
# Options:
options(scipen = 6) # bias against scientific notation
options(digits = 3) # show fewer decimal places
# setwd():
#
# setwd("~/Dropbox (Personal)/PLSC 503/Notes")
#####################################################
# Multinomial logit, etc.
#
# Fit multinomial logits for the 1992 presidential vote (Bush/Clinton/Perot)
# as a function of party ID, using VGAM::vglm.
temp<-getURL("https://raw.githubusercontent.com/PrisonRodeo/PLSC503-2021-git/master/Data/Election1992small.csv")
nes92<-read.csv(text=temp, header=TRUE)
rm(temp)
summary(nes92)
nes92.mlogit<-vglm(presvote~partyid, multinomial, nes92)
summary(nes92.mlogit)
# Refit with each candidate in turn as the reference category:
Bush.nes92.mlogit<-vglm(formula=presvote~partyid,
                  family=multinomial(refLevel=1),data=nes92)
summary(Bush.nes92.mlogit)
Clinton.nes92.mlogit<-vglm(formula=presvote~partyid,
                   family=multinomial(refLevel=2),data=nes92)
summary(Clinton.nes92.mlogit)
# Conditional logit...
# Reshape to "long" choice format: one row per voter-alternative, with the
# candidate feeling thermometers as the alternative-varying covariate.
colnames(nes92)<-c("caseid","presvote","partyid","FT.Bush","FT.Clinton","FT.Perot")
nes92$PVote<-factor(nes92$presvote,labels=c("Bush","Clinton","Perot"))
head(nes92)
nes92CL<-mlogit.data(nes92,shape="wide",choice="PVote",varying=4:6)
head(nes92CL,6)
# Conditional logistic regression:
nes92.clogit<-mlogit(PVote~FT|partyid,data=nes92CL)
summary(nes92.clogit)
# Interpretation:
temp<-getURL("https://raw.githubusercontent.com/PrisonRodeo/PLSC503-2021-git/master/Data/Election1992.csv")
BigNES92<-read.csv(text=temp, header=TRUE)
rm(temp)
NES.MNL<-vglm(presvote~partyid+age+white+female,data=BigNES92,
              multinomial(refLevel=1))
summaryvglm(NES.MNL)
# Wald tests of joint hypotheses on subsets of the coefficient vector:
wald.test(b=c(t(coef(NES.MNL))),Sigma=vcov(NES.MNL),Terms=c(5,6))
wald.test(b=c(t(coef(NES.MNL))),Sigma=vcov(NES.MNL),Terms=c(1,3,5,7,9))
# Hats, yo:
# Classify each respondent by the outcome with the largest fitted probability.
PickBush<-ifelse(fitted.values(NES.MNL)[,1]>fitted.values(NES.MNL)[,2]
                 & fitted.values(NES.MNL)[,1]>fitted.values(NES.MNL)[,3], 1,0)
PickWJC<-ifelse(fitted.values(NES.MNL)[,2]>fitted.values(NES.MNL)[,1]
                & fitted.values(NES.MNL)[,2]>fitted.values(NES.MNL)[,3], 2, 0)
PickHRP<-ifelse(fitted.values(NES.MNL)[,3]>fitted.values(NES.MNL)[,1]
                & fitted.values(NES.MNL)[,3]>fitted.values(NES.MNL)[,2], 3, 0)
OutHat<-PickBush+PickWJC+PickHRP
# Cross-tabulate actual vs predicted vote choice:
table(BigNES92$presvote,OutHat)
mnl.or <- function(model) {
  # Odds ratios and 95% confidence intervals for a multinomial logit fit.
  #
  # Bug fix: the original body referenced the global object NES.MNL instead of
  # the `model` argument, so the function silently ignored its input.
  #
  # model: a fitted model with coef() and vcov() methods (e.g. a vglm fit).
  # Returns: a matrix with columns lci / or / uci, one row per coefficient.
  coeffs <- c(t(coef(model)))
  ses <- sqrt(diag(vcov(model)))   # standard errors, computed once
  lci <- exp(coeffs - 1.96 * ses)
  or <- exp(coeffs)
  uci <- exp(coeffs + 1.96 * ses)
  cbind(lci, or, uci)
}
mnl.or(NES.MNL)   # odds ratios with 95% CIs
# In-sample predictions:
hats<-as.data.frame(fitted.values(NES.MNL))
names(hats)<-c("Bush","Clinton","Perot")
# NOTE(review): attach() is used for plotting convenience; the columns of
# `hats` become visible as free-standing variables below.
attach(hats)
pdf("InSampleRScatterplotMatrix.pdf",8,7)
spm(~Bush+Clinton+Perot,pch=20,plot.points=TRUE,
    diagonal="histogram",col=c("black","grey"))
dev.off()
# Predicted probabilities for each candidate, plotted against party ID:
pdf("InSampleMNLPredProbsR.pdf",8,6)
par(mfrow=c(1,3))
plot(BigNES92$partyid,Bush,xlab="Party ID")
plot(BigNES92$partyid,Clinton,xlab="Party ID")
plot(BigNES92$partyid,Perot,xlab="Party ID")
par(mfrow=c(1,1))
dev.off()
# Conditional logit example:
nes92.clogit<-mlogit(PVote~FT|partyid,data=nes92CL)
summary(nes92.clogit)
# In-sample predictions:
CLhats<-predict(nes92.clogit,nes92CL)
# Fitted probabilities vs each candidate's feeling thermometer; a little
# uniform jitter on the x-axis separates overplotted points.
pdf("InSampleCLHatsR.pdf",7,6)
plot(nes92$FT.Bush,CLhats[,1],pch=19,
     col=rgb(100,0,0,100,maxColorValue=255),
     xlab="Feeling Thermometer",
     ylab="Predicted Probability")
points(nes92$FT.Clinton+runif(nrow(CLhats),-1,1),
       CLhats[,2],pch=4,col=rgb(0,0,100,100,maxColorValue=255))
points(nes92$FT.Perot+runif(nrow(CLhats),-1,1),
       CLhats[,3],pch=17,col=rgb(0,100,0,50,maxColorValue=255))
# Lowess smooths summarise the probability/thermometer relationship:
lines(lowess(nes92$FT.Bush,CLhats[,1]),lwd=2,col="red")
lines(lowess(nes92$FT.Clinton,CLhats[,2]),lwd=2,col="blue")
lines(lowess(nes92$FT.Perot,CLhats[,3]),lwd=2,col="darkgreen")
legend("topleft",bty="n",c("Bush","Clinton","Perot"),
       col=c("red","blue","darkgreen"),pch=c(19,4,17))
dev.off()
####################################################
# Now, ordinal-response models...
#
# GOP Thermometer score plot:
temp<-getURL("https://raw.githubusercontent.com/PrisonRodeo/PLSC503-2021-git/master/Data/ANES-pilot-2016.csv")
ANES<-read.csv(text=temp)
rm(temp)
# 998 is the "don't know" code in this survey; recode to missing.
ANES$ftjeb<-ifelse(ANES$ftjeb==998,NA,ANES$ftjeb)
pdf("Notes/ANES-FT-Jeb-2016.pdf",7,6)
par(mar=c(4,4,2,2))
hist(ANES$ftjeb,breaks=seq(0,100,by=1),main="",
     xlab="Feeling Thermometer Score for Jeb!")
dev.off()
##################################
# Ordered simulation:
# Simulate a latent Y* = X + noise, then discretise it two different ways to
# show how cutpoint placement affects an OLS fit to the observed categories.
set.seed(7222009)
X<-runif(1000,0,10)
Ystar<-0 + 1*X + rnorm(1000)
# Evenly spaced cutpoints at 2.5 / 5 / 7.5:
Y1<-Ystar
Y1[Ystar<2.5]<-1
Y1[Ystar>=2.5 & Ystar<5]<-2
Y1[Ystar>=5 & Ystar<7.5]<-3
Y1[Ystar>=7.5]<-4
table(Y1)
summary(lm(Ystar~X))
summary(lm(Y1~X))
pdf("OrdinalOneR.pdf",7,5)
par(mar=c(4,4,2,2))
par(mfrow=c(1,2))
plot(X,Ystar,pch=20,xlab="X",ylab="Y*")
abline(lm(Ystar~X),lwd=3,col="red")
abline(h=c(2.5,5,7.5),lty=2)
plot(X,Y1,pch=20,xlab="X",ylab="Y1")
abline(lm(Y1~X),lwd=3,col="red")
dev.off()
# Unevenly spaced cutpoints at 2 / 8 / 9:
# NOTE(review): observations with Ystar exactly between the `< 9` and `> 9`
# conditions (Ystar == 9) are left uncoded -- confirm intended.
Y2<-Ystar
Y2[Ystar<2]<-1
Y2[Ystar>=2 & Ystar<8]<-2
Y2[Ystar>=8 & Ystar<9]<-3
Y2[Ystar>9]<-4
table(Y2)
summary(lm(Y2~X))
pdf("OrdinalTwoR.pdf",7,5)
par(mar=c(4,4,2,2))
par(mfrow=c(1,2))
plot(X,Ystar,pch=20,xlab="X",ylab="Y*")
abline(lm(Ystar~X),lwd=3,col="red")
abline(h=c(2,8,9),lty=2)
plot(X,Y2,pch=20,xlab="X",ylab="Y2")
abline(lm(Y2~X),lwd=3,col="red")
dev.off()
# Best Example Ever...
# Ordered logit / probit for beer quality ratings (MASS::polr).
temp<-getURL("https://raw.githubusercontent.com/PrisonRodeo/PLSC503-2021-git/master/Data/Beer.csv")
beer<-read.csv(text=temp, header=TRUE)
rm(temp)
summary(beer)
beer.logit<-polr(as.factor(quality)~price+calories+craftbeer
                 +bitter+malty,data=beer)
summary(beer.logit)
beer.probit<-polr(as.factor(quality)~price+calories+craftbeer+
                    bitter+malty,data=beer,method="probit")
summary(beer.probit)
# Odds Ratios
olreg.or <- function(model) {
  # Odds ratios and 95% confidence intervals for an ordered logit fit.
  #
  # Bug fix: the original body referenced the global object beer.logit instead
  # of the `model` argument, so the function silently ignored its input.
  #
  # model: a fitted model whose summary() coefficient table has point estimates
  #        in column 1 and standard errors in column 2 (e.g. polr, lm, glm).
  # Returns: a matrix with columns lci / or / uci, one row per coefficient.
  coeffs <- coef(summary(model))
  lci <- exp(coeffs[, 1] - 1.96 * coeffs[, 2])
  or  <- exp(coeffs[, 1])
  uci <- exp(coeffs[, 1] + 1.96 * coeffs[, 2])
  cbind(lci, or, uci)
}
olreg.or(beer.logit)   # odds ratios with 95% CIs
# Predicted probs
# Vary calories over 60-200 while holding the other covariates at central values:
calories<-seq(60,200,1)
price<-mean(beer$price)
craftbeer<-median(beer$craftbeer)
bitter<-mean(beer$bitter)
malty<-mean(beer$malty)
beersim<-cbind(calories,price,craftbeer,bitter,malty)
beer.hat<-predict(beer.logit,beersim,type='probs')
# One fitted-probability curve per outcome category:
pdf("ROrdinalProbs.pdf",6,5)
par(mar=c(4,4,2,2))
plot(c(60,200), c(0,1), type='n', xlab="Calories", ylab='Fitted
     Probability')
lines(60:200, beer.hat[1:141, 1], lty=1, lwd=3)
lines(60:200, beer.hat[1:141, 2], lty=2, lwd=3)
lines(60:200, beer.hat[1:141, 3], lty=3, lwd=3)
lines(60:200, beer.hat[1:141, 4], lty=4, lwd=3)
dev.off()
# Cumulative probs:
# Build stacked polygons: each y-axis vector adds the next category's
# probability, so the filled bands show cumulative probabilities.
xaxis<-c(60,60:200,200)
yaxis1<-c(0,beer.hat[,1],0)
yaxis2<-c(0,beer.hat[,2]+beer.hat[,1],0)
yaxis3<-c(0,beer.hat[,3]+beer.hat[,2]+beer.hat[,1],0)
yaxis4<-c(0,beer.hat[,4]+beer.hat[,3]+beer.hat[,2]+beer.hat[,1],0)
pdf("ROrdinalCumProbs.pdf",6,5)
par(mar=c(4,4,2,2))
plot(c(60,200), c(0,1), type='n', xlab="Calories",
     ylab="Cumulative Probability")
# Draw from the outermost (largest) band inward so each layer stays visible:
polygon(xaxis,yaxis4,col="white")
polygon(xaxis,yaxis3,col="grey80")
polygon(xaxis,yaxis2,col="grey50")
polygon(xaxis,yaxis1,col="grey10")
dev.off()
# fin
d94a88635964b6ed166b8e5116dac2fa798bd17a | c191740f13d2586c8942a8db23d81e0bab365ee9 | /R/rmAllVars.R | 4f09aba0217ec88e84b274fd4bce500c215de69b | [] | no_license | duncantl/CodeAnalysis | 683bd32ada07c24f85a4739a31d900411b573fab | 726450a07d7ace054db6d4743cf895a29e16deea | refs/heads/master | 2023-09-04T02:56:23.129429 | 2023-09-04T01:38:04 | 2023-09-04T01:38:04 | 21,444,160 | 13 | 4 | null | 2015-07-30T02:54:00 | 2014-07-02T23:58:21 | R | UTF-8 | R | false | false | 873 | r | rmAllVars.R |
isRemoveAllVars =
#
#` @title Determines which expressions are of the form rm(list = ls())
#
function(e, remove = FALSE, asIndex = TRUE)
{
w = sapply(e, function(x) is.call(x) && as.character(x[[1]]) %in% c("remove", "rm"))
if(!any(w))
return(if(remove) e else if(asIndex) integer() else list())
w2 = sapply(e[w], isRemoveAllCall)
i = which(w)[w2]
if(remove)
e[-i]
else if(asIndex)
i
else e[i]
}
isRemoveAllCall =
function(x)
{
# Currently checks only for list = ls(...)) where we could have anything in ...
!is.na(i <- match("list", names(x))) &&
is.call(x[[i]]) &&
as.character(x[[i]][[1]]) %in% c("ls", "objects") &&
# Check if using a different environment
(is.na( i <- match("envir", names(x))) || (is.call(x[[i]]) && as.character(x[[i]]) == "globalenv"))
}
|
b0a9058ef30d594d4e6a6656dd8578364598f317 | c02a1052776295260500d631e6ac6fbdd7bf674a | /R/iris-ex.R | 97b26ddb55421f6d36eb36d411ad13c524c2241a | [] | no_license | anhnguyendepocen/VisMLM-course | 877d48fe14f0f7d64fc8b29487cdb23c9c10a4ab | 97d04c6576481595b417b4a9ed0f90b38631e21a | refs/heads/master | 2023-03-25T21:27:24.418661 | 2021-03-15T17:05:23 | 2021-03-15T17:05:23 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,443 | r | iris-ex.R | #' ---
#' title: "Iris data examples for EqCov paper"
#' author: "Michael Friendly and Matthew Sigal"
#' date: "21 Jun 2016"
#' ---
#' This script reproduces all of the analysis and graphs for the MANOVA of the `Iris` data
#' in the paper and also includes other analyses not described there. It is set up as an
#' R script that can be "compiled" to HTML, Word, or PDF using `knitr::knit()`. This is most
#' convenient within R Studio via the `File -> Compile Notebook` option.
#+ echo=FALSE
knitr::opts_chunk$set(warning=FALSE, message=FALSE, R.options=list(digits=4))
#' ## Load packages and the data
library(heplots)
library(car) # actually, loaded by heplots
data(iris)
#' ## Initial scatterplots and data ellipses
op <- par(mfcol=c(1,2), mar=c(5,4,1,1)+.1)
scatterplot(Sepal.Width ~ Sepal.Length | Species, data=iris,
ellipse=TRUE, levels=0.68, smoother=NULL, reg.line=FALSE, grid=FALSE,
legend.coords=list(x=7, y=4.4), col=c("red", "blue", "black"))
scatterplot(Sepal.Width ~ Sepal.Length | Species, data=iris,
ellipse=TRUE, levels=0.68, smoother=NULL, grid=FALSE,
reg.line=FALSE, cex=0,
legend.plot=FALSE, col=c("red", "blue", "black"))
par(op)
#' ## Using the covEllipse function
#' Uncentered and centered, first two variables
covEllipses(iris[,1:4], iris$Species,
fill=c(rep(FALSE,3), TRUE))
covEllipses(iris[,1:4], iris$Species, center=TRUE,
fill=c(rep(FALSE,3), TRUE), fill.alpha=.1, label.pos=c(1:3,0))
#' All pairs when more than two are specified
covEllipses(iris[,1:4], iris$Species,
fill=c(rep(FALSE,3), TRUE), variables=1:4,
fill.alpha=.1)
covEllipses(iris[,1:4], iris$Species, center=TRUE,
fill=c(rep(FALSE,3), TRUE), variables=1:4,
label.pos=c(1:3,0), fill.alpha=.1)
#' ## view in PCA space
#' NB: scale.=FALSE by default
iris.pca <- prcomp(iris[,1:4])
summary(iris.pca)
op <- par(mfcol=c(1,2), mar=c(5,4,1,1)+.1)
covEllipses(iris.pca$x, iris$Species,
fill=c(rep(FALSE,3), TRUE),
label.pos=1:4, fill.alpha=.1, asp=1)
covEllipses(iris.pca$x, iris$Species,
fill=c(rep(FALSE,3), TRUE), center=TRUE,
label.pos=1:4, fill.alpha=.1, asp=1)
par(op)
# all variables
covEllipses(iris.pca$x, iris$Species,
fill=c(rep(FALSE,3), TRUE), variables=1:4,
label.pos=1:4, fill.alpha=.1)
covEllipses(iris.pca$x, iris$Species, center=TRUE,
fill=c(rep(FALSE,3), TRUE), variables=1:4,
label.pos=1:4, fill.alpha=.1)
# Plot the last two, PC 3,4
covEllipses(iris.pca$x, iris$Species,
fill=c(rep(FALSE,3), TRUE), variables=3:4,
label.pos=c(1:3,0), fill.alpha=.1, asp=1)
covEllipses(iris.pca$x, iris$Species,
fill=c(rep(FALSE,3), TRUE), center=TRUE, variables=3:4,
label.pos=c(1:3,0), fill.alpha=.1, asp=1)
#' ## compare classical and robust covariance estimates
covEllipses(iris[,1:4], iris$Species)
# Overlay robust (minimum volume ellipsoid) estimates on the classical ones:
covEllipses(iris[,1:4], iris$Species, fill=TRUE, method="mve", add=TRUE, labels="")
#' Box's M test
#' (tests equality of the within-group covariance matrices)
iris.boxm <- boxM(iris[, 1:4], iris[, "Species"])
iris.boxm
#' covEllipses has a method for `"boxm"` objects
covEllipses(iris.boxm, fill=c(rep(FALSE,3), TRUE) )
covEllipses(iris.boxm, fill=c(rep(FALSE,3), TRUE), center=TRUE, label.pos=1:4 )
#' Boxplots of means, using `car::Boxplot`
op <- par(mfrow=c(1, 4), mar=c(5,4,1,1))
for (response in names(iris)[1:4]){
  Boxplot(iris[, response] ~ Species, data=iris,
          ylab=response, axes=FALSE, col=c("red", "blue", "gray"))
  box()
  axis(2)
  # NOTE(review): "Virginca" is a typo for "Virginica" in the axis label.
  axis(1, at=1:3, labels=c("Setosa", "Vers.", "Virginca"))
}
par(op)
#' ## models & plots
# Multivariate linear model (MANOVA) of all four responses on Species:
iris.mod <- lm(as.matrix(iris[, 1:4]) ~ Species, data=iris)
Anova(iris.mod)
iris.boxm <- boxM(iris.mod)
iris.boxm
#' ## canonical view of MANOVA test
library(candisc)
iris.can <- candisc(iris.mod)
op <- par(mar=c(5,4,1,1)+.1)
plot(iris.can, ellipse=TRUE)
par(op)
#' ## multivariate Levene test
#' (absolute deviations from the group medians, then a MANOVA on those)
irisdev <- abs( colDevs(iris[,1:4], iris$Species, median) )
irisdev.mod <- lm( irisdev ~ iris$Species)
Anova(irisdev.mod)
#' ## robust MLM
irisdev.rmod <- robmlm( irisdev ~ iris$Species)
Anova(irisdev.rmod)
pairs(irisdev.rmod, variables=1:4, fill=TRUE, fill.alpha=.1)
#' ## covariance ellipses of absolute deviations
covEllipses(irisdev, group=iris$Species,
    variables=1:4,
    fill=c(rep(FALSE,3), TRUE), fill.alpha=0.1, label.pos=c(1:3,0))
pairs(irisdev.mod, variables=1:4, fill=TRUE, fill.alpha=.1)
#' Canonical views for Levene's test
library(candisc)
irisdev.can <- candisc(irisdev.mod)
irisdev.can
plot(irisdev.can, which=1)
plot(irisdev.can, ellipse=TRUE)
|
51c0301e47dcb37c044874c7e7fa5535f877edac | 70f3c5dd76e97a7d5e79098700b65ad464fb50a3 | /cachematrix.R | f690aad519fbcee10bd3552244b763525417c760 | [] | no_license | nicolehanjing/ProgrammingAssignment2 | 93b5f78cfb1f939382e12e1a785410e424140c0e | 21023a0e32b2433e30c684217e0ec34662fded1b | refs/heads/master | 2021-06-06T16:49:34.785441 | 2016-07-27T06:14:43 | 2016-07-27T06:14:43 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 695 | r | cachematrix.R | ##This function is going to make the cache matrix
## makeCacheMatrix wraps a matrix in an object that can cache its inverse.
## Closure state: x is the wrapped matrix, s is the cached inverse
## (NULL until computed, and invalidated whenever x changes).
makeCacheMatrix <- function(x = numeric()){
  s <- NULL
  set <- function(y){
    x <<- y
    s <<- NULL      # invalidate the cached inverse when the matrix changes
  }
  get <- function() x
  setinverse <- function(solve) s <<- solve
  getinverse <- function() s
  # Expose the four accessors as a named list
  list(set = set, get = get,
       setinverse = setinverse,
       getinverse = getinverse)
}
## cacheSolve returns the inverse of the matrix wrapped by makeCacheMatrix,
## computing it only on the first call and reusing the cached value afterwards.
## `...` is forwarded to solve(). Bug fix: the compute path previously ended in
## print(s), which printed the result instead of returning it consistently with
## the cached path; both paths now return the inverse.
cacheSolve <- function(x, ...){
  s <- x$getinverse()
  if(!is.null(s)){
    message("Getting cached data")
    return(s)
  }
  data <- x$get()
  s <- solve(data, ...)
  x$setinverse(s)   # store for subsequent calls
  s
}
|
5c1940ff458bac839dd399101670f2883d76298d | 4e5b7c10987221be70bf97f1078ff212bd9a6b87 | /man/sixCycleStat.Rd | 8ec5ce8ccdb5e205d0a7d2601f9f21a379d2b075 | [] | no_license | uvacorpnet/rem | 1f50b9becd8a7533d4388bbd4a1ea401e0aa8fb8 | e027825de55265354667575e4fe883d93f083d38 | refs/heads/master | 2020-12-04T15:09:18.431842 | 2020-03-13T15:44:13 | 2020-03-13T15:44:13 | 231,812,154 | 3 | 0 | null | 2020-01-04T18:48:00 | 2020-01-04T18:47:59 | null | UTF-8 | R | false | false | 5,098 | rd | sixCycleStat.Rd | \name{sixCycleStat}
\alias{sixCycleStat}
\alias{sixCycle}
\title{Calculate six cycle statistics}
%
\description{Calculate the endogenous network statistic \code{sixCycle} that
measures the tendency for events to close six cycles in two-mode event sequences.}
\usage{
sixCycleStat(data, time, sender, target, halflife,
weight = NULL,
eventtypevar = NULL,
eventtypevalue = 'standard',
eventfiltervar = NULL,
eventfilterAB = NULL, eventfilterAJ = NULL,
eventfilterIB = NULL, eventfilterIJ = NULL,
eventvar = NULL,
variablename = 'fourCycle',
returnData = FALSE,
dataPastEvents = NULL,
showprogressbar = FALSE,
inParallel = FALSE, cluster = NULL
)
}
\arguments{
\item{data}{ A data frame containing all the variables.}
\item{time}{ Numeric variable that represents the event sequence. The variable
has to be sorted in ascending order.}
\item{sender}{ A string (or factor or numeric) variable that represents the sender of the event.}
\item{target}{ A string (or factor or numeric) variable that represents the target of the event.}
\item{halflife}{ A numeric value that is used in the decay function.
The vector of past events is weighted by an exponential decay function using the specified halflife. The halflife parameter determins after how long a period the event weight should be halved. E.g. if \code{halflife = 5}, the weight of an event that occured 5 units in the past is halved. Smaller halflife values give more importance to more recent events, while larger halflife values should be used if time does not affect the sequence of events that much.}
\item{weight}{ An optional numeric variable that represents the weight of each event. If \code{weight = NULL} each event is given an event weight of \code{1}.
}
\item{eventtypevar}{ An optional variable that represents the type of the event. Use \code{eventtypevalue} to specify how the \code{eventtypevar} should be used to filter past events.
}
  \item{eventtypevalue}{ An optional value (or set of values) used to specify how past events should be filtered depending on their type. \code{'standard'} is implemented.}
  \item{eventfiltervar}{ An optional variable that allows filtering of past events using an event attribute; not implemented.}
\item{eventfilterAB}{ An optional value used to specify how
  past events should be filtered depending on their attribute; not implemented.}
\item{eventfilterAJ}{ see \code{eventfilterAB}.}
\item{eventfilterIB}{see \code{eventfilterAB}.}
\item{eventfilterIJ}{see \code{eventfilterAB}.}
\item{eventvar}{ An optional dummy variable with 0 values for null-events and 1 values for true events. If the \code{data} is in the form of counting process data, use the \code{eventvar}-option to specify which variable contains the 0/1-dummy for event occurrence. If this variable is not specified, all events in the past will be considered for the calulation of the four cycle statistic, regardless if they occurred or not (= are null-events). Misspecification could result in grievous errors in the calculation of the network statistic.}
\item{variablename}{ An optional value (or values) with the name the four cycle statistic variable should be given. To be used if \code{returnData = TRUE}.}
\item{returnData}{ \code{TRUE/FALSE}. Set to \code{FALSE} by default. The new variable(s) are bound directly to the \code{data.frame} provided and the data frame is returned in full.}
\item{dataPastEvents}{ An optional \code{data.frame} with the following variables:
column 1 = time variable,
column 2 = sender variable,
column 3 = target on other variable (or all "1"),
column 4 = weight variable (or all "1"),
column 5 = event type variable (or all "1"),
column 6 = event filter variable (or all "1"). Make sure that the data frame does not contain null events. Filter it out for true events only.}
\item{showprogressbar}{\code{TRUE/FALSE}. To be implemented.}
\item{inParallel}{ \code{TRUE/FALSE}. An optional boolean to specify if the loop should be run in parallel.}
  \item{cluster}{ An optional numeric or character value that defines the cluster. By specifying a single number, the cluster option uses the provided number of nodes to parallelize. By specifying a cluster using the \code{makeCluster}-command in the \code{doParallel}-package, the loop can be run on multiple nodes/cores. E.g., \code{cluster = makeCluster(12, type="FORK")}.}
}
\details{
The \code{sixCycleStat()}-function calculates an endogenous statistic that measures whether events have a tendency to form six cycles.
The effect is further described in the following paper:
D. Valeeva, F.W. Takes and E.M. Heemskerk, The duality of firms and directors in board interlock networks: A relational event modeling approach, Social Networks 62: 68-79, Elsevier, 2020.
}
% \value{
%
% }
% \references{
%
% }
% \note{
%
% }
\author{
Diliara Valeeva, Frank Takes and Eelke Heemskerk of the University of Amsterdam's CORPNET group \email{corpnet@uva.nl}
}
\seealso{
\link{rem-package}
}
\examples{See fourCycleStat() examples.
}
%\keyword{key}
|
f0c6dd5a4e942b3b0d77dda50524ce05c9c72059 | 557db962cfb493463ec6e7d52fc27d46105db081 | /R/blob_client_funcs.R | d897533da515397691259706b3ac6805430f5572 | [] | no_license | cran/AzureStor | 101eb0f24a1f7d491882be51199f8a02a9f8a38e | 6d28f84d8888eecefe83fdacb635e4eda3d8d796 | refs/heads/master | 2022-06-13T10:58:01.035371 | 2022-05-25T06:00:02 | 2022-05-25T06:00:02 | 163,115,188 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 26,292 | r | blob_client_funcs.R | #' Operations on a blob endpoint
#'
#' Get, list, create, or delete blob containers.
#'
#' @param endpoint Either a blob endpoint object as created by [storage_endpoint], or a character string giving the URL of the endpoint.
#' @param key,token,sas If an endpoint object is not supplied, authentication credentials: either an access key, an Azure Active Directory (AAD) token, or a SAS, in that order of priority. If no authentication credentials are provided, only public (anonymous) access to the share is possible.
#' @param api_version If an endpoint object is not supplied, the storage API version to use when interacting with the host. Currently defaults to `"2019-07-07"`.
#' @param name The name of the blob container to get, create, or delete.
#' @param confirm For deleting a container, whether to ask for confirmation.
#' @param lease For deleting a leased container, the lease ID.
#' @param public_access For creating a container, the level of public access to allow.
#' @param x For the print method, a blob container object.
#' @param ... Further arguments passed to lower-level functions.
#'
#' @details
#' You can call these functions in a couple of ways: by passing the full URL of the share, or by passing the endpoint object and the name of the container as a string.
#'
#' If authenticating via AAD, you can supply the token either as a string, or as an object of class AzureToken, created via [AzureRMR::get_azure_token]. The latter is the recommended way of doing it, as it allows for automatic refreshing of expired tokens.
#'
#' @return
#' For `blob_container` and `create_blob_container`, an S3 object representing an existing or created container respectively.
#'
#' For `list_blob_containers`, a list of such objects.
#'
#' @seealso
#' [storage_endpoint], [az_storage], [storage_container]
#'
#' @examples
#' \dontrun{
#'
#' endp <- blob_endpoint("https://mystorage.blob.core.windows.net/", key="access_key")
#'
#' # list containers
#' list_blob_containers(endp)
#'
#' # get, create, and delete a container
#' blob_container(endp, "mycontainer")
#' create_blob_container(endp, "newcontainer")
#' delete_blob_container(endp, "newcontainer")
#'
#' # alternative way to do the same
#' blob_container("https://mystorage.blob.core.windows.net/mycontainer", key="access_key")
#' create_blob_container("https://mystorage.blob.core.windows.net/newcontainer", key="access_key")
#' delete_blob_container("https://mystorage.blob.core.windows.net/newcontainer", key="access_key")
#'
#' # authenticating via AAD
#' token <- AzureRMR::get_azure_token(resource="https://storage.azure.com/",
#' tenant="myaadtenant",
#' app="myappid",
#' password="mypassword")
#' blob_container("https://mystorage.blob.core.windows.net/mycontainer", token=token)
#'
#' }
#' @rdname blob_container
#' @export
blob_container <- function(endpoint, ...)
{
    # S3 generic: dispatches on the class of `endpoint` (a character URL or a
    # blob_endpoint object); see the methods below.
    UseMethod("blob_container")
}
#' @rdname blob_container
#' @export
blob_container.character <- function(endpoint, key=NULL, token=NULL, sas=NULL,
                                     api_version=getOption("azure_storage_api_version"),
                                     ...)
{
    # Split the URL into an endpoint object plus container name, then
    # re-dispatch to the blob_endpoint method with those two arguments.
    do.call(blob_container, generate_endpoint_container(endpoint, key, token, sas, api_version))
}
#' @rdname blob_container
#' @export
blob_container.blob_endpoint <- function(endpoint, name, ...)
{
    # Construct the container object in one step: a named list holding the
    # container name and its parent endpoint, classed for S3 dispatch.
    structure(list(name=name, endpoint=endpoint),
              class=c("blob_container", "storage_container"))
}
#' @rdname blob_container
#' @export
print.blob_container <- function(x, ...)
{
    # Print method for blob_container objects: shows the container name, its
    # full URL, and which authentication credentials (if any) are attached.
    # Secret values are never echoed -- only <hidden>/<none supplied> markers.
    cat("Azure blob container '", x$name, "'\n", sep="")
    # Rebuild the container URL from the endpoint URL plus the container name
    url <- httr::parse_url(x$endpoint$url)
    url$path <- x$name
    cat(sprintf("URL: %s\n", httr::build_url(url)))
    if(!is_empty(x$endpoint$key))
        cat("Access key: <hidden>\n")
    else cat("Access key: <none supplied>\n")
    if(!is_empty(x$endpoint$token))
    {
        # AAD token objects have their own print method (expiry, scopes, etc.)
        cat("Azure Active Directory access token:\n")
        print(x$endpoint$token)
    }
    else cat("Azure Active Directory access token: <none supplied>\n")
    if(!is_empty(x$endpoint$sas))
        cat("Account shared access signature: <hidden>\n")
    else cat("Account shared access signature: <none supplied>\n")
    cat(sprintf("Storage API version: %s\n", x$endpoint$api_version))
    # Return the object invisibly, per print-method convention
    invisible(x)
}
#' @rdname blob_container
#' @export
list_blob_containers <- function(endpoint, ...)
{
    # S3 generic: list all containers under a blob endpoint (URL or object).
    UseMethod("list_blob_containers")
}
#' @rdname blob_container
#' @export
list_blob_containers.character <- function(endpoint, key=NULL, token=NULL, sas=NULL,
                                           api_version=getOption("azure_storage_api_version"),
                                           ...)
{
    # Convert the URL + credentials into an endpoint object, then re-dispatch.
    # Note: generate_endpoint_container also returns a container name component,
    # which the blob_endpoint method ignores via its `...` argument.
    do.call(list_blob_containers, generate_endpoint_container(endpoint, key, token, sas, api_version))
}
#' @rdname blob_container
#' @export
list_blob_containers.blob_endpoint <- function(endpoint, ...)
{
    # Convert one page of listing results into blob_container objects
    page_to_containers <- function(page)
        lapply(page$Containers, function(cont) blob_container(endpoint, cont$Name[[1]]))

    # Page through the listing: the service returns a NextMarker element
    # whenever more results remain; pass it back to fetch the next page.
    out <- list()
    marker <- NULL
    repeat
    {
        opts <- list(comp="list")
        if(!is.null(marker))
            opts$marker <- marker
        page <- call_storage_endpoint(endpoint, "/", options=opts)
        out <- c(out, page_to_containers(page))
        if(length(page$NextMarker) == 0)
            break
        marker <- page$NextMarker[[1]]
    }
    named_list(out)
}
#' @rdname blob_container
#' @export
create_blob_container <- function(endpoint, ...)
{
    # S3 generic: create a blob container at the given endpoint.
    UseMethod("create_blob_container")
}
#' @rdname blob_container
#' @export
create_blob_container.character <- function(endpoint, key=NULL, token=NULL, sas=NULL,
                                            api_version=getOption("azure_storage_api_version"),
                                            ...)
{
    # Split the URL into an endpoint object + container name, then re-dispatch,
    # forwarding any extra arguments (e.g. public_access) unchanged.
    endp <- generate_endpoint_container(endpoint, key, token, sas, api_version)
    create_blob_container(endp$endpoint, endp$name, ...)
}
#' @rdname blob_container
#' @export
create_blob_container.blob_container <- function(endpoint, ...)
{
    # Create (or recreate) this container at its endpoint.
    # Bug fix: forward `...` so options such as `public_access` are honoured;
    # previously they were silently dropped, inconsistent with
    # delete_blob_container.blob_container which does forward them.
    create_blob_container(endpoint$endpoint, endpoint$name, ...)
}
#' @rdname blob_container
#' @export
create_blob_container.blob_endpoint <- function(endpoint, name, public_access=c("none", "blob", "container"), ...)
{
    # Validate the requested access level, then assemble the request headers:
    # any extra arguments in `...` become headers, and a public-access header
    # is added (or overridden) when a non-default level was requested.
    public_access <- match.arg(public_access)
    headers <- list(...)
    if(public_access != "none")
        headers[["x-ms-blob-public-access"]] <- public_access

    obj <- blob_container(endpoint, name)
    do_container_op(obj, options=list(restype="container"), headers=headers, http_verb="PUT")
    obj
}
#' @rdname blob_container
#' @export
delete_blob_container <- function(endpoint, ...)
{
    # S3 generic: delete a blob container at the given endpoint.
    UseMethod("delete_blob_container")
}
#' @rdname blob_container
#' @export
delete_blob_container.character <- function(endpoint, key=NULL, token=NULL, sas=NULL,
                                            api_version=getOption("azure_storage_api_version"),
                                            ...)
{
    # Split the URL into an endpoint object + container name, then re-dispatch,
    # forwarding extra arguments (e.g. confirm, lease) unchanged.
    endp <- generate_endpoint_container(endpoint, key, token, sas, api_version)
    delete_blob_container(endp$endpoint, endp$name, ...)
}
#' @rdname blob_container
#' @export
delete_blob_container.blob_container <- function(endpoint, ...)
{
    # Delete this container, re-dispatching on its endpoint + name and
    # forwarding extra arguments (confirm, lease) unchanged.
    delete_blob_container(endpoint$endpoint, endpoint$name, ...)
}
#' @rdname blob_container
#' @export
delete_blob_container.blob_endpoint <- function(endpoint, name, confirm=TRUE, lease=NULL, ...)
{
    # Optionally ask for confirmation before deleting; bail out quietly if
    # the deletion was not confirmed.
    if(!delete_confirmed(confirm, paste0(endpoint$url, name), "container"))
        return(invisible(NULL))

    # When the container is leased, the lease ID must accompany the request.
    hdrs <- list()
    if(!is_empty(lease))
        hdrs[["x-ms-lease-id"]] <- lease

    cont <- blob_container(endpoint, name)
    invisible(do_container_op(cont, options=list(restype="container"), headers=hdrs, http_verb="DELETE"))
}
#' Operations on a blob container or blob
#'
#' Upload, download, or delete a blob; list blobs in a container; create or delete directories; check blob availability.
#'
#' @param container A blob container object.
#' @param blob A string naming a blob.
#' @param dir For `list_blobs`, a string naming the directory. Note that blob storage does not support real directories; this argument simply filters the result to return only blobs whose names start with the given value.
#' @param src,dest The source and destination files for uploading and downloading. See 'Details' below.
#' @param info For `list_blobs`, level of detail about each blob to return: a vector of names only; the name, size, blob type, and whether this blob represents a directory; or all information.
#' @param confirm Whether to ask for confirmation on deleting a blob.
#' @param blocksize The number of bytes to upload/download per HTTP(S) request.
#' @param lease The lease for a blob, if present.
#' @param type When uploading, the type of blob to create. Currently only block and append blobs are supported.
#' @param append When uploading, whether to append the uploaded data to the destination blob. Only has an effect if `type="AppendBlob"`. If this is FALSE (the default) and the destination append blob exists, it is overwritten. If this is TRUE and the destination does not exist or is not an append blob, an error is thrown.
#' @param overwrite When downloading, whether to overwrite an existing destination file.
#' @param use_azcopy Whether to use the AzCopy utility from Microsoft to do the transfer, rather than doing it in R.
#' @param max_concurrent_transfers For `multiupload_blob` and `multidownload_blob`, the maximum number of concurrent file transfers. Each concurrent file transfer requires a separate R process, so limit this if you are low on memory.
#' @param prefix For `list_blobs`, an alternative way to specify the directory.
#' @param recursive For the multiupload/download functions, whether to recursively transfer files in subdirectories. For `list_blobs`, whether to include the contents of any subdirectories in the listing. For `delete_blob_dir`, whether to recursively delete subdirectory contents as well.
#' @param put_md5 For uploading, whether to compute the MD5 hash of the blob(s). This will be stored as part of the blob's properties. Only used for block blobs.
#' @param check_md5 For downloading, whether to verify the MD5 hash of the downloaded blob(s). This requires that the blob's `Content-MD5` property is set. If this is TRUE and the `Content-MD5` property is missing, a warning is generated.
#' @param snapshot,version For `download_blob`, optional snapshot and version identifiers. These should be datetime strings, in the format "yyyy-mm-ddTHH:MM:SS.SSSSSSSZ". If omitted, download the base blob.
#'
#' @details
#' `upload_blob` and `download_blob` are the workhorse file transfer functions for blobs. They each take as inputs a _single_ filename as the source for uploading/downloading, and a single filename as the destination. Alternatively, for uploading, `src` can be a [textConnection] or [rawConnection] object; and for downloading, `dest` can be NULL or a `rawConnection` object. If `dest` is NULL, the downloaded data is returned as a raw vector, and if a raw connection, it will be placed into the connection. See the examples below.
#'
#' `multiupload_blob` and `multidownload_blob` are functions for uploading and downloading _multiple_ files at once. They parallelise file transfers by using the background process pool provided by AzureRMR, which can lead to significant efficiency gains when transferring many small files. There are two ways to specify the source and destination for these functions:
#' - Both `src` and `dest` can be vectors naming the individual source and destination pathnames.
#' - The `src` argument can be a wildcard pattern expanding to one or more files, with `dest` naming a destination directory. In this case, if `recursive` is true, the file transfer will replicate the source directory structure at the destination.
#'
#' `upload_blob` and `download_blob` can display a progress bar to track the file transfer. You can control whether to display this with `options(azure_storage_progress_bar=TRUE|FALSE)`; the default is TRUE.
#'
#' `multiupload_blob` can upload files either as all block blobs or all append blobs, but not a mix of both.
#'
#' `blob_exists` and `blob_dir_exists` test for the existence of a blob and directory, respectively.
#'
#' `delete_blob` deletes a blob, and `delete_blob_dir` deletes all blobs in a directory (possibly recursively). This will also delete any snapshots for the blob(s) involved.
#'
#' ## AzCopy
#' `upload_blob` and `download_blob` have the ability to use the AzCopy commandline utility to transfer files, instead of native R code. This can be useful if you want to take advantage of AzCopy's logging and recovery features; it may also be faster in the case of transferring a very large number of small files. To enable this, set the `use_azcopy` argument to TRUE.
#'
#' The following points should be noted about AzCopy:
#' - It only supports SAS and AAD (OAuth) token as authentication methods. AzCopy also expects a single filename or wildcard spec as its source/destination argument, not a vector of filenames or a connection.
#' - Currently, it does _not_ support appending data to existing blobs.
#'
#' ## Directories
#' Blob storage does not have true directories, instead using filenames containing a separator character (typically '/') to mimic a directory structure. This has some consequences:
#'
#' - The `isdir` column in the data frame output of `list_blobs` is a best guess as to whether an object represents a file or directory, and may not always be correct. Currently, `list_blobs` assumes that any object with a file size of zero is a directory.
#' - Zero-length files can cause problems for the blob storage service as a whole (not just AzureStor). Try to avoid uploading such files.
#' - `create_blob_dir` and `delete_blob_dir` are guaranteed to function as expected only for accounts with hierarchical namespaces enabled. When this feature is disabled, directories do not exist as objects in their own right: to create a directory, simply upload a blob to that directory. To delete a directory, delete all the blobs within it; as far as the blob storage service is concerned, the directory then no longer exists.
#' - Similarly, the output of `list_blobs(recursive=TRUE)` can vary based on whether the storage account has hierarchical namespaces enabled.
#' - `blob_exists` will return FALSE for a directory when the storage account does not have hierarchical namespaces enabled.
#'
#' @return
#' For `list_blobs`, details on the blobs in the container. For `download_blob`, if `dest=NULL`, the contents of the downloaded blob as a raw vector. For `blob_exists` a flag whether the blob exists.
#'
#' @seealso
#' [blob_container], [az_storage], [storage_download], [call_azcopy], [list_blob_snapshots], [list_blob_versions]
#'
#' [AzCopy version 10 on GitHub](https://github.com/Azure/azure-storage-azcopy)
#' [Guide to the different blob types](https://docs.microsoft.com/en-us/rest/api/storageservices/understanding-block-blobs--append-blobs--and-page-blobs)
#'
#' @examples
#' \dontrun{
#'
#' cont <- blob_container("https://mystorage.blob.core.windows.net/mycontainer", key="access_key")
#'
#' list_blobs(cont)
#'
#' upload_blob(cont, "~/bigfile.zip", dest="bigfile.zip")
#' download_blob(cont, "bigfile.zip", dest="~/bigfile_downloaded.zip")
#'
#' delete_blob(cont, "bigfile.zip")
#'
#' # uploading/downloading multiple files at once
#' multiupload_blob(cont, "/data/logfiles/*.zip", "/uploaded_data")
#' multiupload_blob(cont, "myproj/*") # no dest directory uploads to root
#' multidownload_blob(cont, "jan*.*", "/data/january")
#'
#' # append blob: concatenating multiple files into one
#' upload_blob(cont, "logfile1", "logfile", type="AppendBlob", append=FALSE)
#' upload_blob(cont, "logfile2", "logfile", type="AppendBlob", append=TRUE)
#' upload_blob(cont, "logfile3", "logfile", type="AppendBlob", append=TRUE)
#'
#' # you can also pass a vector of file/pathnames as the source and destination
#' src <- c("file1.csv", "file2.csv", "file3.csv")
#' dest <- paste0("uploaded_", src)
#' multiupload_blob(cont, src, dest)
#'
#' # uploading serialized R objects via connections
#' json <- jsonlite::toJSON(iris, pretty=TRUE, auto_unbox=TRUE)
#' con <- textConnection(json)
#' upload_blob(cont, con, "iris.json")
#'
#' rds <- serialize(iris, NULL)
#' con <- rawConnection(rds)
#' upload_blob(cont, con, "iris.rds")
#'
#' # downloading files into memory: as a raw vector, and via a connection
#' rawvec <- download_blob(cont, "iris.json", NULL)
#' rawToChar(rawvec)
#'
#' con <- rawConnection(raw(0), "r+")
#' download_blob(cont, "iris.rds", con)
#' unserialize(con)
#'
#' # copy from a public URL: Iris data from UCI machine learning repository
#' copy_url_to_blob(cont,
#' "https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data",
#' "iris.csv")
#'
#' }
#' @rdname blob
#' @export
list_blobs <- function(container, dir="/", info=c("partial", "name", "all"),
                       prefix=NULL, recursive=TRUE)
{
    # List the blobs in a container, optionally restricted to one directory.
    # Returns a character vector of names (info="name") or a data frame of
    # blob properties (info="partial"/"all").
    info <- match.arg(info)
    # 'comp=list' + 'restype=container' selects the blob service's List Blobs op.
    opts <- list(comp="list", restype="container")
    # ensure last char is always '/', to get list of blobs in a subdir
    if(dir != "/")
    {
        if(!grepl("/$", dir))
            dir <- paste0(dir, "/")
        prefix <- dir
    }
    if(!is_empty(prefix))
        opts <- c(opts, prefix=as.character(prefix))
    # Without recursion, a '/' delimiter makes the service return subdirectory
    # names as BlobPrefix entries instead of descending into them.
    if(!recursive)
        opts <- c(opts, delimiter="/")
    res <- do_container_op(container, options=opts)
    lst <- res$Blobs
    # The listing is paged: keep requesting with the continuation marker until
    # the service stops returning a NextMarker.
    while(length(res$NextMarker) > 0)
    {
        opts$marker <- res$NextMarker[[1]]
        res <- do_container_op(container, options=opts)
        lst <- c(lst, res$Blobs)
    }
    if(info != "name")
    {
        # Build one-row data frames per entry, then row-bind them.
        # BlobPrefix entries (directories) have no size or blob type.
        prefixes <- lst[names(lst) == "BlobPrefix"]
        blobs <- lst[names(lst) == "Blob"]
        prefix_rows <- lapply(prefixes, function(prefix)
        {
            structure(list(Type="BlobPrefix", Name=unlist(prefix$Name), `Content-Length`=NA, BlobType=NA),
                class="data.frame", row.names=c(NA_integer_, -1L))
        })
        blob_rows <- lapply(blobs, function(blob)
        {
            structure(c(Type="Blob", Name=blob$Name, unlist(blob$Properties)),
                class="data.frame", row.names=c(NA_integer_, -1L))
        })
        # vec_rbind tolerates differing column sets across rows.
        df_prefixes <- do.call(vctrs::vec_rbind, prefix_rows)
        df_blobs <- do.call(vctrs::vec_rbind, blob_rows)
        no_prefixes <- nrow(df_prefixes) == 0
        no_blobs <- nrow(df_blobs) == 0
        if(no_prefixes && no_blobs)
            return(data.frame())
        else if(no_prefixes)
            df <- df_blobs
        else if(no_blobs)
            df <- df_prefixes
        else df <- vctrs::vec_rbind(df_prefixes, df_blobs)
        if(length(df) > 0)
        {
            # reorder and rename first 2 columns for consistency with ADLS, file
            ndf <- names(df)
            namecol <- which(ndf == "Name")
            sizecol <- which(ndf == "Content-Length")
            typecol <- which(names(df) == "BlobType")
            names(df)[c(namecol, sizecol, typecol)] <- c("name", "size", "blobtype")
            df$size <- if(!is.null(df$size)) as.numeric(df$size) else NA
            # Zero-length blobs are treated as directories: size NA, isdir TRUE
            # (a best guess, per the function docs above).
            df$size[df$size == 0] <- NA
            df$isdir <- is.na(df$size)
            dircol <- which(names(df) == "isdir")
            if(info == "all")
            {
                # Parse timestamp columns, then put name/size/isdir/blobtype first.
                if(!is.null(df$`Last-Modified`))
                    df$`Last-Modified` <- as_datetime(df$`Last-Modified`)
                if(!is.null(df$`Creation-Time`))
                    df$`Creation-Time` <- as_datetime(df$`Creation-Time`)
                vctrs::vec_cbind(df[c(namecol, sizecol, dircol, typecol)], df[-c(namecol, sizecol, dircol, typecol)])
            }
            else df[c(namecol, sizecol, dircol, typecol)]
        }
        else data.frame()
    }
    # info == "name": just the blob names, in listing order.
    else unname(vapply(lst, function(b) b$Name[[1]], FUN.VALUE=character(1)))
}
#' @rdname blob
#' @export
upload_blob <- function(container, src, dest=basename(src), type=c("BlockBlob", "AppendBlob"),
                        blocksize=if(type == "BlockBlob") 2^24 else 2^22,
                        lease=NULL, put_md5=FALSE, append=FALSE, use_azcopy=FALSE)
{
    # Upload a single file (or connection) to a blob, dispatching either to
    # the AzCopy commandline utility or to the native R implementation.
    # Note: 'append' is only forwarded to the native path.
    type <- match.arg(type)
    if(use_azcopy)
    {
        azcopy_upload(container, src, dest, type=type, blocksize=blocksize,
                      lease=lease, put_md5=put_md5)
    }
    else
    {
        upload_blob_internal(container, src, dest, type=type, blocksize=blocksize,
                             lease=lease, put_md5=put_md5, append=append)
    }
}
#' @rdname blob
#' @export
multiupload_blob <- function(container, src, dest, recursive=FALSE, type=c("BlockBlob", "AppendBlob"),
                             blocksize=if(type == "BlockBlob") 2^24 else 2^22,
                             lease=NULL, put_md5=FALSE, append=FALSE, use_azcopy=FALSE,
                             max_concurrent_transfers=10)
{
    # Upload multiple files in one call; 'src' is either a vector of filenames
    # or a wildcard pattern with 'dest' naming a destination directory.
    type <- match.arg(type)
    if(use_azcopy)
    {
        # AzCopy performs the multi-file transfer itself.
        return(azcopy_upload(container, src, dest, type=type, blocksize=blocksize,
                             lease=lease, put_md5=put_md5, recursive=recursive))
    }
    # Native path: parallel uploads via the background process pool.
    multiupload_internal(container, src, dest, recursive=recursive, type=type,
                         blocksize=blocksize, lease=lease, put_md5=put_md5,
                         append=append, max_concurrent_transfers=max_concurrent_transfers)
}
#' @rdname blob
#' @export
download_blob <- function(container, src, dest=basename(src), blocksize=2^24, overwrite=FALSE, lease=NULL,
                          check_md5=FALSE, use_azcopy=FALSE, snapshot=NULL, version=NULL)
{
    # Download a single blob, via AzCopy or the native R implementation.
    # 'snapshot' and 'version' are only forwarded to the native downloader.
    if(use_azcopy)
    {
        azcopy_download(container, src, dest, overwrite=overwrite, lease=lease,
                        check_md5=check_md5)
    }
    else
    {
        download_blob_internal(container, src, dest, blocksize=blocksize,
                               overwrite=overwrite, lease=lease, check_md5=check_md5,
                               snapshot=snapshot, version=version)
    }
}
#' @rdname blob
#' @export
multidownload_blob <- function(container, src, dest, recursive=FALSE, blocksize=2^24, overwrite=FALSE, lease=NULL,
                               check_md5=FALSE, use_azcopy=FALSE,
                               max_concurrent_transfers=10)
{
    # Download multiple blobs in one call; 'src' is either a vector of blob
    # names or a wildcard pattern with 'dest' naming a destination directory.
    if(use_azcopy)
    {
        # AzCopy performs the multi-file transfer itself.
        return(azcopy_download(container, src, dest, overwrite=overwrite, lease=lease,
                               recursive=recursive, check_md5=check_md5))
    }
    # Native path: parallel downloads via the background process pool.
    multidownload_internal(container, src, dest, recursive=recursive, blocksize=blocksize,
                           overwrite=overwrite, lease=lease, check_md5=check_md5,
                           max_concurrent_transfers=max_concurrent_transfers)
}
#' @rdname blob
#' @export
delete_blob <- function(container, blob, confirm=TRUE)
{
    # Delete a blob (and its snapshots), asking for confirmation first unless
    # 'confirm' is FALSE.
    blob_url <- paste0(container$endpoint$url, container$name, "/", blob)
    if(!delete_confirmed(confirm, blob_url, "blob"))
        return(invisible(NULL))
    # Deleting zero-length blobs (directories) fails when the
    # x-ms-delete-snapshots header is sent to an HNS-enabled account.
    # There is no way to detect HNS, and getting the blob size would cost an
    # extra API call, so try with the header first and retry without it.
    result <- try(
        do_container_op(container, blob, headers=list(`x-ms-delete-snapshots`="include"),
                        http_verb="DELETE"),
        silent=TRUE)
    if(inherits(result, "try-error"))
        result <- do_container_op(container, blob, headers=NULL, http_verb="DELETE")
    invisible(result)
}
#' @rdname blob
#' @export
create_blob_dir <- function(container, dir)
{
    # Blob storage has no true directories: materialise one by uploading a
    # zero-length blob with a throwaway name into it, then deleting that blob.
    placeholder <- file.path(dir, basename(tempfile()))
    # Suppress the progress bar for this internal transfer; restore on exit.
    old_opts <- options(azure_storage_progress_bar=FALSE)
    on.exit(options(old_opts))
    upload_blob(container, rawConnection(raw(0)), placeholder)
    delete_blob(container, placeholder, confirm=FALSE)
    invisible(NULL)
}
#' @rdname blob
#' @export
delete_blob_dir <- function(container, dir, recursive=FALSE, confirm=TRUE)
{
    # Non-recursively "deleting" the root (or ".") is a no-op.
    if(!recursive && dir %in% c("/", "."))
        return(invisible(NULL))
    dir_url <- paste0(container$endpoint$url, container$name, "/", dir)
    if(!delete_confirmed(confirm, dir_url, "directory"))
        return(invisible(NULL))
    if(recursive)
    {
        # Remove every blob under this prefix, deepest entries first so that
        # nested placeholders go before their parents.
        blob_names <- list_blobs(container, dir, recursive=TRUE, info="name")
        for(blob_name in rev(blob_names))
            delete_blob(container, blob_name, confirm=FALSE)
    }
    # On accounts where the directory exists as a blob in its own right,
    # remove that blob as well (never for the root).
    if(dir != "/" && blob_exists(container, dir))
        delete_blob(container, dir, confirm=FALSE)
}
#' @rdname blob
#' @export
blob_exists <- function(container, blob)
{
    # HEAD the blob and inspect the status code ourselves: the "pass" handler
    # stops do_container_op from raising on HTTP errors.
    response <- do_container_op(container, blob, headers = list(),
                                http_verb = "HEAD", http_status_handler = "pass")
    if(httr::status_code(response) == 404L)
        return(FALSE)
    # Any other error status (auth failure, server error, ...) is raised.
    httr::stop_for_status(response, storage_error_message(response))
    TRUE
}
#' @rdname blob
#' @export
blob_dir_exists <- function(container, dir)
{
    # The root directory always exists.
    if(dir == "/")
        return(TRUE)
    # Multiple steps are needed to cover HNS-enabled and disabled accounts:
    # 1. read the blob properties for 'dir'
    #    - success: a directory is a zero-length blob, so test the length
    #    - failure: either the dir is absent OR the account has HNS disabled
    # 2. fall back to listing at most one blob under the prefix
    #    - the directory exists iff that listing is non-empty
    #    (the API is called directly to avoid retrieving the entire list)
    dir_props <- try(get_storage_properties(container, dir), silent=TRUE)
    if(!inherits(dir_props, "try-error"))
        return(dir_props[["content-length"]] == 0)
    # normalise to a trailing '/' so the prefix matches blobs inside the dir
    if(!grepl("/$", dir))
        dir <- paste0(dir, "/")
    listing <- do_container_op(container,
        options=list(comp="list", restype="container", maxresults=1, delimiter="/", prefix=dir))
    !is_empty(listing$Blobs)
}
|
3df17f2679d71028648e293640b60d98af3b107f | 02c44aa82a8a6132ee5a6742256a1825bbbe38b0 | /BOXCOX-in-R/BOX_COX_Main.r | 79cd797eb5476f4bab20ca770345cfbc97e577cf | [] | no_license | coderwithpurpose/ML | a694ae14434a1d4afb650f246cc26c8066fbdcbc | 56a1cdaa1b16034f3a78f8c83fd8c638c6af7e98 | refs/heads/master | 2020-04-12T12:36:44.351026 | 2019-10-29T03:12:06 | 2019-10-29T03:12:06 | 162,497,199 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,952 | r | BOX_COX_Main.r | library(MASS)
# Boston housing analysis: fit OLS, iteratively remove outliers flagged by the
# diagnostic plots, then apply a Box-Cox transformation to the response.
# NOTE(review): rm(list=ls()) wipes the global environment -- run in a fresh session.
rm(list=ls())
# Read the Boston housing data (506 obs x 14 cols) from the UCI repository.
housing_data = scan('https://archive.ics.uci.edu/ml/machine-learning-databases/housing/housing.data')
housing_df = as.data.frame(matrix(housing_data, ncol=14, byrow=TRUE), stringsAsFactors=FALSE)
# Column 14 (median value) is the response; columns 1-13 are the predictors.
housing_prices = housing_df[, c(14)]
features = housing_df[, c(1:13)]
# Baseline regression and its diagnostic plots.
housing_reg = lm(housing_prices~as.matrix(features))
par(mfrow=c(2,2))
plot(housing_reg)
orig_fitted_values = fitted(housing_reg)
par(mfrow=c(1,1))
# stdres() is from MASS (loaded at the top of this file).
orig_std_res = stdres(housing_reg)
plot(orig_fitted_values, orig_std_res, main="Fitted Values vs Standardized Residuals", xlab="Fitted Values"
     ,ylab="Standardized Residuals")
# Round 1: drop the observations with the most extreme standardized residuals.
# NOTE(review): after each removal the row positions shift, so the index sets
# below refer to the *current* (already reduced) data -- verify each one.
outlier_list_extreme_stand_res = c(369, 372, 373)
features_v2 = features[-outlier_list_extreme_stand_res, ]
housing_prices_v2 = housing_prices[-outlier_list_extreme_stand_res]
housing_reg_v2 = lm(housing_prices_v2~as.matrix(features_v2))
par(mfrow=c(2,2))
plot(housing_reg_v2)
# Round 2 of outlier removal.
second_outlier_list = c(369, 368, 366)
features_v3 = features_v2[-second_outlier_list, ]
housing_prices_v3 = housing_prices_v2[-second_outlier_list]
housing_reg_v3 = lm(housing_prices_v3~as.matrix(features_v3))
par(mfrow=c(2,2))
plot(housing_reg_v3)
# Round 3 of outlier removal.
# third_outlier_list = c(407)
third_outlier_list = c(375)
features_v4 = features_v3[-third_outlier_list, ]
housing_prices_v4 = housing_prices_v3[-third_outlier_list]
housing_reg_v4 = lm(housing_prices_v4~as.matrix(features_v4))
par(mfrow=c(2,2))
plot(housing_reg_v4)
# Final round of outlier removal.
final_outlier_list = c(406)
features_v5 = features_v4[-final_outlier_list, ]
housing_prices_v5 = housing_prices_v4[-final_outlier_list]
housing_reg_v5 = lm(housing_prices_v5~as.matrix(features_v5))
par(mfrow=c(2,2))
plot(housing_reg_v5)
# Recover the original row indices of all removed observations by matching the
# cleaned feature rows back against the full data (row.match is from prodlim).
library(prodlim)
par(mfrow=c(1,1))
matching_rows = row.match(features, features_v5)
outlier_indices = which(is.na(matching_rows))
print(outlier_indices)
# run the box-cox transformation
bc <- boxcox(housing_prices_v5~as.matrix(features_v5))
# find the best parameter (lambda maximising the profile log-likelihood)
(lambda <- bc$x[which(bc$y==max(bc$y))])
# transforming the dependent variable: y_lambda = (y^lambda - 1) / lambda
new_dep_var = ((((housing_prices_v5)^lambda)-1)/lambda)
# now apply regression model again on the transformed response
afterboxcox <- lm(new_dep_var ~ (as.matrix(features_v5)))
par(mfrow=c(2,2))
plot(afterboxcox)
# plotting the fitted house price against the true price
par(mfrow=c(1,1))
# plot(new_dep_var, housing_prices_v4)
## TODO is to get the predicted value using the last model ..
stand_red_after_box_cox = stdres(afterboxcox)
fitted_box_cox_vals = predict(afterboxcox)
# Invert the Box-Cox transform to get fitted values on the original scale.
# reverted_fitted_box_cox_vals = 10^(log10(fitted_box_cox_vals*lambda + 1)/lambda)
reverted_fitted_box_cox_vals = (fitted_box_cox_vals*lambda + 1)^(1/lambda)
plot(reverted_fitted_box_cox_vals, housing_prices_v5, main="Fitted Values vs Actual housing prices",
     xlab="Fitted Values", ylab="Housing Prices")
plot(reverted_fitted_box_cox_vals, stand_red_after_box_cox, main="Fitted values vs Standardized residuals",
     xlab="Fitted Values", ylab="Standardized Residuals")
178bce292b2e4fa7292e244d529af673546b4c78 | ef3d97cb6751d2124eb4680ccbd1922d4f7ce6ca | /R/gogr.R | 63a8d68758f7cc4126b759614d6c289a6d2884c1 | [] | no_license | ajschumacher/gogr | dfc15170c1de917847a052485447d4b7ee6ea8ba | f4f3d477853cbb446e8c4b18dda82d47098550a3 | refs/heads/master | 2020-04-25T23:32:03.348179 | 2015-01-01T18:16:07 | 2015-01-01T18:16:07 | 28,667,918 | 3 | 0 | null | null | null | null | UTF-8 | R | false | false | 708 | r | gogr.R | #' Use gog visualization system from R
#'
#' Communicate with a \href{https://github.com/ajschumacher/gog}{gog}
#' server for data visualization independent of R.
#'
#' @docType package
#' @name gogr
#' @import jsonlite
#' @import httr
NULL
#' Send data to a gog server
#'
#' This function takes a data frame and sends it to a gog server.
#' The gog server is responsible for passing the data to a gog frontend
#' for visualization.
#'
#' @param x a data frame
#' @param url the gog /data endpoint to send to
#'
#' @export
#' @examples
#' \dontrun{
#' gog(iris)
#' }
gog <- function(x, url="http://localhost:4808/data") {
    # Encode the data frame as json and POST it to the gog server's /data
    # endpoint; the HTTP response object is returned invisibly.
    payload <- toJSON(x)
    invisible(POST(url, body=payload))
}
|
08df93d8fcbaa382ca69182ca2aa39c370ca33bf | dec08efdfbfa3ae869ae9e24c81ef0eedbbf5ff5 | /plot2.R | a7dfb85febf873af42f39ba57588af82880ad661 | [] | no_license | DJ-L/repo160720 | 69b2fc3ecb793d65b312b270ae5f7ee76ff29a39 | 26d737a81dd2456d01bd896ec87f3923acc47345 | refs/heads/master | 2021-01-16T21:36:39.727409 | 2016-07-20T09:26:40 | 2016-07-20T09:26:40 | 63,767,989 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,156 | r | plot2.R | setwd("C:\\Users\\Daniel\\Documents\\Coursera\\Exploring Data\\Week 1\\exdata-data-household_power_consumption")
#Loading data (household power consumption, one reading per minute)
full_data<-read.csv("household_power_consumption.txt",stringsAsFactors=FALSE,header = TRUE,sep = ";")
# Parse the Date column (dd/mm/yyyy).
full_data$Date = strptime(full_data$Date,"%d/%m/%Y")
# Keep only the readings from 1-2 February 2007.
feb_data<-full_data["2007-02-01"<=full_data$Date & full_data$Date<="2007-02-02",]
# Seconds elapsed since midnight 1 Feb 2007, combining the date offset with
# the HH:MM:SS components of the Time column; used as the x axis below.
feb_data$sec <- as.numeric(feb_data$Date)-as.numeric(strptime("01/02/2007","%d/%m/%Y"))+as.numeric(substr(feb_data$Time,1,2))*3600+as.numeric(substr(feb_data$Time,4,5))*60+as.numeric(substr(feb_data$Time,7,8))
#Check missing values (encoded as "?" in the raw file)
result <- feb_data$Global_active_power=="?"
table(result)#No missing during selected days
#Create the plot
par(mfrow=c(1,1))
# x positions (in seconds) of the Thu/Fri/Sat midnight tick marks.
Thu<-0
Fri<-as.numeric(strptime("02/02/2007","%d/%m/%Y"))-as.numeric(strptime("01/02/2007","%d/%m/%Y"))
Sat<-as.numeric(strptime("03/02/2007","%d/%m/%Y"))-as.numeric(strptime("01/02/2007","%d/%m/%Y"))
par(pch=0, col="black")
plot(feb_data$sec,as.numeric(feb_data$Global_active_power),xaxt = "n", type = "l",xlab="", ylab="Global active power (kilowatts)")
axis(1,c(Thu,Fri,Sat),c("Thu","Fri","Sat"))
# NOTE(review): dev.copy() re-renders the screen device; opening png() first
# and replotting would give more predictable file output -- confirm intent.
dev.copy(png,'plot2.png')
dev.off()
|
5d2c7f7ab210f8fdcce7e36129858d7e7a1141ee | bb0fc5db62c57f57fc78521ed5d61eac10d1d4f0 | /R/options.R | ea9b8c33de4e23cd7de20f05980d0d97bfceb2bc | [] | no_license | nemochina2008/jasptools | 50907e9064cae6a639a8043b3007cfb0b82bb569 | 635f3f135a43455370e5f00f908c3de7449606f8 | refs/heads/master | 2020-03-28T15:07:01.728286 | 2017-12-15T13:41:47 | 2017-12-15T13:41:47 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,663 | r | options.R | #' Obtain options to run JASP analyses with.
#'
#' \code{analysisOptions} provides an easy way to create analysis options. You
#' may use the json from the Qt terminal or from the json files found in
#' resources. The former you have to provide yourself, for the latter you only
#' have to specify the name of the analysis.
#'
#'
#' @param source String containing valid json, or the name of a JASP analysis.
#' If you provide json, be sure to use single quotes.
#' @param hint Boolean. Should additional hints be placed in the output so you
#' know how to give values to differents types of options? Only works if
#' \code{source} is set to the name of an analysis.
#' @return A list containing options you can supply to \code{jasptools::run}.
#' If \code{source} is an analysis name then all default options have been
#' filled in and booleans set to FALSE. The options that have no default are
#' left empty. If \code{hint} is set to TRUE then hints are set for these empty
#' options; they are placed between \%'s.
#' @examples
#'
#' options <- jasptools::analysisOptions("BinomialTest")
#' options[["variables"]] <- "contBinom"
#'
#' # Above and below are identical (below is taken from the Qt terminal)
#'
#' options <- jasptools::analysisOptions('{
#' "id" : 0,
#' "name" : "BinomialTest",
#' "options" : {
#' "VovkSellkeMPR" : false,
#' "confidenceInterval" : false,
#' "confidenceIntervalInterval" : 0.950,
#' "descriptivesPlots" : false,
#' "descriptivesPlotsConfidenceInterval" : 0.950,
#' "hypothesis" : "notEqualToTestValue",
#' "plotHeight" : 300,
#' "plotWidth" : 160,
#' "testValue" : 0.50,
#' "variables" : [ "contBinom" ]
#' },
#' "perform" : "run",
#' "revision" : 0,
#' "settings" : {
#' "ppi" : 192
#' }
#' }')
#'
#' @export analysisOptions
analysisOptions <- function(source, hint = FALSE) {
  # 'source' must be a single string: either raw json (from the Qt terminal)
  # or the name of a JASP analysis.
  if (! is.character(source) || length(source) > 1) {
    stop("Expecting a character input of length 1 as source,
         either a json string or analysis name.")
  }
  # Valid json means the input came from the Qt terminal; anything else is
  # treated as an analysis name whose json file ships with JASP.
  if (jsonlite::validate(source) == TRUE) {
    .analysisOptionsFromQt(source)
  } else {
    .fillOptions(.analysisOptionsFromFile(source), hint)
  }
}
.analysisOptionsFromFile <- function(analysis) {
  # Locate and parse the analysis json file shipped in the resources dir.
  json_path <- file.path(.getPkgOption("json.dir"), paste0(analysis, ".json"))
  parsed <- try(jsonlite::read_json(json_path), silent = TRUE)
  if (inherits(parsed, "try-error")) {
    stop("The JSON file for the analysis you supplied could not be found.
         Please ensure that (1) its name matches the main R function
         and (2) your working directory is set properly.")
  }
  # The option descriptions may live at the top level or nested under "input".
  if ("options" %in% names(parsed)) {
    parsed[["options"]]
  } else if ("options" %in% names(parsed[["input"]])) {
    parsed[["input"]][["options"]]
  } else {
    stop("The JSON file was found, but it appears to be invalid")
  }
}
.analysisOptionsFromQt <- function(x) {
  # Parse the json message copied from the Qt terminal.
  parsed <- try(jsonlite::fromJSON(x, simplifyVector=FALSE), silent = TRUE)
  if (inherits(parsed, "try-error")) {
    stop("Your json is invalid, please copy the entire message
         including the outer braces { } that was send to R in the Qt terminal.
         Remember to use single quotes around the message.")
  }
  if (!"options" %in% names(parsed)) {
    stop("The JSON file appears to be invalid")
  }
  parsed[["options"]]
}
.fillOptions <- function(options, hint = FALSE) {
  # Convert the raw option descriptions from a json file into a named list of
  # values suitable for jasptools::run. Options with a "default" keep it;
  # the rest get a type-based placeholder from .optionTypeToValue().
  output <- list()
  # seq_along(): the previous 1:length(options) evaluated to c(1, 0) and
  # errored when 'options' was empty.
  for (i in seq_along(options)) {
    option <- options[[i]]
    if ("default" %in% names(option)) {
      output[[option[["name"]]]] <- option[["default"]]
    } else {
      if (option[["type"]] == "Table" && hint) {
        # A Table option is a list of rows; fill one template row with hints.
        template <- option[["template"]]
        output[[option[["name"]]]] <- list(list())
        for (j in seq_along(template)) {
          name <- template[[j]][["name"]]
          value <- .optionTypeToValue(template[[j]], hint)
          output[[option[["name"]]]][[1]][[name]] <- value
        }
      } else {
        output[[option[["name"]]]] <- .optionTypeToValue(option, hint)
      }
    }
  }
  return(output)
}
.optionTypeToValue <- function(option, hint = FALSE) {
  # Map an option's "type" field to an empty default value, or -- when 'hint'
  # is TRUE -- to a placeholder (wrapped in %'s) showing what to fill in.
  # Unknown types yield NULL.
  pick <- function(hinted, plain) if (hint) hinted else plain
  switch(option[["type"]],
         Boolean = FALSE,
         Integer = pick("%420%", ""),
         IntegerArray = pick(c("%25%", "%95%"), list()),
         # A List option defaults to its first allowed choice.
         List = option[["options"]][[1]],
         Number = option[["value"]],
         Table = list(),
         String = pick("%SomeString%", ""),
         Term = pick("%variable1%", ""),
         Terms = pick(list(c("%variable1%"),
                           c("%variable2%"),
                           c("%variable1%", "%variable3%")),
                      list()),
         Variable = pick("%variable1%", ""),
         Variables = pick(c("%variable1%", "%variable2%"), list()),
         VariablesGroups = pick(list(c("%variable1%", "%variable2%"),
                                     c("%variable3%", "%variable4%")),
                                list()),
         NULL
  )
}
|
1164599556bbd2f011baed7c96b83485e55cf4bc | 30207ebba8454058484573b57372b5fdf8f81fe1 | /time_series/残差检验.R | 4a483d55da4b209e921f048fdc81d920f7b1939c | [] | no_license | hallo128/R | f9aec7535397f22906f142ad4b77e1011cb565d9 | cabb57f9d953433b87e6340869f06b54a326f82c | refs/heads/master | 2021-01-17T18:56:57.361377 | 2016-06-25T16:00:10 | 2016-06-25T16:00:10 | 60,279,186 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,878 | r | 残差检验.R | setwd("d:/")
### Read in the data (a single series; it may be preprocessed before or after)
data=read.csv("hj.csv",header=F)[,1]
# Convert to a time-series object (monthly, starting Jan 1980)
hj=ts(data,start = c(1980,1),frequency = 12)
#################### 1. Estimate the long-term trend Tt
library(TSA) # provides season()
# Seasonal-mean model (computes the long-term trend)
time=time(hj) # extract the time index; requires a ts object
model=lm(hj~time)
#plot(hj,type='o') # plot the trend
#abline(model,col='blue')
# Long-term trend
Tt=ts(fitted(model),start = c(1980,1),freq=12)
#################### 2. Estimate the seasonal component St
Month=season(hj) # extract the seasonal factor
#### Additive model
model1=lm(residuals(model)~Month-1) # -1 drops the intercept
St=ts(fitted(model1),start = c(1980,1),freq=12)
Tas=Tt+St
# Residuals
St_res1=model1$residuals
# Stationarity check (visual)
plot(St_res1,type='o')
# Normality checks
# 1. QQ plot
qqnorm(St_res1)
qqline(St_res1)
# 2. Shapiro-Wilk test (H0: normal)
shapiro.test(St_res1)
# 3. Jarque-Bera test (H0: normal)
# NOTE(review): jarque.bera.test() and runs.test() below come from the
# tseries package, which is not loaded explicitly here -- confirm it is
# attached (e.g. as a dependency of TSA).
jarque.bera.test(St_res1)
# 4. Histogram
hist(St_res1)
# 5. Boxplot
boxplot(St_res1)
######### Independence tests
### Runs test (the variable must be a factor) -- randomness check
# H0: independent
runs.test(factor(sign(St_res1)))
########### Autocorrelation tests
### 1. Sample autocorrelation function (H0: rho(k)=0)
acf(St_res1)
# Inside the band: no correlation at lag k. Outside: correlation at lag k.
### 2. Portmanteau tests (H0: no correlation)
Box.test(St_res1,lag=3,type='Ljung-Box')# first 3 lags of the residuals
Box.test(St_res1,lag=3,type='Box-Pierce')# first 3 lags of the residuals
#################### 2b. Seasonal component St, multiplicative version
# Multiplicative model
newhj=hj/fitted(model)
model2=lm(newhj~Month-1) # -1 drops the intercept
St1=ts(fitted(model2),start = c(1980,1),freq=12)
Tas1=Tt*St1
# Residuals
St_res2=model2$residuals
# Summary: observed series plus the fitted coefficients of all three models
plot(hj,type='o') # plot the observed series
s1=summary(model)
s1$coef[,1]
s2=summary(model1)
s2$coef[,1]
s3=summary(model2)
s3$coef[,1]
|
ee23891fdad7382a569b1264ae557d1b5e6a4457 | 1dc0ab4e2b05001a5c9b81efde2487f161f800b0 | /experiments/save fit objects.R | 3f568139826cb9b19ca15b06384da14c1c2bcf19 | [] | no_license | noeliarico/knnrr | efd09c779a53e72fc87dc8c0f222c0679b028964 | 9f6592d1bbc1626b2ea152fbd539acfe9f9a5ab3 | refs/heads/master | 2020-06-01T02:44:34.201881 | 2020-03-13T13:30:52 | 2020-03-13T13:30:52 | 190,601,477 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 405 | r | save fit objects.R | save_fit <- function(name, caret) {
  if(caret) {
    # caret workflow: two fit objects per model name ("_d" and "_r" variants),
    # saved together in a single .RData file under experiments/results/5cv/.
    named <- paste0("fit_", name, "_d")
    namer <- paste0("fit_", name, "_r")
    # NOTE(review): 'd' and 'r' are assigned but never used afterwards; the
    # get() calls only serve to error early if the objects are missing --
    # confirm whether this implicit existence check is intentional.
    d <- get(named)
    r <- get(namer)
    save(list = c(named, namer), file = paste0("experiments/results/5cv/", name, ".RData"))
  }
  else {
    # Non-caret: save the single object whose name is given directly.
    save(list = name, file = paste0("experiments/results/5cv/", name, ".RData"))
  }
}
# Save every model named in 'trained'; called for its side effects only.
lapply(trained, save_fit, caret = TRUE)
|
9145ecc7674a4b73ca17ebf70234edbe34492d9d | 240ca985f52df0995bf791465bbcd2dc9d83483c | /tests/testthat/test-operatorz.R | ba3788ab4d742af4d12dab89ad529e678697b7f4 | [
"MIT"
] | permissive | romaintailhurat/operatorz | 971751bc0441970fc86ea680fd84b5edb54dc160 | 5968edfe3dff5011cdbed06bd62117b0f63e8e22 | refs/heads/master | 2020-06-18T05:09:55.806178 | 2019-07-11T07:14:18 | 2019-07-11T07:14:18 | 196,174,778 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 874 | r | test-operatorz.R | # %++% operator
# %++% operator: concatenation, coercing both operands to character.
test_that("Concat operators is cool", {
  expect_equal(
    "yo" %++% "da",
    "yoda"
  )
  # NULL on one side leaves the other operand, as character.
  expect_identical(
    1:10 %++% NULL,
    as.character(1:10)
  )
  # Elementwise over two vectors of equal length.
  expect_length(
    letters %++% LETTERS,
    26
  )
})
# %||% op: null-coalescing -- returns the RHS when the LHS is NULL.
test_that("OR operator fits its purpose", {
  expect_equal(
    NULL %||% 42,
    42
  )
})
# %ni% op: negation of %in%.
test_that("Not in operator", {
  expect_equal(
    c(1, 42) %ni% 1:10,
    c(FALSE, TRUE)
  )
})
# %..% op: draw a sample of the given size from the pool.
test_that("Sample operator works the way it should", {
  pool <- 1:100
  # Normal behaviour
  expect_length(
    pool %..% 5,
    5
  )
  # Handling errors: the sample size must be numeric.
  expect_error(
    pool %..% "five"
  )
})
# %<>%: data-frame "diff" operator.
test_that("Diff operator is neat", {
  df_diff <- iris %<>% data.frame(x = iris$Petal.Length)
  # Petal.Length (duplicated by x on the RHS) is dropped from the result.
  expect_identical(
    names(df_diff),
    c("Sepal.Length", "Sepal.Width", "Petal.Width", "Species")
  )
})
|
a331d05b62af991ec20605e2292c302b8307c3fb | 99d6bff98f838f99fa863d120a37431511e17519 | /r1.R | a9b77d16df6909f4c25d768adf713b6e79c3c5f2 | [] | no_license | kcpackard/Testing-R-code | ff7fcf2b99a2ac24b32a1a4bbc821b6ffa7bf398 | 36fb81568f6e0f146c591de6cb874d414d7aa698 | refs/heads/master | 2020-07-01T17:14:19.533661 | 2016-11-20T11:43:12 | 2016-11-20T11:43:12 | 74,269,303 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 173 | r | r1.R | #This is a test script for using github
# Load and preview the built-in 'cars' dataset.
data(cars)
head(cars)
library(ggplot2)
# Load and preview ggplot2's 'mpg' dataset.
data(mpg)
head(mpg)
# Boxplot of highway mileage by vehicle class, coloured by drive train.
p <- ggplot(mpg, aes(class, hwy))
p + geom_boxplot(aes(colour = drv))
b292791e7866edeb3b3d915bc26a6124012c6b3a | 01ee8f4214bd47d4be2cdd1a8dd8d3f49e9e2f79 | /Power.R | 48406b8f223e7198c224c63774eda0749a0aeb29 | [] | no_license | vmez/EnergyConsumption | c46267e963c0ddbac100bb0ca98afd3ccc596899 | bb37ea9e154c9f0d077aa0db3ffe9dafca0a8f6b | refs/heads/master | 2021-04-29T13:05:33.104743 | 2018-09-24T12:48:43 | 2018-09-24T12:48:43 | 121,743,404 | 0 | 1 | null | null | null | null | IBM852 | R | false | false | 13,793 | r | Power.R | # Set working directory and load data ...............................
#setwd("C:/Users/Violeta/Dropbox/Ubiqum/3_Deep.Analytics")
#power <- data.table::fread("Energy/household_power_consumption.txt", sep = ";", na.strings = '?')
# Libraries .........................................................
#pacman::p_load(dplyr, tidyr)
# Create Working Data .....................................................
#power <- power %>%
# unite(DateTime, Date, Time , sep = " ", remove = F) %>%
# rename(Global = Global_active_power, Reactive = Global_reactive_power, Intensity = Global_intensity,
# Kitchen = Sub_metering_1, Laundry = Sub_metering_2, WHAC = Sub_metering_3) %>%
# mutate(Global = Global/60, Kitchen = Kitchen/1000, Laundry = Laundry/1000, WHAC = WHAC/1000) %>%
# mutate(TotalSub = Kitchen + Laundry + WHAC,
# Unregistered = Global - TotalSub,
# Efficiency = Global/(Voltage*Intensity))
#write.csv(power, "Energy/power_data.csv", row.names = F)
# ...........................................................................................
# ---------------------------------------------------------------------------
# Household power consumption analysis.
# Loads the pre-processed minute-level data (power_data.csv) and aggregates
# it to daily / weekly totals for plotting.
# NOTE(review): setwd() with an absolute path makes the script non-portable;
# consider relative paths.  Also na.rm = T should be spelled na.rm = TRUE.
# ---------------------------------------------------------------------------
pacman::p_load(dplyr, tidyr, lubridate, zoo, forecast, ggplot2, htmlwidgets, dygraphs, xts, seasonal)
setwd("C:/Users/Violeta/Dropbox/Ubiqum/3_Deep.Analytics/Energy")
power <- data.table::fread("power_data.csv")
# Set Time Format
# DateTime arrives as "dd/mm/yyyy HH:MM:SS" text; parse to POSIXct / Date.
power$DateTime <- parse_date_time(power$DateTime, orders = "dmy HMS")
power <- power %>% mutate(Date = dmy(Date))
# Define Total Daily demand (kW) .......................................................
# floor_date() truncates each timestamp to midnight, so the group_by below
# yields one summed row per calendar day for every measured channel.
power$day <- floor_date(power$Date, "day")
power_day <- power %>% group_by(D = day) %>% summarise(GP = sum(Global, na.rm = T),
                                                       React = sum(Reactive, na.rm = T),
                                                       Kitchen = sum(Kitchen, na.rm = T),
                                                       Laundry = sum(Laundry, na.rm = T),
                                                       WHAC = sum(WHAC, na.rm = T),
                                                       T.Sub = sum(TotalSub, na.rm = T),
                                                       Unregistered = sum(Unregistered, na.rm = T),
                                                       Efficiency = sum(Efficiency, na.rm = T))
# what method to choose to plot the line of best fit? testing geom_line vs geom_smooth and its methods
p.auto <- ggplot(power_day, aes(D, GP)) + geom_line() + geom_smooth(method = "auto") + xlab("") + theme_minimal()
p.loess <- ggplot(power_day, aes(D, GP)) + geom_line() + geom_smooth(method = "loess") + xlab("") + theme_minimal()
gridExtra::grid.arrange(p.auto, p.loess, top = "Fitting line of best fit: gam vs loess")
# the GAM method fits the data better.
# Plotting geom_smooth to fit a line of best fit (leave as 'auto' instead of typing 'gam')
# Overlay smoothed trends for the whole house and each sub-meter.
ggplot(power_day) +
  geom_smooth(aes(D, GP), method = "auto", color = "blue", alpha = 0.7) +
  geom_smooth(aes(D, Kitchen), method = "auto", color = "tomato", alpha = 0.7) +
  geom_smooth(aes(D, Laundry), method = "auto", color = "seagreen", alpha = 0.7) +
  geom_smooth(aes(D, WHAC), method = "auto", color = "black", alpha = 0.7) +
  ggtitle("Daily Total Energy Consumption", subtitle = "Consumption over 47 months") +
  ylab("Total Use kWh") + xlab("") + theme_minimal()
# Define Total Week demand(kW) ........................................................
# Same aggregation as above, but one row per calendar week.
power$weekly <- floor_date(power$Date, "week")
power_week <- power %>% group_by(W = weekly) %>% summarise(GP = sum(Global, na.rm = T),
                                                           React = sum(Reactive, na.rm = T),
                                                           Kitchen = sum(Kitchen, na.rm = T),
                                                           Laundry = sum(Laundry, na.rm = T),
                                                           WHAC = sum(WHAC, na.rm = T),
                                                           T.Sub = sum(TotalSub, na.rm = T),
                                                           Unregistered = sum(Unregistered, na.rm = T),
                                                           Efficiency = sum(Efficiency, na.rm = T))
ggplot(power_week, aes(W, GP)) + geom_line(color = "tomato", lwd = 1) +
  ggtitle("Weekly Total Energy Consumption", subtitle = "Consumption over 47 months") +
  ylab("Total Use kWh") + xlab("") + theme_minimal()
# Line of best fit to the data:
ggplot(power_week, aes(W, GP)) + geom_line() + geom_smooth(method = "auto") + xlab("") + theme_minimal()
# returns loess method
# Define Total Monthly demand (kW) .....................................................
# One summed row per calendar month.
power$month <- floor_date(power$Date, "month")
power_month <- power %>% group_by(M = month) %>% summarise(GP = sum(Global, na.rm = T),
                                                           React = sum(Reactive, na.rm = T),
                                                           Kitchen = sum(Kitchen, na.rm = T),
                                                           Laundry = sum(Laundry, na.rm = T),
                                                           WHAC = sum(WHAC, na.rm = T),
                                                           T.Sub = sum(TotalSub, na.rm = T),
                                                           Unregistered = sum(Unregistered, na.rm = T),
                                                           Efficiency = sum(Efficiency, na.rm = T))
ggplot(power_month) +
  geom_line(aes(M, GP), color = "blue", lwd = 1) +
  geom_line(aes(M, T.Sub), color = "deeppink3", lwd = 1) +
  ggtitle("Daily Total Energy Consumption", subtitle = "Consumption over 47 months") +
  ylab("Total Use kWh") + xlab("") + theme_minimal()
#Line of best fit:
ggplot(power_month, aes(M,GP)) + geom_line() + geom_smooth(method = "auto") + theme_minimal() + xlab("")
# returns loess method
# Subset, average Energy Cosumption
# Monthly MEANS (per-minute averages) rather than totals.
month_av <- power %>% group_by(M = month) %>% summarise(GP = mean(Global, na.rm = T),
                                                        React = mean(Reactive, na.rm = T),
                                                        Kitchen = mean(Kitchen, na.rm = T),
                                                        Laundry = mean(Laundry, na.rm = T),
                                                        WHAC = mean(WHAC, na.rm = T),
                                                        T.Sub = mean(TotalSub, na.rm = T),
                                                        Unregistered = mean(Unregistered, na.rm = T),
                                                        Efficiency = mean(Efficiency, na.rm = T))
ggplot(month_av, aes(M, T.Sub)) + geom_area(fill = "blue", lwd = 1) +
  ggtitle("Monthly Average Energy Consumption", subtitle = "Consumption over 47 months") +
  ylab("Average Use Wh") + xlab("") + theme_minimal()
#Line of Best Fit:
ggplot(month_av, aes(M, GP)) + geom_line() + geom_smooth() + xlab("") + theme_minimal()
# returns loess method
# Define Total Yearly demand (kW) .......................................................
power$year <- floor_date(power$Date, "year")
power_year <- power %>% group_by(Y = year) %>% summarise(GP = sum(Global, na.rm = T),
                                                         React = sum(Reactive, na.rm = T),
                                                         Kitchen = sum(Kitchen, na.rm = T),
                                                         Laundry = sum(Laundry, na.rm = T),
                                                         WHAC = sum(WHAC, na.rm = T),
                                                         T.Sub = sum(TotalSub, na.rm = T),
                                                         Unregistered = sum(Unregistered, na.rm = T),
                                                         Efficiency = sum(Efficiency, na.rm = T))
power_year <- power_year[-1,] #remove 2006 since not even a complete month
ggplot(power_year, aes(Y, GP)) + geom_line(color = "gold", lwd = 1) +
  ggtitle("Yearly Total Energy Consumption", subtitle = "Consumption over 47 months") +
  ylab("Total Use kWh") + xlab("") + theme_minimal() + ylim(c(7500, 10000))
# Line of Best Fit:
ggplot(power_year, aes(Y, GP)) + geom_line() + geom_smooth() + xlab("") + theme_minimal()
# returns loess method
#(gam method was detected by auto for only daily energy consumption. Loess has been detected for weekly, monthly, yearly patterns)
# Adding perspective to Consumption ......................................................
# Daily, weekly and monthly totals on one canvas for scale comparison.
ggplot() +
  geom_line(data = power_day, aes(D, GP), color = "orange") +
  geom_line(data = power_week, aes(W, GP), color = "tomato") +
  geom_line(data = power_month, aes(M, GP), color = "blue") +
  ggtitle("Total Energy Consumption", subtitle = "Daily, Weekly, Monthly") +
  ylab("Total Use kWh") + xlab("") + theme_minimal()
# create time series object for the dygraph ................................................
# NOTE(review): "Montly"/"Consimption" below look like typos in the labels;
# they only affect display, not the computation.
dayly <- xts(power_day$GP, power_day$D)
weekly <- xts(power_week$GP, power_week$W)
monthly <- xts(power_month$GP, power_month$M)
data_ts <- cbind(Daily = dayly, Weekly = weekly, Montly = monthly)
(consumption <- dygraph(data_ts, main = "Total Energy Consimption", ylab = "Total kWh") %>%
    dyRangeSelector() %>% dyRoller(rollPeriod = 48)) # roll period set by monthly obs
# Pie Chart for room comaprison:
# Time Series ............................................................................
# NOTE(review): frequency = 356 for the daily series looks like a typo for
# 365 (days per year) -- confirm before trusting seasonal results.
day_ts <- ts(power_day$GP, frequency = 356, start = c(2007,1), end = c(2010,356))
week_ts <- ts(power_week$GP, frequency = 53, start = c(2007, 1), end = c(2010, 50))
month_ts <- ts(power_month$GP, frequency = 12, start = c(2007,1), end = c(2010, 11))
year_ts <- ts(power_year$GP, frequency = 1, start = c(2007), end = c(2010))
# Bars and moving averages ...............................................................
autoplot(month_ts) + theme_minimal()
ggseasonplot(month_ts, year.labels = T) + theme_minimal() # plotting seasons
ggsubseriesplot(month_ts) + theme_minimal() # seasonal changes over time
gglagplot(month_ts, do.lines = F) # Better correlation with lag12
ggAcf(month_ts) + theme_minimal() # r12 and r1 are best fit
ggtsdisplay(month_ts, plot.type = "histogram")
# in the monthly gathered data, r1 and r12 are highest peaks. And lowest at r7
# Defining Moving Average for Monthly Consumption
autoplot(month_ts, series = "GP") +
  autolayer(ma(month_ts),series = "6-MA") + theme_minimal()
# Decompose based on additive:
month_ts %>% decompose(type = "additive") %>% autoplot() + theme_minimal()
# Decompose with X11:
fit <- month_ts %>% seas(x11 = "")
autoplot(fit) + theme_minimal() + ggtitle("Monthy Time Series")
# Seasonally adjusted for improved predictions:
autoplot(month_ts, series = "GP") +
  autolayer(trendcycle(fit), series = "Trend") +
  autolayer(seasadj(fit), series = "Seasonally Adjusted") + theme_minimal() +
  ggtitle("Seasonaly Adjusted Month Time Series")
# Decompose Seasonal Extraction in ARIMA Time Series:
month_ts %>% decompose() %>% autoplot() + theme_minimal() +
  ggtitle("Seasonal Exraction in ARIMA Decomposition, Time Series")
# STL Decomposition "Seasonal and Trend decomposition using Loess":
# The default fits a parabola to the points, odd number for t.window:
fit_stl <- month_ts %>% stl(t.window = 11, s.window = "periodic", robust = T)
autoplot(fit_stl) + theme_minimal() + ggtitle("STL Decomposition, Month Time Series")
# Naive forecast (random walk) on the seasonally adjusted series:
fit_stl %>% seasadj() %>% naive() %>% autoplot() + theme_minimal() +
  ggtitle("Seasonally Adjusted Na´ve Forecast", subtitle = "High Granurality for Monthly Time Series")
# Naive forecast, not adjusted:
fit_stl %>% forecast(method = "naive") %>% autoplot() + theme_minimal() +
  ggtitle("Na´ve Forecast, Not-Seasonally Adjusted", subtitle = "Monthly Time Series")
fit_day <- day_ts %>% stl(t.window = 360, s.window = "periodic", robust = T)
fit_day %>% forecast(method = "naive") %>% autoplot() + theme_minimal() +
  ggtitle("Forecasting Daily Consumption", subtitle = "STL + Random Walk")
# Detecting Anomaly ........................................................................
#install.packages("anomalize")
require(anomalize)
# anomalize() cannot handle missing observations, so drop them first.
month_noNA <- na.omit(power_month) # have to omit NA to use Anomalize:
# We use STL which uses seasonal decomposition. The alternative methos is "twitter" which uses
# trend to remove the trend. (Results slightly differ).
# Method "gesd" detects outliers better than the alternative "iqr".
# Unregistered Areas:9 anomalies detected
month_noNA %>%
  time_decompose(Unregistered, method = "stl") %>%
  anomalize(remainder, method = "gesd") %>%
  time_recompose() %>%
  plot_anomaly_decomposition(alpha_dots = 1, size_circles = 6, color_yes = "red") +
  ggtitle("Energy Consumption in Unregistered Areas",
          subtitle = "9 Anomalies in the Remainder calculated with GESD across 47 months")
# T.Submeters: 5 anomalies detected
month_noNA %>%
  time_decompose(T.Sub, method = "stl") %>%
  anomalize(remainder, method = "gesd") %>%
  time_recompose() %>%
  plot_anomaly_decomposition(alpha_dots = 0.5, size_circles = 6, color_yes = "deeppink") +
  ggtitle("Energy Consumption for all Three Submeters",
          subtitle = "5 Anomalies in the Remainder calculated with GESD across 47 months")
# Entire House: 2 anomalies detected
month_noNA %>%
  time_decompose(GP, method = "stl") %>%
  anomalize(remainder, method = "gesd") %>%
  time_recompose() %>%
  plot_anomaly_decomposition(alpha_dots = 0.5, size_circles = 6, color_yes = "black") +
  ggtitle("Energy Consumption registered for the Entire Household",
          subtitle = "2 Anomalies in the Remainder calculated with GESD across 47 months")
|
26fbec88463dfe6a28dd19ba05df96b97183bfe5 | 97a38f09dce88a8460737ab6cf4e2b2335624125 | /man/multilevel_kernel.Rd | f202f0e14bc336dba983131d084cab355acf323f | [] | no_license | jeremyhengjm/UnbiasedScore | a7959eb3ddb892a522b413a3eae47d4573309bd3 | b036ac899721ea240a2a0a67440947d1c0698ac9 | refs/heads/master | 2023-04-20T09:54:44.835291 | 2021-05-10T13:34:29 | 2021-05-10T13:34:29 | 257,778,224 | 1 | 1 | null | null | null | null | UTF-8 | R | false | true | 2,102 | rd | multilevel_kernel.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/multilevel_kernel.R
\name{multilevel_kernel}
\alias{multilevel_kernel}
\title{Runs a multilevel Markov kernel}
\usage{
multilevel_kernel(
model,
theta,
discretization,
observations,
nparticles,
resampling_threshold,
coupled_resampling,
ref_trajectory_coarse = NULL,
ref_trajectory_fine = NULL,
algorithm = "CPF",
treestorage = FALSE
)
}
\arguments{
\item{model}{a list representing a hidden Markov model, e.g. \code{\link{hmm_ornstein_uhlenbeck}}}
\item{theta}{a vector of parameters as input to model functions}
\item{discretization}{lists containing stepsize, nsteps, statelength, obstimes for fine and coarse levels,
and coarsetimes of length statelength_fine indexing time steps of coarse level}
\item{observations}{a matrix of observations, of size nobservations x ydimension}
\item{nparticles}{number of particles}
\item{resampling_threshold}{ESS proportion below which resampling is triggered (always resample at observation times by default)}
\item{coupled_resampling}{a 2-way coupled resampling scheme, such as \code{\link{coupled2_maximal_independent_residuals}}}
\item{ref_trajectory_coarse}{a matrix of reference trajectory for coarser discretization level, of size xdimension x statelength_coarse}
\item{ref_trajectory_fine}{a matrix of reference trajectory for finer discretization level, of size xdimension x statelength_fine}
\item{algorithm}{character specifying type of algorithm desired, i.e.
\code{\link{CPF}} for conditional particle filter,
\code{\link{CASPF}} for conditional ancestor sampling particle filter,
\code{\link{CBSPF}} for conditional backward sampling particle filter}
\item{treestorage}{logical specifying tree storage of Jacob, Murray and Rubenthaler (2013);
if missing, this function store all states and ancestors}
}
\value{
two new trajectories stored as matrices of size xdimension x statelength_coarse/fine.
}
\description{
Runs two coupled kernels that leaves the corresponding smoothing distribution (at each discretization level) invariant.
}
|
fda1eab6f0f6001361d3f06658f33ea2796192b5 | 341cf636752b287fad6fe8d7c8e642d9681129d4 | /Session 2/Models.Visualisation.R | dcaf38c2ca45ae4a05b3f709716416af4ec98d61 | [] | no_license | CaleaD/Pandemic-Simulation | a9def61ac5b75ea18ec4dd2fbf765b3288e17ee9 | ff52f90fa66c9803ef967f4c64d18ae4cdd267fc | refs/heads/main | 2023-05-07T17:39:41.292552 | 2021-06-01T21:27:48 | 2021-06-01T21:27:48 | 360,194,190 | 0 | 2 | null | null | null | null | UTF-8 | R | false | false | 1,476 | r | Models.Visualisation.R |

###################
### Visualisation ###
###################
### A.) Interactive Interfaces
# TODO: upcoming sessions;
# A.1.) Shiny app:
# - links & gallery:
#   https://shiny.rstudio.com/
#   https://shiny.rstudio.com/gallery/
# A.2.) Dashboards:
# - to evaluate: shinydashboard, flexdashboard;
library("shiny")
library("shinyjs")
library("shinyBS") # Buttons & Components
library("shinydashboard") # various dashboards
library("flexdashboard")
###########
### Colours
### Function colors():
# - displays the names of all available colors;
# - find.col(): helper function (see below);
### Function heat.colors():
####################
### Helper Functions
find.col = function(name="red", start=1, max=30, bottom.mrg=8, ...) {
  # Display (as a barplot) up to `max` built-in colours whose name matches the
  # regular expression `name`, starting from match number `start`.
  # `bottom.mrg` widens the bottom margin so the vertical labels fit;
  # additional arguments in `...` are passed to barplot().
  # Returns (invisibly) the character vector of colour names drawn.
  is.col = grepl(name, colors());
  matched = colors()[is.col];
  # Guard: the previous version computed seq(start, n.max) unconditionally,
  # which counts backwards (seq(1, 0) == c(1, 0)) when there are no matches
  # or start exceeds the match count, producing NA colour names.
  if (length(matched) == 0 || start > length(matched)) {
    warning("no matching colours for '", name, "' at start = ", start);
    return(invisible(character(0)));
  }
  n.max = min(length(matched), start + max - 1);
  id = seq(start, n.max);
  name.col = matched[id]
  x = rep(1, length(id)); names(x) = name.col;
  # set bottom margin, then restore the previous par() settings
  old.par = par(mar=c(bottom.mrg,1,2,1) + 0.1)
  barplot(x, col=name.col, las=3, ...)
  par(old.par)
  invisible(name.col)
}
plot.col = function(col, bottom.mrg=8, ...) {
  # Draw one unit-height bar per colour in `col`, labelled by names(col).
  # `bottom.mrg` widens the bottom margin so the vertical labels fit;
  # extra arguments are forwarded to barplot().
  bar.heights = setNames(rep(1, length(col)), names(col))
  # widen the bottom margin for the rotated labels
  prev.par = par(mar=c(bottom.mrg,1,2,1) + 0.1)
  barplot(bar.heights, col=col, las=3, ...)
  par(prev.par)
  invisible()
}
######################
### Examples
find.col()                  # default: colours whose name contains "red"
find.col("green")
find.col("pale")
plot.col(heat.colors(30))   # a 30-step heat palette as a bar strip
|
ac25b05cf1909a072740aa60bfe60906ca2b1afa | 2d84b56b3d8037a566bc3c152e73218e7dd3dcda | /tests/testthat/testPhamFix.R | 2b0853b58f34266bab6f5e597e91ac6dbb89c5f1 | [] | no_license | drodriguezperez/kselection | 31ab185fe9a6fa25c07dd2aba86b6b02b9af70b6 | 4f9b0e24986e0f9d1456f3754fe4b4e41f98957f | refs/heads/master | 2022-05-31T15:22:38.687490 | 2022-05-17T16:15:12 | 2022-05-17T16:15:12 | 23,993,309 | 6 | 2 | null | 2015-01-08T15:04:59 | 2014-09-13T12:02:56 | R | UTF-8 | R | false | false | 1,232 | r | testPhamFix.R | ##
## Test previous errors on kselection
##
## Created by Daniel Rodriguez Perez on 10/10/2014.
##
## Copyright (c) 2014 Daniel Rodriguez Perez.
##
## This program is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program. If not, see <http://www.gnu.org/licenses/>
##
context("Tests previous errors in kselection")
test_that("evaluate data.frames with low rows", {
x <- matrix(c(rnorm(5, 2, .1), rnorm(5, 3, .1),
rnorm(5, -2, .1), rnorm(5, -3, .1)), 10, 2)
obj <- kselection(x, max_centers = 9)
expect_that(class(obj), equals("Kselection"))
expect_warning(kselection(x),
"The maximum number of clusters has been reduced from 15 to 9")
})
|
fe88bef921069a977153d5ef6e5cd12c589ad5e2 | 9cc49351c9649dd0b4a84eef9c3d5a98ab983b2d | /analyses/recovery_rate/recovery_rate.r | c2ff65f09d1e217de113181bc1bdf002c4050c03 | [] | no_license | SimonGreenhill/northwind | 8a926436d58711cd0b87c0d1ef48467d4e934d60 | fa9e2e52bf3c2585a89a6c954eda183d6eb02812 | refs/heads/master | 2023-01-14T18:48:45.899610 | 2020-10-05T09:34:31 | 2020-10-05T09:34:31 | 197,758,170 | 0 | 0 | null | 2023-01-09T01:04:48 | 2019-07-19T11:04:09 | Python | UTF-8 | R | false | false | 2,574 | r | recovery_rate.r | library(ggplot2)
require(gridExtra)
records <- read.delim('coverage.dat', header=TRUE)
#
# Recovery all lines
#
p1 <- ggplot(records,
aes(x=PPercent, y=OPercent, color=TotalInventory)
)
p1 <- p1 + geom_line(aes(group=Language))
p1 <- p1 + scale_color_gradient("Inventory Size", trans="log", high="orange", low="blue")
p1 <- p1 + xlab('Transcript Percentage') + ylab("Percentage of Observed Phonemes")
p1 <- p1 + theme_classic()
p1 <- p1 + guides(color="none")
pdf("recovery_rate.pdf")
print(p1)
x <- dev.off()
#
# Recovery - smoothed
#
p <- ggplot(records, aes(x=PPercent, y=OPercent))
p <- p + geom_smooth()
p <- p + xlab('Transcript Percentage') + ylab("Percentage of Observed Phonemes")
p <- p + xlim(0, 100) + ylim(0, 100)
p <- p + theme_classic()
pdf("recovery_rate_combined.pdf")
print(p)
x <- dev.off()
#
# Recovery -- blocked into 10s
#
records$Block <- as.factor(round(records$TotalInventory, -1))
p <- ggplot(records,
aes(x=PPercent, y=OPercent, group=Block,
fill=Block,
color=Block
))
p <- p + geom_smooth()
p <- p + scale_color_brewer(palette="Set1")
p <- p + scale_fill_brewer(palette="Set1")
p <- p + xlab('Transcript Percentage') + ylab("Percentage of Observed Phonemes")
p <- p + xlim(0, 100) + ylim(0, 100)
p <- p + theme_classic()
pdf("recovery_rate_blocked.pdf")
print(p)
x <- dev.off()
p2 <- ggplot(records,
aes(x=TranscriptLength, y=OPercent, color=TotalInventory)
)
p2 <- p2 + geom_line(aes(group=Language))
p2 <- p2 + scale_color_gradient("Inventory Size", trans="log", high="orange", low="blue")
p2 <- p2 + xlab('Transcript Length (Phonemes)')
p2 <- p2 + ylab("Percentage of Observed Phonemes")
p2 <- p2 + theme_classic()
p2 <- p2 + guides(color="none")
pdf("recovery_rate_vs_transcript_length.pdf")
print(p2)
x <- dev.off()
p1 <- p1 + ggtitle('a. Recovery Rate (Transcript Percentage)') + theme(plot.title=element_text(hjust=0))
p2 <- p2 + ggtitle('b. Recovery Rate (Transcript Length)') + theme(plot.title=element_text(hjust=0))
p1 <- p1 + geom_smooth(colour="#333333", method="loess")
p2 <- p2 + geom_smooth(colour="#333333", method="loess")
# plot 3
p3 <- p2 + scale_x_log10()
# the above will generate ggplot2 warnings from the counts at point zero:
# Transformation introduced infinite values in continuous x-axis
p3 <- p3 + ggtitle('c. Recovery Rate (Log Transformed Transcript Length)') + theme(plot.title=element_text(hjust=0))
# force the same scale
p1 <- p1 + ylim(0, 100)
p2 <- p2 + ylim(0, 100)
p3 <- p3 + ylim(0, 100)
ggsave("combined.pdf", grid.arrange(p1, p2, p3, ncol=1))
|
dab8e0fe76082291beeb5cf70dd74e78bfc736f3 | 858f4682ab0ac8c20617045edddcc774268742fb | /newyorktimes/man/rss_rank.Rd | 3539e85702a5ffe965c06845b3ed9a6331666b21 | [
"MIT"
] | permissive | minikittyf/final_project | eeb1fd337c79811a98bef7dfa24d949957ecb374 | 27a386532696fcb9d10d45b4bc8801f4a0e317be | refs/heads/master | 2020-11-24T01:14:48.447084 | 2019-12-13T18:45:50 | 2019-12-13T18:45:50 | 227,899,145 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,138 | rd | rss_rank.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/newyorktimes.R
\name{rss_rank}
\alias{rss_rank}
\title{Section Rank.}
\usage{
rss_rank(search_section = "Arts")
}
\arguments{
\item{search_section}{A Character.(the character should choose from: Africa, Americas, ArtandDesign, Arts, AsiaPacific, Automobile, Baseball, Books, Business, Climate, CollegeBasketball, CollegeFootball, Dance, Dealbook, DiningandWine, Economy, Education, EnergyEnvironment, Europe, FashionandStyle, Golf, Health, Hockey, HomePage, Jobs, Lens, MediaandAdvertising, MiddleEast, MostEmailed, MostShared, MostViewed, Movies, Music, NYRegion, Obituaries, PersonalTech, Politics, ProBasketball, ProFootball, RealEstate, Science, SmallBusiness, Soccer, Space, Sports, SundayBookReview, Sunday-Review, Technology, Television, Tennis, Theater, TMagazine, Travel, Upshot, US, Weddings, Well, YourMoney).}
}
\value{
A dataframe including each article's title, link, description, published date, and rank based on \code{search_section}.
}
\description{
Get recent ranked articles' information in specific section.
}
\examples{
rss_rank('Arts')
}
|
3683c89ac0a6316a6d4c8ec50ee1e1439d82512c | 88abace67b291e782dd807cff053c50793f2ad0f | /plot4.R | 80f5134bf26917a1387cbb95b94a880469e5cb93 | [] | no_license | Hightechnician/ExData_Plotting1 | 04746ffeb16cd2acd4983b3dfc464d6e3fdd51d2 | 45b7a0d4e8e1eec3b83832c86cbfc41a8078eb70 | refs/heads/master | 2021-01-23T16:28:22.661655 | 2014-08-08T12:53:42 | 2014-08-08T12:53:42 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 984 | r | plot4.R | library(sqldf)
filename<-"household_power_consumption.txt"
# Uses sql query to choose target dates
DF <- read.csv.sql(filename, sep=";", sql = 'select * from file where Date = "1/2/2007" OR Date = "2/2/2007"')
# Creates dest file
png(filename="plot4.png",width=480,height=480,units="px")
# Sets 2*2 form
par(mfrow=c(2,2))
# Draws top-left
plot(time,DF$Global_active_power,type="l",ylab="Global Acvtive Power", xlab = "")
# Draws top-right
plot(time,DF$Voltage,type="l",ylab="Voltage", xlab = "datetime")
# Draws bottom-left
plot(time, DF$Sub_metering_1, type="l", col="black", xlab="", ylab="Energy sub metering")
lines(time, DF$Sub_metering_2, col="red")
lines(time, DF$Sub_metering_3, col="blue")
legend("topright", col=c("black", "red", "blue"),lty=1,legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),bty="n",cex = 0.8)
# Draws bottom-right
plot(time,DF$Global_reactive_power,type="l",ylab="Global_reAcvtive_power", xlab = "datetime")
# Closes the dev
dev.off() |
e282ad41f949e491ef44c47683f9865aa4b41473 | d12ad467a8e4afa8baf96b88b7c4dba819ed7e34 | /descriptive/tables.R | b60c623486d35bf1983e2c16bebeb10a8c1fbedb | [] | no_license | dsself/populism | bd178b99e41376c55697128f45ec505d3e3d5347 | 5585e17ac93553ae519ac6e925e6f9890de02758 | refs/heads/master | 2021-01-10T12:18:43.617847 | 2018-09-20T14:22:19 | 2018-09-20T14:22:19 | 48,120,097 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,387 | r | tables.R | #fig 3
#library(hadleyverse)
#library(stargazer)
#setwd("C:/Users/Darin/Documents/populism/descriptive")
#load("C:/Users/Darin/Documents/populism/descriptive/Joined.Rdta")
#dc <- as.data.frame(dc)
stargazer(dc, title = "Sample Summary Statistics", covariate.labels = c("Vote Share" ,"Populism", "PSI", "Party Strength"), label = "descriptive")
d1 <- dc %>%
select(region, elec_result_major, score, PSI, PI_7) %>%
group_by(region) %>%
summarize_each(funs(median)) %>%
reshape2::melt() %>%
reshape2::dcast(variable ~ region, value.var = "value") %>%
mutate(Variable = c("Vote Share", "Populism", "PSI", "Party Strength")) %>%
select(Variable, Americas, Europe)
stargazer(d1, summary = F, title = "Breakdown of Populism and Party System Attributes by Region", rownames = F)
t1 <- t.test(dc$score, dc$PSI)
t1t <- tidy(t1) %>%
mutate(Variable = "PSI") %>%
select(Variable, estimate, tstat = statistic, p.value)
t2 <- t.test(dc$score, dc$PI_7)
t2t <- tidy(t2) %>%
mutate(Variable = "Party Strength") %>%
select(Variable, estimate, tstat = statistic, p.value)
ts <- rbind(t1t, t2t) %>%
mutate(Estimate = round(estimate, digits = 2), TStat = round(tstat, digits = 2), PValue = round(p.value, digits = 4)) %>%
select(Variable, Estimate, TStat, PValue)
stargazer(ts, summary = F, rownames = F, title = "Difference of Means - Populism Score") |
7276cb48d202edbf8c8d468923a767178fcd02d3 | 04acfdc213a437da3ac4d8706d06236328469a60 | /man/kernThomas.Rd | 8c012e521fd3341effb663d02a35adc42df35bc8 | [] | no_license | antiphon/sseg | beaa4619c5a4aaf5c5db732230f9b1f470b34153 | c6364cdb9c952fc7565417c02c65f010c944d65b | refs/heads/master | 2021-01-10T13:49:49.246506 | 2018-03-18T11:45:42 | 2018-03-18T11:45:42 | 54,035,415 | 0 | 1 | null | null | null | null | UTF-8 | R | false | true | 257 | rd | kernThomas.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rPSNC.R
\name{kernThomas}
\alias{kernThomas}
\title{Gaussian kernel with bandwidth omega}
\usage{
kernThomas(r, omega, ...)
}
\description{
Gaussian kernel with bandwidth omega
}
|
aee0d65ba6be3ea59ae525450afdc4b0bc6dd11c | aa966390669a2df21111868439c2eaca748aeaab | /man/find_best_selection_SA.Rd | bc5e68a3d8da6c0ed71b505fed12385fe9267ed4 | [] | no_license | cran/sms | a255711c30541169f872aa129b36915fd740a12c | ac4b476f35ac02cdcceba0b07e66130558992b23 | refs/heads/master | 2021-01-19T12:39:07.801719 | 2015-11-15T10:09:12 | 2015-11-15T10:09:12 | 17,699,739 | 2 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,182 | rd | find_best_selection_SA.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sms.R
\name{find_best_selection_SA}
\alias{find_best_selection_SA}
\title{find_best_selection_SA}
\usage{
find_best_selection_SA(area_census, insms, inseed = -1)
}
\arguments{
\item{area_census}{A census dataset consisting of various areas rows.}
\item{insms}{A microsimulation object which holds the data and details
of the simulation such as iterations, lexicon.}
\item{inseed}{A number to be used for random seed.}
}
\value{
msm_results An object with the results of the simulation, of this area.
}
\description{
Run a simulation in parallel mode with Simulated Annealing
}
\examples{
library(sms)
data(survey)
data(census)
in.lexicon=createLexicon()
in.lexicon=addDataAssociation(in.lexicon, c("he","he"))
in.lexicon=addDataAssociation(in.lexicon, c("females","female"))
this_area=as.data.frame(census[1,]) #Select the first area from the census table
insms= new("microsimulation",census=census, panel=survey, lexicon=in.lexicon, iterations=5)
myselection= find_best_selection_SA( this_area, insms, inseed=1900)
print(myselection)
}
\author{
Dimitris Kavroudakis \email{dimitris123@gmail.com}
}
|
6584a7089dcc8974f5e06b24596e7ba65945d926 | 781acb845196fc990fe6c05b95fc994c6ef47306 | /R/varImpACC.R | ad5745f67e1bdc828c26ab45f1fc14406fdd537d | [] | no_license | PhilippPro/varImp | 741e1f15a8c8445e65622dfa7e556c66c147b972 | c7b544c80e2332cebb20f25e22a68dcf5efb63aa | refs/heads/master | 2022-01-11T09:32:05.745634 | 2021-12-25T08:05:32 | 2021-12-25T08:05:32 | 135,269,180 | 4 | 1 | null | null | null | null | UTF-8 | R | false | false | 2,290 | r | varImpACC.R | #' varImpACC
#'
#' Computes the variable importance regarding the accuracy (ACC).
#'
#' @param object An object as returned by cforest.
#' @param mincriterion The value of the test statistic or 1 - p-value that must be exceeded in order to include a
#' split in the computation of the importance. The default mincriterion = 0 guarantees that all splits are included.
#' @param conditional A logical determining whether unconditional or conditional
#' computation of the variable importance is performed.
#' @param threshold The threshold value for (1 - p-value) of the association between the variable of interest and a
#' covariate, which must be exceeded inorder to include the covariate in the conditioning scheme for the variable of
#' interest (only relevant if conditional = TRUE). A threshold value of zero includes all covariates.
#' @param nperm The number of permutations performed.
#' @param OOB A logical determining whether the importance is computed from the out-of-bag sample or the learning
#' sample (not suggested).
#' @param pre1.0_0 Prior to party version 1.0-0, the actual data values were permuted according to the original
#' permutation importance suggested by Breiman (2001). Now the assignments to child nodes of splits in the variable
#' of interest are permuted as described by Hapfelmeier et al. (2012), which allows for missing values in the
#' explanatory variables and is more efficient wrt memory consumption and computing time. This method does not
#' apply to conditional variable importances.
#'
#' @return Vector with computed permutation importance for each variable
#' @export
#'
#' @examples
#' data(iris)
#' iris2 = iris
#' iris2$Species = factor(iris$Species == "versicolor")
#' iris.cf = cforest(Species ~ ., data = iris2,control = cforest_unbiased(mtry = 2, ntree = 50))
#' set.seed(123)
#' a = varImpACC(object = iris.cf)
#'
varImpACC = function (object, mincriterion = 0, conditional = FALSE, threshold = 0.2,
                      nperm = 1, OOB = TRUE, pre1.0_0 = conditional) {
  # Convenience wrapper around varImp() that fixes the importance measure to
  # classification accuracy ("ACC"); every other argument is forwarded as-is.
  varImp(object,
         mincriterion = mincriterion,
         conditional = conditional,
         threshold = threshold,
         nperm = nperm,
         OOB = OOB,
         pre1.0_0 = pre1.0_0,
         measure = "ACC")
}
|
62e6b5aefd2e36572fbc4c6cffbdec8c496be365 | 78d7853bc8ec468f20b33a325abb0b6424ab8a57 | /R/angle_logi.r | 8a632402e40dcc7ff26384661ae404a88f8b91e8 | [] | no_license | wenjie2wang/smac | 15ac183f6bf4cc50604958002388d20698cb752a | 05ff9c926aaa660bfef93df0d43fef0dd451aab7 | refs/heads/main | 2023-06-24T18:34:54.908726 | 2021-07-30T03:40:20 | 2021-07-30T03:40:20 | 390,900,858 | 0 | 0 | null | 2021-07-30T02:08:54 | 2021-07-30T02:08:54 | null | UTF-8 | R | false | false | 268 | r | angle_logi.r | angle_logi=function(x,y,weight,nlambda,lambda.min,lambda,standardize,epsilon)
{
if (is.null(lambda)) {z = logiway1(x, y, weight, nlambda, lambda.min, standardize, epsilon)}
if (!is.null(lambda)) {z = logiway2(x, y, weight, lambda, standardize, epsilon)}
return(z)
}
|
4c5d505619f402cd7478f2bde74b4f855a9ebb5a | 96dfb2e54b4b6882e6e487a5e7d221d098d85135 | /R/make_lsd_date_file.R | f649306ae5c1e236dbfd39dbd662505c732ba0fa | [] | no_license | Dmirandae/NELSI | e37d705c9de10f2a149eef039a498ada049dcefa | 403bacbb641629caadb728b300b844870eca820e | refs/heads/master | 2021-01-22T11:29:39.120640 | 2017-01-16T00:29:24 | 2017-01-16T00:29:24 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 526 | r | make_lsd_date_file.R | make_lsd_date_file <- function(phylodata, outfile = 'outfile.date'){
if(class(phylodata) == 'DNAbin'){
taxa_names <- rownames(phylodata)
}else if(class(phylodata) == 'phylo'){
taxa_names <- phylodata$tip.label
}
dates <- sapply(taxa_names, function(x) gsub('.+_', '', x), USE.NAMES = F)
lines <- paste0(taxa_names, ' ', dates, collapse = '\n')
cat(length(taxa_names), '\n', file = outfile)
cat(lines, file = outfile, append = T)
print(paste('Dates file saved in ', outfile))
}
|
9d5d662f099026bbd3710efdcc8aac0b7816b5c8 | 7f72ac13d08fa64bfd8ac00f44784fef6060fec3 | /RGtk2/inst/examples/cairo-cairo-t-1.R | 95f6b757d90cf919c24d282f9bef6a4e8f4726c7 | [] | no_license | lawremi/RGtk2 | d2412ccedf2d2bc12888618b42486f7e9cceee43 | eb315232f75c3bed73bae9584510018293ba6b83 | refs/heads/master | 2023-03-05T01:13:14.484107 | 2023-02-25T15:19:06 | 2023-02-25T15:20:41 | 2,554,865 | 14 | 9 | null | 2023-02-06T21:28:56 | 2011-10-11T11:50:22 | R | UTF-8 | R | false | false | 150 | r | cairo-cairo-t-1.R | cr$pushGroup()
cr$setSource(fill_pattern)
cr$fillPreserve()
cr$setSource(stroke_pattern)
cr$stroke()
cr$popGroupToSource(cr)
cr$paintWithAlpha(alpha)
|
7694be7d64f890967ba22ec9c4a887147868b100 | 8e904b4ccda9f3f9253344db2c25ed8fdc5b199a | /best.R | bc40d70283d585635899f3a3c20725e4524333b0 | [] | no_license | rajeshmore/Assignment4 | 7caa9d1af02e3d6fce9543c2d5a25adaf9b76b29 | f490f2323f705fa387dfa8b87699ffa1495ecc43 | refs/heads/master | 2021-01-20T06:20:10.248903 | 2014-11-30T17:01:57 | 2014-11-30T17:01:57 | 27,343,133 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,453 | r | best.R | best <- function(stateChr, outcomeChr) {
# Function to find the best hospital in a state
# Read file data
file_data <- read.csv("outcome-of-care-measures.csv",colClasses = "character")
# Convert data type from char to numeric and suppress warning
suppressWarnings(file_data[, 11] <- as.numeric(file_data[, 11]))
suppressWarnings(file_data[, 17] <- as.numeric(file_data[, 17]))
suppressWarnings(file_data[, 23] <- as.numeric(file_data[, 23]))
#Merge data set
hospital_data <- file_data[,c(2,7,11,17,23)]
#provide proper column names
colnames(hospital_data) <- c("hospital","state","heart attack","heart failure","pneumonia")
# Check for valid input argument
if ( stateChr %in% hospital_data$state == FALSE)
stop("invalid state")
if (outcomeChr %in% c("heart attack","heart failure","pneumonia") == FALSE)
stop("invalid outcome")
# Eliminate NA Values
mydata <- na.omit(hospital_data[which(hospital_data$state==stateChr),])
#Find row number for min value by outcome
if (outcomeChr == "heart attack") {rownum<- which(mydata$"heart attack" == min(mydata[,3]))}
if (outcomeChr == "heart failure") {rownum<- which(mydata$"heart failure" == min(mydata[,4]))}
if (outcomeChr == "pneumonia") {rownum<- which(mydata$"pneumonia" == min(mydata[,5]))}
#Return Hospital name for identified row
mydata[rownum,1]
}
# best("TX", "heart failure")
# best("MD", "heart attack")
# best("MD", "pneumonia")
# best("BB", "heart attack")
# best("NY", "hert attack")
|
f5b009fd005327814806a58ee62c1b0b0a5d9f22 | 2aa9c4e518530e54e9d795d587ffaba195e5c3b4 | /run-all.R | 5be1690f274e4e5b6ee08fc5946a2c489317e417 | [] | no_license | evisat/rdata-6COSC006W | 48d8615aae84f33ac00f014561893ab7c17c7286 | da7a7b51ac44d19e5418f9a3d0c37bd30a5ded29 | refs/heads/master | 2020-04-21T17:08:34.964336 | 2019-04-24T12:08:04 | 2019-04-24T12:08:04 | 169,726,410 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 191 | r | run-all.R | #script loads all functions used in project
# Run the project pipeline in dependency order: helper functions first,
# then library loading, then the raw-data cleaning step. source() evaluates
# each file in the global environment (its default), exactly as three
# consecutive source() calls would.
invisible(lapply(
  c(
    "R/00_utils.R",      # functions used throughout the project
    "R/01_load-libs.R",  # libraries used throughout the project
    "R/02_clean-data.R"  # cleans the raw data
  ),
  source
))
98eac3d2dd87e963f63ffa7d72aff24cf68f3be7 | f5929d21bcca795a939c5c9a798e2c3b9ce9d951 | /create_data_set/create_claim_table.R | 123e90beb6f4e4ffe1451974daff6959588f99be | [
"MIT"
] | permissive | onc-healthit/2021PCOR-ML-AI | 16f3664bec18cc2c62327dd38e5b40908fced2e0 | 9b849ba68558818d57477581253c49b9c66951c7 | refs/heads/main | 2023-04-19T05:29:58.036607 | 2021-09-17T20:46:51 | 2021-09-17T20:46:51 | 377,345,412 | 4 | 1 | MIT | 2021-09-13T20:33:25 | 2021-06-16T02:15:15 | Jupyter Notebook | UTF-8 | R | false | false | 3,115 | r | create_claim_table.R | #Functions used in S3a_esrd_claims.R to create the pre-esrd claims tables.
#The schema for the tables changes from year to year. For example, there is no
#cdtype field prior to 2014, since all diagnosis codes were ICD9 prior to 2014.
#The script handles these year-to-year changes in schema.
# Load one pre-ESRD claims table into Postgres, one input file per year.
#
# data_dir         -- directory holding the yearly claim CSV extracts
# con              -- open DBI connection to the target Postgres database
# filenames        -- per-year file stems; each ends with a 4-digit year
# fieldnames       -- columns (lower-cased below) written to the output table
# column_type      -- readr col_types spec for 2015+ files (which have cdtype)
# column_type_2015 -- readr col_types spec for pre-2015 files (no cdtype column)
# table_name_pt    -- patients table used to restrict claims to known usrds_ids
#
# NOTE(review): the str_glue("(unknown).csv") file-name templates appear to
# have been redacted in this copy of the file -- confirm the real patterns.
create_claim_table <- function(
  data_dir,
  con,
  filenames,
  fieldnames,
  column_type,
  column_type_2015,
  table_name_pt) {
  # send information to insert each year of claims data into the same postgres table
  fieldnames = tolower(fieldnames)
  for (filename in filenames) {
    # The 4-digit incident year is the last 4 characters of the file stem
    # (substr returns character, so comparisons below rely on coercion).
    incident_year =
      substr(filename, str_length(filename) - 3, str_length(filename))
    if (incident_year < 2015) {
      # claims prior to 2015 are all icd9, so we set cdtype to I for those years
      csvfile = read_csv(file.path(data_dir, str_glue("(unknown).csv")), col_types = column_type_2015)
      csvfile = csvfile %>%
        mutate(cdtype = "I")
    }
    else {
      csvfile = read_csv(file.path(data_dir, str_glue("(unknown).csv")), col_types = column_type)
    }
    # Output table name = file stem with the year suffix removed, so every
    # year of the same claim type accumulates in one table.
    tblname = str_remove(filename, incident_year)
    names(csvfile) = tolower(names(csvfile))
    fields = names(csvfile)
    # Restrict claims to patients present in the patients table.
    patients = dbGetQuery(
      con,
      str_glue(
        "SELECT usrds_id
        FROM {table_name_pt}")
    )
    df = patients %>%
      inner_join(
        csvfile,
        by = "usrds_id") %>%
      mutate(
        incident_year = incident_year)
    # Normalize primary diagnosis codes: trim, then right-pad with zeros to
    # a fixed width of 7 so codes compare consistently across years.
    df$pdgns_cd = df$pdgns_cd %>%
      trimws() %>%
      str_pad(.,
              width = 7,
              side = "right",
              pad = "0")
    if (grepl('_ip_', tblname)){
      # Inpatient tables get extra filtering (claim-from date + DRG code).
      df = createIP_CLM(df, incident_year)
    }
    else {
      # All other tables only require a non-missing claim-from date.
      df <- df %>%
        filter(!is.na(masked_clm_from) & (masked_clm_from != ""))
    }
    rm(csvfile)
    # Append every set, except '2012' which will be the first table to import.
    # this is b/c 2012 has the format that we want to use to create the table
    # and append the other years since the format changes between 2011 and 2012-2017
    if (incident_year==2012){
      drop_table_function(con, tblname)
      print(str_glue("creating {tblname} claims using {incident_year}={nrow(df)}
      patients={nrow(df %>% distinct(usrds_id, keep_all=FALSE))}"))
      dbWriteTable(
        con,
        tblname,
        df[, fieldnames],
        append = FALSE,
        row.names = FALSE)
    }
    else {
      print(str_glue("adding {incident_year} to {tblname}={nrow(df)}
      patients={nrow(df %>% distinct(usrds_id, keep_all=FALSE))}"))
      dbWriteTable(
        con,
        tblname,
        df[, fieldnames],
        append = TRUE,
        row.names = FALSE)
    }
  }
}
# Restrict inpatient (IP) claim rows to usable records for the
# "preesrd5y_ip_clm" table: a row is kept only when both its claim-from
# date and its DRG code are present and non-blank.
#
# df            -- claims data frame (must contain masked_clm_from and drg_cd)
# incident_year -- year label, used only in the progress message
# Returns the filtered data frame.
createIP_CLM <- function(df, incident_year) {
  print(str_glue("filtering IP claims {incident_year}"))
  df %>%
    filter(!is.na(masked_clm_from), masked_clm_from != "") %>%
    filter(!is.na(drg_cd), drg_cd != "")
}
bad454f214775e956ca83ffb755347dc086a9996 | 16396454140853e8df670775acd10d556bb36960 | /man/decomposition.Rd | 2ebf178d2568bd64b597747e03545cbb750b29fa | [] | no_license | alenzhao/asymmetry | 194f122c1151878cd535f51560072b8d72846b0e | 6c240d95fb8181fa7f3821be5587e0800b8b941f | refs/heads/master | 2020-06-18T14:17:35.874700 | 2016-11-11T17:26:33 | 2016-11-11T17:26:33 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 909 | rd | decomposition.Rd | \name{decomposition}
\alias{decomposition}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Decompose an asymmetric matrix
}
\description{
The decomposition of an asymmetric matrix into a symmetric matrix and a skew-symmetric matrix is an elementary result from mathematics that is the cornerstone of this package. The decomposition into a skew-symmetric and a symmetric component is written as: \eqn{Q=S+A}, where \eqn{Q} is an asymmetric matrix, \eqn{S} is a symmetric matrix, and \eqn{A} is a skew-symmetric matrix.
}
\usage{
decomposition(X)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{X}{
An asymmetric matrix
}
}
\value{
\item{S }{The symmetric part of the matrix}
\item{A }{The skew-symmetric part of the matrix}
}
\author{
Berrie Zielman
}
\examples{
data("Englishtowns")
Q <- decomposition(Englishtowns)
# the skew-symmetric part
Q$A
}
|
0dc347ceb795ebda9d2a9576bc30ac5690e83823 | aa515e0b1b1595bcfc59ca72eb9ce56ac3388656 | /man/easy_plot_ly.Rd | 93739f3f8afa7fdef42979707092bd6f5f9e1171 | [
"MIT"
] | permissive | shanealman/EZRplots | f84204a2f6df621243f41e388de8d15688439195 | fc6056f511a1b7d2540f6dc7e2c7a8fea165e1b5 | refs/heads/master | 2022-06-08T00:46:38.403540 | 2020-05-04T07:55:44 | 2020-05-04T07:55:44 | 260,323,514 | 0 | 4 | MIT | 2020-05-04T07:55:46 | 2020-04-30T21:36:52 | R | UTF-8 | R | false | true | 1,896 | rd | easy_plot_ly.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/easy_plot_ly.R
\name{easy_plot_ly}
\alias{easy_plot_ly}
\title{Easy Plotting with Plotly}
\usage{
## The x, y, z, and color variables need to be in a vectorized form, like x$data or y$data.
## If you are using the surface plot type, a z-variable matrix is all that should be used;
## if non-matrix data is passed to the function for x and y, it will be converted to a z matrix.
## Line plots should use quantitative color variables. 3D density plots require only an x and y variable
## and will create a z matrix for you.
easy_plot_ly(x, y, z, color, type, data)
}
\arguments{
\item{x}{The x-axis variable}
\item{y}{The y-axis variable}
\item{z}{The z-axis variable}
\item{color}{A categorical or quantitative variable to subset the data}
\item{type}{The type of plot you would like to generate: "scatter", "line",
"surface", "3d_density", "mesh", or "auto" to have plotly generate one for you}
\item{data}{The dataset that will be used}
}
\value{
A 2D or 3D plot based on the package plotly
}
\description{
This function provides a simple way to generate 2D and 3D plots using the R package
plotly.
}
\examples{
Scatter Plot:
easy_plot_ly(x = iris$Sepal.Length, y = iris$Sepal.Width, z = iris$Petal.Length,
color = iris$Species, type = 'scatter', data = iris)
Line Plot:
df1 <- data.frame(x = sin(1:1000), y = cos(1:1000), z = 1:1000)
easy_plot_ly(x = df1$x, y = df1$y, z = df1$z, color = df1$x, type = 'line', data = df1)
Surface Plot:
easy_plot_ly(z = volcano, type = 'surface', data = volcano)
3D Density Plot:
easy_plot_ly(x = iris$Sepal.Length, y = iris$Sepal.Width, type = '3d_density', data = iris)
Mesh Plot:
easy_plot_ly(x = iris$Sepal.Length, y = iris$Sepal.Width, z = iris$Petal.Length,
type = 'mesh', data = iris)
}
|
f174b203eb3988b8a4b4b490216e0f5b8cee3ac9 | 6f8da14fb8330a2fdc0a6e45d013558b47023c86 | /R/pullback_vdp.R | 4e7e597d04a7ff6408c3e2340b789c849f2a23cf | [] | no_license | mcrucifix/iceages_scripts | b641e930e3e1a75b945dd82a0b41f7f9e6436147 | cb2d1d1bdc83e0a2d7eee8767c8728d4e6a9881c | refs/heads/master | 2021-01-23T06:34:22.742936 | 2013-07-29T19:32:01 | 2013-07-29T19:32:01 | 7,934,568 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,735 | r | pullback_vdp.R | # Copyright (c) 2012 Michel Crucifix <michel.crucifix@uclouvain.be>
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject
# the following conditions:
# The above copyright notice and this permission notice shall be
# incluuded in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND INFRINGEMENT
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# ------------------------------------------------------------------
# R version 2.15.2 (2012-10-26) -- "Trick or Treat"
# ------------------------------------------------------------------
require(iceages)
# pullback attractors of vanderpol oscillator
# driven by periodic ersatz of precession and obliquity
# with standard parameters
Astro <- read_astro(1,1)
# Integration time grid: 0..200 in steps of 0.5 (units as used by iceages).
times=seq(0,200,0.5)
# determinstic parameter set
parvdp = c(alpha=30.0, beta=0.7, gammapre=0.6, gammaobl=0.6,
           omega=4.10, asym=0)
# stochastic parameter set (same as above, but with noise amplitude sigma
# instead of the asymmetry term)
parvdps = c(alpha=30.0, beta=0.7, gammapre=0.6, gammaobl=0.6,
           omega=4.10, sigma=0.5)
# initial conditions positioned on the pullback attractor at time 0
# we do this rather than using the convenient 'pullaback_d' because
# we want to be able to restart from the same IC with the stochastic model
init <- basin(models$vdp_d, par=parvdp, -700., 0, Astro=Astro)$clusters
# Deterministic trajectories, one per initial-condition cluster.
# NOTE(review): seq(nrow(init)) misbehaves if init has zero rows;
# seq_len(nrow(init)) would be the safe form -- confirm before changing.
p41 <- list()
for (i in seq(nrow(init)))
{
  p41[[i]] <- propagate_d(models$vdp_d, times, init=init[i,], par=parvdp, Astro=Astro)
}
# tweaks the paramer: same deterministic set with omega lowered to 4.0
parvdp40 = parvdp
parvdp40['omega'] = 4.0
# generates the 'stochastic' and 'deterministic' tweaked attractors
# (seed fixed so the stochastic run is reproducible)
s41 <- propagate_s(models$vdp_s, init=init[1,], par=parvdps,
times, Astro=Astro, seed=95)
# we selected here the 'fourth' pullback attractor
# chosen after some trial and error to be representative
# of the phenomenon we want to illustrate
p40 <- pullback_d(models$vdp_d, times=times, par=parvdp40, Astro=Astro)$S[[4]]
# ... and save !  (save() stores objects under these exact names, so do not
# rename p41/s41/p40/times without updating downstream loaders)
save(file='../RData/pullback_11.RData', p41, s41, p40,times)
9a7ef193e7e5bd60010bacf8f1f9f799bf7aede4 | 1242e612287c974e333d9998345fa7b2bf0baf09 | /R script.R | 6d35ef6a4fd705e65534631961f1c3ca15ad8a73 | [] | no_license | MercenaryGhost/Time-Series-models-to-estimate-Bitcoin-Prices. | 9b8d1904a56eb426f9d4dc917e63ba2f5dc6b4de | 8e2c3b767454600dfe4d1f93bc23752f2eea3756 | refs/heads/main | 2023-05-05T23:40:33.189033 | 2021-05-22T08:43:38 | 2021-05-22T08:43:38 | 369,757,807 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,277 | r | R script.R | #ARIMA Model
# --- Data preparation: load raw Bitcoin prices and aggregate to weekly means ---
library(readxl)
Bitcoin_dataset_updated <- read_excel("E:/sem 3-2/Applied econometrics/Assignment 3/Bitcoin_dataset_updated.xlsx", sheet = "Sheet1")
library(dplyr)
library(lubridate)
# Bucket each observation into the week it belongs to.
Bitcoin_dataset_updated$week <- floor_date(Bitcoin_dataset_updated$Date, "week")
# Weekly average price. The original used plyr::ddply() without ever loading
# plyr, which fails with "could not find function 'ddply'"; dplyr (already
# loaded) produces the same two-column result (week, V1).
x <- Bitcoin_dataset_updated %>%
  group_by(week) %>%
  summarise(V1 = mean(price)) %>%
  as.data.frame()
View(x)
write.csv(x, "E:/sem 3-2/Applied econometrics/Assignment 3/weekly.csv",row.names = FALSE)
write.csv(Bitcoin_dataset_updated, "E:/sem 3-2/Applied econometrics/Assignment 3/bitcoin.csv",row.names = FALSE)
# weekly.xlsx is the manually edited version of weekly.csv (pre-2015 rows
# removed by hand in Excel before this point).
weekly <- read_excel("E:/sem 3-2/Applied econometrics/Assignment 3/weekly.xlsx")
View(weekly)
rm(x)
# ---- ARIMA: stationarity checks and model fit on the weekly price series ----
Bitcoin.ts <- ts(weekly$`avg price`, frequency = 52, start = c(2015,1)) # considered values only from 2015 first week above values are deleted in excel manually before loading the weekly df.
Bitcoin.ts
plot.ts(Bitcoin.ts)
plot.ts(log(Bitcoin.ts))
# First and second differences (to remove trend before unit-root testing).
Bitcoin.tsdiff1 = diff(Bitcoin.ts, differences = 1)
plot.ts(Bitcoin.tsdiff1)
Bitcoin.tsdiff2 = diff(Bitcoin.tsdiff1, differences = 1)
plot.ts(Bitcoin.tsdiff2)
library(tseries)
library(aTSA)
# NOTE(review): both tseries and aTSA export adf.test(); loading aTSA second
# masks tseries::adf.test, and aTSA's adf.test has no 'alternative' argument.
# Confirm which implementation these calls are meant to use.
adf.test(Bitcoin.ts, alternative="stationary")
adf.test(Bitcoin.tsdiff1, alternative="stationary")
adf.test(Bitcoin.tsdiff2, alternative="stationary")
# aTSA::stationary.test: ADF by default, Phillips-Perron ("pp") / KPSS ("kpss").
stationary.test(Bitcoin.ts)
stationary.test(Bitcoin.ts, method = "pp")
stationary.test(Bitcoin.ts, method = "kpss")
stationary.test(Bitcoin.tsdiff1)
stationary.test(Bitcoin.tsdiff1, method = "pp")
stationary.test(Bitcoin.tsdiff2)
# ACF/PACF of the first difference guide the AR/MA order choice.
acf(Bitcoin.tsdiff1, lag.max = 50)
pacf(Bitcoin.tsdiff1, lag.max = 50)
library(forecast)
library(urca)
auto.arima(Bitcoin.tsdiff1)
auto.arima(Bitcoin.ts)
# Final model: ARIMA(1,1,0) on the (undifferenced) weekly series.
bitcoin.tsarima <- arima(Bitcoin.ts, order = c(1,1,0))
View(bitcoin.tsarima)
summary(bitcoin.tsarima)
# 10-week-ahead forecast with a 95% interval.
Bitcoin.tsforecasts <- forecast(bitcoin.tsarima, h = 10, level = c(95))
Bitcoin.tsforecasts
plot(Bitcoin.tsforecasts)
# Residual diagnostics: white-noise checks via ACF and portmanteau tests.
acf(Bitcoin.tsforecasts$residuals, lag.max=50)
Box.test(Bitcoin.tsforecasts$residuals, lag=50, type="Ljung-Box")
Box.test(Bitcoin.tsforecasts$residuals, lag=50, type="Box-Pierce")
plot.ts(Bitcoin.tsforecasts$residuals)
hist(Bitcoin.tsforecasts$residuals, breaks = 50)
#ARDL Model
# weekly2.xlsx adds explanatory columns (transactions, S&P) to the weekly prices.
weekly2 <- read_excel("E:/sem 3-2/Applied econometrics/Assignment 3/weekly2.xlsx")
View(weekly2)
Bitcoin2.ts <- ts(weekly2, frequency = 52, start = c(2015,1))
plot.ts(Bitcoin2.ts[,1])
plot.ts(Bitcoin2.ts[,2])
plot.ts(Bitcoin2.ts[,3])
plot.ts(Bitcoin2.ts[,4])
# First differences of columns 3 (transactions) and 4 (S&P) for stationarity checks.
x = diff(Bitcoin2.ts[,3], differences = 1)
y = diff(Bitcoin2.ts[,4], differences = 1)
plot.ts(x)
plot.ts(y)
stationary.test(Bitcoin2.ts[,2])
stationary.test(Bitcoin2.ts[,3])
stationary.test(Bitcoin2.ts[,4])
stationary.test(x)
stationary.test(y)
Bitcoin2.ts.tab <- cbind(Bitcoin2.ts,diff(Bitcoin2.ts[,2]),diff(Bitcoin2.ts[,3]),diff(Bitcoin2.ts[,4]))
View(Bitcoin2.ts.tab)
library(dynlm)
library(knitr)
library(broom)
# Candidate ARDL specifications: d() = first difference, L(.,k) = lag k.
Bitcoin2.tsdyn1 <- dynlm(d(price)~L(d(price),1)+d(transactions)+d(SP),data = Bitcoin2.ts)
Bitcoin2.tsdyn2 <- dynlm(L(price,1)~price+L(price,-1)+L(d(transactions),1)+L(d(SP),1),data = Bitcoin2.ts)
Bitcoin2.tsdyn3 <- dynlm(d(price)~L(d(price),1)+L(d(transactions),0:1)+L(d(SP),0),data = Bitcoin2.ts)
Bitcoin2.tsdyn4 <- dynlm(d(price)~L(d(price),1)+L(d(transactions),0:2)+L(d(SP),0),data = Bitcoin2.ts)
Bitcoin2.tsdyn5 <- dynlm(d(price)~L(d(price),1)+L(d(transactions),0:2)+L(d(SP),0:2),data = Bitcoin2.ts)
Bitcoin2.tsdyn6 <- dynlm(d(price)~L(d(price),1)+L(d(transactions),0:3)+L(d(SP),0:3),data = Bitcoin2.ts)
kable(tidy(summary(Bitcoin2.tsdyn1)), digits=4, caption="The Bitcoin auto regressive distributed lag model1")
kable(tidy(summary(Bitcoin2.tsdyn2)), digits=4, caption="The Bitcoin auto regressive distributed lag model2")
kable(tidy(summary(Bitcoin2.tsdyn3)), digits=4, caption="The Bitcoin auto regressive distributed lag model3")
kable(tidy(summary(Bitcoin2.tsdyn4)), digits=4, caption="The Bitcoin auto regressive distributed lag model4")
kable(tidy(summary(Bitcoin2.tsdyn5)), digits=4, caption="The Bitcoin auto regressive distributed lag model5")
kable(tidy(summary(Bitcoin2.tsdyn6)), digits=4, caption="The Bitcoin auto regressive distributed lag model6")
# Goodness-of-fit comparison for models 1, 3 and 4.
glL1 <- glance(Bitcoin2.tsdyn1)[c("r.squared","statistic","AIC","BIC")]
glL3 <- glance(Bitcoin2.tsdyn3)[c("r.squared","statistic","AIC","BIC")]
glL4 <- glance(Bitcoin2.tsdyn4)[c("r.squared","statistic","AIC","BIC")]
tabl <- rbind(glL1, as.numeric(glL3), as.numeric(glL4))
kable(tabl, caption="Goodness-of-fit statistics for Bitcoin-ARDL models")
# Residual autocorrelation diagnostics for the chosen model (no 4).
ehat <- resid(Bitcoin2.tsdyn4)
acf(ehat,lag.max = 50)
res1 <- resid(Bitcoin2.tsdyn4)
res2 <- lag(resid(Bitcoin2.tsdyn4),-1)
plot(res1,res2)
abline(v=mean(res1, na.rm = TRUE), lty=2)
abline(h=mean(res2, na.rm = TRUE), lty=2)
res3 <- lag(resid(Bitcoin2.tsdyn4),-2)
plot(res1,res3)
abline(v=mean(res1, na.rm = TRUE), lty=2)
abline(h=mean(res3, na.rm = TRUE), lty=2)
library(lmtest)
# Breusch-Godfrey tests under four settings (order x type x NA-fill).
# NOTE(review): the names c and d shadow base::c and stats::D for the rest of
# the session -- consider renaming in a future cleanup.
a <- bgtest(Bitcoin2.tsdyn4, order=1, type="F", fill=0)
b <- bgtest(Bitcoin2.tsdyn4, order=1, type="F", fill=NA)
c <- bgtest(Bitcoin2.tsdyn4, order=4, type="Chisq", fill=0)
d <- bgtest(Bitcoin2.tsdyn4, order=4, type="Chisq", fill=NA)
dfr <- data.frame(rbind(a[c(1,2,4)], b[c(1,2,4)], c[c(1,2,4)], d[c(1,2,4)]))
dfr <- cbind(c("1, F, 0", "1, F, NA", "4, Chisq, 0", "4, Chisq, NA"), dfr)
names(dfr)<-c("Method", "Statistic", "Parameters", "p-Value")
kable(dfr, caption="Breusch-Godfrey test for the Bitcoin-ARDL model no 4")
library(dLagM)
# Univariate ARIMA forecasts of the regressors, later fed into the ARDL forecast.
auto.arima(weekly2$transactions)
transactions.ts <- ts(weekly2$transactions)
transactions.arima <- arima(transactions.ts,order = c(0,1,2))
transactions_forecast <- forecast::forecast(transactions.arima, h=5, level = c(95))
transactions_forecast
auto.arima(weekly2$SP)
SP.ts <- ts(weekly2$SP)
SP.arima <- arima(SP.ts,order = c(0,1,1))
SP_forecast <- forecast::forecast(SP.arima, h=5, level = c(95))
SP_forecast
#ARDL Forecast using dLagM package #we cannot use this for a dynlm object, so I have re-estimated the same model using ardlDlm(), this is supported by the dLagM::forecast() function.
diff.ts <- cbind(diff(weekly2$price,1),diff(weekly2$transactions,1),diff(weekly2$SP)) #diff of all three variables.
View(diff.ts)
rem.p = list(X2 = c(3) , X3 = c(1,2,3)) # p denotes the x lags(for all x variables) and q denotes the AR part, so to remove some select lags we can use this remove parameter.
rem.q = c(2) # I have given some buffer values to avoid dY.t-1 getting removed and similar for x variables as well.
remove = list(p = rem.p , q = rem.q)
# NOTE(review): lowercase view() is not base R (tibble::view / tidyverse only);
# View(remove) is probably what was intended -- confirm.
view(remove)
model.ardlDlm = ardlDlm(formula = X1 ~ X2 + X3, data = data.frame(diff.ts) , #data should be a dataframe not anyother vector type
                        p = 3 , q = 2 ,
                        remove = remove) #as there is no parameter to directly calculate differences I have given the differences data itself as the variables.
x.new = matrix(c(-3866.7, 32.7, -1505.9, 0,0,0), ncol = 3, nrow = 2) #these are the differences of predicted values from ARIMA.
dLagM::forecast(model = model.ardlDlm , x = x.new , h = 4,interval = TRUE, nSim = 100)
|
5ea3be0425540f5b8bf72009705b2ac68727a307 | d44d7086935d940498cc65e0793b90cbed83baae | /R/compare_index.R | 2b034a9cb525dc0cc1488649433d2419ae723772 | [] | no_license | BerSerK/FenJi-A-Pricing | 1f952c8225920ea26641fb8f232f14bb1c943b25 | 2c8d729fece5c9e323f38823ca5a431b919d07b8 | refs/heads/master | 2021-03-25T03:12:29.955195 | 2015-10-21T03:22:25 | 2015-10-21T03:22:25 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 193 | r | compare_index.R | index1 <- read.csv('~/Downloads/index.csv')
# Convert the BargainDate strings (m/d/Y) to Date in one vectorized call.
# The original element-wise loop (index1$Date[i] = as.Date(...)) assigned Date
# values into a bare numeric column one element at a time, which silently
# strips the "Date" class and leaves days-since-1970 numbers, so the plot's
# x axis lost its date labels. It also used the 1:length() pattern, which
# misfires on empty input. Vectorized as.Date fixes both.
index1$Date <- as.Date(index1$BargainDate, '%m/%d/%Y')
plot(index1$Date, index1$index, type='l')
|
c2cf4ff0bd4eaf94e6fcae57d3064c1c0aa46c59 | 649d2f1ebe25c8119f6af5948f820ca978f1b88d | /R_exam_project_canis_lupus_italicus.r | 9f41c031a68c4e3d737cadcef528fdb432a19b5a | [] | no_license | Enricalo/Monitoring_2021 | 603b643946c4db029ff3065ea5c5cbc50871ec7d | c5c898d43eee1f2979b83641799149ada4156919 | refs/heads/main | 2023-02-28T12:29:20.015968 | 2021-01-28T21:33:22 | 2021-01-28T21:33:22 | 309,310,152 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 9,727 | r | R_exam_project_canis_lupus_italicus.r | library(raster) #
library(readr) #
library(ncdf4) #
# NOTE(review): setwd() in a script is fragile -- everything below assumes
# the data files live in this directory.
setwd("C:/LAB_2020_2021/")
#recall geographic data from raster package
#altitude data, crop map on Italy
# NOTE(review): 'cl' is not defined at this point; the call only works because
# getData() never evaluates the unused 'col' argument (lazy evaluation) --
# consider dropping col=cl entirely.
IT<-getData('alt', country='IT', mask=TRUE, col=cl)
#alternative map
#library(maptools)
#data("wrld_simpl")
#plot(wrld_simpl)
#plot(wrld_simpl, xlim=c(10,15), ylim=c(36,48), axes=T)
#map not detailed enough
#first graph wolf distribution 2010-2015
# GBIF occurrence export (tab-separated) for Canis lupus, 2010-2012.
wolf1012 <- read_delim("canislupus1012.csv", "\t", escape_double = FALSE, trim_ws = TRUE)
#change colors to look like a geographical map on a book
cl1 <- colorRampPalette(c('darkolivegreen2','darkolivegreen4', 'chocolate', 'chocolate4', 'coral4', 'brown4', 'grey34', 'grey58', 'grey', 'white'))(100) #
#plot new colors and name
plot(IT, col=cl1, , main="wolf_2010_2012")
#plot points about wolfs occurence data in Italy from 2010 to 2012
wolf_2010_2012<-points(wolf1012$decimalLongitude, wolf1012$decimalLatitude, col="turquoise1", pch=19, cex = 0.7)
#second graph wolf distribution 2018-2020
wolf1820 <- read_delim("canislupus1820.csv", "\t", escape_double = FALSE, trim_ws = TRUE)
#recall geographic data from raster package
#altitude data, crop map on Italy
#IT<-getData('alt', country='IT', mask=TRUE, col=cl)
#change colors to look like a geographical map on a book
#cl1 <- colorRampPalette(c('darkolivegreen2','darkolivegreen4','gold4','sienna1', 'chocolate4', 'brown', 'brown4', 'orangered4','grey58', 'white'))(100) #
#plot new colors
plot(IT, col=cl1, main="wolf_2018_2020")
#plot points about wolfs occurence data in Italy from 2010 to 2015
points(wolf1820$decimalLongitude, wolf1820$decimalLatitude, col="black", pch=19, cex = 0.7)
#compare the two periods side by side (1 row x 2 columns)
par(mfrow=c(1,2))
plot(IT, col=cl1, , main="wolf_2010_2012")
points(wolf1012$decimalLongitude, wolf1012$decimalLatitude, col="turquoise1", pch=19, cex = 0.7, main="wolf_2010_2012")
plot(IT, col=cl1, main="wolf_2018_2020")
points(wolf1820$decimalLongitude, wolf1820$decimalLatitude, col="black", pch=19, cex = 0.7, main="wolf_2018_2020")
#save picture (same panel re-drawn into a PNG device)
png("wolf_occurences_2010_2020.png")
par(mfrow=c(1,2))
plot(IT, col=cl1, , main="wolf_2010_2012")
points(wolf1012$decimalLongitude, wolf1012$decimalLatitude, col="turquoise1", pch=19, cex = 0.7, main="wolf_2010_2012")
plot(IT, col=cl1, main="wolf_2018_2020")
points(wolf1820$decimalLongitude, wolf1820$decimalLatitude, col="black", pch=19, cex = 0.7, main="wolf_2018_2020")
dev.off()
#clear the screen
dev.off()
###
#plot preys during the years
#plot wild boars 2010 2015
boar1012 <- read_delim("susscrofa1012.csv", "\t", escape_double = FALSE, trim_ws = TRUE)
#recall geographic data from raster package
#altitude data, crop map on Italy
plot(IT, col=cl1, main="wild_boar_prey_2010_2012")
#plot points about boars occurence data in Italy from 2010 to 2012
# NOTE(review): points() ignores the 'main' argument; it is harmless but dead.
points(boar1012$decimalLongitude, boar1012$decimalLatitude, col="turquoise1", pch=17, cex = 0.7, main="boar_2010_2012")
#plot wild boars 2018 2020
boar1820 <- read_delim("susscrofa1820.csv", "\t", escape_double = FALSE, trim_ws = TRUE)
#recall geographic data from raster package
#altitude data, crop map on Italy
plot(IT, col=cl1, main="wild_boar_prey_2018_2020")
#plot points about boars occurence data in Italy from 2010 to 2015
points(boar1820$decimalLongitude, boar1820$decimalLatitude, col="black", pch=17, cex = 0.7, main="boar_2018_2020")
#compare
par(mfrow=c(1,2))
plot(IT, col=cl1, main="wild_boar_prey_2010_2012")
points(boar1012$decimalLongitude, boar1012$decimalLatitude, col="turquoise1", pch=17, cex = 0.7, main="boar_2010_2012")
plot(IT, col=cl1, main="wild_boar_prey_2018_2020")
points(boar1820$decimalLongitude, boar1820$decimalLatitude, col="black", pch=17, cex = 0.7, main="boar_2018_2020")
# save picture
png("boar_occurences_2010_2020.png")
par(mfrow=c(1,2))
plot(IT, col=cl1, main="wild_boar_prey_2010_2012")
points(boar1012$decimalLongitude, boar1012$decimalLatitude, col="turquoise1", pch=17, cex = 0.7, main="boar_2010_2012")
plot(IT, col=cl1, main="wild_boar_prey_2018_2020")
points(boar1820$decimalLongitude, boar1820$decimalLatitude, col="black", pch=17, cex = 0.7, main="boar_2018_2020")
dev.off()
#clear the screen
dev.off()
#plot deers 2010 2015
deer1012 <- read_delim("cervelaph1015.csv", "\t", escape_double = FALSE, trim_ws = TRUE)
#recall geographic data from raster package
#altitude data, crop map on Italy
plot(IT, col=cl1, main="deer_prey_2010_2015")
#plot points about deers occurence data in Italy from 2010 to 2015
points(deer1012$decimalLongitude, deer1012$decimalLatitude, col="turquoise1", pch=17, cex = 0.7, main="deer_2010_2012")
#plot deers 2018 2020
deer1820 <- read_delim("cervelaph1820.csv", "\t", escape_double = FALSE, trim_ws = TRUE)
#recall geographic data from raster package
#altitude data, crop map on Italy
plot(IT, col=cl1, main="deer_prey_2018_2020")
#plot points about deers occurence data in Italy from 2010 to 2015
points(deer1820$decimalLongitude, deer1820$decimalLatitude, col="black", pch=17, cex = 0.7, main="deer_2018_2020")
#compare
par(mfrow=c(1,2))
plot(IT, col=cl1, main="deer_prey_2010_2012")
points(deer1012$decimalLongitude, deer1012$decimalLatitude, col="turquoise1", pch=17, cex = 0.7, main="deer_2010_2012")
plot(IT, col=cl1, main="deer_prey_2018_2020")
points(deer1820$decimalLongitude, deer1820$decimalLatitude, col="black", pch=17, cex = 0.7, main="deer_2018_2020")
#save picture
png("deer_occurences_2010_2020.png")
par(mfrow=c(1,2))
plot(IT, col=cl1, main="deer_prey_2010_2012")
points(deer1012$decimalLongitude, deer1012$decimalLatitude, col="turquoise1", pch=17, cex = 0.7, main="deer_2010_2012")
plot(IT, col=cl1, main="deer_prey_2018_2020")
points(deer1820$decimalLongitude, deer1820$decimalLatitude, col="black", pch=17, cex = 0.7, main="deer_2018_2020")
dev.off()
#clear the screen
dev.off ()
####
#plot the spatial extent of vegetation cover
#download copernicus data FCOVER May 2010 and May 2020
#plot the data
# Copernicus FCOVER product (NetCDF, read via ncdf4/raster).
FCOVER2010 <- raster("c_gls_FCOVER_201005240000_GLOBE_VGT_V1.4.1.nc")
cl2 <- colorRampPalette(c('tan2','tan4','sienna','darkolivegreen2','darkolivegreen4','darkgreen'))(100) #
plot(FCOVER2010, col=cl2, main="VegetationIndex_2010")
# Crop the global product to a box around Italy.
ext <- c(0,20,35,55) # xmin xmax ymin ymax
FCOVER2010_Italy <- crop(FCOVER2010, ext)
plot(FCOVER2010_Italy, col=cl2, , main="VegetationIndex_2010")
#save a picture
png("FCOVER2010.png")
plot(FCOVER2010_Italy, col=cl2, main="VegetationIndex_2010")
dev.off()
FCOVER2020 <- raster("c_gls_FCOVER_202005240000_GLOBE_PROBAV_V1.5.1.nc")
cl2 <- colorRampPalette(c('tan2','tan4','sienna','darkolivegreen2','darkolivegreen4','darkgreen'))(100) #
plot(FCOVER2020, col=cl2, main="VegetationIndex_2020")
ext <- c(0,20,35,55) # xmin xmax ymin ymax
FCOVER2020_Italy <- crop(FCOVER2020, ext)
plot(FCOVER2020_Italy, col=cl2, , main="VegetationIndex_2020")
#save a picture
png("FCOVER2020.png")
plot(FCOVER2020_Italy, col=cl2, main="VegetationIndex_2020")
dev.off()
#differences between the two images
#Was there any increase in vegetation cover?
# Cell-by-cell difference 2010 minus 2020 (positive = vegetation loss by 2020).
cldif<-colorRampPalette(c('grey58', 'sienna4', 'green4', 'orange', 'gold', 'yellow'))(100) #
difV<- FCOVER2010_Italy - FCOVER2020_Italy
plot(difV, col=cldif, main="VegetationIndex_changes_10years")
#save a picture
png("difference_FCOVER.png")
plot(difV, col=cldif, main="VegetationIndex_changes_10years")
dev.off()
#compare all layers in a 2x4 panel: wolf, boar, deer, FCOVER per row/period
par(mfrow=c(2,4))
#wolf
plot(IT, col=cl1, , main="wolf_2010_2012")
points(wolf1012$decimalLongitude, wolf1012$decimalLatitude, col="yellow1", pch=19, cex = 0.7, main="wolf_2010_2012")
#boar
plot(IT, col=cl1, main="wild_boar_prey_2010_2012")
points(boar1012$decimalLongitude, boar1012$decimalLatitude, col="turquoise1", pch=17, cex = 0.7, main="boar_2010_2012")
#deer
plot(IT, col=cl1, main="deer_prey_2010_2012")
points(deer1012$decimalLongitude, deer1012$decimalLatitude, col="turquoise1", pch=17, cex = 0.7, main="deer_2010_2012")
#vegetation
plot(FCOVER2010_Italy, col=cl2, main="VegetationIndex_2010")
##
#second row 2020
#wolf
plot(IT, col=cl1, main="wolf_2018_2020")
points(wolf1820$decimalLongitude, wolf1820$decimalLatitude, col="yellow1", pch=19, cex = 0.7, main="wolf_2018_2020")
#boar
plot(IT, col=cl1, main="wild_boar_prey_2018_2020")
points(boar1820$decimalLongitude, boar1820$decimalLatitude, col="black", pch=17, cex = 0.7, main="boar_2018_2020")
#deer
plot(IT, col=cl1, main="deer_prey_2018_2020")
points(deer1820$decimalLongitude, deer1820$decimalLatitude, col="black", pch=17, cex = 0.7, main="deer_2018_2020")
#vegetation
plot(FCOVER2020_Italy, col=cl2, main="VegetationIndex_2020")
#save picture
# Save the final side-by-side comparison (2 rows x 4 columns: wolf, boar,
# deer, FCOVER; top row 2010-2012, bottom row 2018-2020) to a PNG file.
png("final_considerations_2010_2020.png")
par(mfrow=c(2,4))
# Row 1: 2010-2012 (stray empty argument after col=cl1 removed)
plot(IT, col=cl1, main="wolf_2010_2012")
points(wolf1012$decimalLongitude, wolf1012$decimalLatitude, col="yellow1", pch=19, cex = 0.7, main="wolf_2010_2012")
plot(IT, col=cl1, main="wild_boar_prey_2010_2012")
# Fixed: the original referenced the undefined object 'boar10125' (typo for
# 'boar1012'), which aborted the whole PNG block with an "object not found"
# error; titles "..._2010_2015" also corrected to "_2010_2012" to match the
# interactive comparison panel above.
points(boar1012$decimalLongitude, boar1012$decimalLatitude, col="turquoise1", pch=17, cex = 0.7, main="boar_2010_2012")
plot(IT, col=cl1, main="deer_prey_2010_2012")
points(deer1012$decimalLongitude, deer1012$decimalLatitude, col="turquoise1", pch=17, cex = 0.7, main="deer_2010_2012")
plot(FCOVER2010_Italy, col=cl2, main="VegetationIndex_2010")
# Row 2: 2018-2020
plot(IT, col=cl1, main="wolf_2018_2020")
points(wolf1820$decimalLongitude, wolf1820$decimalLatitude, col="yellow1", pch=19, cex = 0.7, main="wolf_2018_2020")
plot(IT, col=cl1, main="wild_boar_prey_2018_2020")
points(boar1820$decimalLongitude, boar1820$decimalLatitude, col="black", pch=17, cex = 0.7, main="boar_2018_2020")
plot(IT, col=cl1, main="deer_prey_2018_2020")
points(deer1820$decimalLongitude, deer1820$decimalLatitude, col="black", pch=17, cex = 0.7, main="deer_2018_2020")
plot(FCOVER2020_Italy, col=cl2, main="VegetationIndex_2020")
dev.off()
#the end
|
3f502aa46cbedb9ea099668bfa0e5c847ff5d442 | ab8812f0eb333be6988c98d4def477c818fb3cb3 | /tests/testthat/test-unnest.R | 0d7e0b79750c19022d7cec576cef2aa9f60c8de0 | [
"MIT"
] | permissive | tidyverse/tidyr | 857c4ba4f31e01cdf1bcde4c2a4b88cc530d8715 | 0764e65fad777b71aa2a81ffd5447d04a61f8d5e | refs/heads/main | 2023-08-21T17:31:26.569797 | 2023-01-24T21:21:51 | 2023-01-24T21:21:51 | 20,688,261 | 998 | 420 | NOASSERTION | 2023-03-31T06:49:36 | 2014-06-10T14:24:33 | R | UTF-8 | R | false | false | 8,139 | r | test-unnest.R | test_that("can keep empty rows", {
df <- tibble(x = 1:3, y = list(NULL, tibble(), tibble(a = 1)))
out1 <- df %>% unnest(y)
expect_equal(nrow(out1), 1)
out2 <- df %>% unnest(y, keep_empty = TRUE)
expect_equal(nrow(out2), 3)
expect_equal(out2$a, c(NA, NA, 1))
})
# Rows whose nested data frame has zero rows still contribute their column
# types to the unnested result (type union across all nested frames).
test_that("empty rows still affect output type", {
df <- tibble(
x = 1:2,
data = list(
tibble(y = character(0)),
tibble(z = integer(0))
)
)
out <- unnest(df, data)
expect_equal(out, tibble(x = integer(), y = character(), z = integer()))
})
# Non-vector list contents (here a bare function) cannot be unnested; the
# error message is pinned via a snapshot.
test_that("bad inputs generate errors", {
df <- tibble(x = 1, y = list(mean))
expect_snapshot((expect_error(unnest(df, y))))
})
# Unnesting a list of factor elements re-combines them into a single factor
# (augmented-vector attributes survive the combine step).
# Fixed typo in the test description: "unesting" -> "unnesting".
test_that("unnesting combines augmented vectors", {
df <- tibble(x = as.list(as.factor(letters[1:3])))
expect_equal(unnest(df, x)$x, factor(letters[1:3]))
})
# Column names of the input survive a vector unnest.
test_that("vector unnest preserves names", {
df <- tibble(x = list(1, 2:3), y = list("a", c("b", "c")))
out <- unnest(df, x)
expect_named(out, c("x", "y"))
})
# Nested data frames expand both rows (one per nested row) and columns
# (union of the nested frames' columns).
test_that("rows and cols of nested-dfs are expanded", {
df <- tibble(x = 1:2, y = list(tibble(a = 1), tibble(b = 1:2)))
out <- df %>% unnest(y)
expect_named(out, c("x", "a", "b"))
expect_equal(nrow(out), 3)
})
# A list-of-lists column unnests one level: the result is still a list column.
test_that("can unnest nested lists", {
df <- tibble(
x = 1:2,
y = list(list("a"), list("b"))
)
rs <- unnest(df, y)
expect_identical(rs, tibble(x = 1:2, y = list("a", "b")))
})
# Named and unnamed list columns of equal lengths can be unnested together.
test_that("can unnest mixture of name and unnamed lists of same length", {
df <- tibble(
x = c("a"),
y = list(y = 1:2),
z = list(1:2)
)
expect_identical(
unnest(df, c(y, z)),
tibble(x = c("a", "a"), y = c(1:2), z = c(1:2))
)
})
# vctrs::list_of columns (typed lists) unnest like plain lists.
test_that("can unnest list_of", {
df <- tibble(
x = 1:2,
y = vctrs::list_of(1:3, 4:9)
)
expect_equal(
unnest(df, y),
tibble(x = rep(1:2, c(3, 6)), y = 1:9)
)
})
# NULL elements drop their rows; the column type comes from the non-NULL
# elements (data frame or vector).
test_that("can combine NULL with vectors or data frames", {
df1 <- tibble(x = 1:2, y = list(NULL, tibble(z = 1)))
out <- unnest(df1, y)
expect_named(out, c("x", "z"))
expect_equal(out$z, 1)
df2 <- tibble(x = 1:2, y = list(NULL, 1))
out <- unnest(df2, y)
expect_named(out, c("x", "y"))
expect_equal(out$y, 1)
})
# List elements that are bare vectors are concatenated into one column.
test_that("vectors become columns", {
df <- tibble(x = 1:2, y = list(1, 1:2))
out <- unnest(df, y)
expect_equal(out$y, c(1L, 1:2))
})
# Unnesting several columns at once requires per-row length agreement;
# both vector/vector and vector/data-frame mismatches error (snapshots).
test_that("multiple columns must be same length", {
df <- tibble(x = list(1:2), y = list(1:3))
expect_snapshot((expect_error(unnest(df, c(x, y)))))
df <- tibble(x = list(1:2), y = list(tibble(y = 1:3)))
expect_snapshot((expect_error(unnest(df, c(x, y)))))
})
# Backtick-quoted (non-syntactic) column names work in the tidyselect spec.
test_that("can use non-syntactic names", {
out <- tibble("foo bar" = list(1:2, 3)) %>% unnest(`foo bar`)
expect_named(out, "foo bar")
})
# A packed df-column (not a list column) is simply unpacked in place.
test_that("unpacks df-cols (#1112)", {
df <- tibble(x = 1, y = tibble(a = 1, b = 2))
expect_identical(unnest(df, y), tibble(x = 1, a = 1, b = 2))
})
# A single list column may not mix bare vectors with data frames.
test_that("unnesting column of mixed vector / data frame input is an error", {
df <- tibble(x = list(1, tibble(a = 1)))
expect_snapshot((expect_error(unnest(df, x))))
})
# Name collision between an outer column and an inner (nested) column
# produces an informative error with repair advice (snapshot).
test_that("unnest() advises on outer / inner name duplication", {
df <- tibble(x = 1, y = list(tibble(x = 2)))
expect_snapshot(error = TRUE, {
unnest(df, y)
})
})
# Same for a collision between inner columns of two unnested columns.
test_that("unnest() advises on inner / inner name duplication", {
df <- tibble(
x = list(tibble(a = 1)),
y = list(tibble(a = 2))
)
expect_snapshot(error = TRUE, {
unnest(df, c(x, y))
})
})
# Renaming syntax (y = x) inside the column spec is rejected.
test_that("unnest() disallows renaming", {
df <- tibble(x = list(tibble(a = 1)))
expect_snapshot(error = TRUE, {
unnest(df, c(y = x))
})
})
# Classed lists that vctrs::vec_is_list() accepts behave like bare lists,
# including empty lists, empty prototypes, and NULL elements.
test_that("unnest() works on foreign list types recognized by `vec_is_list()` (#1327)", {
new_foo <- function(...) {
structure(list(...), class = c("foo", "list"))
}
df <- tibble(x = new_foo(tibble(a = 1L), tibble(a = 2:3)))
expect_identical(unnest(df, x), tibble(a = 1:3))
# With empty list
df <- tibble(x = new_foo())
expect_identical(unnest(df, x), tibble(x = unspecified()))
# With empty types
df <- tibble(x = new_foo(tibble(a = 1L), tibble(a = integer())))
expect_identical(unnest(df, x), tibble(a = 1L))
expect_identical(unnest(df, x, keep_empty = TRUE), tibble(a = c(1L, NA)))
# With `NULL`s
df <- tibble(x = new_foo(tibble(a = 1L), NULL))
expect_identical(unnest(df, x), tibble(a = 1L))
expect_identical(unnest(df, x, keep_empty = TRUE), tibble(a = c(1L, NA)))
})
# other methods -----------------------------------------------------------------
# dplyr method dispatch: a rowwise data frame comes back grouped by the
# rowwise key after unnesting.
test_that("rowwise_df becomes grouped_df", {
skip_if_not_installed("dplyr", "0.8.99")
df <- tibble(g = 1, x = list(1:3)) %>% dplyr::rowwise(g)
rs <- df %>% unnest(x)
expect_s3_class(rs, "grouped_df")
expect_equal(dplyr::group_vars(rs), "g")
})
# Existing group_by() grouping survives the unnest.
test_that("grouping is preserved", {
df <- tibble(g = 1, x = list(1:3)) %>% dplyr::group_by(g)
rs <- df %>% unnest(x)
expect_s3_class(rs, "grouped_df")
expect_equal(dplyr::group_vars(rs), "g")
})
# Empty inputs ------------------------------------------------------------
# Zero-row input: the list column becomes an unspecified() vector.
test_that("can unnest empty data frame", {
df <- tibble(x = integer(), y = list())
out <- unnest(df, y)
expect_equal(out, tibble(x = integer(), y = unspecified()))
})
# All-NULL list column: same result as an empty list column.
test_that("unnesting bare lists of NULLs is equivalent to unnesting empty lists", {
df <- tibble(x = 1L, y = list(NULL))
out <- unnest(df, y)
expect_identical(out, tibble(x = integer(), a = integer()))
})
# list_of prototype dictates the output columns even with no elements.
test_that("unnest() preserves ptype", {
tbl <- tibble(x = integer(), y = list_of(ptype = tibble(a = integer())))
res <- unnest(tbl, y)
expect_equal(res, tibble(x = integer(), a = integer()))
})
# A typed list of NULLs still yields columns from its prototype.
test_that("unnesting typed lists of NULLs retains ptype", {
df <- tibble(x = 1L, y = list_of(NULL, .ptype = tibble(a = integer())))
out <- unnest(df, y)
expect_identical(out, tibble(x = integer(), a = integer()))
})
# The ptype argument can force an output type (double -> integer here).
test_that("ptype can be overriden manually (#1158)", {
df <- tibble(
a = list("a", c("b", "c")),
b = list(1, c(2, 3)),
)
ptype <- list(b = integer())
out <- unnest(df, c(a, b), ptype = ptype)
expect_type(out$b, "integer")
expect_identical(out$b, c(1L, 2L, 3L))
})
# ptype supplied as a data-frame prototype applies per nested column.
test_that("ptype works with nested data frames", {
df <- tibble(
a = list("a", "b"),
b = list(tibble(x = 1, y = 2L), tibble(x = 2, y = 3L)),
)
# x: double -> integer
ptype <- list(b = tibble(x = integer(), y = integer()))
out <- unnest(df, c(a, b), ptype = ptype)
expect_identical(out$x, c(1L, 2L))
expect_identical(out$y, c(2L, 3L))
})
# Selecting a non-list column is a silent no-op.
test_that("skips over vector columns", {
df <- tibble(x = integer(), y = list())
expect_identical(unnest(df, x), df)
})
# Unselected list columns pass through untouched.
test_that("unnest keeps list cols", {
df <- tibble(x = 1:2, y = list(3, 4), z = list(5, 6:7))
out <- df %>% unnest(y)
expect_equal(names(out), c("x", "y", "z"))
})
# Deprecated behaviours ---------------------------------------------------
# These tests pin the lifecycle warnings/messages for retired interfaces;
# each snapshot captures the exact deprecation output.
# Passing multiple bare columns instead of c(...) is soft-deprecated.
test_that("cols must go in cols", {
df <- tibble(x = list(3, 4), y = list("a", "b"))
expect_snapshot(unnest(df, x, y))
})
# Calling unnest() with no column spec is deprecated (used to unnest all).
test_that("need supply column names", {
df <- tibble(x = 1:2, y = list("a", "b"))
expect_snapshot(unnest(df))
})
# Legacy .sep argument: joins outer and inner names (x_x, y_x).
test_that("sep combines column names", {
local_options(lifecycle_verbosity = "warning")
df <- tibble(x = list(tibble(x = 1)), y = list(tibble(x = 1)))
expect_snapshot(out <- df %>% unnest(c(x, y), .sep = "_"))
expect_named(out, c("x_x", "y_x"))
})
# Legacy mutate-style usage: creating a new column inside unnest().
test_that("unnest has mutate semantics", {
df <- tibble(x = 1:3, y = list(1, 2:3, 4))
expect_snapshot(out <- df %>% unnest(z = map(y, `+`, 1)))
expect_equal(out$z, 2:5)
})
# .drop / .preserve are deprecated no-ops that emit lifecycle warnings.
test_that(".drop and .preserve are deprecated", {
local_options(lifecycle_verbosity = "warning")
df <- tibble(x = list(3, 4), y = list("a", "b"))
expect_snapshot(df %>% unnest(x, .preserve = y))
df <- tibble(x = list(3, 4), y = list("a", "b"))
expect_snapshot(df %>% unnest(x, .drop = FALSE))
})
# Legacy .id argument: records each element's name in a new column.
test_that(".id creates vector of names for vector unnest", {
local_options(lifecycle_verbosity = "warning")
df <- tibble(x = 1:2, y = list(a = 1, b = 1:2))
expect_snapshot(out <- unnest(df, y, .id = "name"))
expect_equal(out$name, c("a", "b", "b"))
})
|
d215375cb3268ebd7e75b5bd840a886106619811 | d41b9cfdcd5cdb84eb788f660ce2448f02523e0a | /3-revision_entrega/scripts/2-crear_datos_revision.R | 2093c299db82899d399052c4e627eb8079847c7e | [] | no_license | tereom/madmex_pixel_sample | 02bbfca62161cf3110e7dc45eee8322c67ed9b31 | 9d7ffaaea48a2d0c01f1cc32c3c7c978b838eded | refs/heads/master | 2020-08-06T05:28:33.248483 | 2019-10-04T17:21:05 | 2019-10-04T17:21:05 | 212,853,145 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 6,583 | r | 2-crear_datos_revision.R | # se crean datos para revisiar la muestra y para estimar
library(sf)
library(raster)
library(dplyr)
library(tidyverse)
library(srvyr)
source("R/funciones.R")
### entrega BITS
bits_2018 <- read_sf("datos_entrada/datos_bits/PUNTOS DE VALIDACION-2018/Puntos_de_Validacion_2018_Revisados.shp")
bits_2018_points <- bits_2018 %>% st_cast("POINT")
# detectamos puntos repetidos
bits_2018_points$equals <- st_equals(bits_2018_points) %>% map_int(first)
# seleccionamos únicamente un punto cuando hay repetidos
bits_2018_unique <- bits_2018_points %>%
group_by(equals) %>%
top_n(n = 1, identifier) %>%
ungroup()
# agregamos variable de mapa MADMEX para comparar y determinar coincidencia
# última versión MADMEX 2018:
raster_31 <- raster("datos_entrada/madmex_sentinel2_2018_31.tif")
# Reproyectamos raster y extraemos la variable para agregarla a sf de BITS
# agregamos también columnas para el valor de intérpretes en 17 clases
# (solo estaba en 31 clases).
crs_lcc <- crs(raster_31) %>% as.character()
bits_2018_lcc <- st_transform(bits_2018_unique, crs = crs_lcc)
bits_raster <- raster::extract(raster_31, y = bits_2018_lcc)
bits_2018_lcc <- bits_2018_lcc %>%
add_column(raster = bits_raster) %>%
mutate_at(vars(raster, interp1, interp2, interp3),
.funs = list(c31 = identity, c17 = clasifica_31_17)) %>%
select(-raster, -interp1, -interp2, -interp3)
# determinamos coincidencia con la primera etiqueta de los revisores (top) y
# con alguna de las 3 asignadas
bits_2018_lcc_row <- bits_2018_lcc %>%
rowwise() %>%
mutate(
correcto_31_top = raster_c31 == interp1_c31,
correcto_31_top3 = raster_c31 %in% c(interp1_c31, interp2_c31,
interp3_c31),
correcto_17_top = raster_c17 == interp1_c17,
correcto_17_top3 = raster_c17 %in% c(interp1_c17, interp2_c17,
interp3_c17)
) %>%
ungroup() %>%
select(OBJECTID, identifier, correcto_31_top:correcto_17_top3)
bits_2018_lcc <- bits_2018_lcc %>%
left_join(bits_2018_lcc_row)
write_rds(bits_2018_lcc, "datos_salida/bits_2018_lcc.rdata")
# Estimation needs the sampling-frame and design information.
# Read the planned sample sizes of the survey design.
marco <- read_csv("datos_salida/tamanos_2.csv") %>%
dplyr::select(-p, -n_0) %>%
rename(classid = clase, n_planned = n)
# Read the sample delivered to BITS and check for repeated points.
muestra_pais <- read_rds(path = "datos_salida/muestra_pais.rds")
muestra_pais <- muestra_pais %>%
mutate(id_muestra = 1:n())
muestra_pais$equals <- st_equals(muestra_pais) %>%
map_int(first)
# Points with repetition (195 in this run); written out for inspection.
muestra_eq <- muestra_pais %>%
group_by(equals) %>%
mutate(n = n()) %>%
filter(n > 1)
st_write(muestra_eq, "datos_salida/muestra_pais_reps.shp")
# De-duplicated sample: keep one point per duplicate group.
muestra_pais_unique <- muestra_pais %>%
group_by(equals) %>%
top_n(n = 1, id_muestra) %>%
ungroup() %>%
dplyr::select(-equals)
# Spatially join the unique sample points with the BITS points to recover the
# stratification variables (edo = state, classid) of the original data.
# dist = 0.02 is in the layer's CRS units -- TODO confirm this tolerance.
bits_2018_edo <- select(bits_2018_lcc, -equals) %>%
st_join(muestra_pais_unique, join = st_is_within_distance,
dist = 0.02, left = TRUE)
# For the weights we need the observed size per stratum (classid x edo).
bits_2018_w <- bits_2018_edo %>%
left_join(marco, by = c("classid", "edo")) %>%
group_by(classid, edo) %>%
mutate(n_obs = n()) %>%
ungroup() %>%
mutate(estrato = paste0(classid, "-", edo))
# Compare planned vs. observed sizes per stratum (printed, not stored).
bits_2018_w %>%
st_drop_geometry() %>%
dplyr::select(classid, edo, n_planned, n_obs) %>%
dplyr::distinct() %>%
mutate(diff = n_planned - n_obs) %>%
filter(diff > 0) %>%
arrange(-diff)
glimpse(bits_2018_w)
write_rds(bits_2018_w, path = "datos_salida/bits_2018_weights.rdata")
# Build the survey-design object (survey/srvyr) for estimation: each point is
# its own PSU, stratified by the classid-edo stratum, with finite-population
# correction N. The redundant library(srvyr) call that used to live here was
# removed -- srvyr is already attached at the top of the script.
bits_design <- bits_2018_w %>%
as_survey_design(ids = identifier, strata = estrato, fpc = N)
write_rds(bits_design, path = "datos_salida/bits_2018_design.rdata")
# Pedro's delivery.
# Two labels per point: a pixel label and a hectare label, obtained by
# evaluating only the selected pixel vs. evaluating one hectare around it.
pedro <- read_sf("datos_salida/muestras_pedro_etiquetada/muestra300_etiq_pedro.shp")
bits_2018_lcc_df <- st_drop_geometry(bits_2018_lcc)
# Add the 17-class version of Pedro's labels and join the BITS labels, then
# compute agreement flags (Pedro vs. map, and Pedro vs. BITS interpreters).
bits_pedro <- pedro %>%
st_drop_geometry() %>%
mutate(
pedro_c17 = clasifica_31_17(pedro31cl),
pedro1ha_c17 = clasifica_31_17(pedro1ha),
pedro_c31 = pedro31cl,
pedro1ha_c31 = pedro1ha
) %>%
dplyr::select(-interp1, -interp2, -interp3, -pedro31cl,
-pedro1ha) %>%
left_join(bits_2018_lcc_df, by = "identifier") %>%
rowwise() %>%
mutate(
# Pedro vs. map, 31 classes: pixel label, hectare label, and either.
p_correcto_31_pix = (pedro_c31 == raster_c31),
p_correcto_31_ha = (pedro1ha_c31 == raster_c31),
p_correcto_31 = p_correcto_31_pix | p_correcto_31_ha,
# Pedro vs. any of the three BITS interpreter labels, 31 classes.
p_bits_correcto_31_pix = pedro_c31 %in%
c(interp1_c31, interp2_c31, interp3_c31),
p_bits_correcto_31_1ha = pedro1ha_c31 %in%
c(interp1_c31, interp2_c31, interp3_c31),
p_bits_correcto_31 = p_bits_correcto_31_pix | p_bits_correcto_31_1ha,
# Same comparisons collapsed to 17 classes.
p_correcto_17_pix = (pedro_c17 == raster_c17),
p_correcto_17_ha = (pedro1ha_c17 == raster_c17),
p_correcto_17 = p_correcto_17_pix | p_correcto_17_ha,
p_bits_correcto_17_pix = pedro_c17 %in% c(interp1_c17, interp2_c17,
interp3_c17),
p_bits_correcto_17_1ha = pedro1ha_c17 %in% c(interp1_c17, interp2_c17,
interp3_c17),
p_bits_correcto_17 = p_bits_correcto_17_pix | p_bits_correcto_17_1ha
) %>%
ungroup()
write_rds(bits_pedro, path = "datos_salida/bits_pedro.rds")
# Design-based estimation where the population is the BITS sample.
# These are the data from which Pedro's sample was drawn (not the final ones).
bits_2018_sample <- read_sf("datos_entrada/datos_bits/Validacion_Final_Mapa-2018_BITS_190211/validacion_final_2018.shp")
# Stratum sizes (N) per map class raster_m18 for the fpc.
bits_2018_sample_n <- bits_2018_sample %>%
st_drop_geometry() %>%
group_by(raster_m18) %>%
summarise(N = n())
bits_pedro_w <- bits_pedro %>%
left_join(bits_2018_sample_n, by = "raster_m18")
# Survey design: stratified by map class with finite-population correction.
bits_pedro_design <- bits_pedro_w %>%
as_survey_design(ids = identifier, strata = raster_m18, fpc = N)
write_rds(bits_pedro_design, path = "datos_salida/bits_pedro_design.rds")
|
a22e30967dcf1fa5510d4fb20acc53de317d42e9 | 380c4e316f8b37a2057812b5e72e9e16fc3f8f79 | /run_analysis.R | 5ed4eaca65ec94f536db7f93090bdc3b67eb29da | [] | no_license | jolenechen83/CleaningDataAssignment | b0e3c158501621f74bb82e7990927de53cb19815 | e491093614c07308058c57b02f6cfe5399a1fe46 | refs/heads/master | 2021-01-19T07:57:05.932225 | 2015-04-26T16:49:08 | 2015-04-26T16:49:08 | 34,619,311 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,632 | r | run_analysis.R | # Data(zip file) were downloaded and extracted into working directory. All data
# were stored in the data folder.
# 1. Merges the training and the test sets to create one data set.
path_rf <- "./data"
#Read all the files
testActivity <- read.table(file.path(path_rf, "test" , "Y_test.txt" ),header = FALSE)
trainActivity <- read.table(file.path(path_rf, "train", "Y_train.txt"),header = FALSE)
testSubject <- read.table(file.path(path_rf, "test" , "subject_test.txt"),header = FALSE)
trainSubject <- read.table(file.path(path_rf, "train", "subject_train.txt"),header = FALSE)
testFeatures <- read.table(file.path(path_rf, "test" , "X_test.txt" ),header = FALSE)
trainFeatures <- read.table(file.path(path_rf, "train", "X_train.txt"),header = FALSE)
# merge the dataset
dSubject <- rbind(trainSubject, testSubject)
dActivity<- rbind(trainActivity, testActivity)
dFeatures<- rbind(trainFeatures, testFeatures)
names(dSubject)<-c("Subject")
names(dActivity)<- c("Activity")
FeaturesNames <- read.table(file.path(path_rf, "features.txt"),head=FALSE)
names(dFeatures)<- FeaturesNames$V2
# Merge columns to get the data frame Data for all data
dCombine <- cbind(dSubject, dActivity)
Data <- cbind(dFeatures, dCombine)
# 2. Extracts only the measurements on the mean and standard deviation
# for each measurement.
FeaturesNames2<-FeaturesNames$V2[grep("mean\\(\\)|std\\(\\)", FeaturesNames$V2)]
selected<-c(as.character(FeaturesNames2), "Subject", "Activity" )
Data<-subset(Data,select=selected)
write.csv(Data,file=paste( "subsetData.csv", sep=""))
# 3. Uses descriptive activity names to name the activities in the data set
labels <- read.table(file.path(path_rf, "activity_labels.txt"),header = FALSE)
# 4. Appropriately labels the data set with descriptive variable names.
# prefix t is replaced by time
# prefix f is replaced by frequency
# Acc is replaced by Accelerometer
# Gyro is replaced by Gyroscope
# Mag is replaced by Magnitude
# BodyBody is replaced by Body
names(Data)<-gsub("^t", "time", names(Data))
names(Data)<-gsub("^f", "frequency", names(Data))
names(Data)<-gsub("Acc", "Accelerometer", names(Data))
names(Data)<-gsub("Gyro", "Gyroscope", names(Data))
names(Data)<-gsub("Mag", "Magnitude", names(Data))
names(Data)<-gsub("BodyBody", "Body", names(Data))
# 5.From the data set in step 4, creates a second, independent
# tidy data set with the average of each variable for each activity
# and each subject.
library(plyr)
Data2<-aggregate(. ~Subject + Activity, Data, mean)
Data2<-Data2[order(Data2$Subject,Data2$Activity),]
write.table(Data2, file = "tidydataset.txt",row.name=FALSE)
|
434119fec7ab2c759d04a7632c494365694fc282 | 715c1eee20f2ce63b755f5fe32f3734778432c9c | /01_90_expand_grid.R | 113c0bd6a79e694a1b6288574470bab71c30d110 | [] | no_license | nikhiljohnbejoy/Rconcepts | 47dd030d10abf027338a657e7c97826cc0677798 | 321693fb423d118593b9cce0266ca52e1dcd02fd | refs/heads/master | 2022-12-01T02:28:16.322446 | 2020-07-22T03:52:05 | 2020-07-22T03:52:05 | 281,564,317 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 736 | r | 01_90_expand_grid.R | # combn produces a matrix of all combinations
photos.combn <- combn(c("Bride", "Groom",
"Bride's Parents",
"Groom's Parents",
"Bride's Siblings",
"Groom's Siblings"), 3)
photos.combn <- t(photos.combn)
# expand.grid produces a dataframe of vector A against vector B
photos.expand.grid <- expand.grid(c("Bride", "Bride's Parents", "Bride's Siblings"),
c("Groom", "Groom's Parents", "Groom's Siblings"))
photos.expand.grid <- expand.grid(c("Bride", "Groom"),
c("Bride's Parents", "Groom's Parents"),
c("Bride's Siblings", "Groom's Siblings"))
|
8e7e91b72bfe5f6b98647f0f2db3735001fca316 | 9f932bdf4a74450c8fbf25318b229358654a5c45 | /attic/simulation_math_util_fn_old.R | 0a927df6e763da54d6691bdafc9f27a8f4bb4ee6 | [] | no_license | adalisan/JOFC-MatchDetect | 5cce6c3d57763f925e31202d27dca317c029d4a7 | 3340c3cdc39a13e2f6c18f666a953e42ca73e745 | refs/heads/master | 2020-04-11T02:58:48.375064 | 2015-08-08T01:49:59 | 2015-08-08T01:49:59 | 11,868,601 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 43,643 | r | simulation_math_util_fn_old.R | ## functions
run.mc.replicate<-function(model,p, r, q, c.val,
d = p-1,
pprime1 = ifelse(model=="gaussian",p+q,p+q+2), # cca arguments , signal+noise dimension
pprime2 = ifelse(model=="gaussian",p+q,p+q+2), # cca arguments, signal+noise dimension
Wchoice = "avg", #How to impute L
pre.scaling = TRUE, #Make the measurement spaces have the same scale
oos = TRUE, #embed test observations by Out-of-sampling ?
alpha = NULL,
n = 100, m = 100, #Number of training and test observations
sim.grass=FALSE,
eigen.spectrum=FALSE,
old.gauss.model.param=FALSE,
separability.entries.w,
compare.pom.cca=TRUE, # Run PoM and CCA to compare with JOFC?
oos.use.imputed,
level.mcnemar=0.01, #At what alpha, should unweighted(w=0.5) and optimal w^* be compared
def.w=0.5, #The null hypothesis is that power(def.w) >= power(rival.w) (by default ,def.w is the w for the unweighted case which equals 0.5)
rival.w=NULL,
proc.dilation=FALSE, #when investigating convergence of JOFC to PoM, should Procrustes analysis of configurations include the dilation component?
assume.matched.for.oos,
w.vals, #w values to use for JOFC
wt.equalize,
verbose=FALSE,
power.comparison.test=TRUE){
print(paste("random ",runif(1)))
print("run.mc.replicate")
#
# The followin if statement is Not really necessary, unless we change our mind about rival.w being the best in every MC replicate
# and want to make rival.w a constant (preferably the best overall)
if (is.null(rival.w)){
if (0.95 %in% w.vals){
rival.w=0.95
}
else if (0.9 %in% w.vals){
rival.w=0.9
}
else if (0.99 %in% w.vals){
rival.w=0.99
}
}
w.max.index <- length(w.vals)
size <- seq(0, 1, 0.01)
len <- length(size)
power.w.star <- 0
power.mc= array(0,dim=c(w.max.index,len)) #power values for JOFC in this MC replicate
power.cca.mc = array(0,dim=c(len)) #power values for CCA in this MC replicate
power.pom.mc = array(0,dim=c(len)) #power values for PoM in this MC replicate
power.cca.reg.mc = array(0,dim=c(len)) #power values for reg CCA in this MC replicate
config.mismatch <- list(frob.norm=array(0,dim=c(w.max.index))) #Frob. norm of configuration difference
#between PoM and JOFC with smallest w
min.stress.for.w.val = array(0,dim=c(w.max.index)) #minimum stress value for smacof algorithm
pom.stress <- 0
T0.cca.reg <- array(0,dim=c(m)) #Test statistics for regularized CCA under null
TA.cca.reg <- array(0,dim=c(m)) #Test statistics for regularized CCA under alternative
T0.cca <- array(0,dim=c(m)) #Test statistics for CCA under null
TA.cca <- array(0,dim=c(m)) #Test statistics for CCA under alternative
T0.pom <- array(0,dim=c(m)) #Test statistics for PoM under null
TA.pom <- array(0,dim=c(m)) #Test statistics for JOFC under alternative
T0 <- matrix(0,w.max.index,m) #Test statistics for JOFC under null
TA <- matrix(0,w.max.index,m) #Test statistics for JOFC under alternative
T0.best.w <- matrix(0,2,m) #Test statistics for JOFC (comparison of w=0.5 with optimal w*
TA.best.w <- matrix(0,2,m)
cont.table <- matrix(0,2,2)
Fid.Err.Term.1 <- array(0,dim=c(w.max.index))
Fid.Err.Term.2 <- array(0,dim=c(w.max.index))
Comm.Err.Term <- array(0,dim=c(w.max.index))
sigma <- matrix(0,p,p)
means <- array(0 , dim=c(w.max.index,2*d))
if (is.null(alpha)) {
if (model=="gaussian"){
sigma<- diag(p)
if (old.gauss.model.param) sigma <-Posdef(p,r)
alpha.mc <- mvrnorm(n+(2*m), rep(0,p),sigma)
} else if (model=="dirichlet"){
alpha.mc <- rdirichlet(n+2*m, rep(1,p+1))
} else stop("unknown model")
} else {
alpha.mc <- alpha[[mc]]
}
## optimal power
optim.power<- c()
if (model=="gaussian"){
for (aleph in size){
crit.val.1<-qgamma(aleph,(p)/2,scale=2/r,lower.tail=FALSE)
crit.val.2<-crit.val.1
type.2.err<-pgamma(crit.val.2,shape=(p)/2,scale=2*(1+1/r))
beta<- 1-type.2.err
optim.power<- c(optim.power,beta)
}
}
## n pairs of matched points
if (model=="gaussian"){
xlist <- matched_rnorm(n, p, q, c.val, r, alpha=alpha.mc[1:n, ],sigma.alpha=sigma,old.gauss.model.param=old.gauss.model.param)
} else{
xlist <- matched_rdirichlet(n, p, r, q, c.val, alpha.mc[1:n, ])
}
X1 <- xlist$X1
X2 <- xlist$X2
if (model=="gaussian")
sigma.mc<-xlist$sigma.beta
D1 <- dist(X1)
D2 <- dist(X2)
if (verbose) print("random matched pairs generated\n")
#prescaling
if (pre.scaling) {
s <- lm(as.vector(D1) ~ as.vector(D2) + 0)$coefficients
} else {
s <- 1
}
#m pairs of unmatched points
if (model=="gaussian"){
## test observations -- m pairs of matched and m pairs of unmatched
ylist <- matched_rnorm(m, p, q, c.val, r, alpha=alpha.mc[(n+1):(n+m), ],
sigma.alpha=sigma,old.gauss.model.param=old.gauss.model.param, sigma.beta=sigma.mc)
Y2A <- matched_rnorm(m, p, q, c.val, r, alpha=alpha.mc[(n+m+1):(n+m+m), ],
sigma.alpha=sigma,old.gauss.model.param=old.gauss.model.param, sigma.beta=sigma.mc)$X2
} else{
ylist <- matched_rdirichlet(m, p, r, q, c.val, alpha.mc[(n+1):(n+m), ])
Y2A <- matched_rdirichlet(m, p, r, q, c.val, alpha.mc[(n+m+1):(n+m+m), ])$X2
}
Y1 <- ylist$X1
Y20 <- ylist$X2
# Dissimilarity matrices for in-sample +out-of-sample
D10A <- as.matrix(dist(rbind(X1, Y1)))
D20 <- as.matrix(dist(rbind(X2, Y20))) * s
D2A <- as.matrix(dist(rbind(X2, Y2A))) * s
D1<-as.matrix(D1)
D2<-as.matrix(D2)
pom.config<-c()
cca.config<-c()
D2<-D2*s
if (verbose) print("PoM and CCA embedding\n")
if (compare.pom.cca) {
## ==== cca ====
#embed in-sample measurements
if (oos == TRUE) {
if (c.val==0){
if (model=="gaussian"){
X1t <- smacofM(D1,ndim = p,verbose=FALSE)
X2t <- smacofM(D2,ndim = p,verbose=FALSE)
} else{
X1t <- smacofM(D1,ndim = p+1,verbose=TRUE)
X2t <- smacofM(D2,ndim = p+1,verbose=FALSE)
}
} else{
X1t <- smacofM(D=D1,ndim= pprime1,verbose=FALSE)
X2t <- smacofM(D=D2,ndim= pprime2,verbose=FALSE)
}
xcca <- cancor(X1t, X2t)
#project using projection vectors computed by CCA
#if (profile.mode) Rprof("profile-oosMDS.out",append=TRUE)
Y1t <- (oosMDS(D10A, X1t) %*% xcca$xcoef)[, 1:d]
Y20t <- (oosMDS(D20, X2t) %*% xcca$ycoef)[, 1:d]
Y2At <- (oosMDS(D2A, X2t) %*% xcca$ycoef)[, 1:d]
#if (profile.mode) Rprof(NULL)
#cca.config<-rbind(X1t,X2t)
} else {
if (c.val==0){
if (model=="gaussian"){
X1t <- smacofM(D10A, ndim=p,verbose=FALSE)
D20A <-dist(rbind(X2, Y20, Y2A))
X2t <- smacofM(D20A, ndim=p,verbose=FALSE)
}
else{
X1t <- smacofM(D10A, ndim=p+1,verbose=FALSE)
D20A <-dist(rbind(X2, Y20, Y2A))
X2t <- smacofM(D20A, ndim=p+1,verbose=FALSE)
}
} else{
if (model=="gaussian"){
pprime1 <- p+q
pprime2 <- p+q
}
else{
pprime1 <- p+q+2
pprime2 <- p+q+2
}
X1t <- smacofM(D10A, ndim=pprime1,verbose=FALSE,init=cmdscale(D10A,pprime1))
D20A <-dist(rbind(X2, Y20, Y2A))
X2t <- smacofM(D20A, ndim=pprime2,verbose=FALSE,init=cmdscale(D20A,pprime2))
}
if (verbose) print("CCA embedding complete\n")
center1 <- colMeans(X1t[1:n, ]) # column means of training obs
center2 <- colMeans(X2t[1:n, ])
X1t <- X1t - matrix(center1, n+m, pprime1, byrow=TRUE) # column-center training only
X2t <- X2t - matrix(center2, n+2*m, pprime2, byrow=TRUE)
cca <- cancor(X1t[1:n, ], X2t[1:n, ])
Y1t <- (X1t[(n+1):(n+m), ] %*% cca$xcoef )[, 1:d]
Y20t <- (X2t[(n+1):(n+m), ] %*% cca$ycoef)[, 1:d]
Y2At <- (X2t[(n+m+1):(n+2*m), ] %*% cca$ycoef)[, 1:d]
}
T0.cca <- rowSums((Y1t - Y20t)^2)
TA.cca <- rowSums((Y1t - Y2At)^2)
power.cca.mc <- get_power(T0.cca, TA.cca, size)
if (verbose) print("CCA test statistic complete\n")
##low-dimensional (regularized) CCA
## ==== cca ====
#embed in-sample measurements
if (oos == TRUE) {
if (c.val==0){
if (model=="gaussian"){
X1t <- smacofM(D1,ndim = floor((d+p)/2),verbose=FALSE)
X2t <- smacofM(D2,ndim = floor((d+p)/2),verbose=FALSE)
} else{
X1t <- smacofM(D1,ndim = floor((d+p)/2)+1,verbose=TRUE)
X2t <- smacofM(D2,ndim = floor((d+p)/2)+1,verbose=FALSE)
}
} else{
X1t <- smacofM(D=D1,ndim= floor((d+p)/2)+1,verbose=FALSE)
X2t <- smacofM(D=D2,ndim= floor((d+p)/2)+1,verbose=FALSE)
}
xcca <- cancor(X1t, X2t)
#project using projection vectors computed by CCA
#if (profile.mode) Rprof("profile-oosMDS.out",append=TRUE)
Y1t <- (oosMDS(D10A, X1t) %*% xcca$xcoef)[, 1:d]
Y20t <- (oosMDS(D20, X2t) %*% xcca$ycoef)[, 1:d]
Y2At <- (oosMDS(D2A, X2t) %*% xcca$ycoef)[, 1:d]
#if (profile.mode) Rprof(NULL)
#cca.config<-rbind(X1t,X2t)
} else {
if (c.val==0){
if (model=="gaussian"){
X1t <- smacofM(D10A, ndim=floor((d+p)/2),verbose=FALSE)
D20A <-dist(rbind(X2, Y20, Y2A))
X2t <- smacofM(D20A, ndim=floor((d+p)/2),verbose=FALSE)
}
else{
X1t <- smacofM(D10A, ndim= floor((d+p)/2)+1,verbose=FALSE)
D20A <-dist(rbind(X2, Y20, Y2A))
X2t <- smacofM(D20A, ndim= floor((d+p)/2)+1,verbose=FALSE)
}
} else{
if (model=="gaussian"){
pprime1 <- p+q
pprime2 <- p+q
}
else{
pprime1 <- p+q+2
pprime2 <- p+q+2
}
X1t <- smacofM(D10A, ndim=pprime1,verbose=FALSE,init=cmdscale(D10A,pprime1))
D20A <-dist(rbind(X2, Y20, Y2A))
X2t <- smacofM(D20A, ndim=pprime2,verbose=FALSE,init=cmdscale(D20A,pprime2))
}
if (verbose) print("CCA embedding complete\n")
center1 <- colMeans(X1t[1:n, ]) # column means of training obs
center2 <- colMeans(X2t[1:n, ])
X1t <- X1t - matrix(center1, n+m, pprime1, byrow=TRUE) # column-center training only
X2t <- X2t - matrix(center2, n+2*m, pprime2, byrow=TRUE)
cca <- cancor(X1t[1:n, ], X2t[1:n, ])
Y1t <- (X1t[(n+1):(n+m), ] %*% cca$xcoef )[, 1:d]
Y20t <- (X2t[(n+1):(n+m), ] %*% cca$ycoef)[, 1:d]
Y2At <- (X2t[(n+m+1):(n+2*m), ] %*% cca$ycoef)[, 1:d]
}
T0.cca.reg <- rowSums((Y1t - Y20t)^2)
TA.cca.reg <- rowSums((Y1t - Y2At)^2)
power.cca.reg.mc <- get_power(T0.cca.reg, TA.cca.reg, size)
## ==== pom = procrustes o mds ====
if (oos == TRUE) {
#Embed in-sample
X1t <- smacofM(D1, ndim=d,verbose=FALSE)
X2t <- smacofM(D2, ndim=d,verbose=FALSE)
if (verbose) print (colMeans(X1t))
if (verbose) print (colMeans(X2t))
# Compute Proc from in-sample embeddings
proc <- MCMCpack::procrustes(X2t, X1t, dilation=proc.dilation)
# Out-of sample embed and Proc Transform dissimilarities
#if (profile.mode) Rprof("profile-oosMDS.out",append=TRUE)
Y1t <- oosMDS(D10A, X1t)
Y20t <- oosMDS(D20, X2t) %*% proc$R * proc$s
Y2At <- oosMDS(D2A, X2t) %*% proc$R * proc$s
#if (profile.mode) Rprof(NULL)
X2tp<-X2t %*% proc$R * proc$s
pom.config<-rbind(X1t,X2tp)
pom.stress<- sum((as.dist(D1) - dist(X1t))^2)
pom.stress<- pom.stress+ sum((as.dist(D2) - dist(X2tp))^2)
if (verbose) print("PoM embedding complete\n")
} else {
X1t <- smacofM(D10A,ndim= d,verbose=FALSE,init=cmdscale(D10A,d))
D20A <-dist(rbind(X2, Y20, Y2A))
X2t <- smacofM(D20A,ndim= d,verbose=FALSE,init=cmdscale(D20A,d))
center1 <- colMeans(X1t[1:n, ])
center2 <- colMeans(X2t[1:n, ])
X1t <- X1t - matrix(center1, n+m, d, byrow=TRUE) # column-center training only
X2t <- X2t - matrix(center2, n+2*m, d, byrow=TRUE)
proc <- MCMCpack::procrustes(X2t[1:n, ], X1t[1:n, ], dilation=proc.dilation)
Y1t <- X1t[(n+1):(n+m), ]
Y20t <- X2t[(n+1):(n+m), ] %*% proc$R * proc$s
Y2At <- X2t[(n+m+1):(n+2*m), ] %*% proc$R * proc$s
}
T0.pom <- rowSums((Y1t - Y20t)^2)
TA.pom <- rowSums((Y1t - Y2At)^2)
power.pom.mc <- get_power(T0.pom, TA.pom, size)
if (verbose) print("PoM test statistic complete \n")
}
## ==== jofc ====
# Impute "between-condition" dissimilarities from different objects
if (Wchoice == "avg") {
L <- (D1 + D2)/2
} else if (Wchoice == "sqrt") {
L <- sqrt((D1^2 + D2^2)/2)
} else if (Wchoice == "NA+diag(0)") {
L <- matrix(NA,n,n)
diag(L)<- 0
}
if (oos == TRUE) {
#In sample embedding
# Form omnibus dissimilarity matrix
M <- omnibusM(D1, D2, L)
init.conf<-NULL
if (compare.pom.cca) init.conf<- pom.config
# Embed in-sample using different weight matrices (differentw values)
X.embeds<-JOFC.Fid.Commens.Tradeoff(M,d,w.vals,separability.entries.w,init.conf=init.conf,wt.equalize=wt.equalize)
Fid.Err.Term.1 <- X.embeds[[w.max.index+2]]
Fid.Err.Term.2 <- X.embeds[[w.max.index+3]]
Comm.Err.Term <- X.embeds[[w.max.index+4]]
Fid.Err.Sum.Term.1 <- X.embeds[[w.max.index+5]]
Fid.Err.Sum.Term.2 <- X.embeds[[w.max.index+6]]
Comm.Err.Sum.Term <- X.embeds[[w.max.index+7]]
FC.ratio <- X.embeds[[w.max.index+8]]
FC.ratio.2 <- X.embeds[[w.max.index+9]]
FC.ratio.3 <- X.embeds[[w.max.index+10]]
print("Fid.Err.Term.1" )
print(Fid.Err.Term.1 )
print("Comm.Err.Term ")
print(Comm.Err.Term )
min.stress.for.w.val <- X.embeds[[w.max.index+1]]
if (verbose) print("JOFC embeddings complete\n")
#
# OOS Dissimilarity matrices
#
D.oos.1<-dist(Y1)
D.oos.2.null <- dist(Y20)
D.oos.2.alt <- dist(Y2A)
#Imputing dissimilarity entries for OOS
if (Wchoice == "avg") {
L.tilde.null <- (D.oos.1 + D.oos.2.null)/2
L.tilde.alt <- (D.oos.1 + D.oos.2.alt)/2
} else if (Wchoice == "sqrt") {
L.tilde.null <- sqrt((D.oos.1^2 + D.oos.2.null^2)/2)
L.tilde.alt <- sqrt((D.oos.1^2 + D.oos.2.alt^2)/2)
} else if (Wchoice == "NA+diag(0)") {
L.tilde.null <- matrix(NA,m,m)
L.tilde.alt <- matrix(NA,m,m)
diag(L.tilde.null)<- 0
diag(L.tilde.alt)<- 0
}
#Form OOS omnibus matrices
M.oos.0<- omnibusM(D.oos.1,D.oos.2.null, L.tilde.null)
M.oos.A<- omnibusM(D.oos.1,D.oos.2.alt, L.tilde.alt)
for (l in 1:w.max.index){
if (verbose) print("OOS embedding for JOFC for w= \n")
if (verbose) print(w.vals[l])
w.val.l <- w.vals[l]
X <- X.embeds[[l]]
oos.obs.flag<- c(rep(1,2*n),rep(0,2*m))
#Compute Weight matrix corresponding in-sample entries
oos.Weight.mat.1<-w.val.to.W.mat(w.val.l,(2*n),separability.entries.w,wt.equalize)
#Compute Weight matrix corresponding OOS entries
oos.Weight.mat.2<-w.val.to.W.mat(w.val.l,(2*m),separability.entries.w,wt.equalize)
# If assume.matched.for.oos is true, we assume OOS dissimilarities are matched(in reality,
# they are matched for the matched pairs, but unmatched for the unmatched pairs)
# If assume.matched.for.oos is true, we ignore the dissimilarities between matched/unmatched
# pairs
if (!assume.matched.for.oos){
oos.Weight.mat.2[1:m,m+(1:m)]<-0
oos.Weight.mat.2[m+(1:m),(1:m)]<-0
}
# if (oos.use.imputed is true) we treat the dissimiilarities between in-sample and out-of-sample measurements
# from different conditions like fidelity terms
# otherwise they are ignored
if (oos.use.imputed){
oos.Weight.mat.w <- matrix(1-w.val.l,2*n,2*m)
} else{
oos.Weight.mat.w <- rbind(cbind(matrix(1-w.val.l,n,m), matrix(0,n,m) ),
cbind(matrix(0,n,m),matrix(1-w.val.l,n,m))
)
}
oos.Weight.mat<-omnibusM(oos.Weight.mat.1,oos.Weight.mat.2,oos.Weight.mat.w)
# Since we are going to oos-embedding, set the weights of in-sample embedding of stress
# We are using previous in-sample embeddings, anyway
oos.Weight.mat[1:(2*n),1:(2*n)]<-0
if (verbose) print("dim(M.oos.0)")
if (verbose) print(dim(M.oos.0))
if (verbose) print("dim(M.oos.A)")
if (verbose) print(dim(M.oos.A))
if (verbose) print("dim(oos.Weight.mat)")
if (verbose) print(dim(oos.Weight.mat))
if (verbose) print("dim(X)")
if (verbose) print(dim(X))
#if (verbose) {print("oos.obs.flag")
ideal.omnibus.0 <- as.matrix(dist(rbind(X1,X2,Y1,Y20)))
ideal.omnibus.A <- as.matrix(dist(rbind(X1,X2,Y1,Y2A)))
omnibus.oos.D.0 <- omnibusM(M,M.oos.0,ideal.omnibus.0[1:(2*n),(2*n)+(1:(2*m))])
omnibus.oos.D.A <- omnibusM(M,M.oos.A, ideal.omnibus.A[1:(2*n),(2*n)+(1:(2*m))])
oos.Weight.mat[is.na(omnibus.oos.D.0)]<-0
omnibus.oos.D.0[is.na(omnibus.oos.D.0)]<-1
omnibus.oos.D.A[is.na(omnibus.oos.D.A)]<-1
if (verbose) print("JOFC null omnibus OOS embedding \n")
#if (profile.mode) Rprof("profile-oosIM.out",append=TRUE)
Y.0t<-oosIM(D=omnibus.oos.D.0,
X=X,
init = "random",
verbose = FALSE,
itmax = 1000,
eps = 1e-8,
W = oos.Weight.mat,
isWithin = oos.obs.flag,
bwOos = TRUE)
if (verbose) print("JOFC alternative omnibus OOS embedding \n")
Y.At<-oosIM(D=omnibus.oos.D.A,
X=X,
init = "random",
verbose = FALSE,
itmax = 1000,
eps = 1e-8,
W = oos.Weight.mat,
isWithin = oos.obs.flag,
bwOos = TRUE)
#if (profile.mode) Rprof(NULL)
Y1t<-Y.0t[1:m,]
Y2t<-Y.0t[m+(1:m),]
Y1t.A<-Y.At[1:m,]
Y2At<-Y.At[m+(1:m),]
X2tp<-pom.config[n+(1:n),]
X1t<-pom.config[(1:n),]
X.0<-rbind(X1t,X2tp)
X.a<-X[1:n,]
X.b<-X[n+(1:n),]
mean.a <- colMeans(X.a)
mean.b <- colMeans(X.b)
# means[l,]<- c(mean.a,mean.b)
proc.pom2JOFC <- MCMCpack::procrustes(X,X.0,dilation=FALSE,translation=TRUE)
#proc.pom2JOFC.a <- MCMCpack::procrustes(X.a,X1t,dilation=FALSE,translation=TRUE)
#proc.pom2JOFC.b <- MCMCpack::procrustes(X.b,X2tp,dilation=FALSE,translation=TRUE)
#X.c<-rbind(X.a-mean.a,X.b-mean.b)
#proc.pom2JOFC.a <- MCMCpack::procrustes(X.c,X.0,dilation=FALSE,translation=TRUE)
config.mismatch$frob.norm[l] <- norm (proc.pom2JOFC$X.new-X.0,'F')
#config.mismatch[l,2] <- norm (proc.pom2JOFC.a$X.new-X.0,'F')
#config.mismatch[l,3] <- norm (proc.pom2JOFC.b$X.new-X2tp,'F')
# if (verbose) print(means[l,])
T0[l,] <- rowSums((Y1t - Y2t)^2)
TA[l,] <- rowSums((Y1t.A - Y2At)^2)
if (verbose) print("JOFC test statistic complete \n")
power.mcnemar.l <- get_power(T0[l,],TA[l,],level.mcnemar)
if (power.mcnemar.l>power.w.star){
rival.w <- w.vals[l]
power.w.star <- power.mcnemar.l
w.val.rival.idx <- l
}
}
}
else {
M0 <- omnibusM(D10A, D20, W=(D10A+D20)/2)
MA <- omnibusM(D10A, D2A, W=(D10A+D2A)/2)
X0.embeds<-JOFC.Fid.Commens.Tradeoff(M0,d,w.vals,separability.entries.w,wt.equalize=wt.equalize)
XA.embeds<-JOFC.Fid.Commens.Tradeoff(MA,d,w.vals,separability.entries.w,wt.equalize=wt.equalize)
for (l in 1:w.max.index){
X0 <- X0.embeds[[l]]
XA <- XA.embeds[[l]]
T0[l,] <- rowSums((X0[(n+1):(n+m), ] - X0[(n+m+n+1):(n+m+n+m), ])^2)
TA[l,] <- rowSums((XA[(n+1):(n+m), ] - XA[(n+m+n+1):(n+m+n+m), ])^2)
#Not done yet
#if (compare.pom.cca){
#X.0<-rbind(X1t,X2t %*% proc$R * proc$s)
#proc.pom2JOFC <- MCMCpack::procrustes(X,X.0,dilation=FALSE)
#config.mismatch[l] <- norm (proc.pom2JOFC$X.new-X.0,'F')
#}
}
}
# Power comparison test
# In order to compare the best w^* vs w=0.5 in an unbiased way
# re-run the simulation only for w= w^* and w=0.5
# compute the contingency table using those results
#if (power.comparison.test){
## n pairs of matched points
if (model=="gaussian"){
xlist <- matched_rnorm(n, p, q, c.val, r, alpha=alpha.mc[1:n, ],sigma.alpha=sigma,
old.gauss.model.param=old.gauss.model.param, sigma.beta=sigma.mc)
} else{
xlist <- matched_rdirichlet(n, p, r, q, c.val, alpha.mc[1:n, ])
}
X1 <- xlist$X1
X2 <- xlist$X2
D1 <- dist(X1)
D2 <- dist(X2)
if (verbose) print("random matched pairs generated\n")
if (pre.scaling) {
s <- lm(as.vector(D1) ~ as.vector(D2) + 0)$coefficients
} else {
s <- 1
}
if (model=="gaussian"){
## test observations -- m pairs of matched and m pairs of unmatched
ylist <- matched_rnorm(m, p, q, c.val, r, alpha=alpha.mc[(n+1):(n+m), ],sigma.alpha=sigma,
old.gauss.model.param=old.gauss.model.param,sigma.beta=sigma.mc)
Y2A <- matched_rnorm(m, p, q, c.val, r, alpha=alpha.mc[(n+m+1):(n+m+m), ],sigma.alpha=sigma,
old.gauss.model.param=old.gauss.model.param,sigma.beta=sigma.mc)$X2
} else{
ylist <- matched_rdirichlet(m, p, r, q, c.val, alpha.mc[(n+1):(n+m), ])
Y2A <- matched_rdirichlet(m, p, r, q, c.val, alpha.mc[(n+m+1):(n+m+m), ])$X2
}
Y1 <- ylist$X1
Y20 <- ylist$X2
D10A <- as.matrix(dist(rbind(X1, Y1)))
D20 <- as.matrix(dist(rbind(X2, Y20))) * s
D2A <- as.matrix(dist(rbind(X2, Y2A))) * s
D1<-as.matrix(D1)
D2<-as.matrix(D2)
D2<-D2*s
## ==== jofc ====
if (Wchoice == "avg") {
L <- (D1 + D2)/2
} else if (Wchoice == "sqrt") {
L <- sqrt((D1^2 + D2^2)/2)
} else if (Wchoice == "NA+diag(0)") {
L <- matrix(NA,n,n)
diag(L)<- 0
}
if (oos == TRUE) {
#In sample embedding
M <- omnibusM(D1, D2, L)
init.conf<-NULL
if (compare.pom.cca) init.conf<- pom.config
#
# Use only def.w=0.5 and rival.w for w.vals
X.embeds.compare<-JOFC.Fid.Commens.Tradeoff(M,d,c(def.w,rival.w),separability.entries.w,init.conf=init.conf,wt.equalize=wt.equalize)
if (verbose) print("JOFC embeddings complete\n")
#
# OOS Dissimilarity matrices
#
D.oos.1<-dist(Y1)
D.oos.2.null <- dist(Y20) *s
D.oos.2.alt <- dist(Y2A) *s
if (Wchoice == "avg") {
L.tilde.null <- (D.oos.1 + D.oos.2.null)/2
L.tilde.alt <- (D.oos.1 + D.oos.2.alt)/2
} else if (Wchoice == "sqrt") {
L.tilde.null <- sqrt((D.oos.1^2 + D.oos.2.null^2)/2)
L.tilde.alt <- sqrt((D.oos.1^2 + D.oos.2.alt^2)/2)
} else if (Wchoice == "NA+diag(0)") {
L.tilde.null <- matrix(NA,m,m)
L.tilde.alt <- matrix(NA,m,m)
diag(L.tilde.null)<- 0
diag(L.tilde.alt)<- 0
}
M.oos.0<- omnibusM(D.oos.1,D.oos.2.null, L.tilde.null)
M.oos.A<- omnibusM(D.oos.1,D.oos.2.alt, L.tilde.alt)
for (l in 1:2){
if (verbose) print(paste(rival.w))
if (l==1){
w.val.l <- def.w
}
else {
w.val.l <- rival.w
}
X<-X.embeds.compare[[l]]
oos.obs.flag<- c(rep(1,2*n),rep(0,2*m))
oos.Weight.mat.1<-w.val.to.W.mat(w.val.l,(2*n),separability.entries.w,wt.equalize)
oos.Weight.mat.2<-w.val.to.W.mat(w.val.l,(2*m),separability.entries.w,wt.equalize)
if (!assume.matched.for.oos){
oos.Weight.mat.2[1:m,m+(1:m)]<-0
oos.Weight.mat.2[m+(1:m),(1:m)]<-0
}
if (oos.use.imputed){
oos.Weight.mat.w <- matrix(1-w.val.l,2*n,2*m)
} else{
oos.Weight.mat.w <- rbind(cbind(matrix(1-w.val.l,n,m), matrix(0,n,m) ),
cbind(matrix(0,n,m),matrix(1-w.val.l,n,m))
)
}
oos.Weight.mat<-omnibusM(oos.Weight.mat.1,oos.Weight.mat.2,oos.Weight.mat.w)
oos.Weight.mat[1:(2*n),1:(2*n)]<-0
ideal.omnibus.0 <- as.matrix(dist(rbind(X1,X2,Y1,Y20)))
ideal.omnibus.A <- as.matrix(dist(rbind(X1,X2,Y1,Y2A)))
omnibus.oos.D.0 <- omnibusM(M,M.oos.0, ideal.omnibus.0[1:(2*n),(2*n)+(1:(2*m))])
omnibus.oos.D.A <- omnibusM(M,M.oos.A, ideal.omnibus.A[1:(2*n),(2*n)+(1:(2*m))])
oos.Weight.mat[is.na(omnibus.oos.D.0)]<-0
omnibus.oos.D.0[is.na(omnibus.oos.D.0)]<-1
omnibus.oos.D.A[is.na(omnibus.oos.D.A)]<-1
if (verbose) print("JOFC null omnibus OOS embedding \n")
# if (profile.mode) Rprof("profile-oosIM.out",append=TRUE)
Y.0t<-oosIM(D=omnibus.oos.D.0,
X=X,
init = "random",
verbose = FALSE,
itmax = 1000,
eps = 1e-8,
W = oos.Weight.mat,
isWithin = oos.obs.flag,
bwOos = TRUE)
if (verbose) print("JOFC alternative omnibus OOS embedding \n")
Y.At<-oosIM(D=omnibus.oos.D.A,
X=X,
init = "random",
verbose = FALSE,
itmax = 1000,
eps = 1e-8,
W = oos.Weight.mat,
isWithin = oos.obs.flag,
bwOos = TRUE)
#if (profile.mode) Rprof(NULL)
Y1t<-Y.0t[1:m,]
Y2t<-Y.0t[m+(1:m),]
Y1t.A<-Y.At[1:m,]
Y2At<-Y.At[m+(1:m),]
T0.best.w[l,] <- rowSums((Y1t - Y2t)^2)
TA.best.w[l,] <- rowSums((Y1t.A - Y2At)^2)
if (verbose) print("JOFC test statistic complete \n")
}
}
w.val.def.idx <- which(w.vals==def.w)
w.val.rival.idx<- which(w.vals==rival.w)
crit.value<-get_crit_val(T0.best.w[1,],level.mcnemar)
crit.value.2<-get_crit_val(T0.best.w[2,],level.mcnemar)
if (verbose){
print("crit.values")
print(crit.value)
print(crit.value.2)
}
cont.table[1,1] <- sum(T0.best.w[1,]<=crit.value & T0.best.w[2,]<=crit.value.2) +
sum(TA.best.w[1,]>crit.value & TA.best.w[2,]>crit.value.2)
cont.table[1,2] <- sum(T0.best.w[1,]>crit.value & T0.best.w[2,]<=crit.value.2) +
sum(TA.best.w[1,]<=crit.value & TA.best.w[2,]>crit.value.2)
cont.table[2,1] <- sum(T0.best.w[1,]<=crit.value & T0.best.w[2,]>crit.value.2) +
sum(TA.best.w[1,]>crit.value & TA.best.w[2,]<=crit.value.2)
cont.table[2,2] <- sum(T0.best.w[1,]>crit.value & T0.best.w[2,]>crit.value.2) +
sum(TA.best.w[1,]<=crit.value & TA.best.w[2,]<=crit.value.2)
if (verbose) print("Cont table computed \n")
if (verbose) print(cont.table)
# }
for (l in 1:w.max.index){
power.mc[l, ] <- get_power(T0[l,], TA[l,], size)
}
FidComm.Terms<- list(F1=Fid.Err.Term.1,F2=Fid.Err.Term.2,C=Comm.Err.Term)
FidComm.Sum.Terms <- list(F1=Fid.Err.Sum.Term.1,F2=Fid.Err.Sum.Term.2,C=Comm.Err.Sum.Term)
if (verbose) print(str(FidComm.Terms))
if (verbose) print("FC.ratio")
if (verbose) print(str(FC.ratio))
if (verbose) print("FC.ratio.2")
if (verbose) print(str(FC.ratio.2))
if (verbose) print("FC.ratio.3")
if (verbose) print(str(FC.ratio.3))
print("end run.mc.replicate")
list(power.mc=power.mc,power.cmp=list(cca = power.cca.mc,pom = power.pom.mc,cca.reg =power.cca.reg.mc), cont.tables=cont.table,
config.dist= config.mismatch, min.stress=c(min.stress.for.w.val,pom.stress),means=means,FidComm.Terms=FidComm.Terms,
FidComm.Sum.Terms = FidComm.Sum.Terms,F.to.C.ratio = FC.ratio, wtF.to.C.ratio=FC.ratio.2,
F.bar.to.C.bar.ratio= FC.ratio.3,optim.power=optim.power
)
}
w.val.to.W.mat<-function(w,n,sep.err.w,wt.equalize){
	## Build the n x n SMACOF weight matrix for a JOFC embedding with
	## fidelity/commensurability tradeoff parameter w.  Rows/cols 1..n/2 are
	## condition 1, the rest condition 2; matched-pair entries get weight w
	## (optionally rescaled), all other off-diagonal entries get 1 - w, the
	## diagonal is 0, and the whole matrix is normalized by (w' + (1 - w)).
	half <- n / 2
	pair.idx <- 1:half
	Wt <- matrix(1 - w, n, n)
	## (row, col) index pairs of the matched entries, in both triangles.
	matched <- rbind(cbind(pair.idx, half + pair.idx),
	                 cbind(half + pair.idx, pair.idx))
	if (sep.err.w == FALSE) {
		## Ignore separability (between-condition, non-matched) entries.
		Wt[pair.idx, half + pair.idx] <- 0
		Wt[half + pair.idx, pair.idx] <- 0
		scale.f <- (1 / 2) * (n - 2)
	} else {
		scale.f <- n - 2
	}
	if (wt.equalize == FALSE) {
		scale.f <- 1
	}
	diag(Wt) <- 0
	w.scaled <- w * scale.f
	Wt[matched] <- w.scaled
	Wt / (w.scaled + (1 - w))
}
#JOFC.data.test.stats()
JOFC.Fid.Commens.Tradeoff <-function(D,ndimens,w.vals,sep.err.w,init.conf,wt.equalize){
	# Embed the omnibus dissimilarity matrix D once per tradeoff value in
	# w.vals with weighted SMACOF, collecting per-embedding fidelity and
	# commensurability error terms.
	#   D:          2k x 2k omnibus dissimilarity matrix (two matched
	#               conditions, k points each); NA entries get weight 0
	#   ndimens:    embedding dimension passed to smacofM
	#   w.vals:     vector of tradeoff weights, one embedding per value
	#   sep.err.w, wt.equalize: forwarded to w.val.to.W.mat
	#   init.conf:  initial configuration for smacofM (may be NULL)
	# Returns a list of length(w.vals) + 10: the embeddings, followed by
	# stress.vec, fid1.vec, fid2.vec, comm.vec, fid1.sum.vec, fid2.sum.vec,
	# comm.sum.vec, FC.ratio, FC.ratio.2, FC.ratio.3 — callers index these
	# as [[w.max.index + 1]] .. [[w.max.index + 10]].
	#	if (profile.mode) Rprof("JOFC.FC.out",append=TRUE)
	n<- nrow(D)
	smacof.embed<-list()
	stress.vec<-c()
	comm.sum.vec<-c()
	fid1.sum.vec<-c()
	fid2.sum.vec<-c()
	comm.vec<-c()
	fid1.vec<-c()
	fid2.vec<-c()
	half.n<- n/2
	for (w in w.vals){
		# Weight matrix for this w; missing dissimilarities get weight 0 and
		# are replaced by a dummy value (1) so smacofM sees a complete matrix.
		Weight.Mat<-w.val.to.W.mat(w,n,sep.err.w,wt.equalize)
		Weight.Mat[is.na(D)]<-0
		D[is.na(D)] <-1
		new.embed <- smacofM(D,ndimens	, W=Weight.Mat	,
				init	= init.conf,
				verbose	= FALSE,
				itmax	= 1000,
				eps	= 1e-6)
		smacof.embed<-c(smacof.embed,list(new.embed ))
		# Squared residuals per pair, in dist-object linear order: element
		# (i, j), i < j, sits at index n*(i-1) - i*(i-1)/2 + (j - i).
		stress.mat <- (as.dist(D) - dist(new.embed))^2
		comm.term <- 0
		fid.term.1 <-0
		fid.term.2 <-0
		# Offset half.n selects the matched pair (i, i + half.n) =>
		# commensurability; offsets with j <= half.n stay inside the
		# condition-1 block => fidelity term 1.
		for (i in 1:(half.n-1)) {
			comm.term <- comm.term + (stress.mat [n*(i-1) - i*(i-1)/2 + half.n])
			for (j in (i+1):half.n) {
				fid.term.1 <- fid.term.1 + (stress.mat [n*(i-1) - i*(i-1)/2 + j-i])
			}
		}
		# The i == half.n matched pair is added separately because the inner
		# fidelity loop above would be empty/invalid for that i.
		i <- half.n
		comm.term <- comm.term + (stress.mat [n*(i-1) - i*(i-1)/2 + half.n])
		# Condition-2 within-block pairs => fidelity term 2.
		for (i in (half.n+1):(n-1)) {
			for (j in (i+1):n) {
				fid.term.2 <- fid.term.2 + (stress.mat [n*(i-1) - i*(i-1)/2 + j-i])
			}
		}
		fid1.sum.vec <- c(fid1.sum.vec,fid.term.1)
		fid2.sum.vec <- c(fid2.sum.vec,fid.term.2)
		comm.sum.vec <- c(comm.sum.vec,comm.term)
		# Weighted stress: the objective smacofM actually minimized.
		stress.mat<-as.dist(Weight.Mat) * stress.mat
		stress <- sum(stress.mat)
		stress.vec<-c(stress.vec,stress)
		# Per-term averages: choose(half.n, 2) fidelity pairs per condition,
		# half.n matched (commensurability) pairs.
		num.fid.terms<-half.n*(half.n-1)/2
		fid1.vec <- c(fid1.vec,fid.term.1/num.fid.terms)
		fid2.vec <- c(fid2.vec,fid.term.2/num.fid.terms)
		comm.vec <- c(comm.vec,comm.term/half.n)
	}
	# Fidelity-to-commensurability ratios per w: raw sums, (1-w)/w-weighted
	# sums, and per-term averages respectively.
	FC.ratio <- (fid1.sum.vec+fid2.sum.vec)/comm.sum.vec
	FC.ratio.2 <- ((1-w.vals)/w.vals)*(fid1.sum.vec+fid2.sum.vec)/comm.sum.vec
	FC.ratio.3 <- (fid1.vec + fid2.vec) / comm.vec
	smacof.embed<-c(smacof.embed,list(stress.vec),list(fid1.vec),list(fid2.vec),list(comm.vec) ,
			list(fid1.sum.vec),list(fid2.sum.vec),list(comm.sum.vec),list(FC.ratio),list(FC.ratio.2),list(FC.ratio.3))
	#print("length(smacof.embed)")
	#print(length(smacof.embed))
	#print(sum(smacof.embed[[1]]-smacof.embed[[2]]))
	#print(sum(smacof.embed[[1]]-smacof.embed[[length(w.vals)]]))
	#	if (profile.mode) Rprof(NULL)
	return(smacof.embed)
}
matched_rnorm_old_form<- function(n, p, q, c, r, alpha,sigma.alpha) {
	## Return n pairs of matched Normal distributed random vectors, given by
	## X_{ik} ~ (1-c) Norm(alpha[i] ,I) + c Norm(0,SIGMA+I), i = 1, ..., n; k = 1, 2,
	## where alpha[i] gaussian distributed common means, Norm(0,SIGMA+I)is gaussian
	## noise on R^q
	## n: number of matched pairs; p: signal dimension; q: noise dimension;
	## c: noise mixing weight in [0,1] (c == 0 => signal columns only);
	## r: accepted for signature parity with matched_rnorm, not referenced here;
	## alpha: n x p matrix of per-pair common means;
	## sigma.alpha: its largest eigenvalue bounds the random noise covariance.
	## Returns list(X1, X2).
	## NOTE(review): random draws occur in a fixed order (signal loop, then
	## Posdef, then the two noise blocks); reordering them changes results
	## under a fixed RNG seed.
	signal1 <- matrix(0, n, p)
	signal2 <- matrix(0, n, p)
	# Identity signal covariances for both conditions.
	sigma.beta<- diag(p)
	sigma.eta <- diag(p)
	for (i in 1:n) {
		signal1[i, ] <- mvrnorm(1, alpha[i,],sigma.beta)
		signal2[i, ] <- mvrnorm(1, alpha[i,],sigma.eta)
	}
	# sigma.X / sigma.Y are computed but not used below.
	sigma.X <-sigma.alpha+sigma.beta
	sigma.Y <-sigma.alpha+sigma.eta
	#	eig.1 <- eigen(sigma.X)
	#	eig.2 <- eigen(sigma.Y)
	# Random q x q positive-definite covariance with eigenvalues bounded by the
	# largest eigenvalue of sigma.alpha; the same covariance is shared by both
	# conditions' noise.
	noise.sigma.1<- Posdef(q,
			max(eigen(sigma.alpha, symmetric=TRUE, only.values = TRUE)$values))
	noise.sigma.2<- noise.sigma.1
	noise1 <- mvrnorm(n, rep(0,q), noise.sigma.1)
	noise2 <- mvrnorm(n, rep(0,q), noise.sigma.2)
	if (c == 0) {
		return(list(X1=signal1, X2=signal2))
	} else {
		# Mix: (1-c)-scaled signal columns followed by c-scaled noise columns.
		return(list(X1=cbind((1-c)*signal1, c*noise1),
				X2=cbind((1-c)*signal2, c*noise2)))
	}
}
matched_rnorm<- function(n, p, q, c, r, alpha,sigma.alpha,old.gauss.model.param,sigma.beta=NULL) {
	# Delegate to the legacy generator when the old-model flag is set.
	if (old.gauss.model.param) return (matched_rnorm_old_form(n,p,q,c,r,alpha,sigma.alpha))
	## Return n pairs of matched Normal distributed random vectors, given by
	## X_{ik} ~ (1-c) Norm(alpha[i] ,I/r) + c Norm(0,(1+1/r)I), i = 1, ..., n; k = 1, 2,
	## where alpha[i] gaussian distributed common means, Norm(I(1+1/r)) is gaussian
	## noise on R^q
	## n: number of matched pairs; p: signal dimension; q: noise dimension;
	## c: noise mixing weight in [0,1]; r: precision-like scale (signal cov ~ 1/r);
	## alpha: n x p matrix of per-pair means; sigma.alpha: not referenced in
	## this (new-model) branch; sigma.beta: optional shared signal covariance,
	## drawn via Posdef(p, 1/r) when NULL.
	## Returns list(X1, X2, sigma.beta).
	## NOTE(review): draws happen in a fixed order (Posdef, signal loop, noise
	## blocks); reordering changes results under a fixed RNG seed.
	signal1 <- matrix(0, n, p)
	signal2 <- matrix(0, n, p)
	if (is.null(sigma.beta))
		sigma.beta<- Posdef(p,1/r)
	# Both conditions share the same signal covariance.
	sigma.eta <- sigma.beta
	for (i in 1:n) {
		signal1[i, ] <- mvrnorm(1, alpha[i,],sigma.beta)
		signal2[i, ] <- mvrnorm(1, alpha[i,],sigma.eta)
	}
	# Isotropic noise with variance (1 + 1/r) in each of the q dimensions.
	noise.sigma.1<- (1+1/r)*diag(q)
	noise.sigma.2<- noise.sigma.1
	# matrix() coerces the draw to an n x q matrix (presumably guarding a
	# degenerate shape from mvrnorm, e.g. q == 1 — TODO confirm).
	noise1 <- matrix(mvrnorm(n, rep(0,q), noise.sigma.1),n,q)
	noise2 <- matrix(mvrnorm(n, rep(0,q), noise.sigma.2),n,q)
	if (c == 0) {
		return(list(X1=signal1, X2=signal2,sigma.beta=sigma.beta))
	} else {
		# Mix: (1-c)-scaled signal columns followed by c-scaled noise columns.
		return(list(X1=cbind((1-c)*signal1, c*noise1),
				X2=cbind((1-c)*signal2, c*noise2)
				,sigma.beta=sigma.beta
				)
			)
	}
}
matched_rdirichlet <- function(n, p, r, q, c, alpha) {
	## Return n pairs of matched Dirichlet distributed random vectors, given by
	## X_{ik} ~ (1-c) Dir(r alpha[i] + 1) + c Dir(1), i = 1, ..., n; k = 1, 2,
	## where alpha are given, Dir(r alpha[i] + 1) is on Delta^p, Dir(1) is uniform
	## noise on Delta^q
	## n: number of pairs; p: signal simplex dimension (signal has p+1 columns,
	## so alpha must be n x (p+1)); r: concentration scale; q: noise simplex
	## dimension (q+1 columns); c: noise mixing weight (c == 0 => signal only).
	## Returns list(X1, X2).
	## NOTE(review): the pairwise signal loop runs before the noise draws;
	## reordering changes results under a fixed RNG seed.
	signal1 <- matrix(0, n, p+1)
	signal2 <- matrix(0, n, p+1)
	# Both members of a pair share the same Dirichlet parameter vector.
	for (i in 1:n) {
		signal1[i, ] <- rdirichlet(1, r*alpha[i, ]+1)
		signal2[i, ] <- rdirichlet(1, r*alpha[i, ]+1)
	}
	# Uniform noise on the q-simplex.
	noise1 <- rdirichlet(n, rep(1, q+1))
	noise2 <- rdirichlet(n, rep(1, q+1))
	if (c == 0) {
		return(list(X1=signal1, X2=signal2))
	} else {
		# Mix: (1-c)-scaled signal columns followed by c-scaled noise columns.
		return(list(X1=cbind((1-c)*signal1, c*noise1),
				X2=cbind((1-c)*signal2, c*noise2)))
	}
}
get_crit_val<- function(T0,size)
## Empirical critical value for a right-tailed test: the
## round(n * (1 - size))-th order statistic of the null statistics T0.
{
	n.null <- length(T0)
	sorted.null <- sort(T0)
	sorted.null[round(n.null * (1 - size))]
}
get_power <- function(T0, TA, size)
## T0: values of test statistic under H0
## TA: values of test statistic under HA
## Empirical power at each nominal size: the fraction of TA exceeding the
## round(n * (1 - size))-th order statistic of T0 (with power fixed at 0
## for size 0 and at 1 for size 1).
{
	n.null <- length(T0)
	null.sorted <- sort(T0)
	vapply(size, function(s) {
		if (s == 0) {
			0
		} else if (s == 1) {
			1
		} else {
			sum(TA > null.sorted[round(n.null * (1 - s))]) / n.null
		}
	}, numeric(1))
}
omnibusM <- function(D1, D2, W)
## Assemble the two-condition omnibus dissimilarity matrix
##   [ D1    W  ]
##   [ t(W)  D2 ]
## from the within-condition matrices D1, D2 and the between-condition
## block W (dist objects are accepted and coerced).
{
	top <- cbind(as.matrix(D1), as.matrix(W))
	bottom <- cbind(t(as.matrix(W)), as.matrix(D2))
	rbind(top, bottom)
}
plot.MC.evalues.with.CI<-function(evalues.mc,plot.title,plot.col,conf.int=TRUE,add=FALSE){
	# Plot the Monte-Carlo mean of a set of eigenvalue curves with optional
	# +/- 2*SD error bars.
	#   evalues.mc: num.sims x evalue.count matrix, one row per MC replicate
	#   plot.title: main title (effective when starting a fresh plot)
	#   plot.col:   line / error-bar color
	#   conf.int:   draw +/- 2*SD bars when TRUE
	#   add:        add to the current plot instead of creating a new one
	# Side effects only: draws on the active graphics device and temporarily
	# sets par(lty) (restored to 1 at the end).
	num.sims<-dim(evalues.mc)[1]
	evalue.count <- dim(evalues.mc)[2]
	fp.points <- 1:evalue.count
	num.plot.points <-evalue.count
	# Pointwise MC mean, ignoring missing replicates.
	y.points<-colMeans(evalues.mc,na.rm=TRUE)
	var.y.points <-rep (0,num.plot.points)
	valid.sample.count <- rep (0,num.plot.points)
	# Accumulate squared deviations per point, counting only non-NA replicates.
	for (i in 1:num.sims){
		err.points <- evalues.mc[i,]-y.points
		err.points <- err.points^2
		for (j in 1:num.plot.points){
			if (!is.na(evalues.mc[i,j])){
				var.y.points[j] <- var.y.points[j] + err.points[j]
				valid.sample.count[j] <- valid.sample.count[j] + 1
			}
		}
	}
	# Divide-by-N variance estimate; the bars span +/- 2 standard deviations.
	var.y.points <- var.y.points/valid.sample.count
	std.y.points <- 2*sqrt(var.y.points)
	ucl <- y.points+std.y.points
	lcl <- y.points-std.y.points
	if (add){
		# NOTE(review): main/xlab/xlim/ylim have no effect in lines(); kept
		# from the original call for symmetry with the plot() branch.
		lines(x=fp.points,y= y.points,main=plot.title,
			xlab="Eigenvalues",col=plot.col,xlim=c(0,1),ylim=c(0,1),lwd=2.5)
	}
	else{
		plot(x=fp.points,y= y.points,main=plot.title,
			xlab="Eigenvalues",ylab="",col=plot.col,xlim=c(0,1),ylim=c(0,1),type='l',lwd=2.5)
	}
	if (conf.int){
		# Vertical error bars drawn as double-headed arrows.
		arrows(fp.points,ucl,fp.points,lcl,length=.05,angle=90,code=3, lty=3,col=plot.col)
	}
	# Dotted y = x reference line.
	par(lty=3)
	abline(0,1,col="blue")
	par(lty=1)
}
plot.ROC.with.CI<-function(plot.roc.points,plot.title,plot.col,conf.int=TRUE,add=FALSE,fp.points=seq(0,1,0.01),
		ispowercurve=TRUE,linewd=2.5,xlim=1,ylim=1){
	# Plot the MC-average ROC / power curve (beta vs alpha) with optional
	# +/- 2*SD error bars.
	#   plot.roc.points: num.sims x length(fp.points) matrix, one row per run
	#   fp.points:       x-axis grid (alpha values)
	#   xlim:            truncate the curve at this alpha value
	#   ispowercurve, ylim: accepted but not used below (ylim only appears in
	#                    the commented-out clipping block)
	#   add:             add to the current plot instead of creating a new one
	# Side effects only: draws on the active device; temporarily sets par(lty).
	num.sims<-dim(plot.roc.points)[1]
	# Truncate the x grid and the matching columns at xlim.
	fp.points <- fp.points[fp.points<=xlim]
	num.x.pts <- length(fp.points)
	plot.roc.points <- plot.roc.points[,1:num.x.pts]
	y.points<-colMeans(plot.roc.points,na.rm=TRUE)
	var.y.points <-rep (0,length(fp.points))
	# colVars is defined later in this file; bars span +/- 2 SD.
	var.y.points <- colVars(plot.roc.points,na.rm=TRUE)
	std.y.points <- 2*sqrt(var.y.points)
	ucl <- y.points+std.y.points
	lcl <- y.points-std.y.points
	#if (is.finite(max(y.points))){
	#	if (max(ucl) < ylim)
	#	 ylim <- max(y.points)
	#}
	if (add){
		# NOTE(review): main/xlab/ylab/xlim/ylim have no effect in lines();
		# kept from the original call for symmetry with the plot() branch.
		lines(x=fp.points,y= y.points,main=plot.title,
			xlab=expression(alpha),ylab=expression(beta),col=plot.col,xlim=c(0,xlim),ylim=c(0,1),lwd=linewd)
	}
	else{
		plot(x=fp.points,y= y.points,main=plot.title,
			xlab=expression(alpha),ylab=expression(beta),col=plot.col,xlim=c(0,xlim),ylim=c(0,1),type='l',lwd=linewd)
	}
	if (conf.int){
		# Vertical error bars drawn as double-headed arrows.
		arrows(fp.points,ucl,fp.points,lcl,length=.05,angle=90,code=3, lty=3,col=plot.col)
	}
	# Dotted chance (y = x) reference line.
	par(lty=3)
	abline(0,1,col="blue")
	par(lty=1)
}
plot.graph.with.CI<-function(plot.roc.points,plot.title,plot.col,conf.int=TRUE,add=FALSE,fp.points=seq(0,1,0.01),customx.labels=NULL,customy.labels=NULL,ispowercurve=TRUE){
	# Plot the MC-average of a family of curves with optional +/- 2*SD error
	# bars and optional custom axis tick labels.
	#   plot.roc.points: num.sims x length(fp.points) matrix, one row per run
	#   fp.points:       x coordinates
	#   customx.labels / customy.labels: when supplied, the corresponding
	#                    default axis is suppressed and replaced by custom
	#                    tick labels (at fp.points / y.points respectively)
	#   ispowercurve:    accepted but not referenced below
	# Side effects only: draws on the active graphics device.
	standardx.axis <- FALSE
	standardy.axis <- FALSE
	if (is.null(customx.labels))
		standardx.axis<-TRUE
	if (is.null(customy.labels))
		standardy.axis<-TRUE
	num.sims<-dim(plot.roc.points)[1]
	# Pointwise MC mean and +/- 2*SD band (colVars is defined later in file).
	y.points<-colMeans(plot.roc.points,na.rm=TRUE)
	var.y.points <-rep (0,length(fp.points))
	var.y.points <- colVars(plot.roc.points,na.rm=TRUE)
	std.y.points <- 2*sqrt(var.y.points)
	ucl <- y.points+std.y.points
	lcl <- y.points-std.y.points
	if (add){
		lines(x=fp.points,y= y.points,main=plot.title,
			col=plot.col,xaxt=ifelse(standardx.axis,"s","n"),
			yaxt=ifelse(standardy.axis,"s","n"), lwd=2.5,xlab="",ylab="")
	}
	else{
		plot(x=fp.points,y= y.points,main=plot.title,xaxt=ifelse(standardx.axis,"s","n"),
			yaxt=ifelse(standardy.axis,"s","n"), col=plot.col,type='l',lwd=2.5,xlab="",ylab="")
	}
	# Draw the suppressed axes with the caller-supplied labels.
	if (!standardx.axis)
		axis(1, at=fp.points,labels=customx.labels)
	if (!standardy.axis)
		axis(2, at=y.points,labels=customy.labels)
	if (conf.int){
		# Vertical error bars drawn as double-headed arrows.
		arrows(fp.points,ucl,fp.points,lcl,length=.05,angle=90,code=3, lty=3,col=plot.col)
	}
	par(lty=1)
}
get_epsilon_c <- function(X, Y)
## Return commensurability error: per-row mean of the squared differences
## between the paired configurations X and Y.
{
	squared.diff <- (X - Y)^2
	sum(squared.diff) / nrow(X)
}
get_epsilon_f <- function(D, DX)
## Return fidelity error: mean squared discrepancy between the lower
## triangles of the target dissimilarities D and the embedding
## dissimilarities DX.
{
	delta <- as.dist(D) - as.dist(DX)
	mean(delta^2)
}
get_epsilon <- function(D1, D1X, D2, D2X, X1t, X2t)
## Bundle the two fidelity errors and the commensurability error into a
## length-3 vector: c(fidelity1, fidelity2, commensurability).
{
	fid1 <- get_epsilon_f(D1, D1X)
	fid2 <- get_epsilon_f(D2, D2X)
	comm <- get_epsilon_c(X1t, X2t)
	c(fid1, fid2, comm)
}
get_power <- function(T0, TA, size)
## T0: values of test statistic under H0
## TA: values of test statistic under HA
## Empirical power at each nominal size (0 at size 0, 1 at size 1;
## otherwise the fraction of TA above the empirical null critical value).
## NOTE(review): this duplicates the get_power defined earlier in this
## file; when sourced, this later definition is the one that wins.
{
	null.n <- length(T0)
	null.sorted <- sort(T0)
	power <- numeric(length(size))
	for (i in seq_along(size)) {
		s <- size[i]
		power[i] <- if (s == 0) {
			0
		} else if (s == 1) {
			1
		} else {
			sum(TA > null.sorted[round(null.n * (1 - s))]) / null.n
		}
	}
	power
}
weight <- function(n, c = 1)
## Create the weight matrix for the W = diag(0) + NA scheme:
##   [ 1s     c*I ]
##   [ c*I    0s  ]
{
	ones <- matrix(1, n, n)
	zeros <- matrix(0, n, n)
	scaled.id <- c * diag(n)
	rbind(cbind(ones, scaled.id), cbind(scaled.id, zeros))
}
grassmannian <- function(Q1, Q2) {
	## Q1 and Q2 are two pxd projection matrices; the distance is the
	## spectral (largest-singular-value) norm of their difference.
	sv <- svd(Q1 - Q2)
	sv$d[1]
}
## theta <- acos(svd(t(Q1)%*%Q2)$d)
##
## then geodesic distance is sqrt(sum(theta^2)) (and there are a
## boatload of other distances computable from theta).
geo_dist <- function(Q1, Q2) {
	## Geodesic distance between the subspaces spanned by Q1 and Q2
	## (orthonormal columns): sqrt of the sum of squared principal angles.
	## The singular values of t(Q1) %*% Q2 are the cosines of the principal
	## angles; floating-point error can push them slightly outside [-1, 1],
	## which would make acos() return NaN, so clamp into the valid domain.
	cosines <- svd(t(Q1) %*% Q2)$d
	theta <- acos(pmin(pmax(cosines, -1), 1))
	sqrt(sum(theta^2))
	## sum(theta^2)
}
Haursdorf_dist <- function(Q1,Q2)
## Hausdorff-type subspace distance derived from the geodesic distance.
## (Name kept as-is — "Hausdorff" is the intended spelling — because
## callers reference this identifier.)
{
	half.angle <- geo_dist(Q1, Q2) / 2
	sin(half.angle)
}
Posdef <- function (dim, maxev = 1)
## Generate a random dim x dim symmetric positive-definite matrix.
## Eigenvalues are Uniform(0, maxev), with the largest pinned at exactly
## maxev; a random orthogonal basis comes from the QR decomposition of a
## Gaussian matrix, sign-corrected so the factorization is unique.
{
	eigvals <- c(runif(dim - 1, 0, maxev), maxev)
	gauss <- matrix(ncol = dim, rnorm(dim^2))
	qr.fact <- qr(gauss)
	q.mat <- qr.Q(qr.fact)
	r.diag <- diag(qr.R(qr.fact))
	## Fix the sign ambiguity of the QR factors.
	signs <- r.diag / abs(r.diag)
	ortho <- q.mat %*% diag(signs)
	t(ortho) %*% diag(eigvals) %*% ortho
}
## polarity <- function(X, Xstar)
## ## Change the signs of each column of X to best match Xstar
## ## in the sum of squared difference sense
## ##
## ## Return a ncol(X) by ncol(X) diagonal matrix Q, with {1, -1}
## ## entries, such that ||XQ - Xstar|| is minimized
## {
## d <- ncol(X)
## ss <- rep(0, 2^d)
## diag.entries <- as.matrix(expand.grid(lapply(1:d, function(i) c(1,-1))))
## for (i in 1:2^d) {
## ss[i] <- sum((X %*% diag(diag.entries[i, ]) - Xstar)^2)
## }
## diag(diag.entries[which.min(ss), ])
## }
polarity <- function(X, Xstar)
## Change the signs of each column of X to best match Xstar
## in the sum of squared difference sense
##
## Return a ncol(X) by ncol(X) diagonal matrix Q, with {1, -1}
## entries, such that ||XQ - Xstar|| is minimized
{
	## A column's sign flips exactly when -X[, i] is closer to Xstar[, i]
	## than X[, i] is.  Comparing column sums of squares replaces the
	## original per-column loop; note (-X - Xstar)^2 == (X + Xstar)^2.
	flip <- colSums((X - Xstar)^2) > colSums((X + Xstar)^2)
	signs <- ifelse(flip, -1L, 1L)
	## diag(x, nrow =) keeps a length-1 vector as a 1x1 matrix instead of
	## interpreting a scalar as a dimension (fixes the d == 1 case, where
	## diag(-1L) would error).
	diag(signs, nrow = length(signs))
}
impute_dYX <- function(D, W, dYX, k=3) {
	## get imputed distances between oos objects Y and within-sample
	## objects X of different conditions. That is, d(Y2, X1) or d(Y1, X2)
	##
	## D = dist(X1) or dist(X2)
	## W = imputed distance between X1 and X2
	## dYX = dist(Y1, X1) or dist(Y2, X2)
	## k = number of nearest in-sample neighbors to average over
	## Returns a matrix of imputed cross-condition distances, one row per
	## OOS object.
	D <- as.matrix(D)
	W <- as.matrix(W)
	# D is only used for its column count (number of in-sample objects).
	n <- ncol(D)
	# For each OOS object: indices of its k nearest in-sample neighbors.
	# rnorm(n) is a random tie-breaker, so results are non-deterministic
	# when dYX contains ties.
	ord <- t(apply(dYX, 1, function(dyx) order(dyx, rnorm(n))))[, 1:k]
	# Impute each cross-condition distance as the column mean of W over
	# those k nearest neighbors.
	imputed.dYX <- t(apply(ord, 1, function(ii) colMeans(W[ii, ])))
	imputed.dYX
}
generateX <- function(alpha, r, q, c) {
	## Dir(r alpha_i + 1) is on Delta^p, p=ncol(alpha)
	## Consider uniform noise Dir(1) on Delta^q
	## X_{ik} ~ (1-c) Dir(r alpha_i + 1) + c Dir(1)
	##
	## Returns list(X1, X2): two matched data matrices drawn around the same
	## per-row Dirichlet parameters; when c > 0, scaled noise columns are
	## appended to the scaled signal columns.
	##
	## BUG FIX: the c == 0 branch previously returned X1=Signal2, so both
	## outputs were the *same* draw and the "matched pairs" were identical
	## copies rather than two independent draws around a common mean.
	n <- nrow(alpha)
	p <- ncol(alpha)
	Signal1 <- matrix(0, n, p)
	Signal2 <- matrix(0, n, p)
	# Each pair shares one Dirichlet parameter vector but is drawn twice.
	for (i in 1:n) {
		Signal1[i, ] <- rdirichlet(1, r*alpha[i, ]+1)
		Signal2[i, ] <- rdirichlet(1, r*alpha[i, ]+1)
	}
	# Uniform noise on the q-simplex.
	Noise1 <- rdirichlet(n, rep(1, q+1))
	Noise2 <- rdirichlet(n, rep(1, q+1))
	if (c == 0) {
		return(list(X1=Signal1, X2=Signal2))
	} else {
		return(list(X1=cbind((1-c)*Signal1, c*Noise1),
				X2=cbind((1-c)*Signal2, c*Noise2)))
	}
}
colVars <- function(x, na.rm=FALSE, dims=1, unbiased=TRUE, SumSquares=FALSE,
                    twopass=FALSE) {
	## Column-wise variance of a matrix/array (one-pass formula by default).
	##   SumSquares = TRUE: return column sums of squares instead.
	##   unbiased   = TRUE: divide by N - 1, else by N.
	##   twopass    = TRUE: center the data first (numerically more stable).
	if (SumSquares) {
		return(colSums(x^2, na.rm, dims))
	}
	n.obs <- colSums(!is.na(x), FALSE, dims)
	denom <- if (unbiased) n.obs - 1 else n.obs
	if (twopass) {
		## Center before accumulating sums of squares.
		if (dims == length(dim(x))) {
			x <- x - mean(x, na.rm = na.rm)
		} else {
			x <- sweep(x, (dims + 1):length(dim(x)), colMeans(x, na.rm, dims))
		}
	}
	sum.sq <- colSums(x^2, na.rm, dims)
	col.sum <- colSums(x, na.rm, dims)
	(sum.sq - col.sum^2 / n.obs) / denom
}
ThreewayMDS.Embed.Hyp.Test <- function(D1,D2,X1,X2,Y1,Y20,Y2A,model,ndim){
	# Three-way (individual-differences) MDS embedding of two dissimilarity
	# matrices, then out-of-sample embedding of the test points and matched-
	# pair test statistics under the null (Y20) and alternative (Y2A).
	#   D1, D2   in-sample dissimilarity matrices for conditions 1 and 2
	#   X1, X2   in-sample observations used for the OOS distance computations
	#   Y1       OOS observations from condition 1
	#   Y20/Y2A  OOS observations from condition 2 under H0 / HA
	#   model    constraint passed to smacofIndDiff
	#   ndim     embedding dimension
	# Returns list(T0, TA): per-pair squared distances under null/alternative.
	#
	# BUG FIX: the embedding dimension previously read the free variable `d`
	# (resolved from the calling environment) instead of the `ndim` argument.
	Threeway.Embed<- smacofIndDiff(delta=list(D1,D2), ndim = ndim, weightmat = NULL, init = NULL, metric = TRUE,
			ties = "primary", constraint = model, verbose = FALSE, modulus = 1,
			itmax = 1000, eps = 1e-6)
	X1t <- Threeway.Embed$conf[[1]]
	X2t <- Threeway.Embed$conf[[2]]
	# Undo the per-condition configuration weights on the OOS embeddings.
	Y1t	 <- oosMDS(dist(rbind(X1, Y1)), X1t)%*% (solve(Threeway.Embed$cweights[[1]])) # Check row column matchup
	Y20t <- oosMDS(dist(rbind(X2, Y20)), X2t) %*% (solve(Threeway.Embed$cweights[[2]]))
	Y2At <- oosMDS(dist(rbind(X2, Y2A)), X2t) %*% (solve(Threeway.Embed$cweights[[2]]))
	T0 <- rowSums((Y1t - Y20t)^2)
	TA <- rowSums((Y1t - Y2At)^2)
	return (list(T0=T0,TA=TA))
}
sign.test.cont.table <-function(cont.table.list)
## One-sided sign test across a list of 2x2 contingency tables: a table
## counts as a "success" when its [1,2] discordant entry exceeds its
## [2,1] entry.  Tests H0: P(success) = 0.5 against "greater".
{
	wins <- vapply(cont.table.list,
	               function(tab) tab[1, 2] > tab[2, 1],
	               logical(1))
	binom.test(sum(wins), length(wins), p = 0.5, alternative = "greater")
}
sign.rank.sum.test.cont.table <-function(cont.table.list)
## Paired one-sided Wilcoxon signed-rank test comparing the [1,2] vs
## [2,1] discordant counts across a list of 2x2 contingency tables.
{
	upper <- vapply(cont.table.list, function(tab) tab[1, 2], numeric(1))
	lower <- vapply(cont.table.list, function(tab) tab[2, 1], numeric(1))
	wilcox.test(upper, lower, paired = TRUE, alternative = "greater")
}
binom.out <-function(cont.table.list)
## Encode each 2x2 contingency table as 1 when its [1,2] discordant count
## exceeds its [2,1] count, else 0; returns the numeric 0/1 vector.
{
	vapply(cont.table.list,
	       function(tab) if (tab[1, 2] > tab[2, 1]) 1 else 0,
	       numeric(1))
}
omnibusM.inoos <- function(D1, D2, W)
## Omnibus-matrix variant that places W in BOTH off-diagonal blocks
## without transposing the lower-left one (cf. omnibusM, which uses t(W)).
{
	d1 <- as.matrix(D1)
	d2 <- as.matrix(D2)
	w.block <- as.matrix(W)
	rbind(cbind(d1, w.block), cbind(w.block, d2))
}
8ae944b06f6c9d157178fb61a1918299a5c7c6ce | 36d3b6f2349ebdad12a996acfc21090130695a1b | /man/Kmeans.Anal.Rd | cd826324de63ae9381b222f16b42107fa2b3364c | [] | no_license | flajole/MApckg | da7be5c41e13cbf5c03b100e40ac0a1521306d34 | 229959e1b9e76034411dc8513cd5f7e9e63c3ef0 | refs/heads/master | 2021-01-18T16:36:38.247786 | 2016-02-10T14:11:23 | 2016-02-10T14:11:23 | 41,876,214 | 1 | 0 | null | null | null | null | UTF-8 | R | false | true | 730 | rd | Kmeans.Anal.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Cluster_Kmeans.R
\name{Kmeans.Anal}
\alias{Kmeans.Anal}
\title{K-means analysis}
\usage{
Kmeans.Anal(dataSet, analSet, clust.num = 3)
}
\arguments{
\item{dataSet}{List, data set object generated by \code{\link[MSdata]{MS_to_MA}} function.}
\item{analSet}{List, containing the results of statistical analysis (can be just an empty list).}
\item{clust.num}{The cluster number.}
}
\value{
The original \code{analSet} with one added \code{$kmeans} element containing
standard \code{\link[stats]{kmeans}} output.
}
\description{
Perform K-means analysis. Uses \code{\link[stats]{kmeans}} function.
}
\seealso{
\code{\link{PlotKmeans}} for plotting functions
}
|
99e2255ac4cb38a23d82663638b6ab6c841416b4 | a17cf22be2304c96d267fc1b68db7b7279c4a293 | /R/gaussian.R | 7d0c553a1a4a77c66bb4564c0d3e7f7692c2b16c | [] | no_license | robertdouglasmorrison/DuffyTools | 25fea20c17b4025e204f6adf56c29b5c0bcdf58f | 35a16dfc3894f6bc69525f60647594c3028eaf93 | refs/heads/master | 2023-06-23T10:09:25.713117 | 2023-06-15T18:09:21 | 2023-06-15T18:09:21 | 156,292,164 | 6 | 1 | null | null | null | null | UTF-8 | R | false | false | 6,775 | r | gaussian.R | # gaussian.R - a set of probability density functions for peak fitting
gaussian <- function( x, center=0, width=1, height=NULL, floor=0) {
	## Gaussian (normal) peak evaluated at x.
	## With height = NULL this is the unit-area normal density with the given
	## center and width (standard deviation); otherwise the curve is rescaled
	## so its maximum equals `height`.  `floor` adds a constant baseline.
	variance2 <- 2 * width^2
	norm.const <- sqrt(pi * variance2)
	density <- exp(-((x - center)^2) / variance2) / norm.const
	if (!is.null(height)) {
		density <- density * norm.const * height
	}
	density + floor
}
fit.gaussian <- function( x, y, start.center=NULL, start.width=NULL, start.height=NULL,
			start.floor=NULL, fit.floor=FALSE) {

	# Fit the best gaussian peak to the (x, y) profile via nls().
	#   x, y          numeric vectors of equal length, the profile to fit
	#   start.center  starting guess for the peak center (default: x at max y)
	#   start.width   starting guess for the SD (default: half the count of
	#                 points above half height)
	#   start.height  starting guess for the peak height (default: max y)
	#   start.floor   starting baseline guess (only used when fit.floor=TRUE)
	#   fit.floor     if TRUE, also fit a constant baseline term
	# Returns a list with center, width, height, y (fitted values), residual,
	# and (when fit.floor=TRUE) floor.  If nls() fails, the starting
	# estimates are returned so the caller always gets a usable answer.

	# make some rough estimates from the values of Y
	who.max <- which.max(y)
	if ( is.null( start.center)) start.center <- x[ who.max]
	if ( is.null( start.height)) start.height <- y[ who.max]
	if ( is.null( start.width)) start.width <- sum( y > (start.height/2)) / 2

	# call the Nonlinear Least Squares, either fitting the floor too or not
	controlList <- nls.control( maxiter=100, minFactor=1/512, warnOnly=TRUE)
	if ( ! fit.floor) {
		starts <- list( "center"=start.center, "width"=start.width, "height"=start.height)
		nlsAns <- try( nls( y ~ gaussian( x, center, width, height), start=starts, control=controlList))
	} else {
		# default baseline guess: the 10th percentile of y
		if (is.null( start.floor)) start.floor <- quantile( y, seq(0,1,0.1))[2]
		starts <- list( "center"=start.center, "width"=start.width, "height"=start.height,
				"floor"=start.floor)
		nlsAns <- try( nls( y ~ gaussian( x, center, width, height, floor), start=starts, control=controlList))
	}

	# package up the results to pass back
	# (inherits() is the correct way to test for a try() failure; comparing
	# class(x) == "try-error" was fragile)
	if ( inherits( nlsAns, "try-error")) {
		centerAns <- start.center
		widthAns <- start.width
		heightAns <- start.height
		floorAns <- if ( fit.floor) start.floor else 0
		yAns <- gaussian( x, centerAns, widthAns, heightAns, floorAns)
		residualAns <- y - yAns
	} else {
		coefs <-coef(nlsAns)
		centerAns <- coefs[1]
		widthAns <- coefs[2]
		heightAns <- coefs[3]
		floorAns <- if ( fit.floor) coefs[4] else 0
		yAns <- fitted( nlsAns)
		residualAns <- residuals( nlsAns)
	}

	# always report the SD as a positive value
	widthAns <- abs( widthAns)

	out <- list( "center"=centerAns, "width"=widthAns, "height"=heightAns, "y"=yAns,
			"residual"=residualAns)
	if ( fit.floor) {
		out <- c( out, "floor"=floorAns)
	}
	return( out)
}
lorentzian <- function( x, center=0, width=1, height=NULL, floor=0) {
	## Lorentzian (Cauchy) peak evaluated at x.
	## With height = NULL this is the unit-area Cauchy density; otherwise the
	## curve is rescaled so its maximum equals `height`.  `floor` adds a
	## constant baseline.
	gamma.sq <- width * width
	density <- width / (pi * ((x - center)^2 + gamma.sq))
	if (!is.null(height)) {
		density <- density * (pi * width) * height
	}
	density + floor
}
fit.lorentzian <- function( x, y, start.center=NULL, start.width=NULL, start.height=NULL,
			start.floor=NULL, fit.floor=FALSE) {

	# Fit the best lorentzian peak to the (x, y) profile via nls().
	#   x, y          numeric vectors of equal length, the profile to fit
	#   start.center  starting guess for the peak center (default: x at max y)
	#   start.width   starting guess for the half-width (default: half the
	#                 count of points above half height)
	#   start.height  starting guess for the peak height (default: max y)
	#   start.floor   starting baseline guess (only used when fit.floor=TRUE)
	#   fit.floor     if TRUE, also fit a constant baseline term
	# Returns a list with center, width, height, y (fitted values), residual,
	# and (when fit.floor=TRUE) floor.  If nls() fails, the starting
	# estimates are returned so the caller always gets a usable answer.

	# make some rough estimates from the values of Y
	who.max <- which.max(y)
	if ( is.null( start.center)) start.center <- x[ who.max]
	if ( is.null( start.height)) start.height <- y[ who.max]
	if ( is.null( start.width)) start.width <- sum( y > (start.height/2)) / 2

	# call the Nonlinear Least Squares, either fitting the floor too or not
	controlList <- nls.control( maxiter=100, minFactor=1/512, warnOnly=TRUE)
	if ( ! fit.floor) {
		starts <- list( "center"=start.center, "width"=start.width, "height"=start.height)
		nlsAns <- try( nls( y ~ lorentzian( x, center, width, height), start=starts, control=controlList))
	} else {
		# default baseline guess: the 10th percentile of y
		if (is.null( start.floor)) start.floor <- quantile( y, seq(0,1,0.1))[2]
		starts <- list( "center"=start.center, "width"=start.width, "height"=start.height,
				"floor"=start.floor)
		nlsAns <- try( nls( y ~ lorentzian( x, center, width, height, floor), start=starts, control=controlList))
	}

	# package up the results to pass back
	# (inherits() is the correct way to test for a try() failure; comparing
	# class(x) == "try-error" was fragile)
	if ( inherits( nlsAns, "try-error")) {
		centerAns <- start.center
		widthAns <- start.width
		heightAns <- start.height
		floorAns <- if ( fit.floor) start.floor else 0
		yAns <- lorentzian( x, centerAns, widthAns, heightAns, floorAns)
		residualAns <- y - yAns
	} else {
		coefs <-coef(nlsAns)
		centerAns <- coefs[1]
		widthAns <- coefs[2]
		heightAns <- coefs[3]
		floorAns <- if ( fit.floor) coefs[4] else 0
		yAns <- fitted( nlsAns)
		residualAns <- residuals( nlsAns)
	}

	# always report the SD as a positive value
	widthAns <- abs( widthAns)

	out <- list( "center"=centerAns, "width"=widthAns, "height"=heightAns, "y"=yAns,
			"residual"=residualAns)
	if ( fit.floor) {
		out <- c( out, "floor"=floorAns)
	}
	return( out)
}
# Evaluate a (mirrored) Gumbel-shaped curve at the points in `x`.
#
# With `height = NULL` (the default) the curve integrates to one; when a
# height is supplied, the curve is rescaled so its peak value at `center`
# equals `height`.  A constant `floor` offset is added to every point.
gumbel <- function( x, center=0, width=1, height=NULL, floor=0) {
  z <- (x - center) / width
  values <- exp(z - exp(z)) / width
  if (!is.null(height)) {
    # at x == center the unscaled curve equals exp(-1)/width; multiplying
    # by e*width*height makes the peak exactly `height`
    peak_scale <- exp(1) * width
    values <- values * peak_scale * height
  }
  values + floor
}
# Fit a Gumbel-shaped curve to (x, y) data by nonlinear least squares.
#
# Starting values for center/width/height are estimated from the data when
# not supplied; when `fit.floor` is TRUE a constant baseline is fitted as
# well.  If nls() fails outright, the starting estimates are returned so
# the caller always receives a usable answer.
#
# Returns a list with elements "center", "width", "height", "y" (fitted
# values), "residual", and -- when fit.floor is TRUE -- "floor".
fit.gumbel <- function( x, y, start.center=NULL, start.width=NULL, start.height=NULL,
                        start.floor=NULL, fit.floor=FALSE) {
  # make some rough estimates from the values of Y
  who.max <- which.max(y)
  if (is.null(start.center)) start.center <- x[who.max]
  if (is.null(start.height)) start.height <- y[who.max]
  # half the count of points above half maximum approximates the half-width
  # (assumes roughly unit spacing of x -- TODO confirm)
  if (is.null(start.width)) start.width <- sum(y > (start.height / 2)) / 2
  # call the Nonlinear Least Squares, either fitting the floor too or not
  controlList <- nls.control(maxiter = 100, minFactor = 1/512, warnOnly = TRUE)
  if (!fit.floor) {
    starts <- list("center" = start.center, "width" = start.width, "height" = start.height)
    nlsAns <- try(nls(y ~ gumbel(x, center, width, height), start = starts, control = controlList))
  } else {
    # the second decile of Y is a robust starting guess for the baseline
    if (is.null(start.floor)) start.floor <- quantile(y, seq(0, 1, 0.1))[2]
    starts <- list("center" = start.center, "width" = start.width, "height" = start.height,
                   "floor" = start.floor)
    nlsAns <- try(nls(y ~ gumbel(x, center, width, height, floor), start = starts, control = controlList))
  }
  # package up the results to pass back.
  # inherits() is the robust, idiomatic test for a try() failure; comparing
  # class() with "==" breaks when the class attribute has length > 1.
  if (inherits(nlsAns, "try-error")) {
    # the fit failed: fall back to the starting estimates
    centerAns <- start.center
    widthAns <- start.width
    heightAns <- start.height
    floorAns <- if (fit.floor) start.floor else 0
    yAns <- gumbel(x, centerAns, widthAns, heightAns, floorAns)
    residualAns <- y - yAns
  } else {
    coefs <- coef(nlsAns)
    centerAns <- coefs[1]
    widthAns <- coefs[2]
    heightAns <- coefs[3]
    floorAns <- if (fit.floor) coefs[4] else 0
    yAns <- fitted(nlsAns)
    residualAns <- residuals(nlsAns)
  }
  # the width for a Gumbel keeps its sign! (the sign encodes the skew direction)
  out <- list("center" = centerAns, "width" = widthAns, "height" = heightAns, "y" = yAns,
              "residual" = residualAns)
  if (fit.floor) {
    out <- c(out, "floor" = floorAns)
  }
  return(out)
}
|
2f4218773cc1e8a4acc3a662a03ce83fe2e1a8b8 | 1285cb6fc60d5ead0a0b48cf1e5da3687a65da24 | /man/plotConcTimeSmooth.Rd | cde3749cceb8d5a620034301f9c3d06f1ceef440 | [
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-public-domain-disclaimer"
] | permissive | ayan-usgs/EGRET | 7448035c6e9c1e970fe4dc987ac068ceb2034f59 | 433e85a4ef0c8c37edee575837ad1e3ebbf9ae07 | refs/heads/master | 2021-01-16T19:06:57.563502 | 2013-05-01T15:37:45 | 2013-05-01T15:37:45 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,122 | rd | plotConcTimeSmooth.Rd | \name{plotConcTimeSmooth}
\alias{plotConcTimeSmooth}
\title{Plot up to three curves representing the concentration versus time relationship, each curve representing a different flow.}
\usage{
plotConcTimeSmooth(q1, q2, q3, centerDate, yearStart,
yearEnd, qUnit = 2, legendLeft = 0, legendTop = 0,
concMax = NA, bw = FALSE, printTitle = TRUE,
printValues = FALSE, localSample = Sample,
localINFO = INFO, windowY = 10, windowQ = 2,
windowS = 0.5, cex.main = 1.1, lwd = 2, ...)
}
\arguments{
\item{q1}{numeric This is the discharge value for the
first curve to be shown on the plot. It is expressed in
units specified by qUnit.}
\item{q2}{numeric This is the discharge value for the
second curve to be shown on the plot. It is expressed in
units specified by qUnit. If you don't want a second
curve then the argument must be q2=NA}
\item{q3}{numeric This is the discharge value for the
third curve to be shown on the plot. It is expressed in
units specified by qUnit. If you don't want a third curve
then the argument must be q3=NA}
\item{centerDate}{string This is the time of year to be
used as the center date for the smoothing. It is
expressed as a month and day and must be in the form
"mm-dd"}
\item{yearStart}{numeric This is the starting year for
the graph. The first value plotted for each curve will be
at the first instance of centerDate in the year
designated by yearStart.}
\item{yearEnd}{numeric This is the end of the sequence of
values plotted on the graph.The last value will be the
last instance of centerDate prior to the start of
yearEnd. (Note, the number of values plotted on each
curve will be yearEnd-yearStart.)}
\item{qUnit}{object of qUnit class. \code{\link{qConst}},
or numeric represented the short code, or character
representing the descriptive name.}
\item{legendLeft}{numeric which represents the left edge
of the legend, in the units shown on x-axis of graph,
default is 0, will be placed within the graph but may
overprint data}
\item{legendTop}{numeric which represents the top edge of
the legend, in the units shown on y-axis of graph,
default is 0, will be placed within the graph but may
overprint data}
\item{concMax}{numeric value for upper limit on
concentration shown on the graph, default = NA (which
causes the upper limit to be set automatically, based on
the data)}
\item{bw}{logical if TRUE graph is produced in black and
white, default is FALSE (which means it will use color)}
\item{printTitle}{logical variable if TRUE title is
printed, if FALSE not printed}
\item{printValues}{logical variable if TRUE the results
shown on the graph are also printed to the console (this
can be useful for quantifying the changes seen visually
in the graph), default is FALSE (not printed)}
\item{localSample}{string specifying the name of the data
frame that contains the Sample data, default name is
Sample}
\item{localINFO}{string specifying the name of the data
frame that contains the metadata, default name is INFO}
\item{windowY}{numeric specifying the half-window width
in the time dimension, in units of years, default is 10}
\item{windowQ}{numeric specifying the half-window width
in the discharge dimension, units are natural log units,
default is 2}
\item{windowS}{numeric specifying the half-window width in
the seasonal dimension, in units of years, default is
0.5}
\item{cex.main}{magnification to be used for main titles
relative to the current setting of cex}
\item{lwd}{line width, a positive number, defaulting to
1}
\item{\dots}{arbitrary functions sent to the generic
plotting function. See ?par for details on possible
parameters}
}
\description{
These plots show how the concentration-time relationship
is changing over flow.
}
\examples{
q1 <- 10
q2 <- 25
q3 <- 75
centerDate <- "07-01"
yearStart <- 2000
yearEnd <- 2010
Sample <- exSample
INFO <- exINFO
plotConcTimeSmooth(q1, q2, q3, centerDate, yearStart, yearEnd)
}
\keyword{graphics}
\keyword{statistics}
\keyword{water-quality}
|
c6f8285efb28471fbee41e63910fbec41f8f481f | 29585dff702209dd446c0ab52ceea046c58e384e | /CorReg/R/Mstep.R | 89e4160e9a49f19821c8fce38dc8000e5b3e1f8a | [] | no_license | ingted/R-Examples | 825440ce468ce608c4d73e2af4c0a0213b81c0fe | d0917dbaf698cb8bc0789db0c3ab07453016eab9 | refs/heads/master | 2020-04-14T12:29:22.336088 | 2016-07-21T14:01:14 | 2016-07-21T14:01:14 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 219 | r | Mstep.R | Mstep<-function(Z=Z,X=X,sigma_IR=sigma_IR,Ir=Ir){
# M step: re-estimate the regression coefficients (alpha) and the residual
# standard deviations (sigma_IR) for the columns listed in Ir.
# NOTE(review): hatB is defined elsewhere in the package; it appears to
# return a coefficient matrix with the intercept in row 1 -- confirm.
alpha=hatB(Z = Z,X =X )
for (j in Ir){
# residual standard deviation of column j against its fitted values
sigma_IR[j]=sd(X[,j]-X%*%alpha[-1,j])
}
return(list(alpha=alpha,sigma_IR=sigma_IR))# in C this will be a void
}
6c24631e8ad12570839410d386870dc46936cb44 | 28464a180a79bc91ca091f3ab92c439533589ffe | /R/tbl_odata.R | 4b9c1319fa94984d96c2d5117539517d90bf5f07 | [] | no_license | edwindj/odataplyr | 1f397b49f4b14413a661981fbb47337629d87450 | 36fac209f97342e1e04761f707cc136eb2e9007a | refs/heads/master | 2021-01-13T02:47:58.635104 | 2018-08-30T11:43:23 | 2018-08-30T11:43:23 | 77,134,523 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 169 | r | tbl_odata.R | tbl_odata <- function(url, name, ...){
# start from an empty list; no data is fetched at construction time
tbl <- list()
# tag with S3 classes so tbl/dplyr-style methods can dispatch on "tbl_odata"
structure(tbl, class=c("tbl_odata", "tbl"))
}
# Build the OData query string for a tbl_odata object.
#
# Placeholder: always signals an error until the query translation is
# implemented.  `x` is the tbl_odata; `...` is reserved for future use.
get_odata_query <- function(x, ...){
  # call. = FALSE keeps the deparsed call out of the error message,
  # which adds no information for a deliberate not-implemented stub
  stop("Not implemented", call. = FALSE)
}
|
9c05805e5fa7e1d357688eeae048e1778ab0ffea | ef40bd270d85d5eb252f0286f3e365b185fb08f4 | /R/getInfo.R | 97f4dd264c59d4d3e16fe442f5f525bbc59bb7e5 | [] | no_license | dusadrian/admisc | b49e75300c3bf4d922d1d9ef2c065888c0f1576d | 44126a9d9c58dadd5f9b9c589fccfdcf3643e182 | refs/heads/master | 2023-07-19T18:57:56.391912 | 2023-07-16T09:31:30 | 2023-07-16T09:31:30 | 231,208,801 | 0 | 0 | null | 2023-06-07T07:55:07 | 2020-01-01T11:39:32 | R | UTF-8 | R | false | false | 3,674 | r | getInfo.R | `getInfo` <- function(data) {
# normalize the input to a data frame
if (is.matrix(data)) {
data <- as.data.frame(data)
}
# collect every "don't care" code used anywhere in the data:
# negative values in numeric columns, or the literals "-" / "dc" elsewhere
dc.code <- unique(unlist(lapply(data, function(x) {
if (is.numeric(x)) {
return(x[x < 0])
}
else {
return(as.character(x[is.element(x, c("-", "dc"))]))
}
})))
if (length(dc.code) > 1) {
stopError("Multiple \"don't care\" codes found.")
}
# per-column flags: fuzzy-set membership, negative "time" codes,
# factor columns, and declared-class columns
fuzzy.cc <- logical(ncol(data))
hastime <- logical(ncol(data))
factor <- sapply(data, is.factor)
declared <- sapply(data, function(x) inherits(x, "declared"))
# NOTE(review): getLevels, possibleNumeric, asNumeric, recode and stopError
# are package-internal helpers defined elsewhere -- behavior assumed here.
noflevels <- getLevels(data)
attributes(noflevels) <- NULL
# recode each column in place: map the don't-care code to -1, detect fuzzy
# columns, shift negative codes past the maximum, and re-base declared levels
for (i in seq(ncol(data))) {
cc <- data[, i]
label <- attr(cc, "label", exact = TRUE)
labels <- attr(cc, "labels", exact = TRUE)
if (is.factor(cc)) {
cc <- as.character(cc)
}
if (length(dc.code) > 0 && is.element(dc.code, cc)) {
cc[is.element(cc, dc.code)] <- -1
}
if (possibleNumeric(cc)) {
cc <- asNumeric(cc)
# any fractional value marks the column as fuzzy
fuzzy.cc[i] <- any(na.omit(cc) %% 1 > 0)
if (!fuzzy.cc[i] & !anyNA(cc)) {
if (any(na.omit(cc) < 0)) {
# negative crisp codes are treated as an extra "time" level
hastime[i] <- TRUE
cc[cc < 0] <- max(cc) + 1 # TODO if declared...?
}
}
if (declared[i]) {
if (min(cc) != 0 && !fuzzy.cc[i]) {
# the data MUST begin with 0 and MUST be incremented by 1 for each level...!
cc <- recode(cc, paste(sort(labels), seq(noflevels[i]) - 1, sep = "=", collapse = ";"))
}
# reattach the declared metadata stripped by the numeric conversion
attr(cc, "label") <- label
attr(cc, "labels") <- labels
class(cc) <- c("declared", class(cc))
}
data[[i]] <- cc
}
}
# columns carrying a time code are no longer treated as plain factors
factor <- factor & !hastime
# record human-readable category names for factor/declared columns,
# converting factor columns to 0-based numeric codes
categories <- list()
columns <- colnames(data)
if (any(factor | declared)) {
for (i in which(factor | declared)) {
if (factor[i]) {
categories[[columns[i]]] <- levels(data[, i])
# the data MUST begin with 0 and MUST be incremented by 1 for each level...!
data[, i] <- as.numeric(data[, i]) - 1
}
else {
x <- data[, i]
labels <- attr(x, "labels", exact = TRUE)
if (is.null(labels)) {
stopError("Declared columns should have labels.")
}
else {
if (noflevels[i] == 2) {
if (length(labels) == 1) {
stopError("Binary crisp columns should have labels for both presence and absence.")
}
}
else { # noflevels > 2 (impossible less than 2)
if (length(labels) != noflevels[i]) {
stopError("All multi-values should have declared labels.")
}
}
}
categories[[columns[i]]] <- names(sort(labels))
}
}
}
# return the recoded data together with all per-column metadata
return(
list(
data = data,
fuzzy.cc = fuzzy.cc,
hastime = hastime,
factor = factor,
declared = declared,
categories = categories,
dc.code = dc.code,
noflevels = noflevels
)
)
}
|
e13dc812936ebd3bc30cdad284a2a8cb5f1b2306 | 368b6bcc2204358eff3630d17187577389352e09 | /R/tensorflow.R | 9c4fb079f233552ce8b7cb50a078d7f76e293a04 | [] | no_license | alicehua11/photo_classification | ae04132b03bfbb60281557ca4a8814d17b8e5078 | f8372d118f8ea2010c1285dc1762ac3e14cee8dd | refs/heads/master | 2020-07-20T10:44:29.668745 | 2019-09-06T17:32:44 | 2019-09-06T17:32:44 | 206,627,232 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,249 | r | tensorflow.R | #' Install TensorFlow for use with \code{MLWIC}
#'
#' \code{MLWIC} requires an installation of tensorflow that can be used by Python.
#' You need to use this before using \code{classify} or \code{train}. If this is your first time using
#' this function, you should see additional documentation at https://github.com/mikeyEcology/MLWIC .
#' This function will install tensorflow on Linux machines; if you are using Windows,
#' you will need to install tensorflow on your own following the directions here:
#' https://www.tensorflow.org/install/install_windows. I recommend using the installation with
#' Anaconda.
#'
#'
#' @param os The operating system on your computer. Options are "Mac" or "Ubuntu".
#' Specifying "Windows" will thrown an error because we cannot automatically install
#' TensorFlow on Windows at this time.
#' @export
# Attempt to install TensorFlow (with Python 2.7) for MLWIC by shelling out
# to the host package managers.  Mac and Ubuntu are handled; Windows only
# prints instructions.  Side effects only: runs system installers with sudo.
tensorflow <- function(os="Mac"){
## Check for python 2.7
# NOTE(review): system() returns the shell's integer exit status, and the
# pipeline's status is grep's (0 = "2.7" found).  So vpython == TRUE holds
# only when the status is exactly 1 (no match) -- the condition appears
# inverted; also $pyv is set in a subshell, so echo $pyv is likely empty.
# Verify the intended logic before relying on this check.
vpython <- system("pyv=\"$(python -V 2>&1)\" | echo $pyv | grep \"2.7\"") ## come back to this
if(vpython == TRUE){
print("Python is installed. Installing homebrew, protobuf, pip, and tensorflow.")
if(os == "Mac"){
# install Homebrew, then the toolchain and tensorflow via pip
system("/usr/bin/ruby -e \"$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)\"")
system("brew install protobuf")
system("sudo easy_install --upgrade pip")
system("sudo easy_install --upgrade six")
system("sudo pip install tensorflow")
## Something to validate installation, beyond this.
#system("python import_tf.py")
# I think I need to add: conda install tensorflow
}else if(os == "Ubuntu"){
system("sudo apt-get install python-pip python-dev") # for Python 2.7
system("pip install tensorflow")
#system("python import_tf.py")
}else if(os == "Windows"){
# no automated install on Windows; point the user at the official docs
print("Sorry. MLWIC cannot install tensorflow on Windows. Please visit
https://www.tensorflow.org/install/install_windows for tensorflow installation instructions.")
}else{
print('Specify operating system - \"Mac\", \"Windows\", or \"Ubuntu\"')
}
}else{
print("Python needs to be installed. Install Python 2.7, ideally Anaconda, before proceeding. MLWIC does not work with Python 3 at this time.")
}
}
|
e4094fc1a110aa24096a1305d3282bd6e0e21512 | 49da9f66c90668a99c783489f95522dbd1b4e474 | /descriptive analysis/WITS_clean.R | cd9b6adac736a24de50c988f28fd7a2ef615b500 | [] | no_license | michaelcbest/customsevasion | f6744ce06906559366c608dfedd66f36cc9e23df | 3f116cc520ff552033481de9a6deb78262b906f0 | refs/heads/master | 2021-01-01T04:21:53.738829 | 2017-09-22T23:15:30 | 2017-09-22T23:15:30 | 97,161,396 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 12,301 | r | WITS_clean.R | library(data.table)
library(plyr)
library(dplyr)
setwd(dirname(path.expand("~")))
DataPath <- file.path(paste(getwd(),"Dropbox/Customs Evasion", sep="/"))
#####CLEAN MFN DATA FOR MERGE#####
# Load WITS MFN tariffs and the EU membership lookup; EU members are
# collapsed onto a single EU reporter code before merging with trade data.
load(paste(DataPath,"Raw Data/hs12_MFN.Rda", sep = "/"))
EUmembers <- read.csv(paste(DataPath,"raw data/EUmembers.csv", sep = "/"))
EUmembers$Reporter_ISO_N <- as.character(EUmembers$Reporter_ISO_N)
hs12_MFN$Reporter_ISO_N <- as.character(hs12_MFN$Reporter_ISO_N)
hs12_MFN <- merge(hs12_MFN, EUmembers,
by = c("Reporter_ISO_N"), allow.cartesian = T, all.x = T)
# replace the national code with the EU-wide code wherever a match was found
hs12_MFN$Reporter_ISO_N[!is.na(hs12_MFN$CountryCode)] <- hs12_MFN$CountryCode[!is.na(hs12_MFN$CountryCode)]
# drop Croatia's pre-accession rows (it only joined the EU mid-2013);
# the is.na() term keeps rows where the condition cannot be evaluated
hs12_MFN <- hs12_MFN[ !(Reporter_ISO_N == 191 & Country == "Croatia" & Year ==2012)|
is.na(Reporter_ISO_N == 191 & Country == "Croatia" & Year ==2012)]
hs12_MFN <- hs12_MFN[ !(Reporter_ISO_N == 191 & Country == "Croatia" & Year ==2013)|
is.na(Reporter_ISO_N == 191 & Country == "Croatia" & Year ==2013)]
hs12_MFN <- hs12_MFN[, c("Country", "DateJoined", "CountryCode") :=NULL]
# zero-pad 5-digit HS codes back to the 6-digit standard
hs12_MFN$ProductCode[hs12_MFN$ProductCode<100000] <- paste("0", hs12_MFN$ProductCode[hs12_MFN$ProductCode<100000], sep="")
hs12_MFN$Year <- as.character(hs12_MFN$Year)
# harmonize ISO numeric codes with the codes used by the trade data
hs12_MFN$Reporter_ISO_N[hs12_MFN$Reporter_ISO_N==250] <- 251 #France
hs12_MFN$Reporter_ISO_N[hs12_MFN$Reporter_ISO_N==380] <- 381 #Italy
hs12_MFN$Reporter_ISO_N[hs12_MFN$Reporter_ISO_N==578] <- 579 #Norway
hs12_MFN$Reporter_ISO_N[hs12_MFN$Reporter_ISO_N==756] <- 757 #Switzerland
hs12_MFN$Reporter_ISO_N[hs12_MFN$Reporter_ISO_N==840] <- 842 #USA
hs12_MFN$Reporter_ISO_N[hs12_MFN$Reporter_ISO_N==356] <- 699 #India
# flag every row as an MFN tariff line
hs12_MFN$MFN <- 1
#####MERGE VALUE DATA W MFN TARIFFS######
#REPLACE WITH QTY OR VALUE
load(paste(DataPath,"Analysis Data/hs12_qty.Rda", sep = "/"))
hs12_all_tariffs <- merge(hs12_MFN, hs12_qty,
by.x = c("Year", "Reporter_ISO_N", "ProductCode"),
by.y = c("Period", "Reporter Code", "Commodity Code"))
save(hs12_all_tariffs,file = "Documents/hs2012/hs12_all_tariffs_qty.Rda")
rm(hs12_qty, hs12_MFN, hs12_all_tariffs)
#####MERGE EU COUNTRIES WITH PREFERENTIAL TARIFFS####
# Same EU harmonization as for MFN, applied to the preferential tariffs.
load(paste(DataPath,"raw data/hs12_pref.Rda", sep = "/"))
hs12_pref$Reporter_ISO_N <- as.character(hs12_pref$Reporter_ISO_N)
hs12_pref$Partner <- as.character(hs12_pref$Partner)
# strip leading zeros from the partner code
hs12_pref$Partner <- substr(hs12_pref$Partner,
regexpr("[^0]",hs12_pref$Partner),
nchar(hs12_pref$Partner))
hs12_pref <- merge(hs12_pref, EUmembers,
by = c("Reporter_ISO_N"), allow.cartesian = T, all.x = T)
hs12_pref$Reporter_ISO_N[!is.na(hs12_pref$CountryCode)] <- hs12_pref$CountryCode[!is.na(hs12_pref$CountryCode)]
# drop Croatia's pre-EU-accession rows, as above
hs12_pref <- hs12_pref[ !(Reporter_ISO_N == 191 & Country == "Croatia" & Year ==2012)|
is.na(Reporter_ISO_N == 191 & Country == "Croatia" & Year ==2012)]
hs12_pref <- hs12_pref[ !(Reporter_ISO_N == 191 & Country == "Croatia" & Year ==2013)|
is.na(Reporter_ISO_N == 191 & Country == "Croatia" & Year ==2013)]
hs12_pref <- hs12_pref[, c("Country", "DateJoined", "CountryCode") :=NULL]
save(hs12_pref,file = "Documents/hs2012/hs12_pref.Rda")
rm(hs12_pref)
#####CLEAN BENEFICIARY CODES####
# Map TRAINS region codes for preference beneficiaries onto country codes,
# again collapsing EU members onto a single code.
load(paste(DataPath,"raw data/TRAINS_preference_beneficiaries.Rda", sep = "/"))
TRAINS_preference_beneficiaries$Partner <- as.character(TRAINS_preference_beneficiaries$Partner)
TRAINS_preference_beneficiaries$Partner <- substr(TRAINS_preference_beneficiaries$Partner,
regexpr("[^0]",TRAINS_preference_beneficiaries$Partner),
nchar(TRAINS_preference_beneficiaries$Partner))
TRAINS_preference_beneficiaries <- merge(TRAINS_preference_beneficiaries, EUmembers,
by.x = c("Partner"), by.y = c("Reporter_ISO_N"), allow.cartesian = T, all.x = T)
TRAINS_preference_beneficiaries$Partner[!is.na(TRAINS_preference_beneficiaries$CountryCode)] <-
TRAINS_preference_beneficiaries$CountryCode[!is.na(TRAINS_preference_beneficiaries$CountryCode)]
TRAINS_preference_beneficiaries <- TRAINS_preference_beneficiaries[, c("Country", "DateJoined", "CountryCode") :=NULL]
TRAINS_preference_beneficiaries <- rename(TRAINS_preference_beneficiaries, "Partner Code" = "Partner")
#####CLEAN and SPLIT PREFERENCE TARIFFS#####
#Function for cleaning each section of pref tariffs
# Clean one chunk of the preferential-tariff data: expand TRAINS region
# codes to beneficiary country codes, drop the EU's pre-accession Croatia
# rows, zero-pad product codes, and harmonize ISO numeric codes.
# NOTE(review): depends on the global TRAINS_preference_beneficiaries
# table built above -- assumed to be in scope when called.
pref_split <- function(hs12_pref) {
hs12_pref <- merge(hs12_pref, TRAINS_preference_beneficiaries,
by.x = c("Partner"), by.y = c("RegionCode"), allow.cartesian = T, all.x = T)
# drop Croatia as an EU beneficiary before its mid-2013 accession;
# the is.na() term keeps rows where the condition cannot be evaluated
hs12_pref <- hs12_pref[!(`Partner Code` == 191 & PartnerName == "European Union" & Year == 2012)|
is.na(`Partner Code` == 191 & PartnerName == "European Union" & Year == 2012)]
hs12_pref <- hs12_pref[!(`Partner Code` == 191 & PartnerName == "European Union" & Year == 2013)|
is.na(`Partner Code` == 191 & PartnerName == "European Union" & Year == 2013)]
# zero-pad 5-digit HS codes back to the 6-digit standard
hs12_pref$ProductCode[hs12_pref$ProductCode<100000] <-
paste("0", hs12_pref$ProductCode[hs12_pref$ProductCode<100000], sep="")
hs12_pref$`Partner Code` <- as.character(hs12_pref$`Partner Code`)
hs12_pref$Partner <- as.character(hs12_pref$Partner)
# fall back to the raw Partner code where no beneficiary match was found
hs12_pref$`Partner Code`[is.na(hs12_pref$`Partner Code`)] <-
hs12_pref$Partner[is.na(hs12_pref$`Partner Code`)]
hs12_pref[,Partner:=NULL]
hs12_pref$Reporter_ISO_N <- as.character(hs12_pref$Reporter_ISO_N)
hs12_pref$Year <- as.character(hs12_pref$Year)
hs12_pref$ProductCode <- as.character(hs12_pref$ProductCode)
# harmonize ISO numeric codes with the codes used by the trade data
hs12_pref$Reporter_ISO_N[hs12_pref$Reporter_ISO_N==250] <- 251 #France
hs12_pref$Reporter_ISO_N[hs12_pref$Reporter_ISO_N==380] <- 381 #Italy
hs12_pref$Reporter_ISO_N[hs12_pref$Reporter_ISO_N==578] <- 579 #Norway
hs12_pref$Reporter_ISO_N[hs12_pref$Reporter_ISO_N==756] <- 757 #Switzerland
hs12_pref$Reporter_ISO_N[hs12_pref$Reporter_ISO_N==840] <- 842 #USA
hs12_pref$Reporter_ISO_N[hs12_pref$Reporter_ISO_N==356] <- 699 #India
hs12_pref$`Partner Code`[hs12_pref$`Partner Code`==250] <- 251 #France
hs12_pref$`Partner Code`[hs12_pref$`Partner Code`==380] <- 381 #Italy
hs12_pref$`Partner Code`[hs12_pref$`Partner Code`==578] <- 579 #Norway
hs12_pref$`Partner Code`[hs12_pref$`Partner Code`==756] <- 757 #Switzerland
hs12_pref$`Partner Code`[hs12_pref$`Partner Code`==840] <- 842 #USA
hs12_pref$`Partner Code`[hs12_pref$`Partner Code`==356] <- 699 #India
# flag every row as a preferential tariff line
hs12_pref$pref <- 1
return(hs12_pref)
}
# The preferential data is too large to clean in one pass, so it is split
# into five reporter-code ranges; each chunk is cleaned with pref_split()
# and saved separately, reloading the raw file each time to limit memory.
#Pref 1
load("Documents/hs2012/hs12_pref.Rda")
hs12_pref_1 <- hs12_pref[Reporter_ISO_N <= 251, ]
rm(hs12_pref)
hs12_pref_1 <- pref_split(hs12_pref_1)
save(hs12_pref_1,file = "Documents/hs2012/hs12_pref_1.Rda")
rm(hs12_pref_1)
#Pref 2
# India (699) is pulled into this range to match its recoded ISO number
load("Documents/hs2012/hs12_pref.Rda")
hs12_pref_2 <- hs12_pref[Reporter_ISO_N > 251 & Reporter_ISO_N <= 400 | Reporter_ISO_N == 699, ]
rm(hs12_pref)
hs12_pref_2 <- pref_split(hs12_pref_2)
save(hs12_pref_2,file = "Documents/hs2012/hs12_pref_2.Rda")
rm(hs12_pref_2)
#Pref 3
load("Documents/hs2012/hs12_pref.Rda")
hs12_pref_3 <- hs12_pref[Reporter_ISO_N > 400 & Reporter_ISO_N <= 500, ]
rm(hs12_pref)
hs12_pref_3 <- pref_split(hs12_pref_3)
save(hs12_pref_3,file = "Documents/hs2012/hs12_pref_3.Rda")
rm(hs12_pref_3)
#Pref 4
load("Documents/hs2012/hs12_pref.Rda")
hs12_pref_4 <- hs12_pref[Reporter_ISO_N > 500 & Reporter_ISO_N <= 700 & Reporter_ISO_N!=699, ]
rm(hs12_pref)
hs12_pref_4 <- pref_split(hs12_pref_4)
save(hs12_pref_4,file = "Documents/hs2012/hs12_pref_4.Rda")
rm(hs12_pref_4)
#Pref 5
load("Documents/hs2012/hs12_pref.Rda")
hs12_pref_5 <- hs12_pref[Reporter_ISO_N > 700, ]
rm(hs12_pref)
hs12_pref_5 <- pref_split(hs12_pref_5)
save(hs12_pref_5,file = "Documents/hs2012/hs12_pref_5.Rda")
rm(hs12_pref_5, TRAINS_preference_beneficiaries)
#MERGE TRADE/MFN WITH PREF#####
# Left-join each reporter-code section of the trade/MFN data with the
# corresponding cleaned preferential-tariff chunk, then stack the pieces.
load("Documents/hs2012/hs12_all_tariffs_qty.Rda")
#Section 1
hs12_all_tariffs_1 <- hs12_all_tariffs[Reporter_ISO_N <= 251, ]
load("Documents/hs2012/hs12_pref_1.Rda")
hs12_all_tariffs_1 <- merge(hs12_all_tariffs_1, hs12_pref_1,
by=c("Reporter_ISO_N", "Year", "ProductCode", "Partner Code"), all.x = T)
rm(hs12_pref_1)
#Section 2
hs12_all_tariffs_2 <- hs12_all_tariffs[Reporter_ISO_N > 251 & Reporter_ISO_N <= 400 | Reporter_ISO_N == 699, ]
load("Documents/hs2012/hs12_pref_2.Rda")
hs12_all_tariffs_2 <- merge(hs12_all_tariffs_2, hs12_pref_2,
by=c("Reporter_ISO_N", "Year", "ProductCode", "Partner Code"), all.x = T)
rm(hs12_pref_2)
#Section 3
hs12_all_tariffs_3 <- hs12_all_tariffs[Reporter_ISO_N > 400 & Reporter_ISO_N <= 500, ]
load("Documents/hs2012/hs12_pref_3.Rda")
hs12_all_tariffs_3 <- merge(hs12_all_tariffs_3, hs12_pref_3,
by=c("Reporter_ISO_N", "Year", "ProductCode", "Partner Code"), all.x = T)
rm(hs12_pref_3)
#Section 4
hs12_all_tariffs_4 <- hs12_all_tariffs[Reporter_ISO_N > 500 & Reporter_ISO_N <= 700 & Reporter_ISO_N != 699, ]
load("Documents/hs2012/hs12_pref_4.Rda")
hs12_all_tariffs_4 <- merge(hs12_all_tariffs_4, hs12_pref_4,
by=c("Reporter_ISO_N", "Year", "ProductCode", "Partner Code"), all.x = T)
rm(hs12_pref_4)
#Section 5
hs12_all_tariffs_5 <- hs12_all_tariffs[Reporter_ISO_N > 700, ]
load("Documents/hs2012/hs12_pref_5.Rda")
hs12_all_tariffs_5 <- merge(hs12_all_tariffs_5, hs12_pref_5,
by=c("Reporter_ISO_N", "Year", "ProductCode", "Partner Code"), all.x = T)
rm(hs12_pref_5)
# stack the five merged sections back into one table
hs12_all_tariffs <- do.call("rbind", list(hs12_all_tariffs_1, hs12_all_tariffs_2,
hs12_all_tariffs_3, hs12_all_tariffs_4,
hs12_all_tariffs_5))
rm(hs12_all_tariffs_1, hs12_all_tariffs_2, hs12_all_tariffs_3, hs12_all_tariffs_4, hs12_all_tariffs_5)
#####CLEAN FULL TARIFF DATA#####
#Remove duplicates combinations with same tariff rates
hs12_all_tariffs <- hs12_all_tariffs[!duplicated(hs12_all_tariffs[,
c("Reporter_ISO_N", "Year", "ProductCode", "Partner Code", "SimpleAverage.y", "Max_Rate.y")])]
#If more than one avg tariff rate, keep lower of two
hs12_all_tariffs <- hs12_all_tariffs %>%
group_by(Reporter_ISO_N, Year, ProductCode, `Partner Code`) %>%
filter(SimpleAverage.y==min(SimpleAverage.y)|is.na(SimpleAverage.y))
#Take lower max rate if still some non-unique rates
hs12_all_tariffs <- hs12_all_tariffs %>%
group_by(Reporter_ISO_N, Year, ProductCode, `Partner Code`) %>%
filter(Max_Rate.y==min(Max_Rate.y)|is.na(Max_Rate.y))
hs12_all_tariffs <- as.data.table(hs12_all_tariffs)
# overwrite the MFN (.x) rate columns with the preferential (.y) rates
# wherever a preferential rate exists
hs12_all_tariffs$Sum_Of_Rates.x[!is.na(hs12_all_tariffs$Sum_Of_Rates.y)] <-
hs12_all_tariffs$Sum_Of_Rates.y[!is.na(hs12_all_tariffs$Sum_Of_Rates.y)]
hs12_all_tariffs$Min_Rate.x[!is.na(hs12_all_tariffs$Min_Rate.y)] <-
hs12_all_tariffs$Min_Rate.y[!is.na(hs12_all_tariffs$Min_Rate.y)]
hs12_all_tariffs$Max_Rate.x[!is.na(hs12_all_tariffs$Max_Rate.y)] <-
hs12_all_tariffs$Max_Rate.y[!is.na(hs12_all_tariffs$Max_Rate.y)]
hs12_all_tariffs$SimpleAverage.x[!is.na(hs12_all_tariffs$SimpleAverage.y)] <-
hs12_all_tariffs$SimpleAverage.y[!is.na(hs12_all_tariffs$SimpleAverage.y)]
hs12_all_tariffs$TotalNoOfLines.x[!is.na(hs12_all_tariffs$TotalNoOfLines.y)] <-
hs12_all_tariffs$TotalNoOfLines.y[!is.na(hs12_all_tariffs$TotalNoOfLines.y)]
hs12_all_tariffs$Nbr_NA_Lines.x[!is.na(hs12_all_tariffs$Nbr_NA_Lines.y)] <-
hs12_all_tariffs$Nbr_NA_Lines.y[!is.na(hs12_all_tariffs$Nbr_NA_Lines.y)]
# drop the now-redundant .y columns
hs12_all_tariffs <- hs12_all_tariffs[,
c("Sum_Of_Rates.y", "Min_Rate.y", "Max_Rate.y", "SimpleAverage.y",
"TotalNoOfLines.y", "Nbr_NA_Lines.y", "EstCode.y", "NomenCode.y") :=NULL]
# strip the ".x" suffix from the kept columns.
# NOTE(review): the pattern ".x" is an unescaped/unanchored regex ("." is
# any character), so this relies on no other column name containing or
# ending in "x" -- verify against the actual column set.
colnames(hs12_all_tariffs)[grep(".x",colnames(hs12_all_tariffs))] <-
gsub(".x$","",colnames(hs12_all_tariffs)[grep(".x",colnames(hs12_all_tariffs))])
save(hs12_all_tariffs,file = paste(DataPath,"Analysis Data","hs12_all_tariffs_qty.Rda", sep = "/"))
|
9519cafd269950dc05b5e7381f6c2d6b52efe8e2 | 51ab4dbee3dc15ff8eb207fe618f1217c1cebcf2 | /man/prepData.Rd | 05bdfe707949825c76d01bf072584bc62e38d6dc | [] | no_license | cran/BIGDAWG | 8c9eb1335d8033713c23f9b2180161a52c0d6d78 | ead6acb4ef827015d1027a15539fcd1a86f724f1 | refs/heads/master | 2021-11-26T13:53:34.350326 | 2021-11-17T10:50:14 | 2021-11-17T10:50:14 | 37,374,985 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 365 | rd | prepData.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/general_functions.R
\name{prepData}
\alias{prepData}
\title{Prepare imported data}
\usage{
prepData(Tab)
}
\arguments{
\item{Tab}{Genotypes dataframe.}
}
\description{
Prepare imported data for processing, checks, and analysis.
}
\note{
This function is for internal BIGDAWG use only.
}
|
217c3877af6d8bc20c6f11c6f2e147671337f207 | c4670bf1594581622401a727791cd4d8283c5f4e | /Simulation/ACS_Prep.R | 73a448a4ca1871f5cb54422d63bf97e4c1f04d5c | [] | no_license | conor-ryan/Imperfect_Insurance_Competition_Code | db274b3818a97b240de08f05e79a5dedee246da1 | e9ed4927f6a7a7670ec235a669b61b23509cc372 | refs/heads/master | 2023-07-20T05:51:29.180132 | 2023-07-05T23:06:10 | 2023-07-05T23:06:10 | 112,538,023 | 0 | 3 | null | null | null | null | UTF-8 | R | false | false | 4,441 | r | ACS_Prep.R | rm(list=ls())
library(doBy)
library(randtoolbox)
library(data.table)
setwd("C:/Users/Conor/Documents/Research/Imperfect_Insurance_Competition")
## Run
# run = "2019-03-12"
#### 2015 Subsidy Percentage Function ####
# Convert household income, expressed as a multiple of the Federal Poverty
# Line, into the 2015 ACA maximum premium-contribution share of income.
#
# The schedule is piecewise linear: interpolated within each FPL band,
# flat at 9.56% for 3-4x FPL, and 100% (no subsidy cap) outside 1-4x FPL.
# NA incomes stay NA.
subsPerc <- function(FPL){
  observed <- !is.na(FPL)
  fpl <- FPL[observed]
  pct <- rep(100, length(fpl))
  # each band: lower FPL bound, upper FPL bound, contribution % at each end
  bands <- list(
    c(1.00, 1.33, 2.01, 3.02),
    c(1.33, 1.50, 3.02, 4.02),
    c(1.50, 2.00, 4.02, 6.34),
    c(2.00, 2.50, 6.34, 8.10),
    c(2.50, 3.00, 8.10, 9.56)
  )
  for (b in bands) {
    inside <- fpl >= b[1] & fpl < b[2]
    pct[inside] <- b[3] + (fpl[inside] - b[1]) / (b[2] - b[1]) * (b[4] - b[3])
  }
  pct[fpl >= 3 & fpl <= 4] <- 9.56
  # convert to a share of income, leaving missing incomes as NA
  HHcont <- rep(NA, length(FPL))
  HHcont[observed] <- pct / 100
  return(HHcont)
}
#### Read in ACS Exchange Eligible Data ####
acs = read.csv("Data/2015_ACS/exchangePopulation2015.csv")
acs = as.data.table(acs)
setkey(acs,STATEFIP,PUMA)
#Uninsured Rate (printed to console for a quick sanity check)
with(acs,sum(uninsured*PERWT)/sum(PERWT))
acs$person = rownames(acs)
#### Match PUMA to Rating Area ####
areaMatch = read.csv("Intermediate_Output/Zip_RatingArea/PUMA_to_RatingArea.csv")
areaMatch = as.data.table(areaMatch)
acs = merge(acs,areaMatch[,c("PUMA","RatingArea","ST","STATEFIP","alloc")],by=c("STATEFIP","PUMA"),all.x=TRUE,allow.cartesian = TRUE)
# Distribute weight by population prob that observation is in a given Rating Area
acs[,PERWT:=PERWT*alloc]
# make households unique within rating area
acs[,household:=as.factor(paste(household,gsub("Rating Area ","",RatingArea),sep="-"))]
acs[,insured:=!all(uninsured),by="household"]
acs = acs[,c("household","HHincomeFPL","HH_income","AGE","SEX","PERWT","RatingArea","ST","insured")]
names(acs) = c("household","HHincomeFPL","HH_income","AGE","SEX","PERWT","AREA","ST","insured")
#### Household Characteristics ####
rating = read.csv("Data/AgeRating.csv")
rating = as.data.table(rating)
# Create truncated Age variable (age rating curves are flat below 14 / above 64)
acs$AgeMatch = acs$AGE
acs$AgeMatch[acs$AGE<14] = 14
acs$AgeMatch[acs$AGE>64] = 64
# Merge in Default and State-Specific Age Rating Curves;
# state-specific curves override the default where present
acs = merge(acs,rating[rating$State=="Default",c("Age","Rating")],by.x="AgeMatch",by.y="Age",all.x=TRUE)
acs = merge(acs,rating[rating$State!="Default",],by.x=c("ST","AgeMatch"),by.y=c("State","Age"),all.x=TRUE)
acs$ageRate = acs$Rating.x
acs$ageRate[!is.na(acs$Rating.y)] = acs$Rating.y[!is.na(acs$Rating.y)]
# Drop redundant rating variables
acs = acs[,c("Rating.x","Rating.y"):=NULL]
rm(rating)
# Merge in Age-specific HHS-HCC Risk Adjustment Factors
HCC = read.csv("Risk_Adjustment/2014_HHS_HCC_AgeRA_Coefficients.csv")
names(HCC) = c("Sex","Age","PlatHCC_Age","GoldHCC_Age","SilvHCC_Age","BronHCC_Age","CataHCC_Age")
# HCC factors are published in 5-year age bins starting at 21
acs[,AgeMatch:= pmax(floor(AGE/5)*5,21)]
acs = merge(acs,HCC,by.x=c("AgeMatch","SEX"),by.y=c("Age","Sex"))
#Count Members
setkey(acs,household)
acs$MEMBERS=1
#Age of HoH
acs[,MaxAge:=max(AGE),by="household"]
acs[,AvgAge:=AGE*PERWT]
#Count Children -- only the three oldest children are age-rated
acs[,childRank:=rank(AGE,ties.method="first"),by="household"]
acs$childRank[acs$AGE>18] = NA
acs$ageRate[!is.na(acs$childRank)&acs$childRank>3]=0
acs$catas_cnt = as.numeric(acs$AGE<=30)
# weight the per-person quantities before collapsing to households
acs$ageRate_avg = acs$ageRate*acs$PERWT
acs[,PlatHCC_Age:=PlatHCC_Age*PERWT]
acs[,GoldHCC_Age:=GoldHCC_Age*PERWT]
acs[,SilvHCC_Age:=SilvHCC_Age*PERWT]
acs[,BronHCC_Age:=BronHCC_Age*PERWT]
acs[,CataHCC_Age:=CataHCC_Age*PERWT]
# collapse to one row per household, summing weighted quantities
acs = acs[,lapply(.SD,sum),by=c("household","HHincomeFPL","HH_income","MaxAge","AREA","ST","insured"),
.SDcols = c("MEMBERS","AvgAge","ageRate","ageRate_avg","PERWT","catas_cnt",
"PlatHCC_Age","GoldHCC_Age","SilvHCC_Age","BronHCC_Age","CataHCC_Age")]
names(acs) = c("household","HHincomeFPL","HH_income","AGE","AREA","ST","insured",
"MEMBERS","AvgAge","ageRate","ageRate_avg","PERWT","catas_cnt",
"PlatHCC_Age","GoldHCC_Age","SilvHCC_Age","BronHCC_Age","CataHCC_Age")
# convert weighted sums back to weighted averages
acs[,AvgAge:=AvgAge/PERWT]
acs$ageRate_avg = with(acs,ageRate_avg/PERWT)
acs[,PlatHCC_Age:=PlatHCC_Age/PERWT]
acs[,GoldHCC_Age:=GoldHCC_Age/PERWT]
acs[,SilvHCC_Age:=SilvHCC_Age/PERWT]
acs[,BronHCC_Age:=BronHCC_Age/PERWT]
acs[,CataHCC_Age:=CataHCC_Age/PERWT]
acs$FAMILY_OR_INDIVIDUAL = "INDIVIDUAL"
acs$FAMILY_OR_INDIVIDUAL[acs$MEMBERS>1] = "FAMILY"
# households are catastrophic-eligible only if every member is <= 30
acs$catas_elig = acs$catas_cnt==acs$MEMBERS
save(acs,file="Intermediate_Output/Simulated_BaseData/acs_unrest.rData")
# Drop heads of household that are under 18 - 2,041
acs = acs[AGE>=18,]
save(acs,file="Intermediate_Output/Simulated_BaseData/acs_prepped.rData")
b15cb202ed4b231f0ca5bc978d93c5865b6260f9 | ca66f0414b5ffea6c4513edf8537f008ea9fc921 | /Clusterização/Municípios de SP/municipios_sp_kmeans.R | 7e12193f0905e37778f9347331e98c3b74ffa944 | [] | no_license | hernandesmjunior/R-Machine-Learning | c1ab417baab65673a1c64aa4ed0b32f28c186ac5 | 498c46be799da483787625b0a2eb876182c3b7eb | refs/heads/main | 2023-06-05T03:54:59.752224 | 2021-06-20T17:20:36 | 2021-06-20T17:20:36 | null | 0 | 0 | null | null | null | null | ISO-8859-1 | R | false | false | 1,946 | r | municipios_sp_kmeans.R | #############################################################
#
# NON-HIERARCHICAL CLUSTERING (k-means) - Municipalities of Sao Paulo
#
#############################################################
# Load libraries
library(tidyverse)  # data manipulation
library(cluster)    # clustering algorithms
library(factoextra) # clustering algorithms & visualisation
library(fpc)        # clustering algorithms & visualisation
library(gridExtra)  # grid.arrange()
library(readxl)     # data import
# Load the municipality data; the first column holds the municipality names,
# which become row names so only numeric columns are clustered.
# (header = TRUE spelled out: `T` is a reassignable binding, not a keyword.)
municipios <- read.table("dados/municipios.csv", sep = ";", header = TRUE, dec = ",")
rownames(municipios) <- municipios[,1]
municipios <- municipios[,-1]
# Standardise the variables (z-scores) so no single variable dominates the
# Euclidean distances used by k-means.
municipios.padronizado <- scale(municipios)
# Fix the RNG seed so the random k-means starts (and thus cluster labels)
# are reproducible across runs.
set.seed(123)
# Run k-means with 3 to 6 centres and compare the partitions visually
municipios.k3 <- kmeans(municipios.padronizado, centers = 3)
municipios.k4 <- kmeans(municipios.padronizado, centers = 4)
municipios.k5 <- kmeans(municipios.padronizado, centers = 5)
municipios.k6 <- kmeans(municipios.padronizado, centers = 6)
# Cluster plot for each k
G1 <- fviz_cluster(municipios.k3, geom = "point", data = municipios.padronizado) + ggtitle("k = 3")
G2 <- fviz_cluster(municipios.k4, geom = "point", data = municipios.padronizado) + ggtitle("k = 4")
G3 <- fviz_cluster(municipios.k5, geom = "point", data = municipios.padronizado) + ggtitle("k = 5")
G4 <- fviz_cluster(municipios.k6, geom = "point", data = municipios.padronizado) + ggtitle("k = 6")
# 2x2 grid with the four plots
grid.arrange(G1, G2, G3, G4, nrow = 2)
# Elbow plot (within-cluster sum of squares) to help choose k
fviz_nbclust(municipios.padronizado, FUN = hcut, method = "wss")
# Re-read the raw data (keeping the name column) and attach the k = 6 labels
municipios2 <- read.table("dados/municipios.csv", sep = ";", header = TRUE, dec = ",")
municipiosfit <- data.frame(municipios.k6$cluster)
# Bind the cluster assignment onto the original data
MunicipioFinal <- cbind(municipios2, municipiosfit)
|
1c6404c9c58b5bf18198ed35eddf6a43f8deef7f | 3819c5c65f13b185b8fb714d7349abfecb793a72 | /man/dot-optimalClass.Rd | 72dcf71f337e1786c540b1b447313ab534a686b2 | [] | no_license | cran/DynTxRegime | ed877579c6ffc6156fb6c84298a58d1db5940dff | 9ecb35dfd9abf9617e0179d3d4d552dce22314e5 | refs/heads/master | 2023-06-25T03:37:01.776586 | 2023-04-25T13:50:11 | 2023-04-25T13:50:11 | 37,244,072 | 1 | 1 | null | null | null | null | UTF-8 | R | false | true | 1,107 | rd | dot-optimalClass.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/J_class_OptimalClass.R
\name{.optimalClass}
\alias{.optimalClass}
\title{Perform Classification Step}
\usage{
.optimalClass(
moPropen,
moMain,
moCont,
moClass,
data,
response,
txName,
iter,
fSet,
suppress,
step
)
}
\arguments{
\item{moPropen}{model object(s) for propensity regression}
\item{moMain}{model object(s) for main effects of outcome regression or NULL}
\item{moCont}{model object(s) for contrasts of outcome regression or NULL}
\item{moClass}{model object(s) for classification procedure}
\item{data}{data.frame of covariates and treatment history}
\item{response}{vector of responses}
\item{txName}{character of column header of data containing tx}
\item{iter}{maximum number of iterations for outcome regression or NULL}
\item{fSet}{function defining subsets or NULL}
\item{suppress}{T/F indicating screen printing preference}
\item{step}{integer indicating step of algorithm}
}
\value{
an object of class OptimalClass
}
\description{
Perform Classification Step
}
\keyword{internal}
|
8ed0176cf69dc4a043bfcf5b0849ddbd85f11592 | 38c2e53c26200336dc0e8bf5fac819c7a8b88d42 | /R/grouped_EDA_with_nested_tables.R | a8690bfabf4fab2624931cbf7f0d23b61085653b | [] | no_license | poorjanos/Work-NBA-Process | 0d0fea3f41799cadddcfaf3328ccd2b8e6cd933b | 52ef70117f9ca3d0b39d935034a5b170492bee09 | refs/heads/master | 2021-07-04T20:43:11.223287 | 2019-02-08T13:50:09 | 2019-02-08T13:50:09 | 134,841,378 | 2 | 0 | null | null | null | null | WINDOWS-1250 | R | false | false | 3,780 | r | grouped_EDA_with_nested_tables.R | library(bupaR)
library(processmapR)
library(DiagrammeR)
library(tidyverse)
library(lubridate)
library(broom)
# Load dataset
# Event-log export: parse the timestamp strings, coerce the categorical
# columns to factors, and map the AUTOUW flag ("I" = automatic) onto
# readable labels (labels kept in Hungarian, as produced downstream).
t_event_log_app <- read.csv(here::here("Data", "t_event_log.csv"),
                            stringsAsFactors = FALSE) %>%
  mutate(
    TIMESTAMP = ymd_hms(TIMESTAMP),
    PRODUCT_LINE = as.factor(PRODUCT_LINE),
    SALES_CHANNEL = as.factor(SALES_CHANNEL),
    MEDIUM_TYPE = as.factor(MEDIUM_TYPE),
    AUTOUW = as.factor(case_when(
      .$AUTOUW == "I" ~ "Automatikus",
      TRUE ~ "Manuális"
    ))
  )
# Select cols to transform to eventlog with bupar::eventlog
t_event_log_clean <- t_event_log_app %>%
  select(CASE_ID, EVENT_NAME, TIMESTAMP, ACTIVITY_INST_ID, LIFECYCLE_ID, PARTNER_NAME, PRODUCT_LINE)
# Simple nesting (kept for reference; the real nesting happens further down)
# by_product <- t_event_log_clean %>%
#   group_by(PRODUCT_LINE) %>%
#   nest()
# Data manipulation funcs to use in purrr::map
# Count distinct traces (activity sequences) in one nested sub-log.
# `df` is a plain data frame of events for a single group; it is promoted
# to a bupaR event log and summarised with number_of_traces().
trace_num <- function(df) {
  log <- eventlog(
    df,
    case_id = "CASE_ID",
    activity_id = "EVENT_NAME",
    activity_instance_id = "ACTIVITY_INST_ID",
    lifecycle_id = "LIFECYCLE_ID",
    timestamp = "TIMESTAMP",
    resource_id = "PARTNER_NAME"
  )
  number_of_traces(log)
}
# Trace-level trace_coverage() summary for one nested sub-log: the same
# eventlog promotion as trace_num(), with the coverage metric applied at
# level = "trace".
trace_cov <- function(df) {
  log <- eventlog(
    df,
    case_id = "CASE_ID",
    activity_id = "EVENT_NAME",
    activity_instance_id = "ACTIVITY_INST_ID",
    lifecycle_id = "LIFECYCLE_ID",
    timestamp = "TIMESTAMP",
    resource_id = "PARTNER_NAME"
  )
  trace_coverage(log, level = "trace")
}
# Returns df 7x1 -> unnest will fail
# through_time <- function(df) {
# throughput_time(
# eventlog(
# df,
# case_id = "CASE_ID",
# activity_id = "EVENT_NAME",
# activity_instance_id = "ACTIVITY_INST_ID",
# lifecycle_id = "LIFECYCLE_ID",
# timestamp = "TIMESTAMP",
# resource_id = "PARTNER_NAME"
# ),
# level = "log", units = "day"
# )[c("mean", "median", "min", "max", "st_dev", "q1", "q3")]
# }
# Returns df 1x7 -> unnest will work
# Log-level throughput-time summary (in days) for one nested sub-log,
# reshaped into a single-row data frame so tidyr::unnest() works downstream
# (the raw metric subset is 7x1; spreading it yields 1x7).
through_time <- function(df) {
  stat_names <- c("mean", "median", "min", "max", "st_dev", "q1", "q3")
  log <- eventlog(
    df,
    case_id = "CASE_ID",
    activity_id = "EVENT_NAME",
    activity_instance_id = "ACTIVITY_INST_ID",
    lifecycle_id = "LIFECYCLE_ID",
    timestamp = "TIMESTAMP",
    resource_id = "PARTNER_NAME"
  )
  stats <- throughput_time(log, level = "log", units = "day")[stat_names]
  # long 7x2 frame (metric, values), then spread to 1x7
  long <- data.frame(metric = stat_names, values = stats, row.names = NULL)
  tidyr::spread(data = long, key = metric, value = values)
}
# Log-level trace-length summary (activities per case) for one nested
# sub-log, reshaped to a single-row data frame for unnest().
# FIX: dropped the `units = "day"` argument that was copied over from the
# throughput-time helper -- trace length counts activities and has no time
# unit, so the argument was at best ignored.
trace_len <- function(df) {
  stat_names <- c("mean", "median", "min", "max", "st_dev", "q1", "q3", "iqr")
  log <- eventlog(
    df,
    case_id = "CASE_ID",
    activity_id = "EVENT_NAME",
    activity_instance_id = "ACTIVITY_INST_ID",
    lifecycle_id = "LIFECYCLE_ID",
    timestamp = "TIMESTAMP",
    resource_id = "PARTNER_NAME"
  )
  stats <- trace_length(log, level = "log")[stat_names]
  # long 8x2 frame (metric, values), then spread to 1x8 so unnest() works
  long <- data.frame(metric = stat_names, values = stats, row.names = NULL)
  tidyr::spread(data = long, key = metric, value = values)
}
# Gen nested tables with aggregated stats in nested tables
# One row per product line: `data` holds that line's events, and each list
# column holds the corresponding per-group summary computed by the helper
# functions defined above.
by_product <- t_event_log_clean %>%
  group_by(PRODUCT_LINE) %>%
  nest() %>%
  mutate(
    trace_number = map(data, trace_num),
    through_time = map(data, through_time),
    trace_length = map(data, trace_len)
  )
# Retrieve aggregates (unnest each list column into a flat table)
by_product %>% select(PRODUCT_LINE, trace_number) %>% unnest()
by_product %>% select(PRODUCT_LINE, through_time) %>% unnest()
by_product %>% select(PRODUCT_LINE, trace_length) %>% unnest()
|
4ed596ba07612ac34a6e509a7ac9d4c80bca7d33 | a5c8aa1ed795d6b34b53ec33bc39a0670e1ca29f | /code/02-dgirt/holdover/clean-cces.R | 54a655423aadfc91234e0221704c4336c8dfe411 | [] | no_license | mikedecr/dissertation | 6a15f45647f1205f6f3fe901108a5917f231e473 | 06ec3d304821ee09a28e42ba0bc11318d774b5ed | refs/heads/main | 2021-06-24T08:52:45.349588 | 2020-11-14T17:57:54 | 2020-11-14T17:57:54 | 282,011,623 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 19,273 | r | clean-cces.R | # ----------------------------------------------------
# CCES cleaning
# run on 2012 data (112th congress from 2010-2012)
# (written fall or winter 2018/2019; CCES data used in June 2019)
# (CCES work will be generalized into 'survey-algo')
# (Hierarchical Covariates already redone a little bit)
# ----------------------------------------------------
library("here")
library("magrittr")
library("tidyverse")
library("ggplot2")
library("scales")
library("labelled")
library("broom")
# boxr authenticates against Box; the raw data files are read by box ID below
library("boxr"); box_auth()
# library("broom")
library("latex2exp")
library("rstan")
# cache compiled Stan models and parallelise chains across available cores
rstan_options(auto_write = TRUE)
options(mc.cores = min(parallel::detectCores(), 10))
# will show nothing on linstat (if no data pushed)
list.files("data/cces-cdf")
# ggplot theme used by every figure below
theme_set(
  ggthemes::theme_base(base_family = "Source Sans Pro", base_size = 14) +
  theme(plot.background = element_blank(),
        axis.ticks = element_line(lineend = "square"),
        axis.ticks.length = unit(0.25, "lines"))
)
# ----------------------------------------------------
# data
# ----------------------------------------------------
# meta data (come back to this)
# One-row bookkeeping table describing this survey source: polling firm,
# wave label ("cdf" = cumulative data file), and the weight column name.
# FIX: data_frame() is deprecated in tibble/dplyr -- tibble() is the direct
# replacement; NA_character_ is the typed-NA spelling of as.character(NA).
meta_cces <-
  tibble(firm = "cces",
         date = "cdf",
         roper_id = NA_character_,
         wt = "weight",
         Qs = NA_character_) %>%
  print()
# CCES responses
# (read from Box by file ID; the commented haven:: line is the local-disk
# equivalent of the same file)
cc <- box_read(369447961216) %>%
  # cc <- haven::read_dta(here("data/cces-cdf/cces_common_cumulative_4.dta")) %>%
  as_tibble() %>%
  mutate_all(remove_labels) %>%
  print()
# state fips codes
fips <- box_read(377757394379) %>%
  # read_csv(here("data", "census", "census-state-fips.csv")) %>%
  as_tibble() %>%
  mutate_if(is.integer, as.numeric) %>%
  print()
# district covariates
# at-large coded as 1 for 'cd' variable
fm_raw <- box_read(377781507739) %>%
  as_tibble() %>%
  # read_csv(here("data", "secondary", "foster-molina",
  #               "allCongressDataPublish.csv")) %>%
  print()
# tools for finding the Congress number
1789 - 2 + (93 * 2)
# dime data
dime_raw <- box_read(379360058073) %>%
  as_tibble() %>%
  print()
# who's in DWDIME that has lost a primary??
# is DWDIME a route (not important rn though)
dime_raw %>%
  filter(ran.primary == 1 | p.elec.stat %in% c("W", "L")) %>%
  count(!is.na(dwdime), p.elec.stat)
# worth investigating more
dime_raw %>%
  count(seat)
# ----------------------------------------------------
# Recoding
# ----------------------------------------------------
# ---- CCES items -----------------------
# racial_resent_special_favors # 5pt, 3 is no opinion
# racial_resent_slavery # 5pt, 3 is no opinion
# jobs_environment # 5 pt, 3 and 6 are NA
# Recode items
# merge FIPS to fix state and district numbers
# recode at-large districts = 1
# Each q_* column is a binary recode of one survey item (direction set per
# item; unlisted response codes fall through to NA).  Prefixes mark the
# domain: q_r_ race, q_e_ economic, q_s_ social.
# NOTE(review): q_r_aff.action and q_s_gay.marry are each defined TWICE in
# this mutate(); within a single mutate() the later definition silently
# overwrites the earlier, so only the *_06 recodes survive.  Confirm the
# two question versions were not meant to be coalesced instead.
cc_rc <- cc %>%
  mutate(
    q_r_favors = case_when(racial_resent_special_favors %in% c(1, 2) ~ 1,
                           racial_resent_special_favors %in% c(4, 5) ~ 0),
    q_r_slavery = case_when(racial_resent_slavery %in% c(4, 5) ~ 1,
                            racial_resent_slavery %in% c(1, 2) ~ 0),
    q_e_jobs.env = case_when(jobs_environment %in% c(4, 5) ~ 1,
                             jobs_environment %in% c(1, 2) ~ 0),
    q_e_ss.priv = case_when(soc_sec_private %in% c(1, 2) ~ 1,
                            soc_sec_private %in% c(4, 5) ~ 0),
    q_s_imm.status = case_when(immig_legal_status == 1 ~ 0,
                               immig_legal_status == 2 ~ 1),
    q_s_imm.guestwork = case_when(immig_guest_worker == 1 ~ 0,
                                  immig_guest_worker == 2 ~ 1),
    q_s_imm.fines = case_when(immig_fine_businesses == 1 ~ 1,
                              immig_fine_businesses == 2 ~ 0),
    q_s_imm.patrol = case_when(immig_border_patrol == 1 ~ 1,
                               immig_border_patrol == 2 ~ 0),
    q_s_imm.birthcit = case_when(immig_auto_citizenship == 1 ~ 1,
                                 immig_auto_citizenship == 2 ~ 0),
    q_s_imm.police = case_when(immig_police_question == 1 ~ 1,
                               immig_police_question == 2 ~ 0),
    q_s_imm.wall = case_when(immig_border_wall == 1 ~ 1,
                             immig_border_wall == 2 ~ 0),
    q_s_imm.public = case_when(immig_hosp_school == 1 ~ 1,
                               immig_hosp_school == 2 ~ 0),
    q_r_aff.action = case_when(affirm_action %in% c(1, 2) ~ 0,
                               affirm_action %in% c(3, 4) ~ 1),
    q_r_aff.action = case_when(affirm_action_06 %in% c(1, 2, 3) ~ 0,
                               affirm_action_06 %in% c(5, 6, 7) ~ 1),
    q_s_gay.marry = case_when(gay_marriage_amendment == 1 ~ 1,
                              gay_marriage_amendment == 2 ~ 0),
    q_s_gay.marry = case_when(gay_marriage_amendment_06 %in% c(1, 2) ~ 1,
                              gay_marriage_amendment_06 %in% c(3, 4) ~ 0),
    q_s_gun.control = case_when(gun_control == 1 ~ 0,
                                gun_control == 2 ~ 1),
    q_s_stem.cells = case_when(stem_cell_research == 1 ~ 0,
                               stem_cell_research == 2 ~ 1),
    q_s_imm.citizenship = case_when(opinion_immig_citizenship == 1 ~ 0,
                                    opinion_immig_citizenship == 2 ~ 1),
    q_e_min.wage = case_when(opinion_minwage == 1 ~ 0,
                             opinion_minwage == 2 ~ 1),
    q_s_partial.birth = case_when(opinion_partial_birth == 1 ~ 0,
                                  opinion_partial_birth == 2 ~ 1),
    q_e_stimulus = case_when(opinion_stimulus == 1 ~ 0,
                             opinion_stimulus == 2 ~ 1),
    q_e_aca = case_when(opinion_affordablecareact == 1 ~ 0,
                        opinion_affordablecareact == 2 ~ 1),
    q_e_cap.trade = case_when(opinion_captrade == 1 ~ 0,
                              opinion_captrade == 2 ~ 1),
    q_s_dadt.repeal = case_when(opinion_dadt_repeal == 1 ~ 0,
                                opinion_dadt_repeal == 2 ~ 1),
    party = case_when(pid3 == 1 ~ "D",
                      pid3 == 2 ~ "R")
  ) %>%
  left_join(fips, by = c("state_pre" = "state_FIPS")) %>%
  mutate(state_n = as.numeric(as.factor(state)),
         dist_n = case_when(congdist_pre == 0 ~ 1,
                            TRUE ~ congdist_pre)) %>%
  print()
# how many responses per question?
cc_rc %>%
  filter(year == 2012) %>%
  gather(key = item, value = value, starts_with("q_")) %>%
  group_by(year) %>%
  count(item, value) %>%
  filter(value %in% c(0, 1)) %>%
  print(n = nrow(.))
# identify other relevant data
# party (x), state (x), district (x), covariates ( )
cc_rc %>%
  count(state, state_n, party, dist_n) %>%
  spread(key = party, value = n)
count(cc_rc, congdist_pre, dist_n) %>%
  print(n = nrow(.))
count(cc, state_pre, congdist_pre) %>%
  arrange(congdist_pre)
count(cc, state_pre) %>% print(n = nrow(.))
# ---- covariates from Foster-Molina -----------------------
# 112th Congress is Jan 2011--2013, most appropes for 2012 opinion data?
# concepts:
# income: medianIncome, gini,
#         under10k, over10k, over15k, over100k, over150k, over200k
# ed: prcntHS, prcntBA
# race: prcntWhite, prcntWhiteAll, prcntNotHisp,
# presidential vote/partisanship (DIME)
# Keep 112th-Congress rows for the 50 states + DC; parse the numeric
# district out of stateDist (cd used where present, at-large recoded to 1)
# and build a zero-padded state_dist key for later joins.
fm_cong <- fm_raw %>%
  filter(congNum == 112) %>%
  filter(state %in% c("DC", state.abb)) %>%
  select(
    stateDist,
    medianIncome, gini, under10k, over10k, over15k, over100k, over150k, over200k,
    prcntHS, prcntBA,
    prcntWhite, prcntWhiteAll, prcntNotHisp,
    icpsr, state, district, cd, statenm) %>%
  mutate(
    dist_num = ifelse(is.na(cd),
                      str_split(stateDist, pattern = "[.]", simplify = TRUE)[,2],
                      cd) %>%
      as.numeric(),
    dist_num = ifelse(dist_num == 0, 1, dist_num),
    state_dist =
      case_when(nchar(stateDist) == 4 ~ str_glue("{state}_0{dist_num}"),
                nchar(stateDist) == 5 ~ str_glue("{state}_{dist_num}")) %>%
      as.character()
  ) %>%
  rename(state.dist = stateDist,
         district_raw = district) %>%
  print()
# at-large to equal 1 eventually?
fm_cong %>% select(state.dist, dist_num)
# figure out if at-larges are overlapping with any others?
# ---- covariates from DIME -----------------------
# district.partisanship, district.pres.vs
# need to match CD and State code
# - fix some district codes to match the rest of the scheme
# are the covariates unique per case
dime_raw %>%
  filter(seat == "federal:house") %>%
  group_by(cycle, district) %>%
  summarize(dist_pres = n_distinct(district.pres.vs),
            dist_partisan = n_distinct(district.partisanship)) %>%
  count(dist_pres, dist_partisan) %>%
  print(n = nrow(.))
# aggregate
# One row per 2012 House district: pad 1- and 2-character district codes to
# the STxx form, then keep the district presidential vote share and the
# district partisanship measure (unique per district per the check above).
dime_cong <- dime_raw %>%
  filter(seat == "federal:house") %>%
  filter(cycle == 2012) %>%
  filter(state %in% c("DC", state.abb)) %>%
  mutate(district =
           case_when(nchar(district) == 1 ~ str_glue("{state}0{district}"),
                     nchar(district) == 2 ~ str_glue("{state}{district}"),
                     TRUE ~ district)) %>%
  group_by(state, district) %>%
  summarize(past_repvote = unique(district.pres.vs),
            past_kernell = unique(district.partisanship)) %>%
  ungroup() %>%
  mutate(dist_padded = str_sub(district, -2L, -1L),
         dist_num = as.numeric(dist_padded),
         state_dist = str_glue("{state}_{dist_padded}") %>% as.character()) %>%
  rename(statedist = district) %>%
  print()
count(dime_cong, dist_padded, dist_num) %>%
  print(n = nrow(.))
# how does this handle at-larges?
dime_cong %>%
  filter(dist_padded %in% "NA")
# ----------------------------------------------------
# Merge
# ----------------------------------------------------
# ---- district level -----------------------
# Which districts fail to match across the two covariate sources?
anti_join(fm_cong, dime_cong) %>%
  select(state.dist, state, state_dist, dist_num)
anti_join(dime_cong, fm_cong) %>%
  filter(is.na(dist_num) == FALSE) %>%
  select(statedist, state, state_dist, dist_num)
# One row per district: join the two covariate files, then break the
# remaining duplicate (state, dist_num) keys by keeping a single row.
# NOTE(review): sample_n(1) is an unseeded random tie-break, so duplicate
# districts may resolve differently across runs.
d_level <-
  inner_join(fm_cong, dime_cong,
             by = c("state", "dist_num")) %>%
  filter(is.na(cd) == FALSE | is.na(dist_num) == FALSE) %>% # comment to turn "duplicates" on
  group_by(state, dist_num) %>% # comment to turn "duplicates" on
  sample_n(1) %>% # comment to turn "duplicates" on
  print()
fm_cong %>%
  filter(state.dist == "OH.8") %>%
  pull(cd)
dime_cong %>%
  filter(statedist == "OH08") %>%
  pull(dist_num)
# duplicate districts in the district data?
# (turn duplicates on to investigate)
d_dupes <- d_level %>%
  count(state, dist_num) %>%
  filter(n > 1) %>%
  print() %$%
  str_glue("{state}_{dist_num}") %>%
  print()
names(d_level)
# where don't we have unique data?
# (for each duplicated district, list the columns taking 2 distinct values)
d_level %>%
  filter(str_glue("{state}_{dist_num}") %in% d_dupes) %>%
  group_by(state.dist) %>%
  nest() %>%
  mutate(
    dist =
      map(data, ~ .x %>%
            mutate_all(n_distinct) %>%
            select_if(function(x) 2 %in% x)
      )
  ) %>%
  unnest(dist) %>%
  print()
# some NA for cd (dropped above)
# non-agreeing ICPSR numbers (dropped above)
d_level %>%
  filter(str_glue("{state}_{dist_num}") %in% d_dupes) %>%
  select(state.dist, icpsr, cd, statenm, medianIncome, gini, prcntBA, prcntWhite, past_kernell) %>%
  pull(statenm)
# ---- i_level -----------------------
# Individual-level data: 2012 respondents who identify with a major party,
# keeping the recoded items, survey weight, and district identifiers.
# FIX: cc_rc$party was recoded above to the strings "D"/"R" (from pid3),
# so the original filter(party %in% c(1, 2)) could never match and yielded
# zero rows; filter on the recoded labels instead (consistent with the
# D/R counts inspected earlier in the script).
i_level <- cc_rc %>%
  filter(year == 2012) %>%
  filter(party %in% c("D", "R")) %>%
  select(year, weight, starts_with("q_"), party,
         state_nm = state, state = state_abb, state_num = state_n,
         dist_num = dist_n) %>%
  print()
count(i_level, state)
i_level %>% count(state, dist_num)
# ---- join em -----------------------
anti_join(i_level, d_level, by = c("state", "dist_num"))
anti_join(d_level, i_level, by = c("state", "dist_num"))
# how does this add cases?
# group_num indexes the party-in-district groups (the model's geo units):
# one integer per unique state-district-party combination.
joiny <- inner_join(i_level, d_level, by = c("state", "dist_num")) %>%
  filter(state != "DC") %>%
  ungroup() %>%
  mutate(group_num = str_glue("{state}-{dist_num}--{party}") %>%
           as.factor() %>%
           as.numeric()) %>%
  print()
# 435!
joiny %>%
  count(state, dist_num)
# elongate the joined data by item
longo <- joiny %>%
  gather(key = item_name, value = response, starts_with("q_")) %>%
  filter(is.na(response) == FALSE) %>%
  mutate(item_num = as.numeric(as.factor(item_name)),
         response = as.factor(response),
         party = as.factor(party)) %>%
  print()
# we're missing partisans from one district in NY,
# ---- create binomial data -----------------------
# create Y data, weighted Y and N for each item response
# For every group (party-in-district) x item cell: y_j = weighted count of
# responses per response value, n_j = weighted total; both rounded to
# integers and clamped so 1 <= n_j and 0 <= y_j <= n_j (the Stan binomial
# likelihood needs valid counts).
# NOTE(review): the is.na(y_j) branch draws sample(c(0, 1), size = 1)
# without a seed, so any empty cells are filled irreproducibly.
grouped_responses <- longo %>%
  group_by(state, dist_num, party, group_num, item_num) %>%
  count(response, wt = weight) %>%
  # ungroup() %>%
  # complete(state, dist_num, party, group_num, item_num,
  #          fill = list(n = 0))
  rename(y_j = n) %>%
  group_by(group_num, item_num) %>%
  mutate(n_j = sum(y_j),
         n_j = case_when(n_j < 1 ~ 1,
                         is.na(n_j) ~ 1,
                         TRUE ~ round(n_j)),
         y_j = round(y_j),
         y_j = case_when(y_j > n_j ~ n_j,
                         is.na(y_j) ~ sample(c(0, 1), size = 1),
                         TRUE ~ y_j)) %>%
  ungroup() %>%
  filter(response == 1) %>%
  mutate(party = as.numeric(party)) %>%
  print()
grouped_responses %>% count(n_j) %>% arrange(n_j)
grouped_responses %>% count(y_j)
longo %>%
  group_by(state, dist_num) %>%
  summarize(parties = n_distinct(party)) %>%
  filter(parties == 1)
i_level %>%
  filter(state == "NY" & dist_num == 6) %>%
  count(party)
# spread out the Ns
# if there are empty cells, make them n = 1
n_spread <- grouped_responses %>%
  select(state, dist_num, party, group_num, item_num, n_j) %>%
  spread(key = item_num, value = n_j) %>%
  mutate_at(vars(-state, -dist_num, -group_num),
            function(x) case_when(is.na(x) ~ 1,
                                  TRUE ~ x)) %>%
  inner_join(., d_level) %>%
  print()
# spread out Ys,
# empty cells are n = 1, make y = 1 with 50% probability
y_spread <- grouped_responses %>%
  select(state, dist_num, party, group_num, item_num, y_j) %>%
  spread(key = item_num, value = y_j) %>%
  mutate_at(vars(-state, -dist_num, -group_num),
            function(x) case_when(is.na(x) ~ 1,
                                  TRUE ~ x)) %>%
  inner_join(., d_level) %>%
  print()
# ---- matrix forms -----------------------
y_matrix <- y_spread %>% select(`1`:`12`) %>% as.matrix() %>% print()
n_matrix <- n_spread %>% select(`1`:`12`) %>% as.matrix() %>% print()
# there should be no 0s or NAs, no Y > N
sum(n_matrix == 0, na.rm = TRUE)
sum(is.na(n_matrix))
sum(is.na(y_matrix))
sum(y_matrix > n_matrix)
# log income, standardize everything
# District-level predictors for the hierarchical regression: log median
# income plus the other covariates, each standardized via scale().
design_matrix <- y_spread %>%
  select(medianIncome, gini, prcntBA, prcntWhite, past_kernell) %>%
  mutate(median_income_log = log(medianIncome)) %>%
  select(-medianIncome) %>%
  mutate_all(scale) %>%
  print()
ggplot(design_matrix, aes(x = prcntBA, y = median_income_log)) +
  geom_point()
# Data list handed to Stan: item-response count matrices, dimensions,
# group/party indexing, and the district design matrix.
bayes_data <- list(
  Y = y_matrix,
  N = n_matrix,
  G = nrow(y_matrix),
  J = ncol(y_matrix),
  P = n_distinct(longo$party),
  S = n_distinct(y_spread$group_num),
  party = y_spread$party,
  geo = y_spread$group_num,
  X = as.matrix(design_matrix),
  k = ncol(design_matrix),
  prior_mean_party_1 = 0,
  prior_mean_party_2 = 0
)
bayes_data %>% lapply(length)
# ---- sampler hyperparameters -----------------------
n_iterations <- 2000
n_warmup <- 1000
# one chain per available core, capped at 10
n_chains <-
  if (parallel::detectCores() < 10)
    parallel::detectCores() else 10
n_thin <- 1
# ---- homoskedastic model -----------------------
# Parse and compile both Stan programs (homoskedastic and heteroskedastic
# variants) before sampling.
c_homo <-
  stanc(file = here("code", "dgirt", "stan", "cd", "cd-static-homo.stan"))
c_het <-
  stanc(file = here("code", "dgirt", "stan", "cd", "cd-static-het.stan"))
(compiled_homo <- stan_model(stanc_ret = c_homo, verbose = TRUE))
(compiled_het <- stan_model(stanc_ret = c_het, verbose = TRUE))
# homoskedastic
cces_homo <-
  sampling(object = compiled_homo,
           data = bayes_data,
           iter = n_iterations,
           warmup = n_warmup,
           init = 0,
           chains = n_chains,
           thin = n_thin,
           pars = c("theta", "cutpoint", "discrimination", "sigma_in_g",
                    "theta_hypermean", "scale_theta", "z_theta", "party_int",
                    "party_coefs"),
           # diagnostic_file =
           #   here(mcmc_dir, "diagnostics-static-noncenter.csv"),
           verbose = TRUE)
# cache the fit locally and push a copy to Box
saveRDS(cces_homo, here("data", "dgirt", "test-static", "mcmc", "static-homo-test.RDS"), compress = TRUE)
box_ul(dir_id = 63723791862,
       file = here("data", "dgirt", "test-static", "mcmc", "static-homo-test.RDS"))
# heteroskedastic
cces_het <-
  sampling(object = compiled_het,
           data = bayes_data,
           iter = n_iterations,
           warmup = n_warmup,
           init = 0,
           chains = n_chains,
           thin = n_thin,
           pars = c("theta", "cutpoint", "discrimination", "sigma_in_g",
                    "theta_hypermean", "scale_theta", "z_theta", "party_int",
                    "party_coefs"),
           # diagnostic_file =
           #   here(mcmc_dir, "diagnostics-static-noncenter.csv"),
           verbose = TRUE)
beepr::beep(2)
saveRDS(cces_het, here("data", "dgirt", "test-static", "mcmc", "static-het-test.RDS"), compress = TRUE)
box_ul(dir_id = 63723791862,
       file = here("data", "dgirt", "test-static", "mcmc", "static-het-test.RDS"))
# box_write(cces_het, "static-het-test.RDS", dir_id = 63723791862, compress = TRUE)
# reload the cached fits (lets the analysis below run without resampling)
cces_homo <- readRDS(here("data", "dgirt", "test-static", "mcmc", "static-homo-test.RDS"))
cces_het <- readRDS(here("data", "dgirt", "test-static", "mcmc", "static-het-test.RDS"))
# Posterior draws of the group-level parameters, one row per draw per group.
# NOTE(review): recover_types() and spread_draws() come from tidybayes,
# which is never library()'d above -- confirm it is attached elsewhere
# before running this section.
g_params <- cces_het %>%
  recover_types() %>%
  spread_draws(theta_hypermean[g], theta[g], sigma_in_g[g]) %>%
  print()
# traceplots of theta for 20 randomly chosen groups
g_params %>%
  group_by(g) %>%
  nest() %>%
  sample_n(20) %>%
  unnest() %>%
  ggplot(aes(x = .iteration, y = theta)) +
  geom_line(aes(color = as.factor(.chain)),
            show.legend = FALSE) +
  facet_wrap(~ g)
# Posterior summaries for each group ability (theta), matched back to the
# group-level data.  tidy() yields terms like "theta[12]"; the bracketed
# group index is parsed out and joined onto y_spread by group identifier.
# FIX: y_spread has no `group_code` column -- the group identifier built
# above (and passed to Stan as `geo`) is `group_num` -- so the original
# join key errored; join on group_num instead.
thetas <- cces_het %>%
  tidy(conf.int = TRUE) %>%
  mutate(index = parse_number(term),
         par = str_split(term, pattern = "\\[", simplify = TRUE)[,1]) %>%
  filter(par == "theta") %>%
  left_join(y_spread, by = c("index" = "group_num")) %>%
  print()
# what's up with the party swapping?
# theta estimates (with CIs) by group index, colored by party
ggplot(thetas, aes(x = index, y = estimate)) +
  geom_pointrange(aes(ymin = conf.low, ymax = conf.high,
                      color = as.factor(party)))
# caterpillar plot: groups ranked by estimated theta
ggplot(thetas, aes(x = rank(estimate), y = estimate)) +
  geom_pointrange(aes(ymin = conf.low, ymax = conf.high),
                  shape = 21, fill = "white") +
  coord_flip() +
  labs(y = TeX("$\\theta_g$"), x = "Rank")
# theta against district median income, colored by party
ggplot(thetas, aes(x = medianIncome, y = estimate)) +
  geom_pointrange(aes(ymin = conf.low, ymax = conf.high,
                      color = as.factor(party)))
# compare thetas: is the regression good
# FIX: the original piped into inner_join(. %>% ..., . %>% ...), which
# hands inner_join() magrittr functional sequences rather than data frames
# (a bare `. %>% f()` builds a function), and it left a dangling
# argument-less print().  Build the tidy parameter table once, then join
# the theta and theta_hypermean subsets explicitly on the group index.
theta_terms <- cces_het %>%
  tidy(conf.int = TRUE) %>%
  mutate(index = parse_number(term),
         par = str_split(term, pattern = "\\[", simplify = TRUE)[,1])
compare_thetas <-
  inner_join(filter(theta_terms, par == "theta"),
             filter(theta_terms, par == "theta_hypermean"),
             by = "index") %>%
  print()
|
32359d2639d623775b740d102afb70d8aed8c541 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/geomorph/examples/gpagen.Rd.R | ff7129cf0539b9a74ba120bb03157c9e6f0cf5fd | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 948 | r | gpagen.Rd.R | library(geomorph)
### Name: gpagen
### Title: Generalized Procrustes analysis of points, curves, and surfaces
### Aliases: gpagen
### Keywords: analysis
### ** Examples
# Example code extracted from the geomorph::gpagen help page: each example
# runs a generalized Procrustes alignment on a bundled landmark dataset.
# Example 1: fixed points only
data(plethodon)
Y.gpa <- gpagen(plethodon$land,PrinAxes=FALSE)
summary(Y.gpa)
plot(Y.gpa)
# Example 2: points and semilandmarks on curves
data(hummingbirds)
### Slider matrix (semilandmark definitions for the curves)
hummingbirds$curvepts
# Using Procrustes Distance for sliding
Y.gpa <- gpagen(hummingbirds$land,curves=hummingbirds$curvepts)
summary(Y.gpa)
plot(Y.gpa)
# Using bending energy for sliding
Y.gpa <- gpagen(hummingbirds$land,curves=hummingbirds$curvepts,ProcD=FALSE)
summary(Y.gpa)
plot(Y.gpa)
# Example 3: points, curves and surfaces
data(scallops)
# Using Procrustes Distance for sliding
Y.gpa <- gpagen(A=scallops$coorddata, curves=scallops$curvslide, surfaces=scallops$surfslide)
# NOTE can summarize as: summary(Y.gpa)
# NOTE can plot as: plot(Y.gpa)
|
0437a5d43535be85d6fcc077ce1b631a22b69061 | c12b1b8dcc20cad0a081efc2e95c4efd78f4233b | /orestar_scrape/checkFilesForDlLimit.R | 3e4dff22224bdb5fa590a35ad98b1895446644ce | [] | no_license | stochasticTreat/hackOregonBackEnd | e200430deae0cd48587c3b14681ba6d934799431 | b5bea18920b1c2fb457436bab7ca4ed4c06b01df | refs/heads/master | 2016-09-15T23:19:21.395819 | 2015-01-26T05:09:37 | 2015-01-26T05:09:37 | 22,177,737 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,209 | r | checkFilesForDlLimit.R | #!/usr/bin/Rscript
# check all files for dl limit
# Batch job: optionally convert Orestar XLS exports to TSV, then check the
# converted transaction files against the download limit and load them into
# the database.  Expects runScraper.R to provide importAllXLSFiles(),
# checkHandleDlLimit() and scrapedTransactionsToDatabase().
setwd("~/data_infrastructure/orestar_scrape/")
source('./runScraper.R')
# First trailing argument selects the input mode: "xls" converts first,
# anything else (or no argument) reads already-converted txt/tsv files.
args <- commandArgs(trailingOnly=TRUE)
fromXLS <- args[1]
indir <- "./"
destDir <- "./transConvertedToTsv/"
# With no trailing argument args[1] is NA (not NULL); both checks kept,
# defaulting to the txt/tsv path.
if(is.null(fromXLS)) fromXLS <- "txt"
if(is.na(fromXLS)) fromXLS <- "txt"
if(fromXLS == "xls"){
  cat("\nLoading xls files from the current working directory..\n")
  fromXLS <- TRUE
}else{
  cat("\nLoading tsv or txt files from the './transConvertedToTsv/' directory\n")
  fromXLS <- FALSE
}
if(fromXLS){
  # T -> TRUE throughout: T/F are reassignable bindings, not keywords
  converted <- importAllXLSFiles(remEscapes=TRUE,
      grepPattern="^[0-9]+(-)[0-9]+(-)[0-9]+(_)[0-9]+(-)[0-9]+(-)[0-9]+(.xls)$",
      remQuotes=TRUE,
      forceImport=TRUE,
      indir=indir,
      destDir=destDir)
}
fileDir <- destDir
converted <- dir(fileDir)
# FIX: escape the dots so only real .txt/.tsv extensions match (the
# unescaped "." previously matched any character before "txt"/"tsv").
converted <- converted[grepl(pattern="\\.txt$|\\.tsv$", x=converted)]
converted <- paste0(fileDir, converted)
checkHandleDlLimit(converted=converted)
tranTableName <- "raw_committee_transactions"
dbname <- "hackoregon"
transactionsFolder <- "./transConvertedToTsv/"
scrapedTransactionsToDatabase(tsvFolder=transactionsFolder,
              tableName=tranTableName,
              dbname=dbname)
|
39956425db986fe6e0ef8392388bc87203323ca9 | ac52858e5a2e6b23c2048d50279c3a612e55a2ed | /gapminder analysis.R | 402b5b78c4792c14fe2222d4b5bfa7dbad28d4c2 | [] | no_license | atindaana/swc_workshop | 1ad58f4f9a4e7edb06c0ba04c32e9f24b8e9cbc3 | 1038e0ecfac28477c1db94db74e477aafba962dc | refs/heads/master | 2021-01-11T19:21:01.499266 | 2017-01-18T20:48:21 | 2017-01-18T20:48:21 | 79,361,677 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,313 | r | gapminder analysis.R | download.file("https://raw.githubusercontent.com/swcarpentry/r-novice-gapminder/gh-pages/_episodes_rmd/data/gapminder-FiveYearData.csv", destfile = "gapminder-FiveYearData.csv")
# Software Carpentry gapminder exercise: subsetting and ggplot2 practice.
# The CSV is fetched by the preceding download.file() call.
library(ggplot2)  # plots below use ggplot()/ggsave(); load explicitly
gapminder <- read.csv("gapminder-FiveYearData.csv")
# quick-look helpers: head(), ncol(), nrow(), summary(), View()
# FIX: data-frame columns are accessed with `$`, not the S4 slot operator
# `@`; removed the stray "instal" text after the string literal (a parse
# error) and fixed the `gapfinder` typo.
is_africa <- gapminder$continent == "Africa"
is_2007 <- gapminder$year == 2007
africa_2007 <- gapminder[is_2007 & is_africa, c("country","lifeExp")]
#how to make plots with ggplot2
ggplot(data=gapminder, aes(x=gdpPercap, y=lifeExp))+geom_point()
ggplot(data=gapminder, aes(x=year, y=lifeExp))+geom_point()
#plotting with points on top of the lines
ggplot(data=gapminder, aes(x=year, y=lifeExp, by = country, color = continent))+geom_line()+geom_point()
#to get the size to be as big as the gdpPercap
ggplot(data=gapminder, aes(x=year, y=lifeExp, size =gdpPercap, by = country, color = continent))+geom_line()+geom_point()
#black lines, points sized by gdpPercap, one panel per continent
ggplot(data=gapminder, aes(x=year, y=lifeExp, size =gdpPercap, by = country, color = continent))+geom_line(color="black")+geom_point(aes(size=gdpPercap))+
  facet_grid(.~continent)
#note from carpentry workshop, modified but the code remains the same
#facet_grid adds one panel per level of continent
ggsave(filename = "year_vs_lifeexp_percont.png", width = 5, height = 4, units = "in")
|
2ec23e35ea8764e6160864505c39211b1a63d58e | 2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0 | /fuzzedpackages/clere/man/fitPacs.Rd | 92858b503201be2fea3f2c7ce9475a142e24a1bb | [] | no_license | akhikolla/testpackages | 62ccaeed866e2194652b65e7360987b3b20df7e7 | 01259c3543febc89955ea5b79f3a08d3afe57e95 | refs/heads/master | 2023-02-18T03:50:28.288006 | 2021-01-18T13:23:32 | 2021-01-18T13:23:32 | 329,981,898 | 7 | 1 | null | null | null | null | UTF-8 | R | false | true | 2,803 | rd | fitPacs.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fitPacs.R
\name{fitPacs}
\alias{fitPacs}
\title{fitPacs function}
\usage{
fitPacs(Y, X, lambda = 0.5, betaInput, epsPACS = 1e-05, nItMax = 1000)
}
\arguments{
\item{Y}{[numeric]: The vector of observed responses - size \code{n}.}
\item{X}{[matrix]: The matrix of predictors - size \code{n} rows and
\code{p} columns.}
\item{lambda}{[numeric]: A non-negative penalty term that simultaneously
controls clustering and sparsity.}
\item{betaInput}{[numeric]: A vector of initial guess of the model
parameters. The authors suggest to use coefficients obtained after fitting a
ridge regression with the shrinkage parameter selected using AIC criterion.}
\item{epsPACS}{[numeric]: A tolerance threshold that control the convergence
of the algorithm. The default value fixed in Bondell's initial script is
1e-5.}
\item{nItMax}{[numeric]: Maximum number of iterations in the algorithm.}
}
\value{
Object of class \code{\linkS4class{Pacs}} containing all the input
parameters plus parameter \code{a0} the intercept and parameter \code{K} the
dimensionality of the model.
}
\description{
This function implements the PACS (Pairwise Absolute Clustering and
Sparsity) methodology of Sharma DB et al. (2013). This methodology proposes
to estimate the regression coefficients by solving a penalized least squares
problem. It imposes a constraint on Beta (the vector of regression
coefficients) that is a weighted combination of the L1 norm and the pairwise
L-infinity norm. Upper-bounding the pairwise L-infinity norm enforces the
covariates to have close coefficients. When the constraint is strong
enough, closeness translates into equality achieving thus a grouping
property. For PACS, no software was available. Only an R script was
released on Bondell's webpage
(http://www4.stat.ncsu.edu/~bondell/Software/PACS/PACS.R.r). Since this R
script was running very slowly, we decided to reimplement it in C++ and
interfaced it with the present R package clere. This corresponds to the
option \code{type=1} in Bondell's script.
}
\examples{
n <- 100
p <- 20
Beta <- rep(c(0,2),10)
eps <- rnorm(n,sd=3)
x <- matrix(rnorm(n*p), nrow = n, ncol = p)
y <- as.numeric(10+x\%*\%Beta+eps)
bInit <- lm(y~scale(x))$coefficients[-1]
mod <- fitPacs(Y=y,X=x,lambda=1.25,betaInput=bInit,epsPACS=1e-5,nItMax=1000)
}
\seealso{
Overview : \code{\link{clere-package}} \cr
Classes : \code{\linkS4class{Clere}}, \code{\linkS4class{Pacs}} \cr
Methods : \code{\link{plot}}, \code{\link{clusters}}, \code{\link{predict}}, \code{\link{summary}} \cr
Functions : \code{\link{fitClere}}, \code{\link{fitPacs}}
Datasets : \code{\link{numExpRealData}}, \code{\link{numExpSimData}}, \code{\link{algoComp}}
}
|
daa1bc29897f15a82c7d00ebd682770796d080f6 | 5d3c70de73ace63e203bfb3368d9378d726ee401 | /R/eclass.R | 15f46edd35f42476d66918a1f7b4d91d5d7f96d5 | [
"MIT"
] | permissive | yongcha/entroPD | fa528c3b90edd7495f9b5084a2f5301d5f1ce52c | 8c8fb69ac7b6e13afaa360a1b6367c7d5119649d | refs/heads/master | 2021-01-15T13:44:26.088328 | 2017-08-18T04:49:21 | 2017-08-18T04:49:21 | 25,089,007 | 1 | 2 | null | null | null | null | UTF-8 | R | false | false | 1,281 | r | eclass.R | #' Equivalence Class
#'
#' @param dat \code{qigrp} object
#' @param QI Quasi-Identifiers
#' @param TA Target Attribute
#' @param ...
#' @return Equivalence Class and Size
eclass <- function(dat, QI, TA, ...){
  # Description : compute equivalence classes and their sizes for each
  # QI-grouped dataset in `dat`.
  #
  # Arguments
  #   dat : list of QI-grouped data.frames (output of the QI grouping step)
  #   QI  : quasi-identifier column name(s)
  #   TA  : target attribute column name(s)
  #   ... : forwarded to the dcast() row formula
  # Load required packages
  require(reshape2)
  require(pbapply)
  # seq_along() instead of 1:length(dat): safe when dat is empty
  equi.res <- pblapply(seq_along(dat), function(x){
    # Order rows by the quasi-identifiers so that rows of the same
    # equivalence class become adjacent.
    # NOTE(review): order() on a multi-column data.frame does not sort
    # lexicographically -- confirm QI is a single column, or use
    # do.call(order, dat[[x]][, QI, drop = FALSE]) for multiple QIs.
    data.QI.sort <- dat[[x]][order(dat[[x]][, QI]), ]
    rownames(data.QI.sort) <- NULL
    # Count rows per unique QI combination = equivalence class size
    equi.class <- melt(data.QI.sort,
                       id.vars = QI,
                       measure.vars = TA)
    equi.class <- dcast(equi.class, ... ~ variable, length)
    colnames(equi.class)[ncol(equi.class)] <- 'equi.size'
    attr(equi.class, 'NequiClass') <- nrow(equi.class)
    attr(equi.class, 'equi.size') <- equi.class$equi.size
    equi.class
  })
  # Tag the result so S3 methods for 'eclass' dispatch on it
  class(equi.res) <- c('eclass', 'list')
  # Number of grouped datasets processed (one result table per dataset)
  attr(equi.res, 'n') <- length(equi.res)
  return(equi.res)
}
c822e79b8e5059373f6f529a638a28fc6c5c8ab4 | 5217d14779a01179bfd440b689a4aea067d9e043 | /ariticles/draw_logistic.R | 37356b41c612ecb79d1ba7d7a515fb91bd1b0bca | [
"MIT"
] | permissive | CFWLoader/supreme-bassoon | f0a960a29cf052b76d5b898b4b4151776efc7536 | f20e45118a141084a0fb0d640e937e0e739cc3f6 | refs/heads/master | 2020-03-07T05:49:04.731876 | 2019-04-10T03:43:48 | 2019-04-10T03:43:48 | 127,306,468 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 431 | r | draw_logistic.R | library(ggplot2)
# Plot the logistic distribution's density (PDF) and cumulative
# distribution (CDF) curves with ggplot2 and save them as PNGs next to
# this script.
# NOTE(review): sys.frame(1)$ofile is only set when this file is
# source()'d, not when run via Rscript -- confirm the intended invocation.
script.dir <- dirname(sys.frame(1)$ofile)
setwd(script.dir)
# Evaluation grid and logistic density/CDF with location parameter 1.1
xseq <- seq(-20, 20, 0.01)
px = dlogis(xseq, location = 1.1)
cdfx = plogis(xseq, location = 1.1)
# print(px)
# Density curve
plt.df <- data.frame(x = xseq, y = px)
ggplot(plt.df, aes(x = x, y = y)) + geom_line()
ggsave("./logis-pdf.png")
# CDF curve
cdf.plt.df <- data.frame(x = xseq, y = cdfx)
ggplot(cdf.plt.df, aes(x = x, y = y)) + geom_line()
ggsave("./logis-cdf.png")
b0ca5cb1fc0631f47331e0b8117701c2fdbbaaeb | 9da5c27a28f8e8be4ed3ec8f7b8f641a438ea7c5 | /04_pivot_longer.R | 2cbe324cd71c2289ee6f218db3dbbd1455940c16 | [] | no_license | delany-ramirez/Curso_tidyverse | 9f3309d07d5ac34bdfc723e7465489f0e72af663 | 3fec82e91da6da0ed32a6fabd338c3be754a24b3 | refs/heads/master | 2023-06-08T08:39:20.359060 | 2021-06-20T00:39:04 | 2021-06-20T00:39:04 | 259,063,503 | 0 | 3 | null | null | null | null | UTF-8 | R | false | false | 1,919 | r | 04_pivot_longer.R | ## DataMakers
## Using pivot_longer (and its predecessor gather/spread)
## References: https://tidyr.tidyverse.org/articles/pivot.html
## Load libraries
library(tidyverse)
## Load data
animales <- read_csv('data/04_pivot_longer.csv')
animales
## Using pivot_longer: stack the Peso/Talla columns into name/value pairs
animales %>%
  pivot_longer(cols = c(Peso, Talla),
               names_to = "Medida",
               values_to = "Valor")
## Using gather (superseded predecessor of pivot_longer)
animales %>%
  gather(key = "Medida", value = "Valor", Peso:Talla)
animales %>%
  gather(c(Peso, Talla),
         key = "Variable",
         value = "Valor") %>%
  as_tibble()
## Same pivot, selecting every column except Especie
animales %>%
  pivot_longer(cols = -Especie,
               names_to = "Medida",
               values_to = "Valor")
## The values_drop_na argument: drop rows whose value would be NA
animales %>%
  pivot_longer(cols = c(Peso, Talla),
               names_to = "Medida",
               values_to = "Valor",
               values_drop_na = TRUE)
## Stripping column-name prefixes (wk1, wk2, ... -> integer week)
billboard
billboard %>%
  pivot_longer(
    cols = starts_with("wk"),
    names_to = "week",
    names_prefix = "wk",
    names_transform = list(week = as.integer),
    values_to = "rank",
    values_drop_na = TRUE,
  )
## Multiple variables encoded in the column names, split via names_pattern
who
who %>% pivot_longer(
  cols = new_sp_m014:newrel_f65,
  names_to = c("diagnosis", "gender", "age"),
  names_pattern = "new_?(.*)_(.)(.*)",
  values_to = "count",
  values_drop_na = TRUE
)
## Multiple observations per row (".value" keeps part of the name as a column)
### Example 1
animales2 <- read_csv('data/04_pivot_longer2.csv')
animales2 %>%
  pivot_longer(!Fecha,
               names_to = c(".value", "animal"),
               names_sep = "_",
               values_drop_na = TRUE)
### Example 2
anscombe
anscombe %>%
  pivot_longer(everything(),
               names_to = c(".value", "set"),
               names_pattern = "(.)(.)"
  )
|
0c8d86852774159e87ae1bf0e2043852ab6ff4d2 | 63583ae3e54e8b23ad0e160eade38af25db91964 | /plotting.r | dd474318141822b530d6f8876a9bc6847704f2d5 | [] | no_license | hamburger1984/fahrrad | f7a792061a901e926c3345aacb3ef64ef3ecad79 | 5d76117bf5b3b487a4d583121e806cab0be35942 | refs/heads/master | 2021-01-18T23:22:37.534350 | 2017-02-04T15:34:22 | 2017-02-04T15:34:22 | 10,343,294 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,869 | r | plotting.r | if(!require(lubridate)){
  install.packages("lubridate")
}
if(!require(ggplot2)){
  install.packages("ggplot2")
}
# Read cycling statistics from stats.csv and render distance-, time- and
# speed-related charts per bike and year into numbered PNG files.
# --- reading, converting
library(lubridate)
df <- read.csv("stats.csv", sep=",", dec=".", comment.char="#")
# Parse dates (dd.mm.yyyy) and derive day-of-week / year grouping columns
df$date <- as.Date(df$date, "%d.%m.%Y")
df$day <- wday(df$date, label=TRUE)
df$year <- format(df$date, "%Y")
df$year_month <- format(df$date, "%Y-%m")
# Convert to numeric via character; durations become hours for plotting
df$km.day <- as.numeric(as.character(df$km.day))
df$time.day <- hms(df$time.day)
df$time.day.hours <- period_to_seconds(df$time.day)/3600
df$V_mean <- as.numeric(as.character(df$V_mean))
df$V_max <- as.numeric(as.character(df$V_max))
df$total.time <- hms(df$total.time)
df$total.time.hours <- period_to_seconds(df$total.time)/3600
# ..debug data..
#lapply(df, class)
#str(df)
#which(is.na(df$V_max))
#df[rowSums(is.na(df)) > 0,]
# --- plotting
library(ggplot2)
# %d in the filename numbers the pages produced by the successive plots.
# NOTE(review): there is no dev.off() at the end of this script -- the
# last plot may not be flushed to disk when run non-interactively.
png("stats_%d.png", width=2200, height=1000, res=120)
# ..distance related
ggplot(df, aes(x=date, y=total.km, group=bike, color=bike, fill=bike)) +
  geom_point() +
  geom_line(alpha=.35) +
  ggtitle("Total distance") +
  labs(x="Date", y="Total distance\n(km)") +
  facet_wrap(bike~year, scales="free")
ggplot(df, aes(x=date, y=km.day, group=bike, color=bike, fill=bike)) +
  geom_bar(stat="identity") +
  #geom_area(alpha=.35) +
  ggtitle("Daily distance") +
  labs(x="Date", y="Distance\n(km)") +
  facet_wrap(bike~year, scales="free_x")
ggplot(df, aes(day, km.day, fill=bike, color=bike)) +
  geom_violin(adjust=.75, alpha=.35) +
  ggtitle("Daily distance by day of week") +
  labs(x="Day of week", y="Distance\n(km)") +
  facet_wrap(bike~year)
# ..time related
ggplot(df, aes(x=date, y=total.time.hours, group=bike, color=bike, fill=bike)) +
  geom_point() +
  geom_line(alpha=.35) +
  ggtitle("Total time") +
  labs(x="Date", y="Total time\n(hours)") +
  facet_wrap(bike~year, scales="free")
ggplot(df, aes(x=date, y=time.day.hours, group=bike, color=bike, fill=bike)) +
  geom_bar(stat="identity") +
  #geom_area(alpha=.35) +
  ggtitle("Daily time") +
  labs(x="Date", y="Time\n(hours)") +
  facet_wrap(bike~year, scales="free_x")
ggplot(df, aes(day, time.day.hours, fill=bike, color=bike)) +
  geom_violin(adjust=.75, alpha=.35) +
  ggtitle("Daily time by day of week") +
  labs(x="Day of week", y="Time\n(hours)") +
  facet_wrap(bike~year)
# ..speed related
ggplot(df, aes(day, V_max, fill=bike, color=bike)) +
  geom_violin(adjust=.75, alpha=.35) +
  ggtitle("Max. speed by day of week") +
  labs(x="Day of week", y="Speed\n(km/h)") +
  facet_wrap(bike~year)
ggplot(df, aes(day, V_mean, fill=bike, color=bike)) +
  geom_violin(adjust=.75, alpha=.35) +
  ggtitle("Mean speed by day of week") +
  labs(x="Day of week", y="Speed\n(km/h)") +
  facet_wrap(bike~year)
|
fec1ba0cc0b813727a9ed0bf1dd3d02ac7e60951 | 9ba53ca4e99bafc6957cd1c2e982abec9ac12283 | /1 linear regression.R | 5b88ef0af30db93537694f3b582a0d64d8abba27 | [] | no_license | HeathRossie/programDemos | f16cf564e261c68cf63357d364426fbf32c48075 | 84c08faa3dacdb602b869ca1a9f6119c6d62dbc0 | refs/heads/main | 2023-06-19T15:42:24.481903 | 2021-07-16T06:15:18 | 2021-07-16T06:15:18 | 386,529,875 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,269 | r | 1 linear regression.R | #----- stan 練習 1 -----#
#----- Linear regression -----#
# (Stan practice 1.) Simulate noisy linear data, fit a Bayesian linear
# regression with rstan, then visualize posterior densities and the
# posterior predictive regression lines.
library(rstan)
library(ggplot2)
# Simulate y = (2/3)x + 220 + N(0, 10) noise
x <- runif(100, 1, 100)
y <- x * 2/3 + 220 + rnorm(length(x), 0, 10)
ggplot() + geom_point(aes(x=x,y=y))
# Stan model: yhat = beta1 + beta2 * x, Gaussian likelihood
stan_code <- '
data{
int<lower=1> N;
real x[N];
real y[N];
}
parameters{
real beta1;
real beta2;
real<lower=0> sigma;
}
transformed parameters{
real yhat[N];
for(i in 1:N)
yhat[i] <- beta1 + beta2 * x[i];
}
model{
for(i in 1:N)
y[i] ~ normal(yhat[i], sigma);
}
'
dat_list <- list(x = x, y = y, N = length(x))
fit <- stan(model_code = stan_code, data = dat_list,
            iter = 110, warmup = 10, chain = 1)
d <- extract(fit)
traceplot(fit)
# Posterior densities of intercept (beta1) and slope (beta2), side by side
library(gridExtra)
p1 <- ggplot() + geom_density(aes(x=d$beta1))
p2 <- ggplot() + geom_density(aes(x=d$beta2))
grid.arrange(p1, p2, ncol = 2)
# Posterior regression lines evaluated at min(x) and max(x) per draw.
# NOTE(review): iter=110 with warmup=10 yields only 100 posterior draws,
# but this loop indexes draws 1:10000 -- d$beta1[i] is NA for i > 100;
# confirm the intended number of iterations.
pred <- NULL
for(i in 1:10000){
  for(j in c(min(x),max(x))){
    pred <- c(pred, d$beta1[i] + d$beta2[i] * j)
  }
}
predc <- data.frame(pred = pred, iter = rep(1:10000, each = 2),
                    x = rep(c(min(x),max(x)), 10000))
# Spaghetti of posterior lines (pink), data points, and posterior-mean line (red)
ggplot() +
  geom_line(data = predc, aes(x=x, y=pred, group=iter), colour = "pink", lwd = 1, alpha=.004) +
  geom_point(aes(x=x,y=y)) +
  geom_abline(aes(intercept=mean(d$beta1), slope=mean(d$beta2)), colour = "red", lwd = 1.3)
|
95791807329866c47b518de2f0d941237b68dae8 | f479db1915de128f5e162ce4515e2eb524d05bac | /R/D3Scatter.R | 3a140fe6d2c80124bdc8ddc161cf88bf515fe787 | [
"MIT"
] | permissive | gusef/d3Toolbox | e7d64635bee5ea25f41e56eec17a53d5ae2fc66e | a752880a8acca6a12d396097effa8b7d31f7e3ee | refs/heads/master | 2021-08-28T18:05:27.217405 | 2021-08-12T23:41:55 | 2021-08-12T23:41:55 | 100,711,108 | 7 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,723 | r | D3Scatter.R | #' Scatterplot based on d3
#'
#' Build a d3-based interactive scatterplot htmlwidget from the first two
#' columns of \code{data}. Colors may be given explicitly or derived from a
#' numeric vector via \code{col_scale}.
#'
#' @param data data.frame; columns 1 and 2 are used as x and y, rownames
#'   become point names.
#' @param col a single color, one color per row, or a numeric vector to be
#'   binned onto \code{col_scale}.
#' @param dotsize point radius passed to the JS renderer.
#' @param xlab,ylab axis labels; default to the first two column names.
#' @param title,subtitle optional plot titles.
#' @param callback name of the Shiny callback fired on selection.
#' @param tooltip,legend,legend_title,legend_pos,legend_right_offset legend
#'   and tooltip options; \code{legend_pos} is one of "topleft", "top",
#'   "topright", "right", "bottomright", "bottom", "bottomleft", "left".
#' @param width,height widget size.
#' @param xrange,yrange optional length-2 axis limits.
#' @param margins list with top/right/bottom/left pixel margins.
#' @param col_scale palette used when \code{col} is numeric.
#' @param elementId HTML element id.
#' @param collection if TRUE, return the payload list instead of a widget
#'   (used when composing several plots into one widget).
#'
#' @import htmlwidgets
#'
#' @export
d3Scatter <- function(data, col='black', dotsize =3.5, xlab='', ylab='',
                      title=NULL, subtitle=NULL, callback='ScatterSelection',
                      tooltip=NULL, legend=NULL,legend_title=NULL,legend_pos='topright',
                      legend_right_offset = 100, width = NULL, height = NULL,
                      xrange=NULL, yrange=NULL, margins = NULL,
                      col_scale = RColorBrewer::brewer.pal(11,"RdBu")[11:1],
                      elementId = NULL, collection = FALSE) {
    if (is.null(margins)){
        margins <- list(top = 40,
                        right = 20,
                        bottom = 50,
                        left = 60)
    }
    # if a numeric value was provided use the color scale to transform:
    # bin the values into length(col_scale) equal-width intervals
    if (is.numeric(col)){
        if (length(col) != nrow(data)){
            stop('If "col" is numeric there has to be a value for each data row')
        }
        breaks <- seq(min(col, na.rm = T),
                      max(col, na.rm = T),
                      length = length(col_scale) + 1 )
        grps <- cut(col, breaks = breaks, include.lowest = TRUE)
        col <- col_scale[grps]
    }else{
        # Normalize color names to hex and recycle a single color per row
        col <- gplots::col2hex(col)
        if(length(col)==1){
            col <- rep(col,nrow(data))
        }
    }
    data$col <- col
    if (!is.null(xrange) && length(xrange) != 2){
        stop("If xrange is specified it needs to have a length of 2")
    }
    if (!is.null(yrange) && length(yrange) != 2){
        stop("If yrange is specified it needs to have a length of 2")
    }
    # Add names as separate column instead of rownames (JSON has no rownames)
    data$name <- rownames(data)
    rownames(data) <- NULL
    # Default the axis labels to the original column names
    if (xlab==''){
        xlab <- names(data)[1]
    }
    if (ylab==''){
        ylab <- names(data)[2]
    }
    names(data)[1:2] <- c('x','y')
    # Translate the legend position keyword into the (row, column) code
    # pair consumed by the JS side -- presumably 0/1/2 = top/middle/bottom
    # and left/center/right; confirm against d3Scatter.js.
    if (!is.null(legend)){
        if (legend_pos == 'topleft'){
            legend_pos <- c(0,0)
        }else if (legend_pos == 'top'){
            legend_pos <- c(0,1)
        }else if (legend_pos == 'topright'){
            legend_pos <- c(0,2)
        }else if (legend_pos == 'right'){
            legend_pos <- c(1,2)
        }else if (legend_pos == 'bottomright'){
            legend_pos <- c(2,2)
        }else if (legend_pos == 'bottom'){
            legend_pos <- c(2,1)
        }else if (legend_pos == 'bottomleft'){
            legend_pos <- c(2,0)
        }else if (legend_pos == 'left'){
            legend_pos <- c(1,0)
        }else{
            stop('legend_pos needs to be "top","right","left","bottom","bottomright",..')
        }
    }
    # forward options using x (payload serialized to the JS binding)
    x = list(
        type = "d3Scatter",
        data = data,
        dotsize = dotsize,
        xlab = xlab,
        ylab = ylab,
        xrange = xrange,
        yrange = yrange,
        title = title,
        subtitle = subtitle,
        tooltip=tooltip,
        legend=legend,
        legend_title=legend_title,
        legend_pos=legend_pos,
        legend_right_offset=legend_right_offset,
        margins=margins,
        callback = callback
    )
    if (collection){
        return(x)
    }else{
        # create widget
        htmlwidgets::createWidget(
            name = 'd3Scatter',
            x,
            width = width,
            height = height,
            package = 'd3Toolbox',
            elementId = elementId,
            sizingPolicy = htmlwidgets::sizingPolicy(browser.fill = TRUE)
        )
    }
}
#' Shiny bindings for d3Scatter
#'
#' Output and render functions for using D3Scatter within Shiny
#' applications and interactive Rmd documents.
#'
#' @param outputId output variable to read from
#' @param width,height Must be a valid CSS unit (like \code{'100\%'},
#'   \code{'400px'}, \code{'auto'}) or a number, which will be coerced to a
#'   string and have \code{'px'} appended.
#' @param expr An expression that generates a D3Scatter
#' @param env The environment in which to evaluate \code{expr}.
#' @param quoted Is \code{expr} a quoted expression (with \code{quote()})? This
#'   is useful if you want to save an expression in a variable.
#'
#' @name d3Scatter-shiny
#'
#' @export
d3ScatterOutput <- function(outputId, width = '100%', height = '400px'){
  # Thin wrapper: placeholder div that the d3Scatter widget renders into
  htmlwidgets::shinyWidgetOutput(outputId, 'd3Scatter', width, height, package = 'd3Toolbox')
}

#' @rdname d3Scatter-shiny
#' @export
renderd3Scatter <- function(expr, env = parent.frame(), quoted = FALSE) {
  # Capture expr unevaluated so htmlwidgets can re-evaluate it reactively
  if (!quoted) { expr <- substitute(expr) } # force quoted
  htmlwidgets::shinyRenderWidget(expr, d3ScatterOutput, env, quoted = TRUE)
}
|
2018b1d5b37d81f92b4d75f88796d173b78d8bbe | 3945388ee0fef9e4f99b2c0b4cd49e4fc58082b5 | /StreamNetworkTools/R/net_calc.r | 5f1b2ce856c09a032043363173cb32f4d8c95eac | [
"MIT"
] | permissive | dkopp3/StreamNetworkTools | 7ea52d4c917bbcf02314a603a3f9b6d5c5b926b5 | 7c693f3edc975493be946d400642bd99c1d9d809 | refs/heads/master | 2023-06-23T09:58:49.678987 | 2023-06-09T18:44:18 | 2023-06-09T18:44:18 | 140,187,794 | 3 | 1 | MIT | 2021-01-22T18:59:07 | 2018-07-08T17:19:10 | R | UTF-8 | R | false | false | 5,725 | r | net_calc.r | #' Network Topology Metrics
#'
#' Calculates stream network topology metrics
#'
#' Requires /NHDPlusAttributes directory (see \code{\link{net_nhdplus}})
#'
#' Length and area measures are scaled by M values
#'
#' @param netdelin output from \code{net_delin}
#' @param vpu NHDPlusV2 Vector Processing Unit
#' @param nhdplus_path Directory for NHDPlusV2 files (\code{\link{net_nhdplus}})
#'
#' @return \code{data.frame}: \code{$group.comid} stream network root COMID;
#' \code{$vpu} NHDPlusV2 vector processing unit;\code{M} Position of sampling
#' point on COMID, as proportion of COMID from upstream end; \code{WS.ord}
#' strahler order for root node;\code{$head.h2o} number of headwater reaches;
#' \code{$trib.jun} number of tributary junctions; \code{reach.cnt} number of
#' reaches in network; \code{diver.cnt} count of divergent flow paths;
#' \code{$AREASQKM} drainage area (km^2); \code{$LENGTHKM} total lenght of
#' network flowlines (km); \code{drain.den} drainage density (\code{LENGTHKM}
#' / \code{AREASQKM})
#'
#' @examples
#' # identify NHDPlusV2 COMID
#' a <- net_sample(nhdplus_path = getwd(), vpu = "01", ws_order = 6, n = 5)
#' # delineate stream network
#' b <- net_delin(group_comid = as.character(a[,"COMID"]), nhdplus_path = getwd(), vpu = "01")
#' calculate topology summary
#' c <- net_calc(netdelin = b, vpu = "01", nhdplus_path = getwd())
#' @export
net_calc <- function(netdelin, vpu, nhdplus_path){
  # Locate the NHDPlusAttributes directory for this VPU and the three
  # attribute tables (flowline VAA, smoothed elevations, flow topology)
  directory <- grep(paste(vpu, "/NHDPlusAttributes", sep = ""),
                    list.dirs(nhdplus_path, full.names = T),
                    value = T)
  Vaa <- grep("PlusFlowlineVAA.dbf",
              list.files(directory[1], full.names = T),
              value = T)
  slope <- grep("elevslope.dbf",
                list.files(directory, full.names = T),
                value = T)
  flow.files <- grep("PlusFlow.dbf",
                     list.files(directory[1], full.names = T),
                     value = T)
  # NOTE(review): `flow` is read but never used below -- confirm it can be
  # dropped or was meant to feed a topology check.
  flow <- foreign::read.dbf(flow.files)
  vaa <- foreign::read.dbf(Vaa)
  slope <- foreign::read.dbf(slope)
  names(slope) <- toupper(names(slope))
  names(vaa) <- toupper(names(vaa))
  # Join VAA and slope attributes onto every network reach (by COMID)
  full.net <- unique(netdelin$Network)
  reach.data <- Reduce(function(x, y)
    merge(x, y, by.x = "net.comid", by.y = "COMID", all.x = T),
    list(full.net, vaa, slope))
  # calculate network order: strahler order of the root reach
  # (root = row where the network's group.comid equals its own net.comid)
  WS.ord <- reach.data[as.character(reach.data[,"group.comid"]) ==
                         as.character(reach.data[,"net.comid"]),
                       c("net.id","M", "STREAMORDE")]
  names(WS.ord) <- c("net.id", "M", "WS.ord")
  # catchment area and flowline length:
  # sum per network, then scale the root reach's own contribution by M
  # (M = proportion of the root COMID upstream of the sampling point)
  cat.area <- aggregate(reach.data[, c("AREASQKM", "LENGTHKM")],
                        by = list(net.id = reach.data[, "net.id"],
                                  group.comid = reach.data[,"group.comid"]),
                        sum)
  incr <- reach.data[as.character(reach.data[,"group.comid"]) ==
                       as.character(reach.data[,"net.comid"]),
                     c("net.id", "AREASQKM","LENGTHKM", "M")]
  incr <- merge(incr, cat.area, by = "net.id")
  # total minus root reach, plus M-scaled root reach
  area <- (incr[,"AREASQKM.y"] - incr[,"AREASQKM.x"]) + incr[,"AREASQKM.x"]*incr[,"M"]
  len <- (incr[,"LENGTHKM.y"] - incr[,"LENGTHKM.x"]) + incr[,"LENGTHKM.x"]*incr[,"M"]
  # scaled length and catchment values; drainage density = length / area
  cat.area <- data.frame(net.id = incr[,"net.id"],
                         AreaSQKM = area, LengthKM = len)
  drain.den <- cat.area[ ,"LengthKM"] / cat.area[ ,"AreaSQKM"]
  cat.area <- data.frame(cat.area, drain.den)
  # diversion feature count:
  # counts minor flow paths of divergences (STREAMORDE != STREAMCALC with
  # DIVERGENCE == 2 marks the minor path per NHDPlus conventions)
  if (any(reach.data[,c("STREAMORDE")] !=
          reach.data[,"STREAMCALC"] &
          reach.data[,"DIVERGENCE"]==2)){
    div.rm <- reach.data[reach.data[,c("STREAMORDE")] !=
                           reach.data[,"STREAMCALC"] &
                           reach.data[, "DIVERGENCE"] == 2,
                         c("net.id", "net.comid", "group.comid")]
    diver.cnt <- aggregate(div.rm[, "group.comid"],
                           by = list(div.rm[,"net.id"]),
                           length)
    names(diver.cnt) <- c("net.id", "diver.cnt")
  } else {
    # NOTE(review): 99999/999999 are sentinel placeholders when no
    # divergences exist; the later all.x merge then leaves diver.cnt NA.
    diver.cnt <- data.frame(net.id = 99999, diver.cnt = 999999)
  }
  # headwaters & tributary junctions:
  # headwater count = reaches flagged STARTFLAG == 1; in a dendritic
  # network, tributary junctions = headwaters - 1
  head.h2o <- aggregate(reach.data[
    reach.data[,"STARTFLAG"] == 1, "STREAMORDE"],
    by = list(reach.data[reach.data[,"STARTFLAG"] == 1, "net.id"]),
    length)
  names(head.h2o) <- c("net.id", "head.h2o")
  trib.jun <- as.numeric(as.character(head.h2o[, "head.h2o"])) - 1
  head.h2o <- data.frame(head.h2o, trib.jun)
  # edge count: reaches = headwaters + tributary junctions
  edges <- head.h2o[,"head.h2o"] + head.h2o[,"trib.jun"]
  reach.cnt <- data.frame(net.id = head.h2o[,"net.id"], reach.cnt = edges)
  # relief - at outlet; disabled, to be moved to a basin-level metric
  #maxelev <- aggregate(reach.data[,"MAXELEVSMO"],
  #                    by = list(reach.data[,"group.comid"]),
  #                    max)
  #minelev <- aggregate(reach.data[, "MINELEVSMO"],
  #                    by = list(reach.data[, "group.comid"]),
  #                    min)
  #relief <- maxelev[,"x"]-minelev[,"x"]
  #relief <- data.frame(COMID = maxelev[,"Group.1"],
  #                    maxelev = maxelev[,"x"],
  #                    minelev = minelev[,"x"],
  #                    releif = relief)
  # aggregate table: one row per network, all metrics joined on net.id
  data.out <- unique(full.net[, c("net.id","group.comid", "vpu")])
  names(data.out)[2] <- "COMID"
  data.out <- Reduce(function(x, y)
    merge(x, y, by = "net.id", all.x = T),
    list(data.out, WS.ord,head.h2o, reach.cnt, diver.cnt, cat.area))#, relief))
  names(data.out)[2] <- "group.comid"
  return(data.out)
}
|
d8c9009e40cf466d317427cf756d485a4f207ba7 | 3032653f72fc73183309bbc17b687600a4115873 | /code/code.main/scripts-easydiff.r | 853267ccf3a69ec6eb561b70e2da73c384a9f464 | [] | no_license | NYU-BFX/hic-bench | 59b2f21b18335ff3fc5c55cf2ee1004775e50e48 | 1d9efa8bb0265e3eef3c328434133c49584ecfcc | refs/heads/master | 2023-02-15T12:26:52.246291 | 2023-01-30T01:44:21 | 2023-01-30T01:44:21 | 53,605,035 | 45 | 21 | null | 2020-04-11T22:55:09 | 2016-03-10T17:49:42 | HTML | UTF-8 | R | false | false | 14,333 | r | scripts-easydiff.r | #!/usr/bin/Rscript
# global variables
VERSION = '1.0'
normalize_matrix <- function(D)
{
  # Quantile-normalize the columns of matrix D (preprocessCore's
  # normalize.quantiles), restoring the row/column names that the
  # normalization call drops.
  result <- normalize.quantiles(D)
  dimnames(result) <- dimnames(D)
  result
}
calc_fdr_cutoff <- function(pos, neg, fdr)
{
  # Given observed scores `pos` and null (expected-by-chance) scores `neg`,
  # find the smallest score threshold t such that the estimated FDR
  # #{neg >= t} / #{pos >= t} drops to `fdr` or below.
  # Returns Inf when fdr <= 0 (nothing can pass) or `pos` is empty
  # (previously returned numeric(0) for empty input).
  if (fdr <= 0 || length(pos) == 0) return(Inf)
  pos <- sort(pos)
  neg <- sort(neg)
  kpos <- 1
  kneg <- 1
  # March both sorted lists from the bottom; at each step the candidate
  # cutoff is pos[kpos], and the FDR estimate is the fraction of remaining
  # (>= cutoff) neg scores over remaining pos scores.
  # (&& instead of &: scalar short-circuit condition.)
  while (kpos <= length(pos) && kneg <= length(neg)) {
    if ((length(neg) - kneg + 1) / (length(pos) - kpos + 1) <= fdr) break
    if (pos[kpos] < neg[kneg]) {
      kpos <- kpos + 1
    } else if (pos[kpos] > neg[kneg]) {
      kneg <- kneg + 1
    } else {
      kpos <- kpos + 1
      kneg <- kneg + 1
    }
  }
  # If pos was exhausted without reaching the target FDR, return a value
  # just above the largest observed score so that nothing passes.
  if (kpos > length(pos)) 1.01 * pos[length(pos)] else pos[kpos]
}
calc_fdr_cutoff_with_bins <- function(values,obs_scores,exp_scores,fc_cutoff,fdr,fdr_bin_size)
{
  # Value-dependent FDR thresholding: scores are binned by `values` and a
  # separate FDR cutoff is computed per bin (via calc_fdr_cutoff), with
  # overlapping "micro-bins" used for smoothing.
  # values: values (e.g. RPKMs) as a function of which the fdr will be computed
  # obs_scores: observed scores (e.g. fold-changes across samples)
  # exp_scores: scores expected by chance (e.g. fold-changes within replicates)
  # fc_cutoff: minimum required fold change
  # fdr: false discovery rate cutoff
  # fdr_bin_size: minimum number of instances per bin where FDR will be computed (as a function of value)
  # Returns a list:
  #   value_bin: bin boundary values (min/max of each micro-bin)
  #   t_cutoff: the cutoff used for each bin (repeated per boundary)
  #   t_score: obs_scores divided by their bin cutoff (>= 1 means significant)
  n = length(values)
  ivalues = order(values)      # process elements in increasing value order
  value_bin = c()
  t_cutoff = c()
  t_score = rep(0,n)
  m = 2 # smoothing parameter, number of micro-bins per bin = 2m+1
  mbin_size = fdr_bin_size/(2*m+1) # micro-bin size
  mbin_starts = seq(1,n,by=mbin_size)
  k = 1
  for (s in mbin_starts) {
    imbin = ivalues[s:min(n,s+mbin_size-1)]
    bin = max(1,s-m*mbin_size):min(n,s+(m+1)*mbin_size-1) # flank microbin by m*mbin_size
    ibin = ivalues[bin] # values inside bin
    # cutoff is determined using all values in the flanked bin, but is
    # never allowed below the global fold-change cutoff
    t_bin_cutoff = max(calc_fdr_cutoff(obs_scores[ibin],exp_scores[ibin],fdr),fc_cutoff)
    # score is only updated in the central micro-bin
    t_score[imbin] = obs_scores[imbin]/t_bin_cutoff
    t_cutoff = c(t_cutoff,t_bin_cutoff,t_bin_cutoff)
    value_bin = c(value_bin,min(values[imbin]),max(values[imbin]))
    k = k + 1
    # progress report every 10 micro-bins
    if (k%%10==0) write(paste('* ',round(100*k/length(mbin_starts),0),'% complete',sep=''),stderr())
  }
  return(list(value_bin=value_bin,t_cutoff=t_cutoff,t_score=t_score))
}
score <- function(x, y, method) # method = { 'mean', 'paired', 'pairwise' }
{
  # Summarize the fold change of x over y:
  #   'paired'   - mean of elementwise ratios x/y
  #   'mean'     - ratio of group means
  #   'pairwise' - mean ratio over all (x, y) pairs; if y is empty, the
  #                mean ratio over all unordered pairs within x
  # Any other method yields NA.
  switch(method,
    paired = mean(x / y),
    mean = mean(x) / mean(y),
    pairwise = {
      if (length(y) == 0) {
        pairs <- combn(x, 2)
        mean(pairs[1, ] / pairs[2, ])
      } else {
        grid <- expand.grid(x, y)
        mean(grid[, 1] / grid[, 2])
      }
    },
    NA   # unnamed last element = switch default
  )
}
my_ttest <- function(x,y,alternative,paired)
{
  # Two-sample t-test p-value on (typically log2-scaled) values.
  # Previously this was a stub that always returned 0 (with an unreachable
  # t.test call marked "TODO: fix this" that also ignored `alternative`).
  # Now it runs the requested test and returns NA when the test cannot be
  # computed (e.g. constant data or too few observations), so the row-wise
  # apply() in diff_peaks.calc does not abort.
  tryCatch(
    t.test(x, y, alternative = alternative, paired = paired)$p.value,
    error = function(e) NA_real_
  )
}
diff_peaks.calc <- function(D,signal_cols,ref_cols,fdr,fdr_bin_size,fold_cutoff,method)
{
  # Identify differential peaks between signal and reference samples.
  #   D: numeric matrix, rows = loci, columns = samples
  #   signal_cols / ref_cols: column indices of signal / reference samples
  #   fdr: false discovery rate cutoff
  #   fdr_bin_size: bin size for value-dependent FDR (calc_fdr_cutoff_with_bins)
  #   fold_cutoff: minimum fold change
  #   method: fold-change method passed to score(): 'paired', 'mean' or 'pairwise'
  # Returns a list with the input settings, the bounded matrix, fold
  # changes, p-values, FDR cutoffs and logical gain/loss significance flags.
  write('Initializing...',stderr())
  diffpeaks = {}   # {} evaluates to NULL; the $<- assignments promote it to a list
  diffpeaks$signal_cols = signal_cols
  diffpeaks$ref_cols = ref_cols
  diffpeaks$fdr = fdr
  diffpeaks$fdr_bin_size = fdr_bin_size
  diffpeaks$fold_cutoff = fold_cutoff
  write('Enforcing positive lower bound on matrix values...',stderr())
  # Clamp to the smallest positive entry so the ratios below never divide by zero
  lbound = min(D[D>0])
  D[D<lbound] = lbound
  diffpeaks$D = D
  write('Computing fold-changes between samples...',stderr())
  # gain = signal over reference, loss = reference over signal (per row)
  diffpeaks$gain = apply(D,1,function(x) score(x=x[signal_cols],y=x[ref_cols],method=method))
  diffpeaks$loss = apply(D,1,function(x) score(x=x[ref_cols],y=x[signal_cols],method=method))
  write('Computing p-values...',stderr())
  # t-test on log2 values; paired only when the fold-change method is 'paired'
  diffpeaks$pval = apply(log2(D),1,function(z) my_ttest(z[signal_cols],z[ref_cols],alternative='two.sided',paired=ifelse(method=='paired',TRUE,FALSE)))
  write('Computing fold-changes within replicates...',stderr())
  # Background (null) fold changes: all pairwise ratios within each group
  diffpeaks$loss_bg = apply(D,1,function(x) score(x=x[ref_cols],y=NULL,method='pairwise'))
  diffpeaks$gain_bg = apply(D,1,function(x) score(x=x[signal_cols],y=NULL,method='pairwise'))
  write('Computing FDR on peak gains...',stderr())
  # Gains are assessed as a function of the reference mean, losses of the signal mean
  diffpeaks$gain_cutoff = calc_fdr_cutoff_with_bins(apply(D[,ref_cols,drop=FALSE],1,mean),diffpeaks$gain,diffpeaks$gain_bg,fold_cutoff,fdr,fdr_bin_size)
  write('Computing FDR on peak losses...',stderr())
  diffpeaks$loss_cutoff = calc_fdr_cutoff_with_bins(apply(D[,signal_cols,drop=FALSE],1,mean),diffpeaks$loss,diffpeaks$loss_bg,fold_cutoff,fdr,fdr_bin_size)
  # Significant when the fold change clears both the fixed cutoff and the
  # bin-wise FDR cutoff (t_score >= 1 means at or above the bin cutoff)
  diffpeaks$gain_significant = (diffpeaks$gain>=fold_cutoff)&(diffpeaks$gain_cutoff$t_score>=1)
  diffpeaks$loss_significant = (diffpeaks$loss>=fold_cutoff)&(diffpeaks$loss_cutoff$t_score>=1)
  write(paste('Gain = ',sum(diffpeaks$gain_significant),sep=''),stderr())
  write(paste('Loss = ',sum(diffpeaks$loss_significant),sep=''),stderr())
  return(diffpeaks)
}
diff_peaks.plot <- function(diffpeaks,scale)
{
  # Diagnostic plots for a diff_peaks.calc() result.
  #   scale: 'log2' to log-transform the value axes, anything else = raw
  # Draws four plots on the current device: gain fold-change vs reference
  # mean with the FDR cutoff curve, the analogous loss plot, a
  # signal-vs-reference scatter highlighting significant gains/losses, and
  # boxplots of observed vs background fold changes.
  f = function(z) { z }                 # identity unless log2 scale requested
  if (scale=='log2') f = log2
  # Per-locus means of the reference (x) and signal (y) samples
  x = apply(diffpeaks$D[,diffpeaks$ref_cols,drop=FALSE],1,mean)
  y = apply(diffpeaks$D[,diffpeaks$signal_cols,drop=FALSE],1,mean)
  vlim = f(c(min(c(x,y)),max(c(x,y))))
  x_lab = paste(colnames(diffpeaks$D)[diffpeaks$ref_cols[1]],' mean (',scale,')',sep='')
  y_lab = paste(colnames(diffpeaks$D)[diffpeaks$signal_cols[1]],' mean (',scale,')',sep='')
  fclim = log2(c(min(c(diffpeaks$gain,diffpeaks$loss)),max(c(diffpeaks$gain,diffpeaks$loss))))
  # x=ref y=sig/ref : gains with the bin-wise FDR cutoff curve (red)
  smoothScatter(f(x),log2(diffpeaks$gain),ylim=fclim,xlab=x_lab,ylab='fold-change (log2)',main='signal vs reference')
  lines(f(diffpeaks$gain_cutoff$value_bin),log2(diffpeaks$gain_cutoff$t_cutoff),col='red')
  # x=sig y=ref/sig : losses with their cutoff curve (green)
  smoothScatter(f(y),log2(diffpeaks$loss),ylim=fclim,xlab=y_lab,ylab='fold-change (log2)',main='reference vs signal')
  lines(f(diffpeaks$loss_cutoff$value_bin),log2(diffpeaks$loss_cutoff$t_cutoff),col='green')
  # x=ref y=sig : scatter with significant gains (red) and losses (green)
  smoothScatter(f(x),f(y),xlim=vlim,ylim=vlim,xlab=x_lab,ylab=y_lab,main='differential peaks')
  igain = diffpeaks$gain_significant
  points(f(x[igain]),f(y[igain]),pch=18,col='red')
  iloss = diffpeaks$loss_significant
  points(f(x[iloss]),f(y[iloss]),pch=18,col='green')
  # boxplots: observed gain/loss vs within-replicate background (log2)
  boxplot(log2(diffpeaks$gain),log2(diffpeaks$loss),log2(diffpeaks$gain_bg),log2(diffpeaks$loss_bg))
}
diff_peaks.store <- function(x,y,w_signal,w_ref,diffpeaks,out_prefix)
{
  # Write diff_peaks results to disk under `out_prefix`:
  #   .score    all kept loci: fold change, p-value, means/mins and raw values
  #   .gain/.loss  significant gains/losses (locus + log2 fold change)
  #   .outliers rows flagged as outliers in either sample group
  #   .RData    all inputs saved for later reloading
  # Arguments:
  #   x: original matrix; y: matrix with columns named 'reference'/'signal'
  #   w_signal / w_ref: per-row keep flags (FALSE marks an outlier row)
  # NOTE(review): diffpeaks$gain/pval are cbound to the outlier-filtered
  # y_filt -- confirm they were computed on the same filtered matrix so
  # rows stay aligned.
  # compute mean/min for reference and signal samples
  y_filt = y[w_signal&w_ref,]
  ref_mean = apply(y_filt[,colnames(y)=='reference'],1,mean)
  ref_min = apply(y_filt[,colnames(y)=='reference'],1,min)
  sig_mean = apply(y_filt[,colnames(y)=='signal'],1,mean)
  sig_min = apply(y_filt[,colnames(y)=='signal'],1,min)
  # save score data
  score_file <- paste(out_prefix,'.score',sep='')
  scores <- cbind(rownames(y_filt),round(log2(diffpeaks$gain),3),diffpeaks$pval,ref_mean,sig_mean,ref_min,sig_min,y_filt)
  colnames(scores)[1] <- 'locus'
  colnames(scores)[2] <- 'fold-change(log2)';
  colnames(scores)[3] <- 'p-value';
  write.table(scores,score_file,quote=F,row.names=F,col.names=T,sep='\t')
  # save gain/loss reg files (locus + fold-change columns only)
  write.table(scores[diffpeaks$gain_significant,c(1,2),drop=FALSE],paste(out_prefix,'.gain',sep=''),quote=F,row.names=F,col.names=F,sep='\t');
  write.table(scores[diffpeaks$loss_significant,c(1,2),drop=FALSE],paste(out_prefix,'.loss',sep=''),quote=F,row.names=F,col.names=F,sep='\t');
  # save outlier data (rows dropped from either group, rounded for output)
  outlier_file <- paste(out_prefix,'.outliers',sep='');
  d <- rbind(y[!w_signal,],y[!w_ref,]);
  outliers <- round(d,digits=6);
  outliers <- cbind(rownames(d),outliers);
  colnames(outliers)[1] <- 'locus';
  write.table(outliers,outlier_file,quote=F,row.names=F,col.names=T,sep='\t');
  # create RData file with everything needed to reproduce the analysis
  save(x,y,w_signal,w_ref,diffpeaks,file=paste(out_prefix,'.RData',sep=''))
}
remove_outliers <- function(D,outlier_prob,scale)
{
  # Flag replicate outliers: fit a loess curve to column 2 vs column 1 of D
  # (on a random subsample) and mark rows whose residual has normal density
  # below `outlier_prob`. Also draws a reproducibility scatterplot with the
  # flagged points highlighted.
  # NOTE(review): the return type is mixed -- row indices (1:nrow) when
  # ncol(D)==1 or outlier_prob<=0, otherwise a logical keep-vector; callers
  # use it with & and !, which works for both, but confirm this is intended.
  w <- 1:nrow(D)
  if (ncol(D)==1) return(w)   # single replicate: nothing to compare against
  # enforce lower bound (smallest positive entry, as in diff_peaks.calc)
  lbound = min(D[D>0])
  D[D<lbound] = lbound
  # scale: identity unless log2 requested
  f = function(z) { z }
  if (scale=='log2') f = log2
  signal_z <- f(D)
  vlim = c(min(signal_z[signal_z>-Inf]),max(signal_z))
  signal_label <- colnames(D)[1]
  smoothScatter(signal_z,xlab=paste(signal_label,' #1 (',scale,')',sep=''),xlim=vlim,ylim=vlim,ylab=paste(signal_label,' #2 (',scale,')',sep=''),main='replicate reproducibility');
  if (outlier_prob>0) {
    n_sample = 20000;
    # always include the rows holding the min and max of column 1 so the
    # loess fit spans the full value range
    i_extrema <- as.vector(c(which(signal_z[,1]==min(signal_z[,1]))[1],which(signal_z[,1]==max(signal_z[,1]))[1]));
    i <- c(i_extrema,sample(nrow(signal_z),n_sample,rep=T))
    fit <- loess(signal_z[i,2] ~ signal_z[i,1],span=0.5,degree=1);
    x <- sort(signal_z[i,1]);
    lines(x,predict(fit,x),col='magenta');
    # residuals of ALL rows from the fitted curve; keep rows whose residual
    # density under a fitted normal exceeds outlier_prob
    r <- signal_z[,2]-predict(fit,signal_z[,1]);
    w <- dnorm(r,mean(r,na.rm=T),sd(r,na.rm=T))>outlier_prob;
    points(signal_z[!w,],pch=19,col='brown');
  }
  return(w);
}
# ##############################################
# op_easydiff
# ##############################################
# Command-line driver: reads a sample matrix (reference columns first, then
# signal columns), optionally quantile-normalizes it, filters low values,
# removes replicate outliers, computes differential peaks and writes
# plots/tables into the output directory.
#
# Args:
#   cmdline_args  character vector of command-line arguments, as returned by
#                 commandArgs(trailingOnly=TRUE).
#
# NOTE(review): relies on helpers defined elsewhere in this file
# (normalize_matrix, remove_outliers, diff_peaks.calc/plot/store) and on the
# optparse package being attached; exits the R session via quit() on usage or
# validation errors rather than raising a condition.
op_easydiff <- function(cmdline_args)
{
  # usage
  usage = "\
easydiff.r [OPTIONS] INPUT-MATRIX\
\
Function:\
Identifies differences between two samples. Use --help for list of options.\
\
Input files:\
INPUT-MATRIX tab-separated input data file (columns are samples), use --help for more details\
\
Output files:\
diff.RData RData file containing all relevant data structures used for the analysis \
diff.gain gains \
diff.loss losses \
diff.outliers outliers \
diff.pdf scatter plots \
diff.score all scores and data \
"
  # process command-line arguments
  option_list <- list(
    make_option(c("-v","--verbose"), action="store_true",default=FALSE, help="Print more messages."),
    make_option(c("-o","--output-dir"), default="", help="Output directory (required) [default=\"%default\"]."),
    make_option(c("--nref"), default=0, help="Number of reference samples [default=%default]."),
    make_option(c("--normalize"), default="none", help="Matrix normalization: none or normq [default=%default]."),
    make_option(c("--scale"), default="none", help="Scale to be used for plotting: none or log2 [default=%default]."),
    make_option(c("--method"), default="paired", help="Method for fold change computations: paired, mean, pairwise [default=%default]."),
    make_option(c("--outlier-prob"), default=0.0, help="Outlier probability cutoff [default=%default]."),
    make_option(c("--fdr-cutoff"), default=0.05, help="False discovery rate cutoff [default=%default]."),
    make_option(c("--fc-cutoff"), default=1.5, help="Fold-change cutoff [default=%default]."),
    make_option(c("--val-cutoff"), default=-Inf, help="Value cutoff [default=%default].")
  )
  # get command line options (if help option encountered print help and exit)
  arguments <- parse_args(args=cmdline_args,OptionParser(usage=usage,option_list=option_list),positional_arguments=c(0,Inf));
  opt <- arguments$options
  files <- arguments$args
  # exactly one positional argument (the input matrix) is expected
  if (length(files)!=1) { write(usage,stderr()); quit(save='no'); }
  # process input parameters
  data_file = files[1]
  nref = opt$'nref'
  out_dir = opt$'output-dir'
  normalization = opt$'normalize'
  scale = opt$'scale'
  method = opt$'method'
  outlier_prob = opt$'outlier-prob'
  fdr = opt$'fdr-cutoff'
  fold_cutoff = opt$'fc-cutoff'
  val_cutoff = opt$'val-cutoff'
  # check parameters
  if (nref<=0) { write('Error: number of reference samples must be greater than zero!',stderr()); quit(save='no') }
  # read data: rows = features, columns = samples (first nref are references)
  x = as.matrix(read.table(data_file,check.names=F,header=T,row.names=1,sep='\t'))
  ref_cols = 1:nref
  signal_cols = (nref+1):ncol(x)
  sample_labels = c('reference','signal')
  # check parameters
  if (length(signal_cols)!=length(ref_cols)) write('Warning: number of reference samples not equal to number of signal samples!',stderr())
  # create output directory
  if (out_dir=="") { write('Error: please specify output directory!',stderr()); quit(save='no') }
  if (file.exists(out_dir)==FALSE) { dir.create(out_dir) } else { write('Warning: output directory already exists, results will be overwritten!',stderr()) }
  out_prefix = paste(out_dir,'/diff',sep='')
  image_file <- paste(out_prefix,'.pdf',sep='')
  # set column labels so downstream plots use uniform sample names
  colnames(x)[ref_cols] = sample_labels[1]
  colnames(x)[signal_cols] = sample_labels[2]
  # normalize (quantile normalization via normalize_matrix, defined elsewhere)
  if (normalization == 'normq') {
    y = normalize_matrix(x)
  } else {
    y = x
  }
  # filter based on original values: drop rows where BOTH signal and
  # reference means fall below the value cutoff
  if (opt$verbose) write('Filtering out low values...',stderr())
  i_filtered = (apply(x[,signal_cols],1,mean)<val_cutoff)&(apply(x[,ref_cols],1,mean)<val_cutoff)
  y = y[i_filtered==FALSE,]
  # setup pdf: 3x2 panel layout for the diagnostic plots below
  pdf(image_file,width=5,height=7);
  par(mfrow=c(3,2),cex=0.5,mar=c(4,4,4,4))
  # remove outliers (per replicate group; also draws reproducibility plots)
  if (opt$verbose&(outlier_prob>0)) write('Removing outliers...',stderr())
  w_ref <- remove_outliers(y[,ref_cols,drop=FALSE],outlier_prob=outlier_prob,scale=scale)
  w_sig <- remove_outliers(y[,signal_cols,drop=FALSE],outlier_prob=outlier_prob,scale=scale)
  # keep only rows that are inliers in both the reference and signal groups
  z = y[w_sig&w_ref,]
  # determine FDR bin size (at most 10000, at least ~1/10th of the rows)
  fdr_bin_size = min(floor(nrow(z)/10),10000)
  if (opt$verbose) write(paste("FDR bin size = ",fdr_bin_size,sep=''),stderr())
  # find differential peaks
  diffpeaks = diff_peaks.calc(D=z,signal_cols=signal_cols,ref_cols=ref_cols,fdr=fdr,fdr_bin_size=fdr_bin_size,fold_cutoff=fold_cutoff,method=method)
  # plot peak differences
  if (opt$verbose) write('Plotting differences...',stderr())
  diff_peaks.plot(diffpeaks,scale=scale)
  dev.off()
  # store results (tables + RData) under out_prefix
  if (opt$verbose) write('Storing results...',stderr())
  diff_peaks.store(x,y,w_sig,w_ref,diffpeaks,out_prefix)
  if (opt$verbose) write('Done.',stderr())
}
# ##################################################################
# MAIN PROGRAM
# ##################################################################
# Script entry point: collect user-supplied arguments (everything after the
# script name on the Rscript command line).
args <- commandArgs(trailingOnly = TRUE)
# Attach runtime dependencies, installing any that are missing.
# NOTE(review): 'preprocessCore' is a Bioconductor package, so
# install.packages() against a CRAN mirror will not find it on machines
# where it is absent; consider BiocManager::install() for that one -- TODO
# confirm the intended install path.
for (p in c('optparse', 'preprocessCore', 'MASS')) {
  if (!require(p, character.only = TRUE, quietly = TRUE, warn.conflicts = FALSE)) {
    install.packages(p, repos = "http://cran.rstudio.com/")
    library(p, character.only = TRUE, verbose = FALSE)
  }
}
# Run the differential analysis and exit without saving the workspace.
op_easydiff(args)
quit(save = 'no')
|
bfa23c8f3f93eb46817e6f1a60ccc48fd915bdb9 | 72d9009d19e92b721d5cc0e8f8045e1145921130 | /dipsaus/R/queue-abstract.R | 3c5a4df3120d272f3485ae5e350faf5d9f77e510 | [] | no_license | akhikolla/TestedPackages-NoIssues | be46c49c0836b3f0cf60e247087089868adf7a62 | eb8d498cc132def615c090941bc172e17fdce267 | refs/heads/master | 2023-03-01T09:10:17.227119 | 2021-01-25T19:44:44 | 2021-01-25T19:44:44 | 332,027,727 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 19,896 | r | queue-abstract.R | #' @title Defines abstract queue class
#'
#' @description This class is inspired by \url{https://cran.r-project.org/package=txtq}.
#' The difference is that \code{AbstractQueue} introduces an abstract class that can
#' be extended and can queue not only text messages, but also arbitrary R
#' objects, including expressions and environments. All the queue types in this
#' package inherit this class.
#'
#' @name AbstractQueue
#'
#' @section Abstract Public Methods:
#'
#' Methods start with \code{@@...} are not thread-safe. Most of them are not
#' used directly by users. However, you might want to override them if you
#' inherit this abstract class. Methods marked as "(override)" are not
#' implemented, meaning you are supposed to implement the details. Methods
#' marked as "(optional)" usually have default alternatives.
#'
#' \describe{
#' \item{\code{initialize(...)} (override)}{
#' The constructor. Usually three things to do during the process:
#' 1. set \code{get_locker} \code{free_locker} if you don't want to use the
#' default lockers. 2. set lock file (if using default lockers). 3. call
#' \code{self$connect(...)}
#' }
#' \item{\code{get_locker()}, \code{free_locker()} (optional)}{
#' Default is \code{NULL} for each methods, and queue uses an internal
#' \code{private$default_get_locker} and \code{private$default_free_locker}.
#' These two methods are for customized locker, please
#' implement these two methods as functions during \code{self$initialization}
#' \code{get_locker} obtains and lock access (exclusive), and \code{free_locker}
#' frees the locker. Once implemented, \code{private$exclusive} will take care
#' the rest. Type: function; parameters: none; return: none
#' }
#' \item{\code{@@get_head()}, \code{@@set_head(v)} (override)}{
#' Get head so that we know where we are in the queue \code{self$@@get_head()}
#' should return a integer indicating where we are at the queue
#' \code{self$@@set_head(v)} stores that integer. Parameter \code{v} is always
#' non-negative, this is guaranteed. Users are not supposed to call these
#' methods directly, use \code{self$head} and \code{self$head<-} instead.
#' However, if you inherit this class, you are supposed to override the methods.
#' }
#' \item{\code{@@get_total()}, \code{@@set_total(v)} (override)}{
#' Similar to \code{@@get_head} and \code{@@set_head}, defines the total items
#' ever stored in the queue. total-head equals current items in the queue.
#' }
#' \item{\code{@@inc_total(n=1)} (optional)}{
#' Increase total, usually this doesn't need to be override, unless you are
#' using files to store total and want to decrease number of file connections
#' }
#' \item{\code{@@append_header(msg, ...)} (override)}{
#' \code{msg} will be vector of strings, separated by "|", containing encoded
#' headers: `time`, `key`, `hash`, and `message`. to decode what's inside, you
#' can use \code{self$print_items(stringr::str_split_fixed(msg, '\\|', 4))}.
#' \strong{Make sure} to return a number, indicating number of items stored.
#' Unless handled elsewhere, usually \code{return(length(msg))}.
#' }
#' \item{\code{@@store_value(value, key)} (override)}{
#' Defines how to store value. `key` is unique identifier generated from
#' time, queue ID, and value. Usually I use it as file name or key ID in
#' database. value is an arbitrary R object to store. you need to store value
#' somewhere and return a string that will be passed as `hash` in
#' \code{self$restore_value}.
#' }
#' \item{\code{restore_value(hash, key, preserve = FALSE)} (override)}{
#' Method to restore value from given combination of `hash` and `key`.
#' `hash` is the string returned by \code{@@store_value}, and `key` is the same
#' as key in \code{@@store_value}. preserve is a indicator of whether to
#' preserve the value for future use. If set to \code{FALSE}, then you are
#' supposed to free up the resource related to the value. (such as free memory
#' or disk space)
#' }
#' \item{\code{@@log(n = -1, all = FALSE) (override)}}{
#' get \code{n} items from what you saved to during \code{@@append_header}.
#' \code{n} less equal than 0 means listing all possible items.
#' If \code{all=TRUE}, return all items (number of rows should equals to
#' \code{self$total}), including popped items. If \code{all=FALSE}, only
#' return items in the queue (number of rows is \code{self$count}). The
#' returned value should be a \code{n x 4} matrix. Usually I use
#' \code{stringr::str_split_fixed(..., '\\|', 4)}. Please see all other
#' types implemented for example.
#' }
#' \item{\code{@@reset(...)} (override)}{
#' Reset queue, remove all items and reset head, total to be 0.
#' }
#' \item{\code{@@clean()} (override)}{
#' Clean the queue, remove all the popped items.
#' }
#' \item{\code{@@validate()} (override)}{
#' Validate the queue. Stop if the queue is broken.
#' }
#' \item{\code{@@connect(con, ...)} (override)}{
#' Set up connection. Usually should be called at the end of
#' \code{self$initialization} to connect to a database, a folder, or an
#' existing queue you should do checks whether the connection is new or it's
#' an existing queue.
#' }
#' \item{\code{connect(con, ...)} (optional)}{
#' Thread-safe version. sometimes you need to override this function instead
#' of \code{@@connect}, because \code{private$exclusive} requires \code{lockfile}
#' to exist and to be locked. If you don't have lockers ready, or need to set
#' lockers during the connection, override this one.
#' }
#' \item{\code{destroy()} (optional)}{
#' Destroy a queue, free up space and call
#' \code{delayedAssign('.lockfile', {stop(...)}, assign.env=private)} to raise
#' error if a destroyed queue is called again later.
#' }
#' }
#'
#' @section Public Methods:
#'
#' Usually don't need to override unless you know what you are doing.
#'
#' \describe{
#' \item{\code{push(value, message='',...)}}{
#' Function to push an arbitrary R object to queue. \code{message} is a string
#' giving notes to the pushed item. Usually message is stored with header,
#' separated from values. The goal is to describe the value. \code{...} is
#' passed to \code{@@append_header}
#' }
#' \item{\code{pop(n = 1, preserve = FALSE)}}{
#' Pop \code{n} items from the queue. \code{preserve} indicates whether not to
#' free up the resources, though not always guaranteed.
#' }
#' \item{\code{print_item(item)}, \code{print_items(items)}}{
#' To decode matrix returned by \code{log()}, returning named list or data frame
#' with four heads: `time`, `key`, `hash`, and `message`.
#' }
#' \item{\code{list(n=-1)}}{
#' List items in the queue, decoded. If \code{n} is less equal than 0, then
#' list all results. The result is equivalent to
#' \code{self$print_items(self$log(n))}
#' }
#' \item{\code{log(n=-1,all=FALSE)}}{
#' List items in the queue, encoded. This is used with \code{self$print_items}.
#' When \code{all=TRUE}, result will list the records ever pushed to the queue
#' since the last time queue is cleaned. When \code{all=FALSE}, results will be
#' items in the queue. \code{n} is the number of items.
#' }
#' }
#'
#' @section Public Active Bindings:
#'
#' \describe{
#' \item{\code{id}}{
#' Read-only property. Returns unique ID of current queue.
#' }
#' \item{\code{lockfile}}{
#' The lock file.
#' }
#' \item{\code{head}}{
#' Integer, total number of items popped, i.e. inactive items.
#' }
#' \item{\code{total}}{
#' Total number of items ever pushed to the queue since last cleaned, integer.
#' }
#' \item{\code{count}}{
#' Integer, read-only, equals to total - head, number of active items in the
#' queue
#' }
#' }
#'
#' @section Private Methods or properties:
#'
#' \describe{
#' \item{\code{.id}}{
#' Don't use directly. Used to store queue ID.
#' }
#' \item{\code{.lockfile}}{
#' Location of lock file.
#' }
#' \item{\code{lock}}{
#' Preserve the file lock.
#' }
#' \item{\code{exclusive(expr,...)}}{
#' Function to make sure the methods are thread-safe
#' }
#' \item{\code{default_get_locker()}}{
#' Default method to lock a queue
#' }
#' \item{\code{default_free_locker}}{
#' Default method to free a queue
#' }
#' }
NULL
not_implemented <- function(msg = 'Not yet implemented', default = 0){
  # Signal that an abstract method lacks a concrete implementation, then
  # hand back a placeholder value so callers can keep running.
  warning(msg)
  return(default)
}
rand_string <- function(length = 50){
  # Draw `length` characters uniformly (with replacement) from [A-Za-z0-9]
  # and glue them into one random identifier string.
  alphabet <- c(letters, LETTERS, 0:9)
  paste0(sample(alphabet, length, replace = TRUE), collapse = '')
}
# Zero-row template returned by the queue's list() method when there are no
# items; columns mirror the decoded header fields produced by print_item()
# (time, key, hash, message), all character.
null_item <- data.frame(
  time = character(0),
  key = character(0),
  hash = character(0),
  message = character(0)
)
#' @rdname AbstractQueue
#' @export
# R6 generator for the abstract queue. Methods whose names start with `@` are
# the non-thread-safe primitives that subclasses override; the plain-named
# wrappers acquire the lock via private$exclusive() before delegating.
AbstractQueue <- R6::R6Class(
  classname = "AbstractQueue",
  portable = TRUE,
  cloneable = TRUE,
  private = list(
    .id = character(0),
    # Lock file that each queue should have
    # If lock file is locked, then we should wait till the next transaction period
    .lockfile = character(0),
    lock = NULL,
    # Run expr making sure that locker is locked to be exclusive (for write-only)
    exclusive = function(expr, ...) {
      # on.exit is registered BEFORE the lock is acquired so the unlock runs
      # even if acquisition itself errors partway.
      on.exit({
        if(is.function(self$free_locker)){
          self$free_locker()
        }else{
          private$default_free_locker()
        }
      })
      if(is.function(self$get_locker)){
        self$get_locker(...)
      }else{
        private$default_get_locker(...)
      }
      # Evaluate the caller's expression while holding the lock; its value
      # becomes the return value of exclusive().
      force(expr)
    },
    # NOTE(review): dipsaus_lock/dipsaus_unlock are package-internal helpers
    # defined elsewhere in the dipsaus package.
    default_get_locker = function(timeout = 5){
      dipsaus_lock(self$lockfile, timeout = timeout)
    },
    default_free_locker = function(){
      dipsaus_unlock(self$lockfile)
    }
  ),
  public = list(
    # By default, queue uses file locker, if you have customized locker, please
    # implement these two methods as functions:
    #   get_locker obtain and lock access (exclusive)
    #   free_locker free the lock
    # private$exclusive will take care the rest
    get_locker = NULL,
    free_locker = NULL,
    # Get head so that we know where we are in the queue
    # @get_head should return a integer indicating where we are at the queue
    # @set_head stores that integer
    # param `v` is always non-negative, this is guaranteed
    # Users are not supposed to call these methods directly,
    # they use self$head and self$head<-
    `@get_head` = function(){ not_implemented() },
    `@set_head` = function(v){ not_implemented() },
    # Get total number of items in the queue, similar to @get_head and @set_head
    `@get_total` = function(){ not_implemented() },
    `@set_total` = function(v){ not_implemented() },
    # Increase total, usually this doesn't need to be override, unless you are
    # using files to store total and want to decrease number of file connections
    `@inc_total` = function(n=1){
      self$total <- self$total + n
    },
    # msg will be vector of strings, separated by "|", containing encoded headers
    # 1. time, key, hash, and message, to view what's inside, you can use
    #    self$print_items(stringr::str_split_fixed(msg, '\\|', 4))
    # to decode
    #
    # Make **sure** to return a number as $push() function uses the returned
    # value as indicator of how many items are stored
    # Unless handled elsewhere, usually return length of msg
    `@append_header` = function(msg, ...){
      not_implemented()
      return(length(msg))
    },
    # Defines how to store value. `key` is unique identifier generated from
    # time, queue ID, and value, you can use it as file name
    # value is an arbitrary R object to store. you need to store value somewhere
    # and return a string (hash, or key or whatever) that will be used in
    # restore_value
    # For example, in rds_queue, I use key as file name and saveRDS(value) to
    # that file. and in `restore_value` I use `hash` to retrive the file name
    # and read the value
    #
    # Make sure return a string, it'll be encoded and stored as `hash`
    `@store_value` = function(value, key){
      not_implemented()
    },
    # hash is the string returned by `@store_value`, and
    # key is the same as key in `@store_value`
    # preserve is a indicator of whether to preserve the value for future use
    # or remove the value to free memory/disk space
    restore_value = function(hash, key, preserve = FALSE){
      not_implemented()
    },
    # Fixed usage, don't override unless you know what's inside
    # Encodes one item as "time|key|hash|message" and appends it under the
    # lock, bumping the total count by however many items were stored.
    # NOTE(review): safe_urlencode/safe_urldecode/microtime/cat2 are
    # package-internal helpers defined elsewhere in dipsaus.
    push = function(value, message = '', ...){
      time <- safe_urlencode(microtime())
      digest_val <- digest::digest(message)
      # key is deterministic given (queue id, timestamp, message digest)
      key <- digest::digest(list(self$id, time, digest_val))
      hash <- safe_urlencode(self$`@store_value`(value, key))
      message <- safe_urlencode(message)
      if(length(hash) != 1){
        cat2('store_value returns hash value that has length != 1', level = 'FATAL')
      }
      out <- paste( time, key, hash, message, sep = "|" )
      private$exclusive({
        n <- self$`@append_header`(msg = out, ...)
        if( n > 0 ){
          self$`@inc_total`( n )
        }
      })
    },
    # decode headers and return a data.frame
    # items should be a nx4 matrix. Easiest example is the matrix returned by
    # `log()`
    print_items = function(items){
      # Take the results from log() and translate into a data.frame with time, key, hash, and message
      do.call('rbind', apply(items, 1, function(item){
        as.data.frame(self$print_item(item), stringsAsFactors = FALSE)
      }))
    },
    # Print single item, similar to `print_items`, returns a list
    print_item = function(item){
      list(
        time = safe_urldecode(item[[1]]),
        key = item[[2]],
        hash = safe_urldecode(item[[3]]),
        message = safe_urldecode(item[[4]])
      )
    },
    # List n items in the queue. if n <= 0, then list all
    # value will not be obtained during the process,
    # only time, key, hash, and message will be returned, as obtaining value
    # is usually much heavier. However, you can use
    #   self$restore_value(hash, key, preserve=TRUE) to
    # obtain the value. The value is not always available though.
    list = function(n = -1){
      out <- self$log(n=n, all=FALSE)
      # empty queue: return the zero-row template defined at file level
      if( !length(out) ){ return(null_item) }
      if( !is.matrix(out) && !is.data.frame(out) ){
        cat2('list must return a matrix or a data.frame', level = 'FATAL')
      }
      nrows <- nrow(out)
      if(!nrows){ return( null_item ) }
      out <- lapply(seq_len(nrows), function(ii){
        re <- self$print_item(out[ii, ])
        as.data.frame(re, stringsAsFactors=FALSE)
      })
      do.call('rbind', out)
    },
    # pop first n items from queue, `preserve` will be passed to `restore_value`
    # Don't override unless you know what's inside
    pop = function(n = 1, preserve = FALSE) {
      private$exclusive({
        # Check count first, in this case, we don't read header file
        count <- self$count
        if(count < 0.5){ return(list()) }
        out <- self$`@log`(n = n)
        if( !length(out) ){ return(list()) }
        if( !is.matrix(out) && !is.data.frame(out) ){
          cat2('list must return a matrix or a data.frame', level = 'FATAL')
        }
        nrows <- nrow(out)
        if(!nrows){ return( list() ) }
        # parse time, key, hash
        out <- lapply(seq_len(nrows), function(ii){
          re <- self$print_item(out[ii, ])
          re$value <- self$restore_value( re$hash, re$key, preserve = preserve )
          re
        })
        # advance the head only after all values were restored
        self$head <- self$head + nrows
        out
      })
    },
    # get n items from what you saved to during `@append_header`. n<=0 means
    # list all possible items.
    # If all=TRUE, return all items (#items=self$total), including popped items
    # If all=FALSE, only return items in the queue
    # The returned value should be a nx4 matrix
    # I use stringr::str_split_fixed(..., '\\|', 4) in all queues implemented
    `@log` = function(n = -1, all = FALSE){
      not_implemented()
    },
    # log with locks (thread-safe)
    log = function(n=-1, all=FALSE){
      private$exclusive({
        self$`@log`(n=n, all=all)
      })
    },
    # Remove all items and reset head=total=0
    `@reset` = function(...) {
      not_implemented()
    },
    # thread-safe version
    reset = function(...) {
      private$exclusive({
        self$`@reset`(...)
      })
    },
    # clean all popped items. Usually you don't have to do this manually as
    # pop(..., preserve=FALSE) will clean automatically (except for `text_queue`)
    `@clean` = function(...) {
      not_implemented()
    },
    # thread-safe version
    clean = function(...){
      private$exclusive({
        self$`@clean`(...)
      })
    },
    # check the validity of queue. Usually the followings need to be checked
    # 1. head<=total, and non-negative
    # 2. all the necessary files exist
    # 3. all the connections exist
    `@validate` = function(...) {
      not_implemented()
    },
    validate = function(...){
      private$exclusive({
        self$`@validate`(...)
      })
    },
    # Usually should be called at the end of `initialization` to connect to
    # a database, a folder, or an existing queue
    # you should do checks whether the connection is new or it's an existing
    # queue
    `@connect` = function(con = NULL, ...){
      not_implemented()
    },
    # thread-safe version. sometimes you need to override this function instead
    # of `@connect`, because `private$exclusive` requires lockfile to be locked
    # If you don't have lockers ready, or need to set lockers during the
    # connection, override this one
    connect = function(...){
      private$exclusive({
        self$`@connect`(...)
      })
    },
    # will be called during Class$new(...), three tasks,
    # 1. set `get_locker` `free_locker` if lock type is not a file
    # 2. set lockfile (if using default lockers)
    # 3. call self$connect
    initialize = function(con = NULL, lockfile, ...){
      self$lockfile <- lockfile
      self$connect(con, ...)
    },
    # destroy a queue, free up space
    # and call `delayedAssign('.lockfile', {stop(...)}, assign.env=private)`
    # to raise error if a destroyed queue is called again later.
    destroy = function(){
      private$default_free_locker()
      # Any later access to private$.lockfile triggers the FATAL message.
      delayedAssign('.lockfile', {
        cat2("Queue is destroyed", level = 'FATAL')
      }, assign.env=private)
    }
  ),
  active = list(
    # read-only version of self$id. It's safer than private$.id as the latter
    # one does not always exist
    id = function(){
      if(length(private$.id) != 1){
        # lazily generate a random ID on first access
        private$.id <- rand_string()
      }
      private$.id
    },
    # set/get lock file. Don't call private$.lockfile directly
    lockfile = function(v){
      if(!missing(v)){
        # release any lock held on the old file before switching
        private$default_free_locker()
        private$.lockfile <- v
      }else if(!length(private$.lockfile)){
        private$.lockfile <- rand_string()
      }
      private$.lockfile
    },
    # a safe wrapper for `@get_head` and `@set_head`
    head = function(v) {
      if(missing(v)){ return(as.integer(self$`@get_head`())) }
      if( length(v) != 1 ){ cat2('head must be a number',level = 'FATAL') }
      if( !is.numeric(v) || v < 0 ){ cat2('head must be a non-negative integer',
                                          level = 'FATAL') }
      if( v > self$total ){ cat2('head must not exceed total',
                                 level = 'FATAL') }
      self$`@set_head`( v )
    },
    # a safe wrapper for `@get_total` and `@set_total`
    total = function(v){
      if(missing(v)){ return(as.integer(self$`@get_total`())) }
      if( length(v) != 1 ){ cat2('total must be a number', level = 'FATAL') }
      if( !is.numeric(v) || v < 0 ){ cat2('total must be a non-negative integer',
                                          level = 'FATAL') }
      self$`@set_total`( v )
    },
    # How many items in the queue right now, = total - head
    count = function(){
      # defensive: subclasses may fail to report head/total before connect()
      tryCatch({
        self$total - self$head
      }, error = function(e){
        warning('Cannot get count, return 0')
        0
      })
    }
  )
)
|
c2099375825921e102323a1f083e2f7297bea153 | a1c59394a2b42d6756c2b9564697db714b27fe49 | /R/PlotPDFsOLE.R | bf95abb76c745410447d0cdf59c22f34b2509231 | [] | no_license | cran/CSTools | e06a58f876e86e6140af5106a6abb9a6afa7282e | 6c68758da7a0dadc020b48cf99bf211c86498d12 | refs/heads/master | 2023-06-26T01:20:08.946781 | 2023-06-06T13:10:05 | 2023-06-06T13:10:05 | 183,258,656 | 2 | 2 | null | null | null | null | UTF-8 | R | false | false | 9,977 | r | PlotPDFsOLE.R | #'Plotting two probability density gaussian functions and the optimal linear
#'estimation (OLE) as result of combining them.
#'
#'@author Eroteida Sanchez-Garcia - AEMET, \email{esanchezg@aemet.es}
#'
#'@description This function plots two probability density gaussian functions
#'and the optimal linear estimation (OLE) as result of combining them.
#'
#'@param pdf_1 A numeric array with a dimension named 'statistic', containing
#' two parameters: 'mean' and 'standard deviation' of the first gaussian pdf
#' to combine.
#'@param pdf_2 A numeric array with a dimension named 'statistic', containing
#' two parameters: 'mean' and 'standard deviation' of the second gaussian pdf
#' to combine.
#'@param nsigma (optional) A numeric value for setting the limits of X axis.
#' (Default nsigma = 3).
#'@param legendPos (optional) A character value for setting the position of the
#' legend ("bottom", "top", "right" or "left")(Default 'bottom').
#'@param legendSize (optional) A numeric value for setting the size of the
#' legend text. (Default 1.0).
#'@param plotfile (optional) A filename where the plot will be saved.
#' (Default: the plot is not saved).
#'@param width (optional) A numeric value indicating the plot width in
#' units ("in", "cm", or "mm"). (Default width = 30).
#'@param height (optional) A numeric value indicating the plot height.
#' (Default height = 15).
#'@param units (optional) A character value indicating the plot size
#' unit. (Default units = 'cm').
#'@param dpi (optional) A numeric value indicating the plot resolution.
#' (Default dpi = 300).
#'
#'@return PlotPDFsOLE() returns a ggplot object containing the plot.
#'
#'@examples
#'# Example 1
#'pdf_1 <- c(1.1,0.6)
#'attr(pdf_1, "name") <- "NAO1"
#'dim(pdf_1) <- c(statistic = 2)
#'pdf_2 <- c(1,0.5)
#'attr(pdf_2, "name") <- "NAO2"
#'dim(pdf_2) <- c(statistic = 2)
#'
#'PlotPDFsOLE(pdf_1, pdf_2)
#'@import ggplot2
#'@export
# Plot two gaussian pdfs plus their optimal-linear-estimation combination.
# See the roxygen block above for the full parameter documentation; returns
# the ggplot object (and optionally saves it to `plotfile` via ggsave).
PlotPDFsOLE <- function(pdf_1, pdf_2, nsigma = 3, legendPos = 'bottom',
                        legendSize = 1.0, plotfile = NULL, width = 30,
                        height = 15, units = "cm", dpi = 300) {
  # Silence R CMD check NOTEs about non-standard-evaluation variables used
  # inside aes() below.
  y <- type <- NULL
  # ---------------------------------------------------------------------------
  # Validate the file-output parameters (only needed when saving a plot)
  # ---------------------------------------------------------------------------
  if(!is.null(plotfile)){
    if (!is.numeric(dpi)) {
      stop("Parameter 'dpi' must be numeric.")
    }
    if (length(dpi) > 1) {
      warning("Parameter 'dpi' has length greater than 1 and ",
              "only the first element will be used.")
      dpi <- dpi[1]
    }
    if (!is.character(units)) {
      stop("Parameter 'units' must be character")
    }
    if (length(units) > 1) {
      warning("Parameter 'units' has length greater than 1 and ",
              "only the first element will be used.")
      units <- units[1]
    }
    if(!(units %in% c("in", "cm", "mm"))) {
      stop("Parameter 'units' must be equal to 'in', 'cm' or 'mm'.")
    }
    if (!is.numeric(height)) {
      stop("Parameter 'height' must be numeric.")
    }
    if (length(height) > 1) {
      warning("Parameter 'height' has length greater than 1 and ",
              "only the first element will be used.")
      height <- height[1]
    }
    if (!is.numeric(width)) {
      stop("Parameter 'width' must be numeric.")
    }
    if (length(width) > 1) {
      warning("Parameter 'width' has length greater than 1 and ",
              "only the first element will be used.")
      width <- width[1]
    }
    if (!is.character(plotfile)) {
      stop("Parameter 'plotfile' must be a character string ",
           "indicating the path and name of output png file.")
    }
  }
  # ---------------------------------------------------------------------------
  # Validate legend / nsigma parameters
  # ---------------------------------------------------------------------------
  if (!is.character(legendPos)) {
    stop("Parameter 'legendPos' must be character")
  }
  if(!(legendPos %in% c("bottom", "top", "right", "left"))) {
    stop("Parameter 'legendPos' must be equal to 'bottom', 'top', 'right' or 'left'.")
  }
  if (!is.numeric(legendSize)) {
    stop("Parameter 'legendSize' must be numeric.")
  }
  if (!is.numeric(nsigma)) {
    stop("Parameter 'nsigma' must be numeric.")
  }
  if (length(nsigma) > 1) {
    warning("Parameter 'nsigma' has length greater than 1 and ",
            "only the first element will be used.")
    nsigma <- nsigma[1]
  }
  # ---------------------------------------------------------------------------
  # Validate the two pdf arrays: numeric, named dim 'statistic' of length 2
  # (statistic[1] = mean, statistic[2] = standard deviation)
  # ---------------------------------------------------------------------------
  if (!is.array(pdf_1)) {
    stop("Parameter 'pdf_1' must be an array.")
  }
  if (!is.array(pdf_2)) {
    stop("Parameter 'pdf_2' must be an array.")
  }
  if (!is.numeric(pdf_1)) {
    stop("Parameter 'pdf_1' must be a numeric array.")
  }
  if (!is.numeric(pdf_2)) {
    stop("Parameter 'pdf_2' must be a numeric array.")
  }
  if (is.null(names(dim(pdf_1))) ||
      is.null(names(dim(pdf_2)))) {
    stop("Parameters 'pdf_1' and 'pdf_2' ",
         "should have dimmension names.")
  }
  if(!('statistic' %in% names(dim(pdf_1)))) {
    stop("Parameter 'pdf_1' must have dimension 'statistic'.")
  }
  if(!('statistic' %in% names(dim(pdf_2)))) {
    stop("Parameter 'pdf_2' must have dimension 'statistic'.")
  }
  if (length(dim(pdf_1)) != 1) {
    stop("Parameter 'pdf_1' must have only dimension 'statistic'.")
  }
  if (length(dim(pdf_2)) != 1) {
    stop("Parameter 'pdf_2' must have only dimension 'statistic'.")
  }
  if ((dim(pdf_1)['statistic'] != 2) || (dim(pdf_2)['statistic'] != 2)) {
    stop("Length of dimension 'statistic'",
         "of parameter 'pdf_1' and 'pdf_2' must be equal to 2.")
  }
  # Optional 'name' attributes label each pdf in the legend
  if(!is.null(attr(pdf_1, "name"))){
    if(!is.character(attr(pdf_1, "name"))){
      stop("The 'name' attribute of parameter 'pdf_1' must be a character ",
           "indicating the name of the variable of parameter 'pdf_1'.")
    }
  }
  if(!is.null(attr(pdf_2, "name"))){
    if(!is.character(attr(pdf_2, "name"))){
      stop("The 'name' attribute of parameter 'pdf_2' must be a character ",
           "indicating the name of the variable of parameter 'pdf_2'.")
    }
  }
  if(is.null(attr(pdf_1, "name"))){
    name1 <- "variable 1"
  } else {
    name1 <- attr(pdf_1, "name")
  }
  if(is.null(attr(pdf_2, "name"))){
    name2 <- "Variable 2"
  } else {
    name2 <- attr(pdf_2, "name")
  }
  #-----------------------------------------------------------------------------
  # Set parameters of gaussian distributions (mean and sd)
  #-----------------------------------------------------------------------------
  mean1 <- pdf_1[1]
  sigma1 <- pdf_1[2]
  mean2 <- pdf_2[1]
  sigma2 <- pdf_2[2]
  # Combine the two pdfs with the OLE formula (helper defined below)
  pdfBest <- CombinedPDFs(pdf_1, pdf_2)
  meanBest <- pdfBest[1]
  sigmaBest <- pdfBest[2]
  #-----------------------------------------------------------------------------
  # Plot the gaussian distributions
  #-----------------------------------------------------------------------------
  nameBest <- paste0(name1, " + ", name2)
  graphicTitle <- "OPTIMAL LINEAR ESTIMATION"
  # x-axis spans nsigma standard deviations around all three distributions
  xlimSup <- max(nsigma * sigmaBest + meanBest, nsigma * sigma1 + mean1,
                 nsigma * sigma2 + mean2)
  xlimInf <- min(-nsigma * sigmaBest+meanBest, - nsigma * sigma1 + mean1,
                 -nsigma * sigma2 + mean2)
  # deltax <- 0.02
  deltax <- (xlimSup - xlimInf) / 10000
  x <- seq(xlimInf, xlimSup, deltax)
  df1 <- data.frame(x = x, y = dnorm(x, mean = mean1, sd = sigma1),
                    type = name1)
  df2 <- data.frame(x = x, y = dnorm(x, mean = mean2, sd = sigma2),
                    type = name2)
  df3 <- data.frame(x = x, y = dnorm(x, mean = meanBest, sd = sigmaBest),
                    type = nameBest)
  df123 <- rbind(df1, df2, df3)
  label1 <- paste0(name1, ": N(mean=",round(mean1, 2), ", sd=", round(sigma1, 2),
                   ")")
  label2 <- paste0(name2, ": N(mean=",round(mean2, 2), ", sd=", round(sigma2, 2),
                   ")")
  labelBest <- paste0(nameBest, ": N(mean=",round(meanBest,2), ", sd=",
                      round(sigmaBest, 2), ")")
  # fixed colors: red for pdf_1, green for pdf_2, blue for the combination
  cols <- c("#DC3912", "#13721A", "#1F5094")
  names(cols) <- c(name1, name2, nameBest)
  g <- ggplot(df123) + geom_line(aes(x, y, colour = type), size = rel(1.2))
  g <- g + scale_colour_manual(values = cols,
                               limits = c(name1, name2, nameBest),
                               labels = c(label1, label2, labelBest))
  g <- g + theme(plot.title=element_text(size=rel(1.1), colour="black",
                                         face= "bold"),
                 axis.text.x = element_text(size=rel(1.2)),
                 axis.text.y = element_text(size=rel(1.2)),
                 axis.title.x = element_blank(),
                 legend.title = element_blank(),
                 legend.position = legendPos,
                 legend.text = element_text(face = "bold", size=rel(legendSize)))
  g <- g + ggtitle(graphicTitle)
  g <- g + labs(y="probability", size=rel(1.9))
  # Shade the area under each curve.
  # NOTE(review): dnorm_limit is a helper defined elsewhere in this package;
  # presumably it truncates dnorm to a plotting range -- confirm against the
  # package source.
  g <- g + stat_function(fun = dnorm_limit, args = list(mean=mean1, sd=sigma1),
                         fill = cols[name1], alpha=0.2, geom="area")
  g <- g + stat_function(fun = dnorm_limit, args = list(mean=mean2, sd=sigma2),
                         fill = cols[name2], alpha=0.2, geom="area")
  g <- g + stat_function(fun = dnorm_limit, args = list(mean=meanBest,
                                                        sd=sigmaBest),
                         fill = cols[nameBest], alpha=0.2, geom="area")
  #-----------------------------------------------------------------------------
  # Save to plotfile if needed, and return plot
  #-----------------------------------------------------------------------------
  if (!is.null(plotfile)) {
    ggsave(plotfile, g, width = width, height = height, units = units, dpi = dpi)
  }
  return(g)
}
# Auxiliary helper functions used by the plotting routine above
# Optimally combine two Gaussian PDFs via inverse-variance weighting.
#
# Each input is a length-2 vector c(mean, sd) carrying the dim attribute
# 'statistic' (as validated by the caller). The returned vector holds the
# mean and sd of the best linear unbiased combination, with the same
# 'statistic' dim attribute so it can be fed back into the plotting code.
CombinedPDFs <- function(pdf_1, pdf_2) {
  mu_a <- pdf_1[1]
  sd_a <- pdf_1[2]
  mu_b <- pdf_2[1]
  sd_b <- pdf_2[2]
  # Each distribution is weighted by the *other* one's variance, so the
  # more precise (smaller-sd) estimate dominates the combined mean.
  w_a <- (sd_b^2) / ((sd_a^2) + (sd_b^2))
  w_b <- (sd_a^2) / ((sd_a^2) + (sd_b^2))
  combined_mean <- w_a * mu_a + w_b * mu_b
  # Combined variance is the harmonic-style product/sum of the variances;
  # it is always <= min(var_a, var_b).
  combined_sd <- sqrt((sd_a^2) * (sd_b^2) / ((sd_a^2) + (sd_b^2)))
  out <- c(combined_mean, combined_sd)
  dim(out) <- c(statistic = 2)
  out
}
# Normal density restricted to the interval [mean, mean + sd].
#
# Returns dnorm(x, mean, sd) where x lies inside the interval and NA
# elsewhere; used with ggplot2's stat_function(geom = "area") so that only
# the one-sigma band to the right of the mean is shaded.
dnorm_limit <- function(x, mean, sd) {
  density <- dnorm(x, mean, sd)
  outside_band <- x < mean | x > mean + sd
  density[outside_band] <- NA
  density
}
|
b62947fccd970e56d734c0df92beca488edd3429 | ba27dafd9f2dfda2cf487dd089a8d9c20dcf24ad | /man/sem.aic.Rd | 9fd7d419e93cf14c5d4078ea8b7bb07557bdb8fb | [] | no_license | guhjy/piecewiseSEM | 9dc9016c8d788228075a917660a854241607d0b7 | 4c74a86efc47e610bf4ebe9c1d180c8da1b6873f | refs/heads/master | 2020-05-21T21:45:21.494051 | 2016-12-08T18:54:35 | 2016-12-08T18:54:35 | 84,651,786 | 1 | 0 | null | 2017-03-11T13:27:26 | 2017-03-11T13:27:26 | null | UTF-8 | R | false | false | 3,590 | rd | sem.aic.Rd | \name{sem.aic}
\alias{sem.aic}
\title{
Extracts AIC scores for piecewise SEM
}
\description{
Extracts the AIC and AICc (corrected for small sample size) values from a piecewise structural equation model (SEM).
}
\usage{
sem.aic(modelList, data, corr.errors, add.vars, grouping.vars,
grouping.fun, adjust.p, basis.set, pvalues.df, model.control,
.progressBar)
}
\arguments{
\item{modelList}{
a \code{list} of regressions representing the structural equation model.
}
\item{data}{
a \code{data.frame} used to construct the structured equations.
}
\item{corr.errors}{
a vector of variables with correlated errors (separated by "~~").
}
\item{add.vars}{
a vector of additional variables whose independence claims should be evaluated, but which do not appear in the model list.
}
\item{grouping.vars}{
an optional variable that represents the levels of data aggregation for a multi-level dataset.
}
\item{grouping.fun}{
a function defining how variables are aggregated in \code{grouping.vars}. Default is \code{mean}.
}
\item{adjust.p}{
whether p-values degrees of freedom should be adjusted (see below). Default is \code{FALSE}.
}
\item{basis.set}{
provide an optional basis set.
}
\item{pvalues.df}{
an optional \code{data.frame} corresponding to p-values for independence claims.
}
\item{model.control}{
a \code{list} of model control arguments to be passed to d-sep models.
}
\item{.progressBar}{
enable optional text progress bar. Default is \code{TRUE}.
}
}
\details{
This function calculates AIC and AICc (corrected for small sample sizes) values for a piecewise structural equation model (SEM).
For linear mixed effects models, p-values can be adjusted to accommodate the full model degrees of freedom using the argument \code{p.adjust = TRUE}. For more information, see Shipley 2013.
}
\value{
Returns a \code{data.frame} in which the first entry is the AIC score, the second is the AICc score, and the third is the likelihood degrees of freedom (K).
}
\references{
Shipley, Bill. "The AIC model selection method applied to path analytic models compared using a d-separation test." Ecology 94.3 (2013): 560-564.
}
\author{
Jon Lefcheck
}
\examples{
# Load example data
data(shipley2009)
# Reduce dataset for example
shipley2009.reduced = shipley2009[1:200, ]
# Load model packages
library(lme4)
library(nlme)
# Create list of models
shipley2009.reduced.modlist = list(
lme(DD ~ lat, random = ~1|site/tree, na.action = na.omit,
data = shipley2009.reduced),
lme(Date ~ DD, random = ~1|site/tree, na.action = na.omit,
data = shipley2009.reduced),
lme(Growth ~ Date, random = ~1|site/tree, na.action = na.omit,
data = shipley2009.reduced),
glmer(Live ~ Growth+(1|site)+(1|tree),
family=binomial(link = "logit"), data = shipley2009.reduced)
)
# Get AIC and AICc values for the SEM
sem.aic(shipley2009.reduced.modlist, shipley2009.reduced)
\dontrun{
# Repeat with full dataset as in Shipley (2009)
# Create list of models
shipley2009.modlist = list(
lme(DD ~ lat, random = ~1|site/tree, na.action = na.omit,
data = shipley2009),
lme(Date ~ DD, random = ~1|site/tree, na.action = na.omit,
data = shipley2009),
lme(Growth ~ Date, random = ~1|site/tree, na.action = na.omit,
data = shipley2009),
glmer(Live ~ Growth+(1|site)+(1|tree),
family=binomial(link = "logit"), data = shipley2009)
)
# Get AIC and AICc values for the SEM
sem.aic(shipley2009.modlist, shipley2009)
}
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.