blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
b6be7100038b03518ddb868b49ace14b99e1d993
|
3270487664d61509b5235184f2130c47d00d11ed
|
/R/humidity.R
|
6c5673b9229dce0125d36ab825869a6d6c1ea377
|
[] |
no_license
|
cran/meteor
|
97903459a6020dccc017a855a77348b44b094ce3
|
3b1fa5d21cd393674ec7d254e06ec97b9185c670
|
refs/heads/master
| 2023-07-22T08:29:54.369133
| 2023-07-16T18:00:02
| 2023-07-16T19:30:41
| 236,625,464
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,401
|
r
|
humidity.R
|
# Author: Robert J. Hijmans
# License GPL3
# Saturated vapor pressure (kPa) at air temperature `tmp` (degrees C),
# via a Magnus-type approximation (coefficients 7.5 / 237.7). Vectorized.
.saturatedVaporPressure <- function(tmp) {
  magnus_exponent <- 7.5 * tmp / (237.7 + tmp)
  0.611 * 10^magnus_exponent # kPa
}
# Vapor pressure deficit (kPa): saturated vapor pressure at `tmp` (C)
# minus the actual vapor pressure implied by relative humidity `rh` (%).
.vaporPressureDeficit <- function(tmp, rh) {
  saturation <- .saturatedVaporPressure(tmp)
  saturation * (1 - rh / 100)
}
# Estimate daily minimum and maximum relative humidity (%) from a daily
# RH value and the temperature extremes (C). The actual vapor pressure is
# computed at the mean temperature and held fixed; RH is re-evaluated at
# tmax (daily minimum RH) and at tmin (daily maximum RH). Temperatures
# are floored at -5 C. Returns a two-column matrix cbind(rhmn, rhmx),
# both columns clamped to [0, 100].
.rhMinMax <- function(rh, tmin, tmax) {
  tlo <- pmax(tmin, -5)
  thi <- pmax(tmax, -5)
  tmean <- (tlo + thi) / 2
  # actual vapor pressure from the daily mean conditions
  vp <- (rh / 100) * .saturatedVaporPressure(tmean)
  rhmn <- pmax(0, pmin(100, 100 * vp / .saturatedVaporPressure(thi)))
  rhmx <- pmax(0, pmin(100, 100 * vp / .saturatedVaporPressure(tlo)))
  cbind(rhmn, rhmx)
}
# Same computation as .rhMinMax, with the argument order (tmin, tmax, rhum):
# daily min/max relative humidity (%) from temperature extremes (C, floored
# at -5) and a daily RH value. Returns cbind(rhmn, rhmx) clamped to [0, 100].
.rhMinMax2 <- function(tmin, tmax, rhum) {
  lo <- pmax(tmin, -5)
  hi <- pmax(tmax, -5)
  # vapor pressure at the mean temperature, held constant over the day
  vap <- (rhum / 100) * .saturatedVaporPressure((lo + hi) / 2)
  clamp <- function(x) pmax(0, pmin(100, x))
  rhmn <- clamp(100 * vap / .saturatedVaporPressure(hi))
  rhmx <- clamp(100 * vap / .saturatedVaporPressure(lo))
  cbind(rhmn, rhmx)
}
# Hourly relative humidity (%) over one day, derived from a daily RH value,
# the temperature extremes (C, floored at -5), latitude and date.
# The actual vapor pressure is fixed at the daily mean temperature, then RH
# is recomputed against the saturated vapor pressure of each hourly
# temperature. Returns a vector of 24 values clamped to [0, 100].
.diurnalRH <- function(rh, tmin, tmax, lat, date) {
	tmin <- pmax(tmin, -5)
	tmax <- pmax(tmax, -5)
	tmp <- (tmin + tmax) / 2
	# actual vapor pressure from the daily mean conditions, held fixed
	vp <- .saturatedVaporPressure(tmp) * rh / 100
	# NOTE(review): "...diurnalTemp" is an unusual triple-dot name; it is
	# presumably a package-internal helper returning hourly temperatures —
	# verify the name is not a typo for ".diurnalTemp".
	hrtemp <- ...diurnalTemp(lat, date, tmin, tmax)
	hr <- 1:24
	es <- .saturatedVaporPressure(hrtemp[hr])
	rh <- 100*vp/es
	rh <- pmin(100, pmax(0, rh))
	return(rh)
}
# Dew point (C) from air temperature (C) and relative humidity (%), using
# the simple linear rule: dew point falls by 1 C for every 5 percentage
# points of RH below saturation.
.tDew <- function(temp, rh) {
  rh_deficit <- 100 - rh
  temp - rh_deficit / 5
}
# Convert degrees Fahrenheit to degrees Celsius.
.FtoC <- function(x) {
  (5/9) * (x - 32)
}
# Convert degrees Celsius to degrees Fahrenheit.
.CtoF <- function(x) {
  x * 9 / 5 + 32
}
# Mean atmospheric pressure (kPa) at altitude `alt` (m), from the
# standard-atmosphere barometric formula.
.atmp <- function(alt) {
  pressure_ratio <- (1 - 2.25577 * 10^-5 * alt)^5.25588
  101.325 * pressure_ratio # kPa
}
# Absolute humidity from relative humidity `rh` (%) and temperature `t` (C),
# by applying the ideal gas law to the actual vapor pressure.
# NOTE(review): .saturatedVaporPressure returns kPa; confirm the expected
# unit of the result against callers.
.rel2abshum <- function(rh, t) {
  M <- 18.02     # molar mass of water, g/mol
  R <- 8.314472  # universal gas constant
  kelvin <- t + 273.15
  # actual vapor pressure (kPa)
  ea <- .saturatedVaporPressure(t) * rh / 100
  ea * M / (kelvin * R)
}
# Relative humidity (%) from absolute humidity `hum` and temperature `t` (C).
# Inverse of .rel2abshum; the result is capped at 100 (not floored at 0).
.abs2rhumum <- function(hum, t) {
  M <- 18.02     # molar mass of water, g/mol
  R <- 8.314472  # universal gas constant
  kelvin <- t + 273.15
  # back out the actual vapor pressure via the ideal gas law
  ea <- hum / (M / (kelvin * R))
  rel <- 100 * ea / .saturatedVaporPressure(t)
  pmin(rel, 100)
}
# Specific humidity from relative humidity `rh` (%), temperature `t` (C)
# and altitude `alt` (m).
# FIX: the atmospheric pressure was previously always taken at sea level
# (.atmp(0)), silently ignoring the `alt` argument; the inverse function
# .spec2rhumum uses .atmp(alt), so round trips were inconsistent for
# alt != 0. Pressure is now evaluated at the given altitude.
.rel2spechum <- function(rh, t, alt) {
	es <- .saturatedVaporPressure(t)  # saturated vapor pressure (kPa)
	ea <- es * (rh / 100)             # actual vapor pressure (kPa)
	p <- .atmp(alt)                   # atmospheric pressure at altitude (kPa)
	0.62198*ea / (p - ea)             # 0.62198 = ratio of molar masses Mw/Md
}
# Relative humidity (%) from specific humidity `spec`, temperature `t` (C)
# and altitude `alt` (m). Inverse transformation of the rel->spec mapping,
# with pressure evaluated at the given altitude.
.spec2rhumum <- function(spec, t, alt) {
  pressure <- .atmp(alt)
  saturation <- .saturatedVaporPressure(t)
  100 * (spec * pressure) / ((0.62198 + spec) * saturation)
}
|
6275eff666018ea41b57dd6e67eef2f9c90ef3fc
|
f05d4533890ae6b4942790feabd7472cb144e95e
|
/man/SectionCount-class.Rd
|
4e55db590a0db758f759c40e58a610db486e97ec
|
[] |
no_license
|
pvrqualitasag/rqudocuhelper
|
cf891663ca0fac46a2b5530f76b7d2b542bba8c5
|
596e88c3915e413bebdb5b3c92bf16458f2c0b92
|
refs/heads/master
| 2021-01-21T13:30:08.989349
| 2016-05-26T15:00:39
| 2016-05-26T15:00:39
| 51,913,813
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,780
|
rd
|
SectionCount-class.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rqudocusectioncountrefclass.R
\docType{class}
\name{SectionCount-class}
\alias{SectionCount-class}
\alias{sectionCount}
\title{Reference Class for section counts}
\description{
A reference object of reference class \code{SectionCount}
represents the numbers in front of a section title.
}
\details{
The section title number counts the numbers of different
section at any given level up and until a given section
title. In a markdown (md) document section levels
of titles are denoted by hash (#) signs. Based on the
number of hash signs of a given section title, the level of
the corresponding section title can be inferred. The more
hash signs the lower the level of the section title. Hence
one hash means top-level section title, two hashes stand
for subsections, three hashes denote subsubsections, etc.
For a given section title the level determines the corresponding
number of the section title. For a top-level section there
is just one number, for a subsection there are two numbers
separated by a dot (.) and for subsubsections there are
three numbers all separated by dots. Each of the numbers
that are associated with a given section title count the
number of sections for a specific level up
and until that given section title.
}
\section{Fields}{
\describe{
\item{\code{vSectionCount}}{vector with section counts}
}}
\section{Methods}{
\describe{
\item{\code{incrSectionCounts()}}{Increment section counts based on number of hash signs}
\item{\code{initialize()}}{Initialize count fields and set default for count separator}
\item{\code{sGetSectionNumber()}}{Return section number as string, as soon as a count is zero
we stop pasting together. This assumes counts are 1-based.}
}}
|
2f812fea8808f8f0ce5725d3438f3ccdaa8c03c8
|
2a490a3d2140e977c6f462f573ebe63adab4d5f6
|
/deer_ABUND_random_effects.R
|
28d17c2fa5f6904333109e42c4f64934fd80bf11
|
[] |
no_license
|
robcrystalornelas/deer_ma
|
2d97cf0914a99cb40bb531306e837c0cb5e33cfd
|
68278599c65d9ac6b5a290ee65ffb20111361e06
|
refs/heads/master
| 2023-01-13T08:20:49.766192
| 2020-11-18T17:11:29
| 2020-11-18T17:11:29
| 129,975,179
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,923
|
r
|
deer_ABUND_random_effects.R
|
## Load Libraries ####
# Meta-analysis of deer impacts on abundance: computes SMD effect sizes,
# fits random- and mixed-effects models (metafor), and draws a forest plot.
library(metafor)
library(tidyverse)
library(ggplot2)
## Load data ####
# NOTE(review): absolute path to a local machine; the sourced script must
# define `abundance_raw_data`, which is used throughout below.
source(
  "~/Desktop/research/side_projects/Crystal-Ornelas_et_al_deer_meta/scripts/deer_ma/deer_source_data.R"
)
## Clean data ####
# Calculate effect sizes for each row of data
effect_sizes_abundance <-
  escalc(
    "SMD", # Specify the outcome that we are measuring, RD, RR, OR, SMD etc.
    m1i = abundance_raw_data$mean_t,
    n1i = abundance_raw_data$sample_size_t,
    # Follow with all of the columns needed to compute SMD
    sd1i = abundance_raw_data$SD_t,
    m2i = abundance_raw_data$mean_c,
    n2i = abundance_raw_data$sample_size_c,
    sd2i = abundance_raw_data$SD_c,
    data = abundance_raw_data
  )
# random effects model, assigning random effect to each row in database
effect_sizes_abundance$ID <- seq.int(nrow(effect_sizes_abundance))
random_effects_abundance_results <-
  rma(yi = effect_sizes_abundance$yi, # Outcome variable
      vi = effect_sizes_abundance$vi, # Variance
      method = "REML",
      weighted = TRUE) # REML is common estimator
random_effects_abundance_results
# Same model, but with a per-row random intercept via rma.mv
re_with_row_numbers <- rma.mv(yi, vi, random = ~ 1 |
                                ID, data = effect_sizes_abundance)
re_with_row_numbers
## Mixed effects meta-analytic model account for data coming from the same articles
mixed_effects_abundance <-
  rma.mv(yi, vi, random = ~ 1 |
           author, data = effect_sizes_abundance)
mixed_effects_abundance
# figures ####
# First, order by years
effect_sizes_abundance <- effect_sizes_abundance[order(effect_sizes_abundance$pub_year),]
# NOTE(review): View() opens an interactive data viewer; usually removed
# from non-interactive scripts.
View(effect_sizes_abundance)
effect_sizes_abundance$pub_year
plyr::count(effect_sizes_abundance$unique_id)
# First, get labels, so that we don't repeat farming systems
# Each study label is followed by strrep("", 1:k), i.e. k empty strings, so
# only the first row belonging to each study is labelled in the forest plot.
abundance_study_labels <- c(
  "DeGraaf, 1991",
  strrep("", 1:5),
  "McShea, 2000",
  strrep("", 1:2),
  "Berger, 2001",
  strrep("", 1:11),
  "Anderson, 2007",
  strrep("", 1:15),
  "Martin, 2008",
  strrep("", 1:12),
  "Martin, 2011",
  strrep("", 1:29),
  "Okuda, 2012",
  strrep("", 1:31),
  "Cardinal, 2012",
  "Tymkiw, 2013",
  strrep("", 1:26),
  "Graham, 2014",
  strrep("", 1:33),
  "Carpio, 2015",
  "Chollet, 2016",
  strrep("",1:16))
# Sanity checks: label count (192) should match the number of effect sizes,
# and the per-author counts should match the color-run lengths below.
length(abundance_study_labels)
plyr::count(effect_sizes_abundance$author)
# Forest plot: one point per effect size, colored by study.
forest(
  effect_sizes_abundance$yi,
  effect_sizes_abundance$vi,
  annotate = FALSE,
  xlab = "Hedge's g",
  slab = abundance_study_labels,
  ylim = c(-1,200),
  cex = 1.3,
  pch = 15,
  cex.lab = 1.3,
  col = c(
    rep('#a6cee3', 6),
    rep('#1f78b4', 3),
    rep('#cc6a70ff', 12),
    rep("#b2df8a", 16),
    rep('#33a02c', 13),
    rep('#fb9a99', 30),
    rep('#f9b641ff', 32),
    rep('#e31a1c', 1),
    rep ("#b15928", 27),
    rep ("#ff7f00", 34),
    rep ("#cab2d6", 1),
    rep ("#6a3d9a", 17)))
# Overall (mixed-effects) summary polygon below the individual estimates.
addpoly(mixed_effects_abundance, row = -4 , cex = 1.3,col ="#eb8055ff", annotate = TRUE, mlab = "Summary")
dev.off()
|
0e2796cbb635dceb5cfeaf6e8d9c6eb391554e1a
|
a4460da00ea395dbf706d8d308b83b4b99c2e5e3
|
/man/test_inventory.Rd
|
ad5ec035a4fe130a11361f1966d5c864e44928e3
|
[
"MIT"
] |
permissive
|
JDOsborne1/inventoRy
|
38d987fd975dcb5292a1c78f85b87b8fcf550b12
|
c48805e191815bdc877b9b2093d1e43e0df21274
|
refs/heads/master
| 2022-04-18T00:01:01.882009
| 2020-04-16T13:33:23
| 2020-04-16T13:33:23
| 255,642,885
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 344
|
rd
|
test_inventory.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/datasets.R
\docType{data}
\name{test_inventory}
\alias{test_inventory}
\title{A testing Dataset for the inventory}
\format{
An object of class \code{list} of length 1.
}
\usage{
test_inventory
}
\description{
A testing Dataset for the inventory
}
\keyword{datasets}
|
275e6314e734e2c3a7d272fa7bfa1df78da05ca1
|
23f90a78c345b64a5be77d3fef45481d686b1cba
|
/man/legco-package.Rd
|
a4e5c24db04d94f63cce0852d5f6703c4f93aae4
|
[
"MIT"
] |
permissive
|
elgarteo/legco
|
cdd3123ba389ed38358f5cf486a51d83f0fb4844
|
53ce2022d77eb0c674fc898fff2bf6d39c386455
|
refs/heads/master
| 2022-11-15T02:52:49.100951
| 2022-10-28T11:33:08
| 2022-10-28T11:33:08
| 190,210,032
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 5,647
|
rd
|
legco-package.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/legco-package.R
\docType{package}
\name{legco-package}
\alias{legco}
\alias{legco-package}
\title{legco: R bindings for the Hong Kong Legislative Council API}
\description{
Provides functions to fetch data from the Hong Kong Legislative Council API.
}
\section{Details}{
Most functions of this package correspond to the data
endpoints of the API. It is therefore necessary to understand the structure
of the API in order to extract the data needed. Please refer to the
vignettes for more details.
This package supports five databases of the LegCo API: \emph{Bills},
\emph{Hansard}, \emph{Meeting Attendance}, \emph{Meeting Schedule} and
\emph{Voting Result}. It is essential to understand what data these
databases store in order to utilise the API effectively. Please refer to
the vignettes and the API documentations for more details (links in ‘See
Also’).
}
\section{API Limits}{
The LegCo API does not have a specified rate limit, but
by experience the limit is approximately 1000 requests per IP per hour.
When the rate limit is reached, the server will return an empty json.
LegCo's API server also has a node count limit of 100 nodes per request,
which can be translated as 20 filtering conditions per request in most
cases in meaningful term. This package automatically blocks requests that
exceed the node count.
It is common for the connection to the LegCo API to experience SSL error
from time to time, especially during repeated requests. This can usually be
resolved simply by retrying. This package automatically retries the request
once when an SSL error occurs.
Another common problem is that the LegCo API sometimes returns an empty
json file when it is not supposed to. Again, this can usually be resolved
by retrying. This package automatically retries the request once to make
sure that an invalid search query or rate limit is not the cause of the
problem.
}
\section{Functions}{
Generic function: \itemize{\item\code{\link{legco_api}}:
Generic LegCo API}
Functions of the Bills database: \itemize{ \item \code{\link{all_bills}}:
All Bills discussed in LegCo }
Functions of the Meeting Attendance Database: \itemize{ \item
\code{\link{attendance}}: Attendance of members }
Functions of the Voting Result Database: \itemize{ \item
\code{\link{voting_record}}: Voting record in LegCo meetings }
Functions of the Hansard database: \itemize{ \item
\code{\link{hansard}}: Hansard files \item
\code{\link{legco_section_type}}: Section code \item
\code{\link{subjects}}: Subjects \item \code{\link{speakers}}: Speakers in the
council, including members, government officials and secretariat staff
\item \code{\link{rundown}}: Rundown (Paragraphs in hansard) \item
\code{\link{questions}}: Questions raised by members \item
\code{\link{bills}}: Bills \item \code{\link{motions}}: Motions \item
\code{\link{petitions}}: Petitions \item \code{\link{addresses}}: Addresses
made by members or government officials when presenting papers to the
Council \item \code{\link{statements}}: Statements made by government
officials \item \code{\link{voting_results}}: Results of votes in council
meetings \item \code{\link{summoning_bells}}: Instances of summoning bells
being rung }
Functions of the Meeting Schedule Database: \itemize{
\item \code{\link{term}}: LegCo terms \item \code{\link{session}}: LegCo
sessions \item \code{\link{committee}}: LegCo committees \item
\code{\link{membership}}: Membership of LegCo committees \item
\code{\link{member}}: LegCo members \item \code{\link{member_term}}: Terms
served by LegCo members \item \code{\link{meeting}}: Meetings of LegCo
committees \item \code{\link{meeting_committee}}: Committees of LegCo
meetings }
Complementary Functions: \itemize{ \item
\code{\link{search_committee}}: Search LegCo committees \item
\code{\link{search_member}}: Search LegCo members \item
\code{\link{search_voting_record}}: Search Voting Record in LegCo meetings
\item \code{\link{search_question}}: Search full text of question put to
the government by LegCo members}
}
\section{Notes}{
In addition to the standard function names, each function in
this package has a wrapper where the name is prefixed with \code{legco_}.
For example, both \code{speakers()} and \code{legco_speakers()} will return
the same result. This is because function names are taken from the data
endpoints provided by the API, which nonetheless are often not very
informative and could clash with functions in other packages (e.g.
\code{speakers()} is not a term unique to LegCo).
}
\section{Disclaimer}{
This package is not officially related to or endorsed by
the Legislative Council of Hong Kong.
The Legislative Council of Hong Kong is the copyright owner of data
retrieved from its open data API.
}
\seealso{
GitHub page: \url{https://github.com/elgarteo/legco/}
Online Vignettes: \url{https://elgarteo.github.io/legco/}
LegCo API Documentations \itemize{ \item Bills Database:
\url{https://www.legco.gov.hk/odata/english/billsdb.html} \item Hansard
Database: \url{https://www.legco.gov.hk/odata/english/hansard-db.html}
\item Meeting Attendance Database:
\url{https://www.legco.gov.hk/odata/english/attendance-db.html} \item
Meeting Schedule Database:
\url{https://www.legco.gov.hk/odata/english/schedule-db.html} \item Voting
Result Database: \url{https://www.legco.gov.hk/odata/english/vrdb.html} }
}
\author{
Elgar Teo (\email{elgarteo@connect.hku.hk})
}
\keyword{internal}
|
7c4e0e75007b1fe6f5f954f5754cdd56f7d9980e
|
050854230a7cead95b117237c43e1c8ff1bddcaa
|
/data-raw/WiDNR/do_parse.R
|
0f0482d93ef582d40286482ea60baa06ac7f3b40
|
[
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-public-domain-disclaimer"
] |
permissive
|
USGS-R/mda.lakes
|
7b829d347e711416cbadbf50f8ac52c20546e7bc
|
eba6ddfba4d52c74e7b09fb1222772630dfa7f30
|
refs/heads/main
| 2023-04-15T18:10:46.043228
| 2020-11-13T18:43:09
| 2020-11-13T18:43:09
| 7,429,212
| 1
| 11
| null | 2023-04-07T22:44:55
| 2013-01-03T19:50:59
|
R
|
UTF-8
|
R
| false
| false
| 1,535
|
r
|
do_parse.R
|
# Process the raw WiDNR Database output. Mostly just metadata and units cleanup
# Reads data-raw/WiDNR/temp_DO.csv, keeps dissolved-oxygen observations in
# mg/l (= ppm), merges the duplicated depth/unit columns, converts depths
# to meters, drops non-lake waterbodies and implausible DO values, and
# writes inst/supporting_files/doobs.obs.tsv.
Sys.setenv(tz='GMT')
d = read.csv('data-raw/WiDNR/temp_DO.csv', header=TRUE, as.is=TRUE)
d$date = as.POSIXct(d$START_DATETIME)
d$Dissolved.Oxygen.Units = tolower(d$Dissolved.Oxygen.Units)
d$UNIT_CODE = tolower(d$UNIT_CODE)
d$UNIT_CODE_1 = tolower(d$UNIT_CODE_1)
#set empty DO to NA
d$Dissolved.Oxygen[d$Dissolved.Oxygen==''] = NA
d$Dissolved.Oxygen = as.numeric(d$Dissolved.Oxygen)
#just want DO data as mg/l or ppm (same thing)
# FIX: the original used the scalar `||` operator inside subset(); `||`
# evaluates only the first element of a vector (and is an error on
# length > 1 inputs in R >= 4.3), so the filter kept either all rows or
# none. Use a vectorized membership test instead.
d = subset(d, Dissolved.Oxygen.Units %in% c('mg/l', 'ppm'))
#merge START_AMT and START_AMT_1
missing_start = is.na(d$START_AMT)
d$START_AMT[missing_start] = d$START_AMT_1[missing_start]
d$UNIT_CODE[missing_start] = d$UNIT_CODE_1[missing_start]
d$UNIT_CODE_1 = NULL
d$START_AMT_1 = NULL
#convert UNIT_CODE from FEET/FT to METERS/M (there are inches in there, but I don't trust them)
# NOTE(review): only the literal 'feet' is matched here; if the raw data
# also uses 'ft', those rows keep their original unit — verify.
old_units = d$UNIT_CODE == 'feet'
d$START_AMT[old_units] = d$START_AMT[old_units]* 0.3048
d$UNIT_CODE[old_units] = 'meters'
#drop the weird waterbody types
d = subset(d, Waterbody.Type != 'RIVER')
d = subset(d, Waterbody.Type != 'GRAVEL-PIT')
#cleanup header
tosave = d[,c('WBIC', 'date', 'START_AMT', 'Dissolved.Oxygen')]
names(tosave) = c('WBIC', 'date', 'depth', 'doobs_mg_l')
tosave = na.omit(tosave)
#drop impossibly high (and negative) DO values
tosave = tosave[tosave$doobs_mg_l < 20 & tosave$doobs_mg_l >= 0, ]
write.table(tosave, 'inst/supporting_files/doobs.obs.tsv', sep='\t', row.names=FALSE)
|
2837510c65205b20a7dfb5884cdb0fb30c6e7f1b
|
26c22484790669525fe639b0cb5bdd1ec9239840
|
/man/calc_catchment_attributes.Rd
|
a409a1432878b6e54fd5efc473d6f2602551322b
|
[
"MIT"
] |
permissive
|
MBaken/openSTARS
|
2895f750257aa2b6e34b31efbf758067084fb8d8
|
255405d2c043771852b1793b9e7c657a8ed3459e
|
refs/heads/master
| 2021-06-24T19:20:06.615085
| 2017-08-15T15:01:06
| 2017-08-15T15:44:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,084
|
rd
|
calc_catchment_attributes.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/calc_attributes_edges.R
\name{calc_catchment_attributes}
\alias{calc_catchment_attributes}
\title{calc_catchment_attributes
Aggregate attributes for the total catchment of each stream segment.}
\usage{
calc_catchment_attributes(dt, stat, attr_name, round_dig)
}
\arguments{
\item{dt}{data.table of stream topology and attributes per segment.}
\item{stat}{name or character vector giving the statistics to be calculated,
must be one of: min, max, mean, percent, sum.}
\item{attr_name}{name or character vector of column names for the attribute(s)
to be calculated.}
\item{round_dig}{integer; number of digits to round results to. Can be a vector
of different values or just one value for all attributes.}
}
\value{
Nothing. The function changes the values of the columns attr_name in dt.
}
\description{
This function aggregates the attributes of each segment for the total
catchment of each stream segment. It is called within \code{\link{calc_attributes_edges}}
and should not be called by the user.
}
|
641cf2a4ee27c5f9dbde03e11c553a02a3c3ea21
|
12886e35fd6c2216940935b82a3a7e701e60e594
|
/code/dist.R
|
1b7823cd9d58c87ff3de1707d9c5a7e7a31684c5
|
[] |
no_license
|
muschellij2/ich_detection_challenge
|
b7dbb94102fef42ba44c7ed484196e2076e33ca3
|
c94e1a3f045ee35a73b5eaec4093a373cfae5917
|
refs/heads/master
| 2020-08-02T01:11:47.290417
| 2019-11-13T18:46:35
| 2019-11-13T18:46:35
| 211,188,432
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,817
|
r
|
dist.R
|
# Create human mask
# Computes a signed distance image inside the head mask for each scan,
# intended to run as an SGE array job (one fold per task).
# NOTE(review): rm(list = ls()) clears the workspace and setwd() changes
# the working directory — acceptable in a standalone cluster script, but
# do not source this from an interactive session.
rm(list = ls())
library(ANTsRCore)
library(neurobase)
library(lungct)
library(ichseg)
library(dplyr)
library(fslr)
library(extrantsr)
setwd(here::here())
# Rcpp::sourceCpp("code/dist_min.cpp")
# Hausdorff distance between two binary segmentation images.
# Each image's Maurer distance transform is masked by the other
# segmentation; the largest absolute masked distance in either direction
# is returned.
hausdorffDistance <- function(binarySeg1, binarySeg2) {
  seg1 <- check_ants(binarySeg1)
  seg2 <- check_ants(binarySeg2)
  dist_1_to_2 <- iMath(seg1, "MaurerDistance") * seg2
  dist_2_to_1 <- iMath(seg2, "MaurerDistance") * seg1
  max(max(abs(dist_1_to_2)), max(abs(dist_2_to_1)))
}
# Select the input table for the requested challenge stage.
stage_number = 1
pre = ifelse(stage_number == 1, "", "stage2_")
n_folds = 200
df = readr::read_rds(paste0(pre, "wide_headers_with_folds.rds"))
# all_df = df
# df = all_df
# One row per output file; dist_file is where the distance image is written.
df = df %>%
  select(outfile, index, scan_id, fold, maskfile, ss_file) %>%
  mutate(dist_file = file.path("dist", basename(outfile))) %>%
  distinct()
# 7646
# ID_02c48e85-ID_bd2131d216
# Fold index comes from the SGE array-job task id; fall back to 155 when
# run outside the scheduler (e.g. interactively).
ifold = as.numeric(Sys.getenv("SGE_TASK_ID"))
if (is.na(ifold)) {
  ifold = 155
}
df = df[ df$fold == ifold,]
uids = unique(df$index)
iid = uids[1]
for (iid in uids) {
  print(iid)
  run_df = df[ df$index == iid, ]
  outfile = unique(run_df$outfile)
  ofile = run_df$dist_file[1]
  # Skip scans whose distance image already exists (resumable job).
  if (!file.exists(ofile)) {
    ss_file = unique(run_df$ss_file)
    maskfile = unique(run_df$maskfile)
    out_maskfile = sub("[.]nii", "_Mask.nii", ss_file)
    fill_size = 5
    # Morphologically fill the brain mask before the distance transform.
    filled = filler(out_maskfile, fill_size = fill_size)
    # NOTE(review): `oMath` — verify this is not a typo for `iMath`
    # (used elsewhere in this file for "MaurerDistance").
    res = oMath(filled, "MaurerDistance")
    mask = readnii(out_maskfile)
    # Negate so values are positive inside the mask, then restrict to it.
    result = mask_img(res * -1, mask)
    write_nifti(result, ofile)
  }
  #
  # ero = filler(filled, fill_size = 1, dilate = FALSE)
  # surf = filled - ero
  #
  # rm(ero)
  #
  # vdim = voxdim(surf)
  # all_ind = t(which(filled > 0, arr.ind = TRUE))
  # all_ind = all_ind * vdim
  # surf_ind = t(which(surf > 0, arr.ind = TRUE))
  # surf_ind = surf_ind * vdim
  #
  # rm(surf)
  # # rm(filled)
  # gc()
  #
  # # all_ind = matrix(rnorm(3e5*3), nrow = 3)
  # # surf_ind = matrix(rnorm(1e4*3), nrow = 3)
  #
  # s2 = colSums(surf_ind^2)
  # y2 = colSums(all_ind^2)
  #
  # # 12gb
  # n_gb = 2
  # n_gb = n_gb * 1024^3
  # chunk_size = ceiling(n_gb / 8 / ncol(surf_ind))
  # chunks = rep(1:ceiling(ncol(all_ind)/chunk_size), each = chunk_size)
  # chunks = chunks[1:ncol(all_ind)]
  # d = rep(NA, length = ncol(all_ind))
  # ichunk = 1
  # for (ichunk in 1:chunk_size) {
  #   print(ichunk)
  #   ind = which(chunks == ichunk)
  #   x = t(all_ind[,ind])
  #   yy = y2[ind]
  #   # -2xy
  #   xy = -2 * (x %*% surf_ind)
  #   # y^2 - 2xy
  #   xy = xy + yy
  #   # y^2 - 2xy + x^2
  #   xy = t(xy) + s2
  #   res = matrixStats::colMins(xy)
  #   rm(xy)
  #   res = round(res, digits = 5)
  #   d[ind] = res
  #   rm(ind);
  # }
  # dimg = remake_img(vec = d, img = filled, mask = filled)
  #
}
|
f60c383de2aeb88388a0757eb6f4554ded913dce
|
092e6cb5e99b3dfbb089696b748c819f98fc861c
|
/scripts/doSimulateLDS.R
|
78adc5fccf7b97ab91ecfb5bb6ad8c6498dadc58
|
[] |
no_license
|
joacorapela/kalmanFilter
|
522c1fbd85301871cc88101a9591dea5a2e9bc49
|
c0fb1a454ab9d9f9a238fa65b28c5f6150e1c1cd
|
refs/heads/master
| 2023-04-16T09:03:35.683914
| 2023-04-10T16:36:32
| 2023-04-10T16:36:32
| 242,138,106
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,000
|
r
|
doSimulateLDS.R
|
# Dependencies: interactive plotting (plotly/htmlwidgets), INI config
# parsing (ini), and the LDS simulator sourced from the package src dir.
# NOTE(review): require() only warns when a package is missing; library()
# would fail loudly — consider switching.
require(plotly)
require(ini)
require(htmlwidgets)
source("../src/simulateLDS.R")
# Simulate a linear dynamical system (LDS) from an INI configuration and
# save results, metadata and figures.
#
# Reads data/<nnnnnnnn>_simulation_metaData.ini (simConfigNumber = 5),
# draws a fresh random results number (retrying until the corresponding
# results file does not yet exist), runs simulateLDS(), and writes:
#   results/<n>_simulation.RData  — simulation results + parameters
#   results/<n>_simulation.ini    — metadata (source config number)
#   figures/<n>_simulation.png / .html — latent and observed trajectories
#
# FIX: removed two leftover browser() calls, which halted execution at a
# debugger prompt in interactive sessions.
processAll <- function() {
    simConfigNumber <- 5
    xlab <- "x"
    ylab <- "y"
    simConfigFilenamePattern <- "data/%08d_simulation_metaData.ini"
    simResFilenamePattern <- "results/%08d_simulation.RData"
    simResMetaDataFilenamePattern <- "results/%08d_simulation.ini"
    simFigFilenamePattern <- "figures/%08d_simulation.%s"
    simConfigFilename <- sprintf(simConfigFilenamePattern, simConfigNumber)
    simConfig <- read.ini(simConfigFilename)
    # Draw a results number that does not collide with an existing file.
    exit <- FALSE
    while(!exit) {
        simResNumber <- sample(1e8, 1)
        simFilename <- sprintf(simResFilenamePattern, simResNumber)
        if(!file.exists(simFilename)) {
            exit <- TRUE
        }
    }
    simResMetaDataFilename <- sprintf(simResMetaDataFilenamePattern, simResNumber)
    show(sprintf("Simulation results in: %s", simFilename))
    # NOTE(review): parameters are read with eval(parse(text = ...)), i.e.
    # the INI file is executed as R code — only use trusted config files.
    # sampling rate
    sRate <- as.double(simConfig$control_variables$sRate)
    dt <- 1/sRate
    N <- as.numeric(simConfig$control_variables$N)
    # state transition (discretized: B_discrete = I + dt * B_continuous)
    Btmp <- eval(parse(text=simConfig$state_variables$B))
    B <- dt*Btmp + diag(nrow(Btmp))
    # state noise covariance
    Q <- eval(parse(text=simConfig$state_variables$Q))
    # initial state mean
    m0 <- eval(parse(text=simConfig$initial_state_variables$m0))
    # initial state covariance
    V0 <- eval(parse(text=simConfig$initial_state_variables$V0))
    # state-measurement transfer
    Z <- eval(parse(text=simConfig$measurements_variables$Z))
    # measurements noise covariance
    R <- eval(parse(text=simConfig$measurements_variables$R))
    res <- simulateLDS(N=N, B=B, Q=Q, m0=m0, V0=V0, Z=Z, R=R)
    # Bundle simulated trajectories with the parameters that produced them.
    simRes <- c(res, list(B=B, Q=Q, m0=m0, V0=V0, Z=Z, R=R))
    save(simRes, file=simFilename)
    metaData <- list()
    metaData[["simulation_info"]] <- list(simConfigNumber=simConfigNumber)
    write.ini(x=metaData, filepath=simResMetaDataFilename)
    # Plot latent states and measurements as two hover-annotated traces.
    hoverTextLatents <- sprintf("sample %d, x %.02f, y %.02f", 1:N, res$x[1,], res$x[2,])
    hoverTextObservations <- sprintf("sample %d, x %.02f, y %.02f", 1:N, res$y[1,], res$y[2,])
    df <- data.frame(t(cbind(res$x, res$y)))
    df <- cbind(df, c(rep("latent", N), rep("measurement", N)))
    df <- cbind(df, c(hoverTextLatents, hoverTextObservations))
    colnames(df) <- c("x", "y", "type", "hoverText")
    fig <- plot_ly(data=df, type="scatter", mode="lines+markers")
    fig <- fig %>% add_trace(x=~x, y=~y, text=~hoverText, color=~type, hoverinfo="text")
    fig <- fig %>% add_annotations(x=c(res$x[1,1], res$x[1,N]), y=c(res$x[2,1], res$x[2,N]), text=c("start", "end"))
    simPNGFilename <- sprintf(simFigFilenamePattern, simResNumber, "png")
    simHTMLFilename <- sprintf(simFigFilenamePattern, simResNumber, "html")
    orca(p=fig, file=simPNGFilename)
    # saveWidget requires an absolute path; normalize the directory part.
    saveWidget(widget=fig, file=file.path(normalizePath(dirname(simHTMLFilename)),basename(simHTMLFilename)))
    print(fig)
}
processAll()
|
cb7e1f24a682965a2d81933adeff757f79ec949c
|
3b049264791dc77e30f691c87b34c2c1f8f8c9bc
|
/Rprofile
|
297a569af5a7fc3eba01d99916c58fbe90cf922a
|
[] |
no_license
|
mdlerch/dotfiles
|
a53a370aeb540656a652e6a51e3055c2ea4c0a96
|
49d2352eebd828e9f2db17fe7763480a42f7dd1e
|
refs/heads/master
| 2020-12-24T14:46:10.596875
| 2016-01-31T18:51:32
| 2016-01-31T18:52:38
| 2,833,260
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,053
|
Rprofile
|
# ~/.Rprofile: interactive-session configuration and convenience helpers.

# Load editor-integration packages only in an interactive terminal session
# (TERM is empty in non-terminal sessions).
# FIX: use the scalar, short-circuiting `&&` rather than the vectorized
# `&` in this scalar `if` condition.
if (interactive() && Sys.getenv("TERM") != "") {
    # options(nvimcom.verbose = 0)
    options(nvimcom.verbose = 0)
    # options(vimcom.vimpager = FALSE)
    library(nvimcom)
    # library(colorout)
    library(rlerch)
    # options(pager = "vimrpager")
    #if (Sys.getenv("VIM_PANE") != "")
    #{
    #    options(help_type = "text", pager = vim.pager)
    #}
}
# library(grDevices)
# X11.options(type="nbcairo")

# Default CRAN mirror.
local({r <- getOption("repos"); r["CRAN"] <- "http://cran.fhcrc.org/"; options(repos = r)})
# Text-based menus. FIX: spell out FALSE rather than the reassignable F.
options(menu.graphics = FALSE)
options(continue = "++ ")
# complete library names
utils::rc.settings(ipck = TRUE)

# Shell-style aliases and development helpers.
cd <- setwd
pwd <- getwd
h <- utils::head
man <- utils::help
l <- base::list
less <- function() options(pager = "less")
create <- function(...) devtools::create(..., rstudio = FALSE)
updatevimcom <- function() devtools::install_github("jalvesaq/nvimcom")
myvimcom <- function(branch="master") devtools::install_bitbucket("mdlerch/nvimcom", branch)
updaterlerch <- function() devtools::install_github("mdlerch/rlerch")
# vim:ft=r
|
|
b87adc5d74a5af6dcadb4781c9024f77a1f6cee5
|
a7f245ce1c93426dfda2c3d85922fa28645b190e
|
/R/new-benchmark.R
|
0585b1aea7e966ab1fc450c1aaaf3d80d46adba6
|
[
"MIT"
] |
permissive
|
labordynamicsinstitute/benchmarks
|
dbd39d1a6b8812d6b5ed915cd9d6cf6c2fd92e6f
|
0566079baef119eaead0420ba28dbb28d872576f
|
refs/heads/master
| 2023-06-12T06:05:12.935108
| 2023-06-06T02:35:24
| 2023-06-06T02:35:24
| 23,373,796
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,049
|
r
|
new-benchmark.R
|
# Benchmark three ways of computing OLS regression coefficients:
# lm(), the normal-equations pseudoinverse, and solving the linear system.
# https://www.alexejgossmann.com/benchmarking_r/
library(rbenchmark)
# Number of random draws generated per replication.
# NOTE(review): matrix(rnorm(size), 100, 10) keeps only the first
# 100 * 10 = 1000 of the `size` draws, so rnorm(size) dominates every
# variant's timing — confirm this is intended.
size = 1000000
benchmark("lm" = {
  X <- matrix(rnorm(size), 100, 10)
  y <- X %*% sample(1:10, 10) + rnorm(100)
  b <- lm(y ~ X + 0)$coef
},
"pseudoinverse" = {
  X <- matrix(rnorm(size), 100, 10)
  y <- X %*% sample(1:10, 10) + rnorm(100)
  b <- solve(t(X) %*% X) %*% t(X) %*% y
},
"linear system" = {
  X <- matrix(rnorm(size), 100, 10)
  y <- X %*% sample(1:10, 10) + rnorm(100)
  b <- solve(t(X) %*% X, t(X) %*% y)
},
replications = 1000,
columns = c("test", "replications", "elapsed",
            "relative", "user.self", "sys.self"))
# Example output from a previous run:
#          test replications elapsed relative user.self sys.self
# 3 linear system        1000   0.167    1.000     0.208    0.240
# 1            lm        1000   0.930    5.569     0.952    0.212
# 2 pseudoinverse        1000   0.240    1.437     0.332    0.612
|
08faf77fbd027c2405f98c0a0a1e04b7b5d3e50c
|
233ef600be69735d3054fda4a1f89da72fe5c3e6
|
/ui-WEBAPP.R
|
e83a2814efd2f445c968f8164f982b1ceb3fc46d
|
[] |
no_license
|
praveenmec67/TimeSeriesForecasting---AutoARIMA
|
0a19d06cfb6d73ea517b767ad38d2d2f1e7d0a91
|
df75b4c0d1feac66a656214dd7190a4a1a46b504
|
refs/heads/master
| 2022-04-08T15:23:31.543846
| 2020-03-03T18:21:42
| 2020-03-03T18:21:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,459
|
r
|
ui-WEBAPP.R
|
#
# This is the user-interface definition of a Shiny web application. You can
# run the application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
#    http://shiny.rstudio.com/
#

# FIX: removed install.packages('rsconnect') from the top of the script —
# it attempted a package installation on every app launch/deployment (and
# rsconnect is never used here). Install it once, interactively, instead:
# install.packages('rsconnect')
library(shiny)
library(shinydashboard)
library(plotly)
library(forecast)

# Dashboard title: logo image plus text.
title=tags$a(tags$img(src="logo.png",height=50,width=60),"CPGRAMS Dashboard")
# Default dataset shipped with the app.
df = read.csv("DataSet-MonthwiseReceiptsDisposal.csv")

# Define UI: sidebar with file upload / category selection, body with the
# forecasting controls and the result table.
shinyUI(
  dashboardPage(skin = "black",
    dashboardHeader(title = title),
    dashboardSidebar(sidebarMenu(menuItem("Forecasting", tabName = "Forecasting"),
      fileInput(inputId ="file",label="choose your file"),
      selectInput(inputId="category",label="category", choices=c("Month","Department")
      ),
      submitButton("Update View", icon("refresh")
      ),actionButton("goButton", "Go!")
      )
    ),
    # NOTE(review): `namess` is not defined in this file; it must be created
    # in the global environment (e.g. global.R) before the UI is built —
    # verify, otherwise the app fails at startup.
    dashboardBody(tabItem(tabName = "Forecasting",selectInput(inputId = "model", label = "Choose Your Deparment/Month",choices =namess),
      numericInput(inputId="number", label ="months", value=4, min = 1, max = 100, step = 1
      )),dataTableOutput("out1"))
  )
)
|
64572047c221984a8cc2dac590ce23cfe01986eb
|
5ac5920bc54c456669b9c1c1d21ce5d6221e27eb
|
/facebook/delphiFacebook/man/filter_responses.Rd
|
1f88b3cc63bbf280becacd71b923c80298daf885
|
[
"MIT"
] |
permissive
|
alexcoda/covidcast-indicators
|
50e646efba61fbfe14fd2e78c6cf4ffb1b9f1cf0
|
0c0ca18f38892c850565edf8bed9d2acaf234354
|
refs/heads/main
| 2023-08-13T04:26:36.413280
| 2021-09-16T18:16:08
| 2021-09-16T18:16:08
| 401,882,787
| 0
| 0
|
MIT
| 2021-09-01T00:41:47
| 2021-09-01T00:41:46
| null |
UTF-8
|
R
| false
| true
| 451
|
rd
|
filter_responses.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/responses.R
\name{filter_responses}
\alias{filter_responses}
\title{Filter responses for privacy and validity}
\usage{
filter_responses(input_data, params)
}
\arguments{
\item{input_data}{data frame containing response data}
\item{params}{named list containing values "static_dir", "start_time", and
"end_time"}
}
\description{
Filter responses for privacy and validity
}
|
5be353966b446921a0f0ca4905150fe7d456dd55
|
3c939f5d5a694042ce7f39a4adfa00faa3386b94
|
/man/gender.Rd
|
48c1f6cc029dc21197b24bfd43aee160840ab43e
|
[
"MIT"
] |
permissive
|
rslepoy/gender
|
555d18bc5722acb4cf5e30241d4f246400f95ca7
|
bda32131fa641fb9aa9492295ebc654255904353
|
refs/heads/master
| 2021-01-18T08:06:19.321265
| 2014-07-01T20:35:24
| 2014-07-01T20:35:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,889
|
rd
|
gender.Rd
|
% Generated by roxygen2 (4.0.1): do not edit by hand
\docType{package}
\name{gender}
\alias{gender}
\alias{gender-package}
\title{Gender: find gender by name and date}
\usage{
gender(data, years = c(1932, 2012), method = "ssa", certainty = TRUE)
}
\arguments{
\item{data}{A character string of a first name or a data frame with a column
named \code{name} with a character vector containing first names. The names
must all be lowercase.}
\item{years}{This argument can be either a single year, a range of years in
the form \code{c(1880, 1900)}, or the value \code{TRUE}. If no value is
specified, then for the \code{ssa} method it will use the period 1932 to 2012
and for the \code{ipums} method it will use the period 1789 to 1930. If
a year or range of years is specified, then the names will be looked up for
that period. If the value is \code{TRUE}, then the function will look for
a column in the data frame named \code{year} containing an integer vector
of the year of birth associated with each name. This permits you to do a
precise lookup for each person in your data set. Valid dates in the columns
will depend on the method used to determine the gender; if earlier or later
dates are included in a column in the data frame, they will not be matched.}
\item{method}{This value determines the data set that is used to predict the
gender of the name. The \code{"ssa"} method looks up names based from the U.S.
Social Security Administration baby name data. (This method is based on an
implementation by Cameron Blevins.) The \code{"ipums"} method looks up names
from the U.S. Census data in the Integrated Public Use Microdata Series. (This
method was contributed by Benjamin Schmidt.) The \code{"kantrowitz"} method,
in which case the function uses the Kantrowitz corpus of male and female names.}
\item{certainty}{A boolean value, which determines whether or not to return
the proportion of male and female uses of names in addition to determining
the gender of names.}
}
\description{
Gender: find gender by name and date
This function looks up the gender of either a single first name or of a
column of first names in a data frame. Optionally it can take a year, a
range of years, or a column of years in the data frame to take into account
variation in the use of names over time. It can determine the likely gender
of a name from several different data sets.
}
\details{
Encodes gender based on names and dates of birth, using U.S. Census or Social
Security data sets.
}
\examples{
library(dplyr)
gender("madison")
gender("madison", years = c(1900, 1985))
gender("madison", years = 1985)
gender(sample_names_data)
gender(sample_names_data, years = TRUE)
gender(sample_names_data, certainty = FALSE)
gender(sample_names_data, method = "ipums", years = TRUE)
gender(sample_names_data, method = "kantrowitz")
}
\author{
\email{lincoln@lincolnmullen.com}
}
\keyword{gender}
|
c6f02e8c6236ca25a50b11767c3cfd87dfbe4511
|
c38d46f9d9730ee94b67c8faaeefd1da86113116
|
/R/zzz_DataPrep_WHO.R
|
e0d0b1a9bfdd547c727e03a4dc99336cb1d12435
|
[] |
no_license
|
timriffe/GlobalViolence
|
bb7ea094bc4eb65ce18a5b69d00f6b34c499637e
|
9082445b36f9a7f85df6a2e7c4c38f9438813108
|
refs/heads/master
| 2023-02-21T22:26:39.052071
| 2023-02-04T09:43:10
| 2023-02-04T09:43:10
| 169,438,288
| 3
| 4
| null | 2021-11-10T09:39:48
| 2019-02-06T16:34:53
|
R
|
UTF-8
|
R
| false
| false
| 10,115
|
r
|
zzz_DataPrep_WHO.R
|
# Author: tim
# WARNING, this script in progress. May crash your memory
###############################################################################
# step 1, for each age format, get an Age, AgeInterval column made.
# group infant deaths if necessary (not sure).
me <- system("whoami",intern=TRUE)
# change this as needed
if (me == "tim"){
setwd("/home/tim/git/GlobalViolence/GlobalViolence")
}
library(data.table)
who.folder <- file.path("Data","Inputs","WHO")
# output direwctory for grouped data
dir.create(file.path("Data","Grouped","WHO"), showWarnings = FALSE, recursive = TRUE)
# Read the raw WHO mortality data from disk and apply first-pass cleaning.
# Returns a data.table in which the 26 death-count columns are renamed to
# age labels (9999 = all ages total, 999 = unknown age), infant-death and
# format columns are dropped, and redundant Brazilian regional subtotals
# (Admin1 codes 901/902 for country 2070) are removed so deaths are not
# double counted.
readWHO_1 <- function(){
WHO <- local(get(load(file.path(who.folder,"WHO.Rdata"))))
setnames(WHO, paste0("Deaths",1:26),as.character(c(9999,0:5,seq(10,95,by=5),999)))
# 9999 for total, and 999 for unk Age
WHO[,c("IM_Deaths1","IM_Deaths2","IM_Deaths3","IM_Deaths4","IM_Frmat","Frmat"):=NULL];gc()
# Brasil filter: Admin1 901/902 are regional aggregates within Brazil
# (country 2070) that duplicate the national counts
ind1 <- !is.na(WHO$Admin1) & WHO$Admin1 == "901" & WHO$Country == "2070" ;gc()
ind2 <- !is.na(WHO$Admin1) & WHO$Admin1 == "902" & WHO$Country == "2070" ;gc()
# so, we can remove ind1 and ind2
keep <- !(ind1 | ind2)
WHO <- WHO[keep,];gc()
WHO
}
# Reshape a wide WHO chunk (one age column per age group) to long format:
# one row per Country/Year/List/Cause/Sex/Age with a single Deaths column.
# Age comes back from melt() as a factor, so it is converted via character
# to integer (a direct as.integer() on a factor would return level codes,
# not the age labels).
WHO_2_Long <- function(WHOchunk){
WHOL<- melt(WHOchunk, id.vars = c("Country", "Year", "List", "Cause", "Sex"),
measure.vars = as.character(c(9999,0:5,seq(10,95,by=5),999)),
variable.name = "Age",
value.name = "Deaths");gc()
WHOL[,Age := as.character(Age)]
WHOL[,Age := as.integer(Age)]
WHOL
}
# There are 3 WHO files we need to deal with
# here the first one, in several chunks, here chunk 1
# TO BE USED FOR 3-DIGIT CODES, 4-DIGIT CODES TO BE REDUCED TO 3
# strict homicide: x85-y09
h3 <- c(paste0("X", sprintf("%02d", 85:99)), paste0("Y", sprintf("%02d", 0:9)))
# suspicious external
y3 <- paste0("Y", sprintf("%02d", 20:30))
# police & war
w3 <- paste0("Y", sprintf("%02d", 35:36))
# Aggregate one Country/Year/Sex/Age chunk of deaths into the total (D)
# plus three cause groups defined by 3-digit ICD-10 code sets:
#   Dh = strict homicide (h3), Dw = police operations / war (w3),
#   Dy = suspicious external causes (y3).
grouph3 <- function(.SD,h3,w3,y3){
  tally <- function(codes) sum(.SD$Deaths[.SD$Cause %in% codes])
  data.frame(
    D  = sum(.SD$Deaths),
    Dh = tally(h3),
    Dw = tally(w3),
    Dy = tally(y3)
  )
}
# -------------------------------------------------------
# 1)
WHO <- readWHO_1()
# list 104, Males, years 1988 to 2005
WHO_1 <- WHO[List == "104" & Sex == 1 & Year < 2006];rm(WHO);gc()
# remove unneeded columns for this chunk
WHO_1[,c("Admin1","SubDiv"):=NULL];gc()
# now to long
WHO_1 <- WHO_2_Long(WHO_1)
# cut to first 3 characters:
WHO_1[,Cause := substr(Cause, 1, 3)];gc()
# regroup deaths
WHO_1[,
Deaths := sum(Deaths),
by = .(Country, Year, Cause, Sex, Age)];gc()
# and create new group columns
WHO_1 <- WHO_1[,grouph3(.SD,h3,w3,y3),
by = .(Country, Year, Sex, Age)];gc()
save(WHO_1, file=file.path("Data","Grouped","WHO","WHO_1.Rdata"))
rm(WHO_1);gc()
# ----------------#
# WHO chunk 2: #
# ----------------#
WHO <- readWHO_1()
WHO_2 <- WHO[List == "104" & Sex == 1 & Year >= 2006];rm(WHO);gc()
WHO_2[,c("Admin1","SubDiv"):=NULL];gc()
# now to long
WHO_2 <- WHO_2_Long(WHO_2)
WHO_2[,Cause := substr(Cause, 1, 3)];gc()
WHO_2[,
Deaths := sum(Deaths),
by = .(Country, Year, Cause, Sex, Age)];gc()
WHO_2 <- WHO_2[,grouph3(.SD,h3,w3,y3),
by = .(Country, Year, Sex, Age)];gc()
save(WHO_2, file=file.path("Data","Grouped","WHO","WHO_2.Rdata"))
rm(WHO_2);gc()
# ----------------#
# WHO chunk 3: #
# ----------------#
WHO <- readWHO_1()
WHO_3 <- WHO[List == "104" & Sex == 2 & Year < 2006];rm(WHO);gc()
WHO_3[,c("Admin1","SubDiv"):=NULL];gc()
# now to long
WHO_3 <- WHO_2_Long(WHO_3)
WHO_3[,Cause := substr(Cause, 1, 3)];gc()
WHO_3[,
Deaths := sum(Deaths),
by = .(Country, Year, Cause, Sex, Age)];gc()
WHO_3 <- WHO_3[,grouph3(.SD,h3,w3,y3),
by = .(Country, Year, Sex, Age)];gc()
save(WHO_3, file=file.path("Data","Grouped","WHO","WHO_3.Rdata"))
rm(WHO_3);gc()
# ----------------#
# WHO chunk 4:    #
# ----------------#
# List 104, females (Sex == 2), 2006 onward. Causes are 4-digit codes
# reduced to 3 digits before regrouping into D/Dh/Dw/Dy.
WHO <- readWHO_1()
WHO_4 <- WHO[List == "104" & Sex == 2 & Year >= 2006];rm(WHO);gc()
WHO_4[,c("Admin1","SubDiv"):=NULL];gc()
# now to long
WHO_4 <- WHO_2_Long(WHO_4)
WHO_4[,Cause := substr(Cause, 1, 3)];gc()
WHO_4[,
Deaths := sum(Deaths),
by = .(Country, Year, Cause, Sex, Age)];gc()
WHO_4 <- WHO_4[,grouph3(.SD,h3,w3,y3),
by = .(Country, Year, Sex, Age)];gc()
save(WHO_4, file=file.path("Data","Grouped","WHO","WHO_4.Rdata"))
rm(WHO_4);gc()
# -----------------------------------------
# Now the chunks that already come in 3 digit codes
WHO <- readWHO_1()
WHO_5 <- WHO[List == "103" & Sex == 1];rm(WHO);gc()
# need to spot check, seems ok
WHO_5[,c("Admin1","SubDiv"):=NULL];gc()
# now to long
WHO_5 <- WHO_2_Long(WHO_5)
WHO_5 <- WHO_5[,grouph3(.SD,h3,w3,y3),
by = .(Country, Year, Sex, Age)];gc()
save(WHO_5, file=file.path("Data","Grouped","WHO","WHO_5.Rdata"))
rm(WHO_5);gc()
# again for females
WHO <- readWHO_1()
WHO_6 <- WHO[List == "103" & Sex == 2];rm(WHO);gc()
# need to spot check, seems ok
WHO_6[,c("Admin1","SubDiv"):=NULL];gc()
# now to long
WHO_6 <- WHO_2_Long(WHO_6)
WHO_6 <- WHO_6[,grouph3(.SD,h3,w3,y3),
by = .(Country, Year, Sex, Age)];gc()
save(WHO_6, file=file.path("Data","Grouped","WHO","WHO_6.Rdata"))
rm(WHO_6);gc()
# -------------------------------------
# Portugal special years
WHO <- readWHO_1()
WHO_7 <- WHO[List == "UE1"];rm(WHO);gc()
"UE64" # is h3
"UE65" # is y3 approx(Y10-Y34) instead of our Y20-Y30
# w3 is 0s
WHO_7[,c("Admin1","SubDiv"):=NULL];gc()
WHO_7 <- WHO_2_Long(WHO_7)
# Group deaths for the Portuguese "UE1" cause list: UE64 maps to strict
# homicide (Dh); UE65 approximates the suspicious-external group (Dy,
# roughly Y10-Y34 rather than the Y20-Y30 used elsewhere); the list has
# no police/war category, so Dw is fixed at 0.
groupUE1 <- function(.SD){
  deaths <- .SD$Deaths
  data.frame(
    D  = sum(deaths),
    Dh = sum(deaths[.SD$Cause %in% "UE64"]),
    Dw = 0,
    Dy = sum(deaths[.SD$Cause %in% "UE65"])
  )
}
WHO_7 <- WHO_7[,groupUE1(.SD),
by = .(Country, Year, Sex, Age)];gc()
save(WHO_7, file=file.path("Data","Grouped","WHO","WHO_7.Rdata"))
rm(WHO_7);gc()
# -------------------------------------
WHO <- readWHO_1()
WHO_8 <- WHO[List == "101"];rm(WHO);gc()
# D "1000" use because there are redundant groupings
# Dh "1102"
# 1103 is also larger than Dy + Dw....
# Dy "1103", but much too inclusive set to NA
# Dw set to NA
WHO_8[,c("Admin1","SubDiv"):=NULL];gc()
WHO_8 <- WHO_2_Long(WHO_8)
# Group deaths for cause list 101. Code "1000" is the all-cause total
# (summing every row would double count because the list contains
# overlapping groupings) and "1102" is homicide. The list has no usable
# equivalent of the police/war or suspicious-external groups ("1103" is
# broader than both combined), so Dw and Dy are set to NA.
group101 <- function(.SD){
  deaths <- .SD$Deaths
  data.frame(
    D  = sum(deaths[.SD$Cause %in% "1000"]),
    Dh = sum(deaths[.SD$Cause %in% "1102"]),
    Dw = NA,
    Dy = NA
  )
}
WHO_8 <- WHO_8[,group101(.SD),
by = .(Country, Year, Sex, Age)];gc()
save(WHO_8, file=file.path("Data","Grouped","WHO","WHO_8.Rdata"))
rm(WHO_8);gc()
# --------------------------
# Mixed codes can be reduced to 3. These are mutually exclusive and therefore sum.
# so we can treat as if they were 4 digits
WHO <- readWHO_1()
WHO_9 <- WHO[List == "10M"];rm(WHO);gc()
WHO_9[,c("Admin1","SubDiv"):=NULL];gc()
WHO_9 <- WHO_2_Long(WHO_9)
# Codes are mutually exclusive, so can collapse to 3
WHO_9[,Cause := substr(Cause, 1, 3)];gc()
WHO_9[,
Deaths := sum(Deaths),
by = .(Country, Year, Cause, Sex, Age)];gc()
WHO_9 <- WHO_9[,grouph3(.SD,h3,w3,y3),
by = .(Country, Year, Sex, Age)];gc()
save(WHO_9, file=file.path("Data","Grouped","WHO","WHO_9.Rdata"))
rm(WHO_9);gc()
# -------------------------
files <- paste0("WHO_",1:9,".Rdata")
WHO <- do.call("rbind",lapply(files,function(x){
local(get(load(file.path("Data","Grouped","WHO",x))))
}))
save(WHO,file=file.path("Data","Grouped","WHO","WHO1_Combined.Rdata"))
# some cleaning
rm(files,group101,grouph3,groupUE1,readWHO_1,WHO_2_Long)
# ------------------------------------------------------------
# now, what's GHE?
GHE <- local(get(load(file.path(who.folder,"WHO_GHE.Rdata"))))
GHE$sex <- ifelse(GHE$sex == "FMLE",2,1)
GHE <- GHE[GHE$causename %in% c("All Causes","Intentional injuries","Interpersonal violence")]
GHE <- reshape(GHE, direction='long',
varying=c(paste0('dths',2000:2016), paste0('low',2000:2016), paste0('upp',2000:2016)),
timevar='Year',
times=c(2000:2016),
v.names=c('dths','low',"upp"))
# standardize ages
# age 1 -> 0
# age 2 -> 1
ind1 <- GHE$age == 1
GHE$age[ind1] <- 0
ind2 <- GHE$age == 2
GHE$age[ind2] <- 1
rm(ind1,ind2);gc()
GHE <- GHE[,.(dths=sum(dths),low=sum(low),upp=sum(upp)), by = .(iso3,causename,sex,age,Year)]
D <- GHE[causename == "All Causes"]
GHE <- GHE[causename != "All Causes"];gc()
Dh <- GHE[causename == "Interpersonal violence"]
GHE <- GHE[causename != "Interpersonal violence"];gc()
Dwy <- GHE[causename == "Intentional injuries"]
rm(GHE);gc()
# Make 3 datsets
MID <- copy(D) # seems to only make reference
MID[,c("low","upp"):=NULL]
setnames(MID, "dths","D")
MID$Dh <- Dh$dths
MID$Dwy <- Dwy$dths
save(MID, file=file.path("Data","Grouped","WHO","GHEmid.Rdata"))
rm(MID);gc()
# lower bound
LOW <- copy(D) # seems to only make reference
LOW[,c("dths","upp"):=NULL]
setnames(LOW, "low","D")
LOW$Dh <- Dh$low
LOW$Dwy <- Dwy$low
save(LOW, file=file.path("Data","Grouped","WHO","GHElow.Rdata"))
rm(LOW);gc()
# upper bound
UPP <- copy(D) # seems to only make reference
UPP[,c("dths","low"):=NULL]
setnames(UPP, "upp","D")
UPP$Dh <- Dh$upp
UPP$Dwy <- Dwy$upp
save(UPP, file=file.path("Data","Grouped","WHO","GHEupp.Rdata"))
rm(UPP);gc()
rm(Dh,D,Dwy);gc()
# --------------------------------------#
# Take a look at Population Data #
# --------------------------------------#
POP <- local(get(load(file.path(who.folder,"WHO_POP.Rdata"))))
setnames(POP, paste0("Pop",1:26),as.character(c(9999,0:5,seq(10,95,by=5),999)))
# 9999 for total, and 999 for unk Age
POP[,c("Lb"):=NULL];gc()
# affects Brasil, Panama, and Israel
ind1 <- !is.na(POP$Admin1) & (POP$Admin1 == "901" | POP$Admin1 == "902") ;gc()
POP <- POP[!ind1]
rm(ind1);gc()
POP[,c("Admin1","SubDiv"):=NULL];gc()
# get Age to long
POP <- melt(POP, id.vars = c("Country", "Year", "Sex"),
measure.vars = as.character(c(9999,0:5,seq(10,95,by=5),999)),
variable.name = "Age",
value.name = "Pop");gc()
POP[,Age := as.character(Age)]
POP[,Age := as.integer(Age)]
# save out
save(POP, file=file.path("Data","Grouped","WHO","WHO_POP.Rdata"))
# --------------------------------------------------------------- #
# Done with WHO for now. Still prefer single ages 0-100+ though. #
# --------------------------------------------------------------- #
|
6109773158fcaad872e000526bd6b6467c5e149a
|
184180d341d2928ab7c5a626d94f2a9863726c65
|
/valgrind_test_dir/outlierCpp-test.R
|
0236fd89888eb6924d3d85e1edf6d9df40cc5bcf
|
[] |
no_license
|
akhikolla/RcppDeepStateTest
|
f102ddf03a22b0fc05e02239d53405c8977cbc2b
|
97e73fe4f8cb0f8e5415f52a2474c8bc322bbbe5
|
refs/heads/master
| 2023-03-03T12:19:31.725234
| 2021-02-12T21:50:12
| 2021-02-12T21:50:12
| 254,214,504
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 319
|
r
|
outlierCpp-test.R
|
# Appears to be an auto-generated capture stub (RcppDeepState-style):
# instead of running the native routine, it logs every argument set
# passed to `outlierCpp` into the shared `data.env` environment for
# later replay/inspection — TODO confirm against the generator.
function (K, R, xy, ratio, imat, rmin)
{
# Shared logging environment, assumed to be created elsewhere in the harness
e <- get("data.env", .GlobalEnv)
# Append this call's arguments to the log list under "outlierCpp"
e[["outlierCpp"]][[length(e[["outlierCpp"]]) + 1]] <- list(K = K,
R = R, xy = xy, ratio = ratio, imat = imat, rmin = rmin)
# Return (invisibly) the deparsed .Call signature of the wrapped routine
invisible(c(".Call", "'_Benchmarking_outlierCpp`", "K", "R",
"xy", "ratio", "imat", "rmin"))
}
|
0bb399593ee5fbff195e086e2f939f0b1820a9bc
|
155f3439a2f45d9dc0c6e6fd509d20a4c510754e
|
/Aleksey-R-Programming/Week_4/best.R
|
8d53b5e05020a852b06592317895c6b2fea49fb7
|
[] |
no_license
|
voite1/Coursera
|
2cada1da8ddde92dceadd8f7865176319f1a10ca
|
33d85bd7ee6fd1e9e97eb8aa73b12ef192b71535
|
refs/heads/master
| 2016-08-08T06:09:53.790863
| 2014-12-09T03:04:28
| 2014-12-09T03:04:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,212
|
r
|
best.R
|
# Return the name of the hospital in `state` with the lowest 30-day
# mortality rate for `outcome` ("heart attack", "heart failure" or
# "pneumonia"), based on "outcome-of-care-measures.csv" in the working
# directory. Ties on the lowest rate are broken alphabetically by
# hospital name. Stops with "invalid state"/"invalid outcome" on bad input.
best <- function(state, outcome) {
  # Read the data file; every column comes in as character, so the rate
  # column is converted to numeric further below
  data <- read.csv("outcome-of-care-measures.csv", colClasses = "character")

  # Validate the outcome and state arguments against the data
  outcomes <- c("heart attack", "heart failure", "pneumonia")
  states <- unique(data[, 7])
  if (outcome %in% outcomes == FALSE) {
    stop("invalid outcome")
  } else if (state %in% states == FALSE) {
    stop("invalid state")
  }

  # Keep only hospital name, state, and the three mortality-rate columns,
  # renaming them so `outcome` can be used directly as a column name
  data <- data[c(2, 7, 11, 17, 23)]
  names(data) <- c("name", "state", "heart attack", "heart failure", "pneumonia")

  # Restrict to the requested state and drop rows with no rate available
  data <- data[data$state == state & data[outcome] != "Not Available", ]

  # The rates were read as character; convert before comparing —
  # which.min()/min() on a character vector would error out
  rates <- as.numeric(data[, outcome])

  # Lowest rate, with ties broken alphabetically by hospital name
  candidates <- data$name[rates == min(rates)]
  sort(candidates)[1]
}
|
5b367bfee23097dbda1de504342f918438acd02d
|
c0750d140505642f64a4308dc9a58946d06dabab
|
/R/mlRegressionKnn.R
|
233331b5ca16ba7998c4ecd2ecf00207cb5e5e75
|
[] |
no_license
|
AlexanderLyNL/jaspMachineLearning
|
3f2e17511b27927776b54f3c1db762f56c6ec76d
|
803d43a3d20fb4ecc39145782704393881e16f33
|
refs/heads/master
| 2023-07-28T21:29:05.215981
| 2021-09-25T03:23:42
| 2021-09-25T03:23:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,714
|
r
|
mlRegressionKnn.R
|
#
# Copyright (C) 2017 University of Amsterdam
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# JASP analysis entry point for k-nearest-neighbours regression.
# Orchestrates the full analysis: data preparation and validation, model
# fitting, and all output tables/plots, each written into `jaspResults`
# by the project helpers called below. `state` is part of the JASP
# analysis API but is not used in this function.
mlRegressionKnn <- function(jaspResults, dataset, options, state=NULL) {
# Preparatory work
dataset <- .readDataRegressionAnalyses(dataset, options)
.errorHandlingRegressionAnalyses(dataset, options, type = "knn")
# Check if analysis is ready to run
ready <- .regressionAnalysesReady(options, type = "knn")
# Compute results and create the model summary table
.regressionMachineLearningTable(dataset, options, jaspResults, ready, position = 1, type = "knn")
# If the user wants to add the values to the data set
.regressionAddValuesToData(dataset, options, jaspResults, ready)
# Add test set indicator to data
.addTestIndicatorToData(options, jaspResults, ready, purpose = "regression")
# Create the data split plot
.dataSplitPlot(dataset, options, jaspResults, ready, position = 2, purpose = "regression", type = "knn")
# Create the evaluation metrics table
.regressionEvaluationMetrics(dataset, options, jaspResults, ready, position = 3)
# Create the mean squared error plot
.knnErrorPlot(dataset, options, jaspResults, ready, position = 4, purpose = "regression")
# Create the predicted performance plot
.regressionPredictedPerformancePlot(options, jaspResults, ready, position = 5)
}
# Fit a k-nearest-neighbours regression (kknn package) according to the
# analysis options: split the data into train/(validation)/test sets,
# optionally optimise k via hold-out, k-fold or leave-one-out validation,
# refit on the test set, and return a list holding the fitted model,
# chosen k, train/validation/test errors and per-row predictions.
.knnRegression <- function(dataset, options, jaspResults, ready){
# Import model formula from jaspResults
formula <- jaspResults[["formula"]]$object
# Set model specific parameters
weights <- options[["weights"]]
distance <- options[["distanceParameterManual"]]
# Split the data into training and test sets
if(options[["holdoutData"]] == "testSetIndicator" && options[["testSetIndicatorVariable"]] != ""){
# Select observations according to a user-specified indicator
# (indicator == 0 goes to training; indicator == 1 marks the test set)
train.index <- which(dataset[,options[["testSetIndicatorVariable"]]] == 0)
} else {
# Sample a percentage of the total data set
train.index <- sample.int(nrow(dataset), size = ceiling( (1 - options[['testDataManual']]) * nrow(dataset)))
}
trainAndValid <- dataset[train.index, ]
# Create the generated test set indicator (1 = test row, 0 = train row)
testIndicatorColumn <- rep(1, nrow(dataset))
testIndicatorColumn[train.index] <- 0
if(options[["modelOpt"]] == "optimizationManual"){
# Just create a train and a test set (no optimization)
train <- trainAndValid
test <- dataset[-train.index, ]
kfit_test <- kknn::kknn(formula = formula, train = train, test = test, k = options[['noOfNearestNeighbours']],
distance = distance, kernel = weights, scale = FALSE)
nn <- options[['noOfNearestNeighbours']]
} else if(options[["modelOpt"]] == "optimizationError"){
# Create a train, validation and test set (optimization)
valid.index <- sample.int(nrow(trainAndValid), size = ceiling(options[['validationDataManual']] * nrow(trainAndValid)))
test <- dataset[-train.index, ]
valid <- trainAndValid[valid.index, ]
train <- trainAndValid[-valid.index, ]
if(options[["modelValid"]] == "validationManual"){
# Single hold-out validation: try k = 1..maxK and track both the
# validation MSE and the training MSE for each candidate k
nnRange <- 1:options[["maxK"]]
errorStore <- numeric(length(nnRange))
trainErrorStore <- numeric(length(nnRange))
startProgressbar(length(nnRange))
for(i in nnRange){
kfit_valid <- kknn::kknn(formula = formula, train = train, test = valid, k = i,
distance = distance, kernel = weights, scale = FALSE)
errorStore[i] <- mean( (kfit_valid$fitted.values - valid[,options[["target"]]])^2 )
kfit_train <- kknn::kknn(formula = formula, train = train, test = train, k = i,
distance = distance, kernel = weights, scale = FALSE)
trainErrorStore[i] <- mean( (kfit_train$fitted.values - train[,options[["target"]]])^2 )
progressbarTick()
}
# Choose the k with the lowest validation MSE
nn <- base::switch(options[["modelOpt"]],
"optimizationError" = nnRange[which.min(errorStore)])
kfit_test <- kknn::kknn(formula = formula, train = train, test = test, k = nn,
distance = distance, kernel = weights, scale = FALSE)
} else if(options[["modelValid"]] == "validationKFold"){
# K-fold cross-validation over the combined train+validation data
nnRange <- 1:options[["maxK"]]
errorStore <- numeric(length(nnRange))
startProgressbar(length(nnRange))
for(i in nnRange){
kfit_valid <- kknn::cv.kknn(formula = formula, data = trainAndValid, distance = distance, kernel = weights,
kcv = options[['noOfFolds']], k = i)
errorStore[i] <- mean( (kfit_valid[[1]][,1] - kfit_valid[[1]][,2])^2 )
progressbarTick()
}
nn <- base::switch(options[["modelOpt"]],
"optimizationError" = nnRange[which.min(errorStore)])
# Refit at the chosen k to obtain cross-validated fitted values
kfit_valid <- kknn::cv.kknn(formula = formula, data = trainAndValid, distance = distance, kernel = weights,
kcv = options[['noOfFolds']], k = nn)
kfit_valid <- list(fitted.values = as.numeric(kfit_valid[[1]][, 2]))
kfit_test <- kknn::kknn(formula = formula, train = trainAndValid, test = test, k = nn, distance = distance, kernel = weights, scale = FALSE)
train <- trainAndValid
valid <- trainAndValid
test <- test
} else if(options[["modelValid"]] == "validationLeaveOneOut"){
# Leave-one-out cross-validation via kknn::train.kknn
nnRange <- 1:options[["maxK"]]
kfit_valid <- kknn::train.kknn(formula = formula, data = trainAndValid, ks = nnRange, scale = FALSE, distance = distance, kernel = weights)
errorStore <- as.numeric(kfit_valid$MEAN.SQU)
nn <- base::switch(options[["modelOpt"]],
"optimizationError" = nnRange[which.min(errorStore)])
kfit_valid <- list(fitted.values = kfit_valid[["fitted.values"]][[1]])
kfit_test <- kknn::kknn(formula = formula, train = trainAndValid, test = test, k = nn, distance = distance, kernel = weights, scale = FALSE)
train <- trainAndValid
valid <- trainAndValid
test <- test
}
}
# Use the specified model to make predictions for dataset
predictions <- predict(kknn::kknn(formula = formula, train = train, test = dataset, k = nn, distance = distance, kernel = weights, scale = FALSE))
# Create results object
regressionResult <- list()
regressionResult[["formula"]] <- formula
regressionResult[["model"]] <- kfit_test
regressionResult[["nn"]] <- nn
regressionResult[["weights"]] <- weights
regressionResult[["distance"]] <- distance
regressionResult[['testMSE']] <- mean( (kfit_test$fitted.values - test[,options[["target"]]])^2 )
regressionResult[["ntrain"]] <- nrow(train)
regressionResult[["ntest"]] <- nrow(test)
regressionResult[["testReal"]] <- test[, options[["target"]]]
regressionResult[["testPred"]] <- kfit_test$fitted.values
regressionResult[["testIndicatorColumn"]] <- testIndicatorColumn
regressionResult[["values"]] <- predictions
if(options[["modelOpt"]] != "optimizationManual"){
# NOTE(review): "accuracyStore" holds MSE values here; the name is
# shared with the classification analysis, which stores accuracies
regressionResult[["accuracyStore"]] <- errorStore
regressionResult[['validMSE']] <- mean( (kfit_valid$fitted.values - valid[,options[["target"]]])^2 )
regressionResult[["nvalid"]] <- nrow(valid)
regressionResult[["valid"]] <- valid
if(options[["modelValid"]] == "validationManual")
regressionResult[["trainAccuracyStore"]] <- trainErrorStore
}
return(regressionResult)
}
# Build the error-vs-k plot (mean squared error for regression,
# classification accuracy for classification) and attach it to
# jaspResults. Returns early when the plot already exists, is disabled,
# or no k-optimization was performed (nothing to plot in that case).
.knnErrorPlot <- function(dataset, options, jaspResults, ready, position, purpose){
if(!is.null(jaspResults[["plotErrorVsK"]]) || !options[["plotErrorVsK"]] || options[["modelOpt"]] == "optimizationManual") return()
plotTitle <- base::switch(purpose, "classification" = gettext("Classification Accuracy Plot"), "regression" = gettext("Mean Squared Error Plot"))
plotErrorVsK <- createJaspPlot(plot = NULL, title = plotTitle, width = 500, height = 300)
plotErrorVsK$position <- position
# Options that invalidate this plot and force recomputation when changed
plotErrorVsK$dependOn(options = c("plotErrorVsK","noOfNearestNeighbours", "trainingDataManual", "distanceParameterManual", "weights", "scaleEqualSD", "modelOpt",
"target", "predictors", "seed", "seedBox", "modelValid", "maxK", "noOfFolds", "modelValid",
"testSetIndicatorVariable", "testSetIndicator", "validationDataManual", "holdoutData", "testDataManual"))
jaspResults[["plotErrorVsK"]] <- plotErrorVsK
if(!ready) return()
result <- base::switch(purpose,
"classification" = jaspResults[["classificationResult"]]$object,
"regression" = jaspResults[["regressionResult"]]$object)
ylabel <- base::switch(purpose,
"classification" = gettext("Classification Accuracy"),
"regression" = gettext("Mean Squared Error"))
if(options[["modelValid"]] == "validationManual"){
# Hold-out validation: show validation and training curves, and mark
# the chosen k on the validation curve
xvalues <- rep(1:options[["maxK"]], 2)
yvalues1 <- result[["accuracyStore"]]
yvalues2 <- result[["trainAccuracyStore"]]
yvalues <- c(yvalues1, yvalues2)
type <- rep(c(gettext("Validation set"), gettext("Training set")), each = length(yvalues1))
d <- data.frame(x = xvalues, y = yvalues, type = type)
xBreaks <- jaspGraphs::getPrettyAxisBreaks(c(0, d$x), min.n = 4)
yBreaks <- jaspGraphs::getPrettyAxisBreaks(d$y, min.n = 4)
pointData <- data.frame(x = result[["nn"]],
y = yvalues1[result[["nn"]]],
type = gettext("Validation set"))
p <- ggplot2::ggplot(data = d, ggplot2::aes(x = x, y = y, linetype = type)) +
jaspGraphs::geom_line() +
ggplot2::scale_x_continuous(name = gettext("Number of Nearest Neighbors"), breaks = xBreaks, labels = xBreaks, limits = c(0, max(xBreaks))) +
ggplot2::scale_y_continuous(name = ylabel, breaks = yBreaks, labels = yBreaks) +
ggplot2::labs(linetype = "") +
ggplot2::scale_linetype_manual(values = c(2,1)) +
jaspGraphs::geom_point(data = pointData, ggplot2::aes(x = x, y = y, linetype = type), fill = "red")
p <- jaspGraphs::themeJasp(p, legend.position = "top")
} else if(options[["modelValid"]] != "validationManual"){
# Cross-validation: a single combined train+validation curve with the
# chosen k marked
xvalues <- 1:options[["maxK"]]
yvalues <- result[["accuracyStore"]]
type <- rep(gettext("Training and validation set"), each = length(xvalues))
d <- data.frame(x = xvalues, y = yvalues, type = type)
xBreaks <- jaspGraphs::getPrettyAxisBreaks(c(0, d$x), min.n = 4)
yBreaks <- jaspGraphs::getPrettyAxisBreaks(d$y, min.n = 4)
p <- ggplot2::ggplot(data = d, ggplot2::aes(x = x, y = y, linetype = type)) +
jaspGraphs::geom_line() +
ggplot2::scale_x_continuous(name = gettext("Number of Nearest Neighbors"), breaks = xBreaks, labels = xBreaks, limits = c(0, max(xBreaks))) +
ggplot2::scale_y_continuous(name = ylabel, breaks = yBreaks, labels = yBreaks) +
jaspGraphs::geom_point(ggplot2::aes(x = x, y = y, linetype = type), data = data.frame(x = result[["nn"]], y = yvalues[result[["nn"]]], type = gettext("Training and validation set")), fill = "red") +
ggplot2::labs(linetype = "")
p <- jaspGraphs::themeJasp(p, legend.position = "top")
}
plotErrorVsK$plotObject <- p
}
# kknn::kknn calls stats::model.matrix which needs these two functions and looks for them by name in the global namespace
contr.dummy <- kknn::contr.dummy
contr.ordinal <- kknn::contr.ordinal
|
6e012b7eda490d4f8f198c630781537e44fe83e6
|
7f9ab53d7494744e6c5b0c33b1b2c17a080c979a
|
/1.2 Matematycy R/customdist.R
|
b3ee73caf31bfd69d2949a1617a41ffd5b2ef237
|
[] |
no_license
|
arkadiusz-wieczorek/roqad-ppb2015
|
cb40d526960753c7338710a4200c32bd4884c67f
|
aa9efc6b43d9ee991cede8fbe0bc5105a7a75616
|
refs/heads/master
| 2020-04-10T07:34:11.023113
| 2015-12-09T07:31:55
| 2015-12-09T07:31:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 303
|
r
|
customdist.R
|
# Build an n x n matrix by evaluating my.function(i, j) for every pair of
# indices, printing the row index as a progress indicator. Row/column
# names are taken from the first n entries of the first column of the
# (global) `listaURL` data frame.
# Fixes: the original `n <- length(c(1:n))` was a no-op for n >= 1 and
# wrong for n == 0 (1:0 has length 2); `1:nrow(mat)` is replaced with
# seq_len() so an empty matrix does not iterate.
custom.dist <- function(n, my.function) {
  mat <- matrix(0, ncol = n, nrow = n)
  # NOTE(review): relies on a global `listaURL` for labels; consider
  # passing the labels in as an argument instead.
  colnames(mat) <- rownames(mat) <- listaURL[1][0:n, ]
  for (i in seq_len(nrow(mat))) {
    for (j in seq_len(ncol(mat))) {
      mat[i, j] <- my.function(i, j)
    }
    print(i)  # progress indicator
    flush.console()
  }
  return(mat)
}
|
7166e7eff8e85bbdf645343344b67ab42e78a664
|
4c699cae4a32824d90d3363302838c5e4db101c9
|
/03_Importacao_Limpeza_dados/03-TrabalhandoComArquivosCsv.R
|
3a2a9e291609d041439bd53eb2023c2c9cf87d47
|
[
"MIT"
] |
permissive
|
janes/BigData_Analytics_com_R
|
470fa6d758351a5fc6006933eb5f4e3f05c0a187
|
431c76b326e155715c60ae6bd8ffe7f248cd558a
|
refs/heads/master
| 2020-04-27T19:39:10.436271
| 2019-02-06T11:29:36
| 2019-02-06T11:29:36
| null | 0
| 0
| null | null | null | null |
ISO-8859-1
|
R
| false
| false
| 2,081
|
r
|
03-TrabalhandoComArquivosCsv.R
|
# Trabalhando com arquivos csv
# Usando o pacote readr
install.packages("readr")
library(readr)
# Abre o promt para escolher o arquivo
meu_arquivo <- read_csv(file.choose())
meu_arquivo <- read_delim(file.choose(), delim = "|")
# Importando arquivos
df1 <- read_table("data/temperaturas.txt", col_names = c("DAY", "MONTH", "YEAR", "TEMP"))
head(df1)
str(df1)
read_lines("data/temperaturas.txt", skip = 0, n_max = -1L)
read_file("data/temperaturas.txt")
# Exportando e Importando
write_csv(iris, "data/iris.csv")
dir()
# col_integer():
# col_double():
# col_logical():
# col_character():
# col_factor():
# col_skip():
# col_date() (alias = "D"), col_datetime() (alias = "T"), col_time() ("t")
df_iris <- read_csv("data/iris.csv", col_types = list(
Sepal.Length = col_double(),
Sepal.Width = col_double(),
Petal.Length = col_double(),
Petal.Width = col_double(),
Species = col_factor(c("setosa", "versicolor", "virginica"))
))
dim(df_iris)
str(df_iris)
# Importando
df_cad <- read_csv("http://datascienceacademy.com.br/blog/aluno/RFundamentos/Datasets/Parte3/cadastro.csv")
head(df_cad)
update.packages ()
install.packages('knitr')
install.packages("dplyr")
library(dplyr)
options(warn = -1)
df_cad <- tbl_df(df_cad)
head(df_cad)
View(df_cad)
write_csv(df_cad, "data/df_cad_bkp.csv")
# Importando vários arquivos simultaneamente
list.files()
lista_arquivos <- list.files("C:/Projetos/Git_Projetos/BD_Analytics_com_R/03_Importação e Limpeza de dados/data/", full.names = TRUE)
class(lista_arquivos)
lista_arquivos
lista_arquivos2 <- lapply(lista_arquivos, read_csv)
problems(lista_arquivos2)
# Parsing
parse_date("01/02/15", "%m/%d/%y")
parse_date("01/02/15", "%d/%m/%y")
parse_date("01/02/34", "%y/%m/%d")
parse_date("01/02/22", "%y/%m/%d")
locale("en")
locale("fr")
locale("pt")
# http://www.bigmemory.org
install.packages("bigmemory")
library(bigmemory)
?bigmemory
bigdata <- read.big.matrix(filename = "data/cadastro.csv", sep = ",", header = TRUE, skip = 1)
|
383785052a55ae3abe601fe96957e157aa12f72d
|
6ec80d98b62b3da24250ff660bb760edf1b0b712
|
/man/max_col.Rd
|
5e4fbbcaf0cb4d3e21baea95f42b4808bcba61f4
|
[] |
no_license
|
PROMiDAT/traineR
|
753454edfc395cafefc6e449ba1de35205b430ac
|
4558e0e4a5eb6042c3e4dcc301cdd3530f3cfe4a
|
refs/heads/master
| 2022-09-29T07:38:40.010644
| 2022-09-05T22:03:58
| 2022-09-05T22:03:58
| 195,308,267
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 201
|
rd
|
max_col.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Utilities.R
\name{max_col}
\alias{max_col}
\title{max_col}
\usage{
max_col(m)
}
\description{
max_col
}
\keyword{internal}
|
2fb805c8e26eb7208bde419ec6e0b013db10f6d7
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/evd/examples/tcplot.Rd.R
|
944ca706ad6f27776886b3c5141d0fa3374036e5
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 299
|
r
|
tcplot.Rd.R
|
library(evd)
### Name: tcplot
### Title: Threshold Choice Plot
### Aliases: tcplot
### Keywords: hplot
### ** Examples
tlim <- c(3.6, 4.2)
## Not run: tcplot(portpirie, tlim)
## Not run: tcplot(portpirie, tlim, nt = 100, lwd = 3, type = "l")
## Not run: tcplot(portpirie, tlim, model = "pp")
|
7d0fd7270e2a7242f4ae8be6ed720e03db7816d5
|
d5a1bf85f845d0d4d23375003f42842ad811fe8e
|
/man/data.hc.Rd
|
d412cd2e56b46c764b6b04c88eec74c3bc3601ac
|
[] |
no_license
|
vishalbelsare/rare
|
cbc0e73d8e04411d630f1ed3429c3500e9a60f2f
|
93ce5266c9cef4a4c958b06cbfd325f9ae8d9d4b
|
refs/heads/master
| 2022-01-26T22:43:05.776091
| 2022-01-24T23:55:21
| 2022-01-24T23:55:21
| 155,524,195
| 0
| 0
| null | 2022-01-25T06:32:22
| 2018-10-31T08:34:49
|
R
|
UTF-8
|
R
| false
| true
| 856
|
rd
|
data.hc.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rare.data.R
\docType{data}
\name{data.hc}
\alias{data.hc}
\title{Hierarchical clustering tree for adjectives in TripAdvisor data set}
\format{An object of class \code{hclust} of length 7.}
\source{
Embeddings available at \url{http://nlp.stanford.edu/data/glove.6B.zip}
}
\usage{
data.hc
}
\description{
An \code{hclust} tree for the 200 adjectives appearing in the TripAdvisor reviews.
The tree was generated with 100-dimensional word embeddings pre-trained by GloVe
(Pennington et al., 2014) on Gigaword5 and Wikipedia2014 corpora for the adjectives.
}
\references{
Pennington, J., Socher, R., and Manning, C. D. (2014).
Glove: Global vectors for word representation.
\emph{In Empirical Methods in Natural Language Processing (EMNLP)}, pages 1532–1543.
}
\keyword{datasets}
|
e2baa76c47bc2d81ba1f9675c72800154ea8d98e
|
fb7969219b11f64fbec9ba9aceabeeaf32513777
|
/man/NW.weights_multi.Rd
|
8aea177ba20e15feed6c633e8bdc4142a2493e04
|
[] |
no_license
|
cgrazian/BICC
|
35d74e1efb2d2a3d2151d13026cfa4aee66fd438
|
484ba1a2baa3477000e742263a5e603c28d0e5aa
|
refs/heads/master
| 2023-04-25T23:39:21.778112
| 2021-05-19T02:05:24
| 2021-05-19T02:05:24
| 368,717,277
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 806
|
rd
|
NW.weights_multi.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/functions.R
\name{NW.weights_multi}
\alias{NW.weights_multi}
\title{Multivariate Nadaraya-Watson weights}
\usage{
NW.weights_multi(x.mat, x.vec, bdw, kern = "gauss")
}
\arguments{
\item{x.mat}{matrix of values on which to compute the Nadaraya-Watson weights}
\item{x.vec}{vector: the point with respect to which the Nadaraya-Watson weights are computed for each of the points (rows) of x.mat}
\item{bdw}{bandwidth}
\item{kern}{kernel function to use to compute the Nadaraya-Watson weights. Two alternatives: either "gauss" for a Gaussian
kernel or "t" for a triweight kernel. Default: "gauss".}
}
\value{
vector of weights
}
\description{
This function computes the multivariate Nadaraya-Watson function weights
}
\keyword{CondCop}
|
1a64155fc2933ef0321720a626e6d76a5a4dc8cd
|
63caf4d9e0f4b9c9cb5ab101f5795a94f27d575d
|
/man/binmapAdp.Rd
|
ca94d7d3675a9af8310aced98f69c6cd932610bd
|
[] |
no_license
|
marie-geissler/oce
|
b2e596c29050c5e2076d02730adfc0c4f4b07bb4
|
2206aaef7c750d6c193b9c6d6b171a1bdec4f93d
|
refs/heads/develop
| 2021-01-17T20:13:33.429798
| 2015-12-24T15:38:23
| 2015-12-24T15:38:23
| 48,561,769
| 1
| 0
| null | 2015-12-25T01:36:30
| 2015-12-25T01:36:30
| null |
UTF-8
|
R
| false
| false
| 1,725
|
rd
|
binmapAdp.Rd
|
\name{binmapAdp}
\alias{binmapAdp}
\title{Bin-map an ADP object}
\description{Bin-map an ADP object, by interpolating velocities, backscatter
amplitudes, etc., to uniform depth bins, thus compensating for the pitch
and roll of the instrument. This only makes sense for ADP objects that are
in beam coordinates.}
\usage{binmapAdp(x, debug=getOption("oceDebug"))}
\arguments{
\item{x}{an object of class \code{"adp"}}
\item{debug}{a flag that turns on debugging. Set to 1 to get a
moderate amount of debugging information, or to 2 to get more.}
}
\details{This is a preliminary function that is still undergoing testing. Once
the methods have been tested more, efforts may be made to speed up the
processing, either by vectorizing in R or by doing some of the calculation
in C.}
\section{Bugs}{This only works for 4-beam RDI ADP objects.}
\value{An object of \code{\link[base]{class}} \code{"adp"}.}
\examples{
\dontrun{
library(oce)
beam <- read.oce("adp_rdi_2615.000",
from=as.POSIXct("2008-06-26", tz="UTC"),
to=as.POSIXct("2008-06-26 00:10:00", tz="UTC"),
longitude=-69.73433, latitude=47.88126)
beam2 <- binmapAdp(beam)
plot(enuToOther(toEnu(beam), heading=-31.5))
plot(enuToOther(toEnu(beam2), heading=-31.5))
plot(beam, which=5:8) # backscatter amplitude
plot(beam2, which=5:8)
}
}
\references{The method was devised by Clark Richards for use in his PhD work at
Department of Oceanography at Dalhousie University.}
\author{Dan Kelley and Clark Richards}
\seealso{See \code{\link{adp-class}} for a discussion of \code{adp} objects and
notes on the many functions dealing with them.}
\keyword{misc}
|
f01df9adc8966f61f2089eafbe97915474edaca8
|
0c9036e9bae17e52e5f7dffffa8e5d79e6344793
|
/Silge, Julia - Text Mining with R - A Tidy Approach (2017).r
|
bfed735426acf13d1c79e51e9b67d50824f2e7ad
|
[] |
no_license
|
OblateSpheroid/Book_notes
|
452f60e89b841d79c66cf189862a17e80130ff24
|
a552e580bc29ec37cf3ce4a206693e5ed4dcebcc
|
refs/heads/master
| 2021-05-08T23:11:51.680161
| 2018-02-06T22:13:19
| 2018-02-06T22:13:19
| 119,699,771
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,383
|
r
|
Silge, Julia - Text Mining with R - A Tidy Approach (2017).r
|
### Silge Book Notes ###
### Working notes from Silge & Robinson, "Text Mining with R" (2017).

# Chapter 1 ----

# FIX: library() (not require()) for hard dependencies -- require() only
# returns FALSE on failure instead of erroring.
library(tidytext)
library(dplyr)

text <- c("Because I could not stop for Death -",
          "He kindly stopped for me -",
          "The Carriage held but just Ourselves -",
          "and Immortality")

# One row per line of text.
# FIX: seq_along() is safe for empty input (1:length(t) would give c(1, 0));
# tibble() replaces the deprecated data_frame().
text2df <- function(t) tibble(line = seq_along(t), text = t)
text_df <- text2df(text)

# One token (word) per row.
unnest_tokens(text_df, word, text)

library(janeaustenr)
library(dplyr)
library(stringr)
library(tidytext)
library(ggplot2)

data(stop_words)

# Annotate each Austen novel with running line numbers and a cumulative
# chapter counter.
original_books <- austen_books() %>%
  group_by(book) %>%
  mutate(linenumber = row_number(),
         chapter = cumsum(str_detect(text, regex("^chapter [\\divxlc]",
                                                 ignore_case = TRUE)))) %>%
  ungroup()

# Tokenize and drop stop words.
tidy_books <- unnest_tokens(original_books, word, text) %>%
  anti_join(stop_words)
count(tidy_books, word, sort = TRUE)

# Bar chart of the most frequent words (n > 600).
tidy_books %>%
  count(word, sort = TRUE) %>%
  filter(n > 600) %>%
  mutate(word = reorder(word, n)) %>%
  ggplot(aes(word, n)) +
  geom_col() +
  xlab(NULL) +
  coord_flip()

library(gutenbergr)
library(scales)
library(tidyr)

# Download comparison corpora from Project Gutenberg (network access needed).
hgwells <- gutenberg_download(c(35, 36, 5230, 159))
bronte <- gutenberg_download(c(1260, 768, 969, 9182, 767))

tidy_hgwells <- unnest_tokens(hgwells, word, text) %>%
  anti_join(stop_words)
tidy_bronte <- unnest_tokens(bronte, word, text) %>%
  anti_join(stop_words)
count(tidy_hgwells, word, sort = TRUE)
count(tidy_bronte, word, sort = TRUE)

# Per-author word proportions, reshaped so each row compares one author
# against Jane Austen. (spread()/gather() are superseded by pivot_wider()/
# pivot_longer() but kept here to match the book.)
frequency <- bind_rows(mutate(tidy_bronte, author = "Brontë Sisters"),
                       mutate(tidy_hgwells, author = "H.G. Wells"),
                       mutate(tidy_books, author = "Jane Austen")) %>%
  mutate(word = str_extract(word, "[a-z']+")) %>%
  count(author, word) %>%
  group_by(author) %>%
  mutate(proportion = n / sum(n)) %>%
  select(-n) %>%
  spread(author, proportion) %>%
  gather(author, proportion, `Brontë Sisters`:`H.G. Wells`)

ggplot(frequency, aes(x = proportion, y = `Jane Austen`,
                      color = abs(`Jane Austen` - proportion))) +
  geom_abline(color = "gray40", lty = 2) +
  geom_jitter(alpha = 0.1, size = 2.5, width = 0.3, height = 0.3) +
  geom_text(aes(label = word), check_overlap = TRUE, vjust = 1.5) +
  scale_x_log10(labels = percent_format()) +
  scale_y_log10(labels = percent_format()) +
  scale_color_gradient(limits = c(0, 0.001),
                       low = "darkslategray4", high = "gray75") +
  facet_wrap(~author, ncol = 2) +
  theme(legend.position = "none") +
  labs(y = "Jane Austen", x = NULL)

# Chapter 2 ----

library(tidytext)
library(janeaustenr)
library(dplyr)
library(stringr)
library(tidyr)
library(ggplot2)

tidy_books <- austen_books() %>%
  group_by(book) %>%
  mutate(linenumber = row_number(),
         chapter = cumsum(str_detect(text, regex("^chapter [\\divxlc]",
                                                 ignore_case = TRUE)))) %>%
  ungroup() %>%
  unnest_tokens(word, text)

# "Joy" words from the NRC lexicon, counted within Emma.
nrcjoy <- filter(get_sentiments("nrc"), sentiment == "joy")
filter(tidy_books, book == "Emma") %>%
  inner_join(nrcjoy) %>%
  count(word, sort = TRUE)

# Net (positive - negative) Bing sentiment per 80-line chunk of each novel.
janeaustensentiment <- inner_join(tidy_books, get_sentiments("bing")) %>%
  count(book, index = linenumber %/% 80, sentiment) %>%
  spread(sentiment, n, fill = 0) %>%
  mutate(sentiment = positive - negative)

ggplot(janeaustensentiment, aes(index, sentiment, fill = book)) +
  geom_col(show.legend = FALSE) +
  facet_wrap(~book, ncol = 2, scales = "free_x")
|
08718f4f3ddf01b8e4e325c9fb349e34406ae715
|
cac62c5097aa5a367b15405860fab4f55e88a654
|
/Development/Dev_Local_GP/update/update_new/update.R
|
d57257cbc912e450c9cd17fc046084361788c0e4
|
[] |
no_license
|
drizopoulos/JMbayes2
|
657bcd1bd9dc7c9ae4992bc3514f1f3eb44a6cff
|
e11eed2c0626319d5d655aa555e56e60f75f9d3c
|
refs/heads/master
| 2023-07-06T02:55:16.326465
| 2023-06-26T17:13:58
| 2023-06-26T17:13:58
| 207,892,271
| 62
| 22
| null | 2022-08-12T08:34:07
| 2019-09-11T19:37:54
|
R
|
UTF-8
|
R
| false
| false
| 1,476
|
r
|
update.R
|
# Example workflow for the update() function below; pbc2 / pbc2.id and
# lme(), coxph(), jm() come from the nlme / survival / JMbayes2 packages.
# [1] Fit the longitudinal submodel: linear mixed model for log(serBilir)
# with random intercept and slope per subject.
fm1 <- lme(fixed = log(serBilir) ~ year * sex + I(year^2) +
age + prothrombin, random = ~ year | id, data = pbc2)
# [2] Fit a Cox model, specifying the baseline covariates to be included in the
# joint model.
fCox1 <- coxph(Surv(years, status2) ~ drug + age, data = pbc2.id)
# [3] The basic joint model is fitted using a call to jm() i.e.,
joint_model_fit_1 <- jm(fCox1, fm1, time_var = "year", n_burnin = 0)
# Handle used by the interactive experiments further below.
object <- joint_model_fit_1
## Rebuild the call stored in a fitted joint model, substituting the
## arguments supplied in `...`, and return the modified (unevaluated)
## call. Arguments already present in the stored call are overwritten;
## new ones are appended. When `n_iter` is changed, sampling is set up to
## continue from the previous chains (n_burnin = 0, last_iterations).
##
## NOTE(review): this masks base::update(); it should eventually be an S3
## method (e.g. update.jm) registered on the fitted-model class.
##
## FIX: the original tested `if (nams %in% c("n_iter"))`, which places a
## logical VECTOR in the `if` condition whenever more than one argument
## is supplied via `...` -- an error in R >= 4.2. Membership is tested
## the other way around: `"n_iter" %in% nams`.
update <- function(object, ...) {
  call <- object$call
  if (is.null(call))
    stop("need an object with call component.\n")
  ## unevaluated arguments passed through ...
  extras <- match.call(expand.dots = FALSE)$...
  if (length(extras) > 0) {
    nams <- names(extras)
    ## overwrite arguments that already exist in the stored call
    existing <- !is.na(match(nams, names(call)))
    for (a in names(extras)[existing]) {
      call[[a]] <- extras[[a]]
    }
    ## append arguments that are new to the call
    if (any(!existing)) {
      call <- c(as.list(call), extras[!existing])
      call <- as.call(call)
    }
    ## special handling when the number of iterations changes: continue
    ## sampling from the previous chains with no additional burn-in
    if ("n_iter" %in% nams) {
      call[['n_burnin']] <- 0
      last_iterations <- extract_last_iterations(object)
      ## NOTE(review): as.name() coerces the extracted object to a symbol;
      ## confirm this is the intended way to splice it into the call.
      call[['last_iterations']] <- as.name(last_iterations)
      call <- c(as.list(call))
      call <- as.call(call)
    }
  } else {
    call <- as.call(call)
  }
  ## evaluation deliberately left disabled; the call itself is returned
  #eval(call, parent.frame())
  call
}
# Rebuild the model call with one extra iteration; update() arranges
# n_burnin = 0 and last_iterations internally (see its n_iter branch).
chk <- update(object, n_iter = 1)
# Manual equivalent of what update() constructs:
lst_iter <- extract_last_iterations(joint_model_fit_1)
chk <- jm(fCox1, fm1, time_var = "year", n_burnin = 0, last_iterations = lst_iter)
summary(chk)
# Sanity check: the stored random-effects covariance matrix D is symmetric.
Matrix::isSymmetric(lst_iter[[3]]$D)
|
bdb7a306a6718087255a3f783752d1cc9d28801c
|
c90c41808cd9946b3528bbfb93d0359b10e4c218
|
/data/turk_s1110.R
|
445164b3d61b45f78eda230a14e6a08d37c51833
|
[] |
no_license
|
ewan/dlp
|
8364b89aead2192e3870fa2cd83e44be12201ef7
|
37451d69951303b4cf52c186f7f9c08772f778f9
|
refs/heads/master
| 2020-05-17T18:09:06.420289
| 2012-12-13T18:39:59
| 2012-12-13T18:39:59
| 6,782,932
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 107
|
r
|
turk_s1110.R
|
## Load the turk_s1110 data set and give it descriptive column names.
## FIX: header = T -> header = TRUE (T is an ordinary variable that can be
## reassigned; TRUE cannot).
## NOTE(review): the meaning of X1-X3 / C1 / T1-T2 is not documented here --
## confirm against the experiment's codebook.
turk_s1110 <- read.table("turk_s1110.txt", header = TRUE)
names(turk_s1110) <- c("X1", "X2", "X3", "C1", "T1", "T2")
|
f74cfc8c34935510a513daa5ce1a6dd993874067
|
0da6e68eb6b28874c84b8c0d13fb084724b33c61
|
/pca_2d.R
|
8889a3a29c1dc1dd9482fd1672f7a55512339c62
|
[] |
no_license
|
slcz/UFLDL
|
0204c39c6c37055a63309a0d89b9260dec41826f
|
f68a4ca218bca01b8cdb2bada35d5fc2a91da365
|
refs/heads/master
| 2020-05-17T21:18:45.653102
| 2015-01-23T10:06:23
| 2015-01-23T10:06:23
| 29,243,785
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,199
|
r
|
pca_2d.R
|
library(ggplot2)
library(grid)

## UFLDL "PCA in 2D" exercise. Each COLUMN of `raw` is one 2-D data point.
raw <- as.matrix(read.table("pcaData.txt"))

## Covariance of the (assumed zero-mean) data.
sigma <- 1 / ncol(raw) * (raw %*% t(raw))

## NOTE(review): `t` shadows base::t() from here on; calls such as t(t$u)
## still resolve to the function (R skips non-function bindings in call
## position), but a different variable name would be clearer.
t <- svd(sigma)

data <- data.frame(x = raw[1, ], y = raw[2, ])
p <- ggplot(data, aes(x = x, y = y)) + geom_point(shape = 0, size = 4)

## BUG FIX: the original wrote
##   v1 <- cbind(c(0,0), t$u[1,] / sqrt(sum(v1^2)) / 2)
## which references v1 before it exists ("object 'v1' not found") and
## indexes ROWS of u; the principal directions are the COLUMNS of u.
u1 <- t$u[, 1]
u2 <- t$u[, 2]
v1 <- cbind(c(0, 0), u1 / sqrt(sum(u1^2)) / 2)
v2 <- cbind(c(0, 0), u2 / sqrt(sum(u2^2)) / 2)
p <- p + geom_line(data = data.frame(x = v1[1, ], y = v1[2, ]), aes(x = x, y = y), arrow = arrow(ends = "first"))
p <- p + geom_line(data = data.frame(x = v2[1, ], y = v2[2, ]), aes(x = x, y = y), arrow = arrow(ends = "first"))

## Rotate the data into the PCA basis.
xrot <- t(t$u) %*% raw
p <- ggplot(data = data.frame(x = xrot[1, ], y = xrot[2, ]), aes(x = x, y = y)) + geom_point(shape = 0, size = 4)

## Keep the first k components, then map back into the original basis.
k <- 1
xtilde <- t(t$u[, 1:k, drop = FALSE]) %*% raw
xhat <- t$u %*% rbind(xtilde, 0)
## BUG FIX: the original plotted xhat[1, ] on BOTH axes.
p <- ggplot(data = data.frame(x = xhat[1, ], y = xhat[2, ]), aes(x = x, y = y)) + geom_point(shape = 0, size = 4)

## PCA whitening: scale each rotated component to unit variance; epsilon
## regularizes near-zero eigenvalues. (Equivalent to the original's
## diag(diag(1/sqrt(diag(t$d) + epsilon))), written directly.)
epsilon <- 1e-5
xpcawhite <- diag(1 / sqrt(t$d + epsilon)) %*% t(t$u) %*% raw
p <- ggplot(data = data.frame(x = xpcawhite[1, ], y = xpcawhite[2, ]), aes(x = x, y = y)) + geom_point(shape = 0, size = 4)

## ZCA whitening: rotate the whitened data back into the input basis.
zpcawhite <- t$u %*% xpcawhite
p <- ggplot(data = data.frame(x = zpcawhite[1, ], y = zpcawhite[2, ]), aes(x = x, y = y)) + geom_point(shape = 0, size = 4)
|
dc1fcd72da83893946810f0ec142ca4c881f1cfc
|
2b2fa7913d67a5ce25402f49e45d78e0d51ff746
|
/man/davidson_44.Rd
|
3bcbb533dc120922a89477617e486be430242abb
|
[] |
no_license
|
frareb/devRate
|
c7580bce58f385ebc4028334c764b248694382a0
|
a3dcdc8ecf7e8fb4212002995eb142f0fdc35f77
|
refs/heads/master
| 2022-09-28T04:21:34.781263
| 2022-09-08T10:49:36
| 2022-09-08T10:49:36
| 56,805,012
| 3
| 1
| null | 2021-01-06T11:29:45
| 2016-04-21T20:54:15
|
R
|
UTF-8
|
R
| false
| true
| 1,309
|
rd
|
davidson_44.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{davidson_44}
\alias{davidson_44}
\title{Davidson equation of development rate as a function of temperature.}
\format{
A list of eight elements describing the equation.
\describe{
\item{eq}{The equation (formula object).}
\item{eqAlt}{The equation (string).}
\item{name}{The name of the equation.}
\item{ref}{The equation reference.}
\item{refShort}{The equation reference shortened.}
\item{startVal}{The parameters found in the literature with their references.}
\item{com}{An optional comment about the equation use.}
\item{id}{An id to identify the equation.}
}
}
\usage{
davidson_44
}
\description{
Davidson, J. (1944). On the relationship between temperature and rate of development of insects
at constant temperatures. The Journal of Animal Ecology:26-38. <doi:10.2307/1326>
}
\details{
Equation:
\deqn{rT = \frac{K}{1 + e^{aa + bb * T}}}{%
rT = K / (1 + exp(aa + bb * T))}
where rT is the development rate, T the temperature, K the distance between
the upper and lower asymptote of the curve, aa the relative position of the origin of
the curve on the abscissa, bb the degree of acceleration of development of the life stage
in relation to temperature.
}
\keyword{datasets}
|
fd2225cbab753eb4d15922c41c0c066a3bfdf6ec
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/gRbase/R/graph-coerce.R
|
872cdb32903b48979ba6fbd5d6138e45cb88b6b7
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,191
|
r
|
graph-coerce.R
|
##############################################################
####
#### Coercion between graphNEL, igraph and matrix
####
##############################################################
## Register the S3 class "igraph" so it can appear in S4 method
## signatures (the setAs() registrations below).
setOldClass("igraph")
## Convert an adjacency matrix to an igraph object. A symmetric matrix
## yields an undirected graph, anything else a directed one; vertex
## names and labels are taken from the column names.
matrix2igraph <- function(x) {
  edge_mode <- if (isSymmetric(x)) "undirected" else "directed"
  g <- igraph::graph.adjacency(x, mode = edge_mode)
  igraph::V(g)$label <- igraph::V(g)$name <- colnames(x)
  g
}
## Convert a graphNEL object to igraph, copying vertex names to labels.
graphNEL2igraph <- function(x) {
  out <- igraph::igraph.from.graphNEL(x)
  igraph::V(out)$label <- igraph::V(out)$name
  out
}
## S4 coercion registrations so that as(x, "target") works between the
## graph representations. Registration order matters; code is unchanged.
## From graphNEL
## -------------
setAs("graphNEL", "igraph", function(from) graphNEL2igraph(from))
setAs("graphNEL", "matrix", function(from) graphNEL2M(from, result="matrix"))
setAs("graphNEL", "Matrix", function(from) graphNEL2M(from, result="Matrix"))
## NOTE(review): the dgCMatrix coercion passes result="Matrix"; both map
## to the same sparse branch downstream -- confirm this is intended.
setAs("graphNEL", "dgCMatrix", function(from) graphNEL2M(from, result="Matrix"))
## From matrix
## -----------
setAs("matrix", "igraph", function(from) matrix2igraph(from))
## matrix -> graphNEL : is in graph package (I guess)
## matrix -> dgCMatrix: is in Matrix package. Should be used
## matrix -> Matrix : is in Matrix package but care should be taken
## because the output can be of different types
## From Matrix
## -----------
setAs("Matrix", "igraph", function(from){ matrix2igraph( as.matrix( from )) })
# Matrix -> graphNEL : in the graph package (I guess)
# Matrix -> matrix : in the Matrix package
## From igraph
## -----------
setAs("igraph", "graphNEL", function(from) igraph::igraph.to.graphNEL(from))
setAs("igraph", "matrix", function(from) as(igraph::get.adjacency(from),"matrix"))
setAs("igraph", "Matrix", function(from) MAT2dgCMatrix(igraph::get.adjacency(from)))
setAs("igraph", "dgCMatrix", function(from) MAT2dgCMatrix(igraph::get.adjacency(from)))
## #####################################################################
##
## The coerceGraph methods are mentioned in GMwR and therefore they
## must be kept alive.
##
## #####################################################################

## Generic: coerce a graph object into the representation named by
## `result`; dispatches on the class of `object`.
coerceGraph <- function(object, result) {
  UseMethod("coerceGraph")
}
## Coerce a graphNEL object into the requested representation.
coerceGraph.graphNEL <- function(object, result) {
  result <- match.arg(result, c("graphNEL", "matrix", "dgCMatrix", "Matrix", "igraph"))
  if (result == "graphNEL") {
    object
  } else if (result == "igraph") {
    gg <- igraph::igraph.from.graphNEL(object)
    igraph::V(gg)$label <- igraph::V(gg)$name
    gg
  } else {
    ## "matrix", "Matrix" and "dgCMatrix" all route through graphNEL2M()
    graphNEL2M(object, result = result)
  }
}
## Coerce a base adjacency matrix into the requested representation.
coerceGraph.matrix <- function(object, result) {
  result <- match.arg(result, c("graphNEL", "matrix", "dgCMatrix", "Matrix", "igraph"))
  if (result == "matrix") {
    object
  } else if (result == "graphNEL") {
    as(object, "graphNEL")
  } else if (result == "igraph") {
    matrix2igraph(object)
  } else {
    ## "Matrix" and "dgCMatrix" share the sparse conversion
    matrix2dgCMatrix(object)
  }
}
## Coerce a sparse (dgCMatrix) adjacency matrix into the requested
## representation.
coerceGraph.dgCMatrix <- function(object, result) {
  result <- match.arg(result, c("graphNEL", "igraph", "matrix", "dgCMatrix", "Matrix"))
  if (result == "dgCMatrix" || result == "Matrix") {
    object
  } else if (result == "graphNEL") {
    as(object, "graphNEL")
  } else if (result == "igraph") {
    matrix2igraph(dgCMatrix2matrix(object))
  } else {
    dgCMatrix2matrix(object)
  }
}
## Coerce an igraph object into the requested representation.
coerceGraph.igraph <- function(object, result) {
  result <- match.arg(result, c("graphNEL", "matrix", "dgCMatrix", "Matrix", "igraph"))
  if (result == "igraph") {
    object
  } else if (result == "graphNEL") {
    igraph::igraph.to.graphNEL(object)
  } else {
    adj <- igraph::get.adjacency(object)
    if (result == "matrix") as(adj, "matrix") else MAT2dgCMatrix(adj)
  }
}
### xxx2yyy
## Build an undirected graphNEL from a list of generators (vertex sets).
## Every generator is expanded into all pairwise edges; `vn` optionally
## fixes the node set (default: every vertex mentioned in `gset`).
ugList2graphNEL <- function(gset, vn = NULL) {
  if (is.null(vn)) {
    vn <- unique.default(unlist(gset, use.names = FALSE))
  }
  pair_mats <- lapply(gset, function(g) names2pairs(g, sort = TRUE, result = "matrix"))
  ftM <- do.call(rbind, pair_mats)
  if (nrow(ftM) > 0) {
    uniq <- unique(rowmat2list(ftM))
    graph::ftM2graphNEL(do.call(rbind, uniq), V = as.character(vn), edgemode = "undirected")
  } else {
    new("graphNEL", nodes = as.character(vn), edgemode = "undirected")
  }
}
## Build a directed graphNEL from a list of (vertex, parents) generators.
dagList2graphNEL <- function(gset, vn = NULL) {
  if (is.null(vn)) {
    vn <- unique.default(unlist(gset, use.names = FALSE))
  }
  pair_mats <- lapply(gset, function(g) names2pairs(g[1], g[-1], sort = FALSE, result = "matrix"))
  ftM <- do.call(rbind, pair_mats)
  if (nrow(ftM) > 0) {
    tfL <- unique(rowmat2list(ftM))
    ## columns are swapped (to, from) -> (from, to) before building
    graph::ftM2graphNEL(do.call(rbind, tfL)[, 2:1, drop = FALSE],
                        V = as.character(vn), edgemode = "directed")
  } else {
    new("graphNEL", nodes = as.character(vn), edgemode = "directed")
  }
}
##################################################
##
## Convert between matrix and dgCMatrix
##
##################################################
## Coerce a matrix-like object (base matrix or Matrix::dgCMatrix) to a
## dense base matrix.
## FIX: the original dispatched with switch(class(x), ...), which fails
## in R >= 4.0 where class(<matrix>) is c("matrix", "array") -- switch()
## requires a length-1 EXPR. inherits() handles multi-class objects.
MAT2matrix <- function(x){
  if (!(inherits(x, "matrix") || inherits(x, "dgCMatrix")))
    stop("Input must be a matrix or a dgCMatrix\n")
  if (inherits(x, "dgCMatrix")) dgCMatrix2matrix(x) else x
}
## Coerce a matrix-like object (base matrix or Matrix::dgCMatrix) to a
## sparse dgCMatrix.
## FIX: the original dispatched with switch(class(x), ...), which fails
## in R >= 4.0 where class(<matrix>) is c("matrix", "array") -- switch()
## requires a length-1 EXPR. inherits() handles multi-class objects.
MAT2dgCMatrix <- function(x){
  if (!(inherits(x, "matrix") || inherits(x, "dgCMatrix")))
    stop("Input must be a matrix or a dgCMatrix\n")
  if (inherits(x, "matrix")) matrix2dgCMatrix(x) else x
}
##################################################
##
## Convert list of generators to adjacency matrix
##
##################################################
## glist: A list of vectors of the form (v, pa1, pa2, ... pan)
## Convert a list of (vertex, parents) generators to an adjacency matrix
## of the requested storage type.
vpaList2adjMAT <- function(glist, vn = unique(unlist(glist)), result = "matrix") {
  result <- match.arg(result, c("matrix", "Matrix", "dgCMatrix"))
  if (result == "matrix") {
    dagList2matrix(glist, vn)
  } else {
    ## "Matrix" and "dgCMatrix" share the sparse builder
    dagList2dgCMatrix(glist, vn)
  }
}
## glist: A list of vectors of the form (v1, v2, ... vn)
## Convert a list of undirected generators to an adjacency matrix of the
## requested storage type.
glist2adjMAT <- function(glist, vn = unique(unlist(glist)), result = "matrix") {
  result <- match.arg(result, c("matrix", "Matrix", "dgCMatrix"))
  if (result == "matrix") {
    ugList2matrix(glist, vn)
  } else {
    ugList2dgCMatrix(glist, vn)
  }
}
## adjList : named list as returned by graph::edges( )
## Convert an adjacency list to an adjacency matrix of the requested
## storage type.
adjList2adjMAT <- function(adjList, result = "matrix") {
  result <- match.arg(result, c("matrix", "Matrix", "dgCMatrix"))
  if (result == "matrix") {
    adjList2matrix(adjList)
  } else {
    adjList2dgCMatrix(adjList)
  }
}
## Short alias for adjList2adjMAT().
adjList2M <- function(x, result = "matrix") adjList2adjMAT(x, result = result)
##
## graphNEL 2 something
##
## Convert a graphNEL to an adjacency-matrix representation (type chosen
## by `result`).
## FIX: use inherits() rather than class(object) != "graphNEL"; equality
## against the full class vector breaks when an object carries more than
## one class.
graphNEL2M <- function(object, result="matrix"){
  if (!inherits(object, "graphNEL"))
    stop("'object' must be a graphNEL object...")
  adjList2adjMAT(graph::edges(object), result = result)
}
## FIXME graphNEL2adjMAT used by HydeNet package; I do not use it.
## Backward-compatibility aliases for graphNEL2M(); kept for external users.
graphNEL2adjMAT <- graphNEL2M
as.adjMAT <- graphNEL2M
## Never used
## Convenience wrapper: graphNEL -> dense base matrix.
graphNEL2matrix <- function(object) graphNEL2M(object, result = "matrix")
## Used a lot
## Convenience wrapper: graphNEL -> sparse matrix. Note result = "Matrix"
## and "dgCMatrix" take the same branch downstream.
graphNEL2dgCMatrix <- function(object) graphNEL2M(object, result = "Matrix")
## Convert a graphNEL to an adjacency matrix, picking a sparse dgCMatrix
## automatically once the node count exceeds `limit` (dense otherwise).
## FIX: inherits() instead of class(object) != "graphNEL" (fragile when
## the class attribute has length > 1).
graphNEL2MAT <- function(object, limit=100){
  if (!inherits(object, "graphNEL"))
    stop("'object' must be a graphNEL object...")
  result <-
    if (length(graph::nodes(object)) > limit) "dgCMatrix" else "matrix"
  adjList2M(graph::edges(object), result = result)
}
## vpaL2tfM: (v,pa(v))-list 2 to-from-matrix
## FIXME vpaL2tfM: rename to vpaList2ftM; used in topoSort
vpaL2tfM <- function(vpaL) {
  mats <- lapply(vpaL, function(g) names2pairs(g[1], g[-1],
                                               sort = FALSE, result = "matrix"))
  do.call(rbind, mats)
}
## from-to edge matrix of a graphNEL.
## FIX: inherits() instead of class(object) != "graphNEL".
graphNEL2ftM <- function(object){
  if (!inherits(object, "graphNEL"))
    stop("'object' must be a graphNEL object...")
  adjList2ftM(graph::edges(object))
}
## to-from edge matrix of a graphNEL.
## FIX: inherits() instead of class(object) != "graphNEL".
graphNEL2tfM <- function(object){
  if (!inherits(object, "graphNEL"))
    stop("'object' must be a graphNEL object...")
  adjList2tfM(graph::edges(object))
}
## -----------
## Convert a list of undirected generators to an adjacency matrix; the
## node set is every vertex mentioned in `x`.
ugList2M <- function(x, result = "matrix") {
  result <- match.arg(result, c("matrix", "Matrix", "dgCMatrix"))
  vn <- unique.default(unlist(x), use.names = FALSE)
  if (result == "matrix") {
    ugList2matrix(x, vn)
  } else {
    ugList2dgCMatrix(x, vn)
  }
}
## -----------
## Convert a list of (vertex, parents) generators to an adjacency matrix;
## the node set is every vertex mentioned in `x`.
dagList2M <- function(x, result = "matrix") {
  result <- match.arg(result, c("matrix", "Matrix", "dgCMatrix"))
  vn <- unique.default(unlist(x), use.names = FALSE)
  if (result == "matrix") {
    dagList2matrix(x, vn)
  } else {
    dagList2dgCMatrix(x, vn)
  }
}
##
## Matrix 2 something
##
## Convert an adjacency matrix to a named adjacency list: one character
## vector of neighbour names per node.
M2adjList <- function(x) {
  .check.that.input.is.matrix(x)
  vn <- colnames(x)
  if (!isadjMAT_(x)) {
    stop("'x' is not an adjacency matrix\n")
  }
  nbr_idx <- lapply(rowmat2list(x), function(row) which(row != 0))
  out <- lapply(nbr_idx, function(j) vn[j])
  names(out) <- vn
  out
}
## FIXME: M2ugList: Need a check for undirectedness
## Return the generating cliques of the undirected graph given by
## adjacency matrix x (first element of maxCliqueMAT's result).
M2ugList <- function(x) {
  .check.that.input.is.matrix(x)
  maxCliqueMAT(x)[[1]]
}
## Coerce an adjacency matrix to a graphNEL via the registered as() method.
M2graphNEL <- function(x) {
  .check.that.input.is.matrix(x)
  as(x, "graphNEL")
}
## Convert a DAG adjacency matrix (nonzero entries in column j mark the
## parents of node j) into a list of (vertex, parents) generators.
## FIX: iterate with seq_along() instead of 1:length(vn) (which yields
## c(1, 0) when there are no column names) and avoid `c` as a variable
## name, which shadows base::c inside this scope.
M2dagList <- function(x){
  .check.that.input.is.matrix(x)
  vn <- colnames(x)
  cols <- colmat2list(x)
  par_idx <- lapply(cols, function(z) which(z != 0))
  gen_idx <- lapply(seq_along(vn), function(j) c(j, par_idx[[j]]))
  lapply(gen_idx, function(j) vn[j])
}
## Stop unless `x` is a base matrix or a Matrix::dgCMatrix.
## FIX: class(x) == "matrix" breaks in R >= 4.0 where class(<matrix>) is
## c("matrix", "array"): the `||` then receives a length-2 condition,
## which is an error in recent R. inherits() handles multi-class objects.
.check.that.input.is.matrix <- function(x){
  if (!(inherits(x, "matrix") || inherits(x, "dgCMatrix")))
    stop("Input must be a matrix or a dgCMatrix\n")
}
## Convert a chordal undirected graphNEL into an equivalent DAG by
## directing each edge along a maximum-cardinality-search (MCS)
## elimination ordering: the parents of m[i] are the earlier MCS
## vertices adjacent to it.
## FIX: inherits() instead of class(object) != "graphNEL".
ug2dag <- function(object){
  if (!inherits(object, "graphNEL"))
    stop("Object 'object' must be a graphNEL")
  if (graph::edgemode(object) != "undirected")
    stop("Graph must have undirected edges")
  ## an empty MCS ordering means the graph is not chordal
  if (length(m <- mcs(object)) == 0)
    stop("Graph is not chordal")
  adjList <- graph::adj(object, m)
  vparList <- vector("list", length(m))
  names(vparList) <- m
  vparList[[1]] <- m[1]
  if (length(m) > 1){
    for (i in 2:length(m)){
      vparList[[i]] <- c(m[i],
                         intersectPrim(adjList[[i]], m[1:i]))
    }
  }
  dagList(vparList)
}
#' .eliminationOrder <- function(gg){
#' is.acyc <- TRUE
#' ### amat <- as.adjmat(gg)
#' amat <- as.adjMAT(gg)
#' elorder <- NULL
#' repeat{
#' idx <- which(rowSums(amat)==0)
#' if (!length(idx)){
#' return(NULL)
#' }
#' elorder <- c(elorder, idx)
#' amat <- amat[-idx,-idx]
#' if(all(c(0,0)==dim(amat))){
#' break()
#' }
#' }
#' names(rev(elorder))
#' }
## Represent list of sets in a matrix...
## FIXME: glist2setMAT: Used in gRain 1.2-3, but not in gRain 1.2-4
## FIXME: should be deleted for next release
## Encode a list of sets as a 0/1 incidence matrix: one row per set in
## `glist`, one column per element of `vn` (rows are indexed by the
## sets' element names).
## FIX: loop with seq_along() so an empty `glist` does not iterate over
## c(1, 0) as 1:length(glist) would (which errored on glist[[1]]).
glist2setMAT <- function(glist, vn = unique(unlist(glist))){
  amat <- matrix(0, nrow = length(glist), ncol = length(vn))
  colnames(amat) <- vn
  for (i in seq_along(glist)){
    amat[i, glist[[i]]] <- 1
  }
  amat
}
#' genL2M <- function( x, result="matrix"){
#' ##glist2adjMAT <- function(glist, vn=unique(unlist(glist)), result="matrix")
#' ugList2M(x, result=result)
#' }
#' vpaL2M <- function(x, result="matrix"){
#' vpaList2adjMAT(x, result=result)
#' }
|
107e157cc8066ca318b9cdcc34c2a63ce31d3ef6
|
ff6ca8e3a11a1445c44759895e11655d0c178cd2
|
/R/sfaStep.R
|
fd9fcb3396e5133ce1ad8cc6cef54319e822b51c
|
[] |
no_license
|
cran/rSFA
|
a375b3402107ecf9bfeb36a9fdafbeacb7881ab4
|
c8faff4caa5007db462de83f9814539174a543fd
|
refs/heads/master
| 2022-05-06T15:11:09.907585
| 2022-03-29T09:00:07
| 2022-03-29T09:00:07
| 17,698,959
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 18,291
|
r
|
sfaStep.R
|
###################################################################################
#' Update a step of the SFA algorithm.
#'
#' sfaStep() updates the current step of the SFA algorithm. Depending on \code{sfaList$deg}
#' it calls either \code{\link{sfa1Step}} or \code{\link{sfa2Step}} to do the main work.
#' See further documentation there
#'
#' @param sfaList A list that contains all information about the handled sfa-structure
#' @param arg Input data, each column a different variable
#' @param step Specifies the current SFA step. Must be given in the right sequence:
#' for SFA1 objects: "preprocessing", "sfa"\cr
#' for SFA2 objects: "preprocessing", "expansion", "sfa"
#' Each time a new step is invoked, the previous one is closed, which
#' might take some time.
#' @param method Method to be used: For \code{sfaList$step="expansion"} the choices are "TIMESERIES" or "CLASSIF". \cr
#' For \code{sfaList$step="sfa"} (\code{\link{sfa2Step}} only) the choices are "SVDSFA" (recommended) or "GENEIG" (unstable).
#'
#' @return list \code{sfaList} taken from the input, with new information added to this list.
#' See \code{\link{sfa1Step}} or \code{\link{sfa2Step}} for details.
#'
#' @examples
#' ## Suppose you have divided your training data into two chunks,
#' ## DATA1 and DATA2. Let the number of input dimensions be N. To apply
#' ## SFA on them write:
#' \dontrun{
#' sfaList = sfa2Create(N,xpDim(N))
#' sfaList = sfaStep(sfaList, DATA1, "preprocessing")
#' sfaList = sfaStep(sfaList, DATA2)
#' sfaList = sfaStep(sfaList, DATA1, "expansion")
#' sfaList = sfaStep(sfaList, DATA2)
#' sfaList = sfaStep(sfaList, NULL, "sfa")
#' output1 = sfaExecute(sfaList, DATA1)
#' output2 = sfaExecute(sfaList, DATA2)
#' }
#'
#' @seealso \code{\link{sfa1Step}} \code{\link{sfa2Step}} \code{\link{sfa1Create}} \code{\link{sfa2Create}} \code{\link{sfaExecute}}
#' @export
###################################################################################
sfaStep <- function (sfaList, arg, step=NULL, method=NULL){
  # Coerce the data chunk to a matrix unless no data were supplied.
  if (!is.null(arg)) {
    arg <- as.matrix(arg)
  }
  # Default method: "SVDSFA" when closing with the "sfa" step,
  # otherwise "TIMESERIES".
  if (is.null(method)) {
    method <- if (!is.null(step) && step == "sfa") "SVDSFA" else "TIMESERIES"
  }
  # Delegate to the degree-1 or degree-2 implementation and return the
  # updated structure.
  if (sfaList$deg == 1) {
    sfa1Step(sfaList, arg, step, method)
  } else {
    sfa2Step(sfaList, arg, step, method)
  }
}
###################################################################################
#' A step in the SFA2 algorithm.
#'
#' !!! Do not use this function directly, use sfaStep instead !!!
#'
#' @param sfaList A list that contains all information about the handled sfa-structure
#' @param arg Input data, each column a different variable
#' @param step Specifies the current SFA step. Must be given in the right sequence:
#' for SFA1 objects: "preprocessing", "sfa"\cr
#' for SFA2 objects: "preprocessing", "expansion", "sfa"
#' Each time a new step is invoked, the previous one is closed, which
#' might take some time.
#' @param method Method to be used: For \code{sfaList$step="expansion"} the choices are "TIMESERIES" or "CLASSIF". \cr
#' For \code{sfaList$step="sfa"} the choices are "SVDSFA" (recommended) or "GENEIG" (unstable).
#' GENEIG is not implemented in the current version, since
#' R lacks the option to calculate generalized eigenvalues easily.
#'
#' @return list \code{sfaList} taken from the input, with new information added to this list.
#' Among the new items are:
#' \item{avg0}{ mean vector in input space}
#' \item{avg1}{ mean vector in expanded space}
#' \item{W0}{ (ppRange x ppRange)-matrix, the whitening matrix for the input data}
#' \item{C}{ covariance matrix of the time-diff of expanded and sphered data}
#' \item{SF}{ (sfaRange x sfaRange)-matrix with rows which contain the directions in expanded space with slow signals. The rows are
#' sorted acc. to increasing eigenvalues of C}
#'
#' @seealso \code{\link{sfaStep}} \code{\link{sfa2Create}} \code{\link{sfa1Step}}
#' @export
#' @keywords internal
###################################################################################
sfa2Step <- function (sfaList, arg=NULL, step=NULL, method=NULL){
#if(is.null(sfaList$dbg)){dbg<-0}else{dbg<-sfaList$dbg}
if(is.null(sfaList$opts$epsC)){epsC<-1e-7}else{epsC<-sfaList$opts$epsC}
if(!is.null(step))
{
oldStep=sfaList$step
# step: init -> preprocessing
if (oldStep=="init" & (step=="preprocessing")){
print("Start preprocessing");
if (substr(sfaList$ppType, 1, 3)=="PCA"){ # check if first three chars are PCA: PCA, PCA2 or PCAVAR
sfaList$lcov=lcovCreate(ncol(arg));
#sfaList$diff=sfaList$lcov;
}
else{
sfaList$sfa1List=sfa1Create(sfaList$ppRange);
}
}
# step: preprocessing -> expansion
else if (oldStep=="preprocessing" & (step=="expansion")){
print("Close preprocessing");
if(sfaList$ppType=="SFA1"){
sfaList$sfa1List=sfaStep(sfaList$sfa1List, NULL, "sfa")
sfaList$W0=sfaList$sfa1List$SF;
sfaList$D0=sfaList$sfa1List$DSF;
sfaList$avg0=sfaList$sfa1List$avg0; #save avg and tlen from lcov
sfaList$tlen0=sfaList$sfa1List$tlen0;
sfaList$sfa1List=NULL; # clear sfa1List
}
else{#use PCA if not SFA1
sfaList$lcov=lcovFix(sfaList$lcov)
if(sfaList$ppType=="PCA"){
print("Whitening and dimensionality reduction (PCA)");
pcaResult=lcovPca(sfaList$lcov,sfaList$ppRange)
sfaList$W0=pcaResult$W;
sfaList$DW0=pcaResult$DW;
sfaList$D0=pcaResult$D;
sfaList$avg0=sfaList$lcov$avg; #save avg and tlen from lcov
sfaList$tlen0=sfaList$lcov$tlen;
#additional check: is covariance matrix illconditioned?
sfaCheckCondition(sfaList$lcov$COVMTX, "input")
}
else if(sfaList$ppType=="PCA2"){
# the improved preprocessing sphering by Konen, using SVD.
# Redundant dimensions with eigenvalue close to zero are detected
# and the corresponding rows in W0 removed.
print("Whitening and dimensionality reduction (PCA2)");
pcaResult=lcovPca2(sfaList$lcov,sfaList$ppRange)
sfaList$W0=pcaResult$W;
sfaList$DW0=pcaResult$DW;
sfaList$D0=pcaResult$D;
# lcovPca2 will null the rows of SFA_STRUCTS{hdl}.W0 with too
# small eigenvalues. Here we reduce the rows of W0 and the numbers
# pp_range and xp_range accordingly:
ppRange=length(which(colSums(t(sfaList$W0))!=0));
sfaList$ppRange=ppRange
sfaList$xpRange=sfaList$xpDimFun(ppRange)
sfaList$sfaRange=min(cbind(sfaList$xpRange,sfaList$sfaRange)); # ??
sfaList$W0=sfaList$W0[1:ppRange,];
sfaList$avg0=sfaList$lcov$avg;
sfaList$tlen0=sfaList$lcov$tlen;
}
else if(sfaList$ppType=="PCAVAR"){
# another preprocessing as done by Wiskott&Sejnowski 2002 which
# does not use PCA, but simply shifts and scales the input data to
# have zero mean and unit variance
#
print("unit variance w/o dimensionality reduction (PCAVAR)");
varmat = diag(diag(sfaList$lcov$COVMTX));
sfaList$W0 = varmat^(-0.5);
sfaList$avg0=sfaList$lcov$avg;
sfaList$tlen0=sfaList$lcov$tlen;
}
sfaList$lcov=NULL; # clear lcov
}
print("Init expansion step");
#inSize=sfaList$ppRange; #used nowhere.. why?
#if (length(inSize)==2){
# inSize=inSize[2]-inSize[1]+1;
#}
xpSize=sfaList$xpRange;
sfaList$xp=lcovCreate(xpSize);
sfaList$diff=lcovCreate(xpSize);
}
# step: expansion -> sfa
else if (oldStep=="expansion" & (step=="sfa")){
print("Close expansion step");
sfaList$xp=lcovFix(sfaList$xp);
sfaList$avg1=sfaList$xp$avg;
sfaList$tlen1=sfaList$xp$tlen;
xpsize=sfaList$xpRange
sfaList$diff=lcovFix(sfaList$diff);
print("Perform Slow Feature Analysis")
sfaInt=sfaGetIntRange(sfaList$sfaRange);
################################################################################
#First check method
if(method=="GENEIG" ){#|| dbg>0){
stop("GENEIG method is not implemented in rSFA package.
Please choose method SVDSFA instead.") #see note below
#sfaCheckCondition(sfaList$xp$COVMTX, "expanded")
#
# Original Code
#
#Bm<-1*sfaList$xp$COVMTX
#Am<-1*sfaList$diff$COVMTX
#res<-sfaDggev(Am,Bm) #Please note: sfaDggev is not running properly, thus deprecated, not part of package. Code not working.
#D=res$val;
#sfaList$SF<-res$vec;
#
# End Originial Code
#
}
#TODO WHY IF INSTEAD OF ELSE IF ???
if(method=="SVDSFA"){
# extension /WK/08/2009: first sphere expanded data with
# LCOV_PCA2, taking care of zero or very small eigenvalues in B
# by using the SVD approach
#
print("Using alternate [WisSej02] approach for SFA-calculation ...")
pcaResult<-lcovPca2(sfaList$xp);
S<-pcaResult$W # S: sphering matrix for expanded data (xphdl)
#not used anywhere?#DS<-pcaResult$DW # DS: de-sphering matrix, BD: eigenvalues
BD<-pcaResult$D # of B (covariance matrix of expanded data)
C = S %*% sfaList$diff$COVMTX %*% t(S);
#res= eigen(C);
#W1=res$vectors
#D1=res$values
resvd=svd(C,nu=0,nv=ncol(C))
W1=resvd$v;
D1=resvd$d;
SF1 = t(S)%*%W1;
sfaList$SF = SF1;
sfaList$BD = BD;
sfaList$myS=S;
D=D1;
}
#always calculate rank(B) (for diagnostics only)
B = sfaList$xp$COVMTX;
rankB = qr(B)$rank; #TODO: this might not be completely the same like matlabs rank(B);
print(paste("rank of B = ",rankB));
sfaList$rankB = rankB;
sfaList$myB = B; # needed for nl_regress only
idx=t(order(D)); # % idx(1): index to smallest eigenvalue, idx(2): to 2nd-smallest, ...
lammax=max(D);
print(paste("epsC*lammax= ",epsC*lammax)); #TODO maybe remove this print? or only do with high verbosity (implement later)
if(method=="SVDSFA"){
#rankC = qr(C)$rank #only used in sfatk for a print, skipped.
#print(paste("rank of C = ",rankC)); #see above
#idx = idx[which(D[idx]!=0)]; # 'SVDSFA': exclude eigenvalues which
# are *exactly* zero from further
# analysis, since they correspond to
# zeros in the sphering matrix
# (degenerate dimensions)
idx = idx[which(abs(D[idx])>rep(epsC*lammax,length(D[idx])))]; #TODO: ugly solution ?
sfaInt = 1:length(idx);
sfaList$sfaRange = length(idx);
# REMARK: These statement were also beneficial for 'GENEIG', because
# there it may happen in the degenerate case that some eigenvalues of
# D become negative (??, and the corresponding eigenvectors contain
# only noisy signals). However, we do not apply it here, because it
# lets 'GENEIG' deviate from the original [Berkes03] code. And the
# slow signals would still have the wrong variance.
}
sfaList$DSF<-t(D[idx[sfaInt]]);
sfaList$SF<-t(sfaList$SF[,idx[sfaInt]]);
################################################################################
#clear unneeded parts
sfaList$cp=NULL;
sfaList$diff=NULL;
print("SFA2 closed");
}
else if (!(oldStep==step)){ #oldStep and step should only be different for well defined sequences like above
warning("Unknown Step Sequence in sfa2Step")
return(sfaList)
}
sfaList$step=step;
}
#
# things to do always when sfaList$step is either 'preprocessing' or 'expansion'
# (no matter whether it is invoked for the first time or once again)
#
if(sfaList$step=="preprocessing"){
if(substr(sfaList$ppType, 1, 3)=="PCA"){
sfaList$lcov=lcovUpdate(sfaList$lcov,arg);
}
else{ #else SFA1
sfaList$sfa1List=sfaStep(sfaList$sfa1List, arg, "preprocessing")
}
}
if(sfaList$step=="expansion"){
#arg=arg-customRep(sfaList$avg0,customSize(arg,1));
arg=arg-matrix(sfaList$avg0,customSize(arg,1),length(sfaList$avg0),byrow=T) #MZ, 11.11.12: speedfix
arg=sfaList$sfaExpandFun(sfaList, arg %*% t(sfaList$W0));
sfaList$xp=lcovUpdate(sfaList$xp,arg);
if(method=="TIMESERIES"){
sfaList$diff=lcovUpdate(sfaList$diff, sfaTimediff(arg,sfaList$axType));
}
else if (method=="CLASSIF"){
# extension /WK/08/2009: generate the difference of all pattern
# pairs in 'pdiff'
K = customSize(arg,1);
lt = customSize(arg,2);
if(K<2){
stop("This class has less than two training records. Expansion can not run, pattern difference can not be calculated")
}
pdiff = NULL;
for (k in 1:(K-1)){ #TODO: check and maybe improve
#pdiff = rbind(pdiff, customRep(t(arg[k,]),K-k) - arg[(k+1):K,]);
pdiff = rbind(pdiff, matrix(t(arg[k,]),K-k,lt,byrow=TRUE) - arg[(k+1):K,]);#MZ, 11.11.12: speedfix
if (k%%100==0) { # Time and Mem optimization: do not let pdiff grow too large /WK/01/2012
sfaList$diff=lcovUpdate(sfaList$diff, pdiff);
pdiff=NULL;
#cat("zeroing pdiff\n");
}
#cat(k,"\n");flush.console();
}
sfaList$diff=lcovUpdate(sfaList$diff, pdiff);
}
else{
warning(paste(method," is not an allowed method in expansion step"));
}
}
return(sfaList)
}
###################################################################################
#' A step in the SFA1 algorithm.
#'
#' !!! Do not use this function directly, use sfaStep instead !!!
#'
#' @param sfaList A list that contains all information about the handled sfa-structure
#' @param arg Input data, each column a different variable
#' @param step Specifies the current SFA step. Must be given in the right sequence:
#' for SFA1 objects: "preprocessing", "sfa"\cr
#' for SFA2 objects: "preprocessing", "expansion", "sfa"
#' Each time a new step is invoked, the previous one is closed, which
#' might take some time.
#' @param method Method to be used: For \code{sfaList$step="expansion"} the choices are "TIMESERIES" or "CLASSIF". \cr
#' For \code{sfaList$step="sfa"} currently no choices.
#'
#' @return list \code{sfaList} taken from the input, with new information added to this list.
#' Among the new items are:
#' \item{avg0}{ mean vector in input space}
#' \item{SF}{ (sfaRange x sfaRange)-matrix with rows which contain the directions in expanded space with slow signals. The rows are
#' sorted acc. to increasing eigenvalues of time-diff covariance matrix}
#'
#' @seealso \code{\link{sfaStep}} \code{\link{sfa1Create}} \code{\link{sfa2Step}}
#' @export
#' @keywords internal
###################################################################################
sfa1Step <- function (sfaList, arg=NULL, step=NULL, method=NULL){
  # One incremental step of the (linear) SFA1 algorithm.
  # A non-NULL `step` triggers a state transition; `arg` carries a batch of
  # input data (rows = time points / patterns, columns = variables).
  if(!is.null(step))
  {
    oldStep=sfaList$step
    # Transition: "init" -> "preprocessing": allocate the covariance
    # accumulators for the input data and its time differences.
    if (oldStep=="init" & (step=="preprocessing")){
      print("Start preprocessing");
      sfaList$lcov=lcovCreate(ncol(arg));
      sfaList$diff=sfaList$lcov;
    }
    # Transition: "preprocessing" -> "sfa": finalize the accumulated
    # covariances and solve the slow-feature problem.
    else if (oldStep=="preprocessing" & (step=="sfa")){
      print("Close preprocessing");
      sfaList$lcov=lcovFix(sfaList$lcov);
      sfaList$avg0=sfaList$lcov$avg;     # mean vector of the input data
      sfaList$tlen0=sfaList$lcov$tlen;   # number of samples accumulated
      print("Perform slow feature analysis");
      # sfaRange may be a single count or an explicit [from, to] pair.
      if(length(sfaList$sfaRange)==1){
        sfaInt=1:sfaList$sfaRange;
      }
      else{
        sfaInt=sfaList$sfaRange[1]:sfaList$sfaRange[2];
      }
      ################################################################################
      #
      # original code: with generalized eigenvalues. unstable and bad implementation
      #
      #Bm<-1*sfaList$lcov$COVMTX
      #Am<-1*sfaList$diff$COVMTX
      #res<-sfaDggev(Am,Bm) #will not work for complex inputs
      #D=res$val; #CAREFULL: This only works for non complex outputs, complex outputs are difficult
      #idx=t(order(D));
      #sfaList$DSF<-t(D[idx[sfaInt]]);
      #sfaList$SF<-t(res$vec[,idx[sfaInt]]);
      #
      # end of original code
      #
      # SVD approach instead: sphere the data first, then diagonalize the
      # sphered time-difference covariance with a (numerically stable) SVD.
      pcaResult<-lcovPca2(sfaList$lcov);
      S<-pcaResult$W # S: sphering matrix for expanded data
      C = S %*% sfaList$diff$COVMTX %*% t(S);
      resvd=svd(C,nu=0,nv=ncol(C))
      W1=resvd$v;
      D=resvd$d;      # eigenvalues of C (slowness values, unsorted)
      sfaList$SF = t(S)%*%W1;
      idx=t(order(D)); # % idx(1): index to smallest eigenvalue, idx(2): to 2nd-smallest, ...
      lammax=max(D);
      # epsC: relative threshold below which eigen-directions are treated as
      # degenerate and discarded (defaults to 0 when not configured).
      if(is.null(sfaList$opts$epsC)){epsC<-0}else{epsC<-sfaList$opts$epsC}
      print(paste("epsC*lammax= ",epsC*lammax)); #TODO maybe remove this print? or only do with high verbosity (implement later)
      idx = idx[which( abs(D[idx])>rep(epsC*lammax,length(D[idx])))];
      sfaInt = 1:length(idx);
      # Keep the retained directions, sorted from slowest to fastest.
      sfaList$DSF<-t(D[idx[sfaInt]]);
      sfaList$SF<-t(sfaList$SF[,idx[sfaInt]]);
      ################################################################################
      # clean up: the accumulators are no longer needed once SF is computed
      sfaList$lcov=NULL;
      sfaList$diff=NULL;
      print("SFA1 closed");
    }
    # Any other transition with oldStep != step is an error in the calling
    # sequence; return the structure unchanged.
    else if (!(oldStep==step)){
      warning("Unknown Step Sequence in sfa1Step")
      return(sfaList)
    }
    sfaList$step=step;
  }
  # Work performed on every call while in the "preprocessing" state:
  # accumulate the input covariance and the difference covariance.
  if(sfaList$step=="preprocessing"){
    sfaList$lcov=lcovUpdate(sfaList$lcov,arg);
    if(method=="TIMESERIES"){
      # difference signal = discrete time derivative of the input
      sfaList$diff=lcovUpdate(sfaList$diff, sfaTimediff(arg,sfaList$axType));
    }
    else if (method=="CLASSIF"){
      #% extension /WK/12/2009: generate the difference of all pattern
      #% pairs in 'pdiff'
      K = customSize(arg,1);
      lt = customSize(arg,2);
      pdiff = NULL;
      # All pairwise differences between the K patterns of this class.
      for (k in 1:(K-1)){ #TODO: check and maybe improve
        #pdiff = rbind(pdiff, customRep(arg[k,],K-k) - arg[(k+1):K,]);
        pdiff = rbind(pdiff, matrix(t(arg[k,]),K-k,lt,byrow=TRUE) - arg[(k+1):K,]);#MZ, 11.11.12: speedfix
      }
      sfaList$diff=lcovUpdate(sfaList$diff, pdiff);
    }
    else{
      stop(paste(method," is not an allowed method in expansion step"));
    }
  }
  return(sfaList)
}
|
d79f579c185065eb7810c23adbcaacafc748261b
|
1c766196fe74bfb2e8f05b286431b6973223435f
|
/strategies/my_limit.R
|
d81be249397423695160e1b78a03acee60bbd477
|
[] |
no_license
|
RadishLamb/automated-trading-program
|
dc51a8d73bb6fc10e79a611e43d92aafe50dc7ba
|
16cddcf524cdf76eb9ced3eb2667f30e3d870b79
|
refs/heads/master
| 2020-12-14T12:16:21.544452
| 2020-01-18T13:37:19
| 2020-01-18T13:37:19
| 234,739,259
| 0
| 0
| null | null | null | null |
GB18030
|
R
| false
| false
| 4,061
|
r
|
my_limit.R
|
# FOR A GENERAL EXPLANATION OF REQUIREMENTS ON getOrders see rsi_contrarian.R
# Marketmaking strategy
# Places buy and sell limit orders around close price
# Spread is determined by daily range
# Unit position sizes for limit orders
# Uses market order to clear inventory when it becomes too large
# Note: limit orders are automatically cancelled at the end of the day
# maxRows: preallocated row capacity of the close-price store; must be at
# least the number of trading periods in the backtest.
maxRows <- 3100
# Strategy entry point, called by the backtester once per period.
#
# store:       strategy state list (NULL on the first call); see initStore()
# newRowList:  list of the latest OHLC rows, one element per series
# currentPos:  numeric vector of current positions per series
# params:      list with series, lookback, spreadPercentage, inventoryLimits
#
# Returns a list with the updated store, market orders, and paired
# buy/sell limit orders with their prices.
getOrders <- function(store, newRowList, currentPos, params) {
  # Initialise state on the first call.
  if (is.null(store)) store <- initStore(newRowList, params$series)

  # Inventory control: if the absolute position exceeds the limit,
  # flatten it completely with a market order.
  marketOrders <- ifelse(abs(currentPos) > params$inventoryLimits, -currentPos, 0)

  allzero <- rep(0, length(newRowList))
  limitOrders1 <- limitOrders2 <- limitPrices1 <- limitPrices2 <- allzero

  # Use the daily range (High - Low) as an indicator of a reasonable
  # "spread" for this pseudo market-making strategy.
  # (seq_along is safe even if newRowList is empty, unlike 1:length.)
  spread <- sapply(seq_along(newRowList), function(i)
    params$spreadPercentage * (newRowList[[i]]$High - newRowList[[i]]$Low))

  # Only quote once enough history has accumulated for the indicators.
  if (store$iter > params$lookback) {
    limitOrders1 <- rep(1, length(newRowList))   # BUY limit orders (buy below this price)
    limitPrices1 <- sapply(seq_along(newRowList), function(i)
      newRowList[[i]]$Close +
        calculateDirection(store$cl, params$series[i], store$iter) *
        calculatePercentage(store$cl, params$series[i], store$iter) *
        newRowList[[i]]$Close - spread[i] / 2)
    limitOrders2 <- rep(-1, length(newRowList))  # SELL limit orders (sell above this price)
    limitPrices2 <- sapply(seq_along(newRowList), function(i)
      newRowList[[i]]$Close +
        calculateDirection(store$cl, params$series[i], store$iter) *
        calculatePercentage(store$cl, params$series[i], store$iter) *
        newRowList[[i]]$Close + spread[i] / 2)
  }

  # BUG FIX: the original passed the undefined name `ppos` here; it only
  # avoided a runtime error because updateStore() never evaluates its
  # (unused) fourth argument. Pass currentPos so the call is well-defined.
  store <- updateStore(store, newRowList, params$series, currentPos)

  return(list(store = store,
              marketOrders = marketOrders,
              limitOrders1 = limitOrders1,
              limitPrices1 = limitPrices1,
              limitOrders2 = limitOrders2,
              limitPrices2 = limitPrices2))
}
##################################
# functions for managing the store
# Allocate an empty close-price store: a zero matrix with one column per
# traded series, preallocated to the file-level maxRows capacity.
initClStore <- function(newRowList, series) {
  matrix(0, nrow = maxRows, ncol = length(series))
}
# Record the latest close price of every traded series in row `iter`
# of the close-price store.
#
# clStore:    numeric matrix created by initClStore()
# newRowList: list of the latest OHLC rows; element `series[i]` must
#             have a $Close field
# series:     integer vector of series ids, used both to index
#             newRowList and as the column index into clStore
# iter:       current iteration number (the row to write)
#
# Returns the updated matrix.
updateClStore <- function(clStore, newRowList, series, iter) {
  # seq_along() instead of 1:length(series): a zero-length `series`
  # must be a no-op, not an iteration over c(1, 0).
  for (i in seq_along(series)) {
    clStore[iter, series[i]] <- as.numeric(newRowList[[series[i]]]$Close)
  }
  return(clStore)
}
# Build a fresh strategy store: an iteration counter starting at zero
# plus the preallocated close-price matrix.
initStore <- function(newRowList, series) {
  list(iter = 0, cl = initClStore(newRowList, series))
}
# Advance the store by one period: increment the counter and record the
# latest close prices. The fourth argument is accepted for interface
# compatibility with callers but is not used.
updateStore <- function(store, newRowList, series, psos) {
  store$iter <- store$iter + 1
  store$cl <- updateClStore(store$cl, newRowList, series, store$iter)
  store
}
######################
# main function
# Mean relative gap between the most recent close and each of the previous
# `params$lookback` closes of one series (positive => price above its
# recent history). Reads the global `params` set up by the backtester;
# `last()` is expected to be in scope from the backtesting framework.
calculatePercentage <- function(clStore, column, iter) {
  startIndex <- iter - params$lookback - 1
  latestClose <- last(clStore[startIndex:iter, column])
  percentage <- vapply(seq_len(params$lookback), function(i) {
    latestClose / clStore[iter - params$lookback + i - 1, column] - 1
  }, numeric(1))
  # debugging output retained from the original implementation
  print(column)
  print(percentage)
  mean(percentage)
}
# Trend filter for one series: +1 when the latest close sits above its
# `params$lookback`-period simple moving average, -1 otherwise. Reads the
# global `params`; SMA() and last() come from the backtesting environment
# (TTR / xts respectively).
calculateDirection <- function(clStore, column, iter) {
  startIndex <- iter - params$lookback - 1
  closes <- clStore[startIndex:iter, column]
  smoothed <- last(SMA(closes, params$lookback))
  if (last(closes) > smoothed) 1 else -1
}
|
f3138abc6c8057a1281b752feeba39ced0551773
|
90b30c4d63da6381edc5d82856c9b298e655fb5b
|
/R/script2_aafreq_perDrug_perVar_v4_refConsensus.R
|
84b6c0e46ea9d828453a31b944c9416c69270696
|
[] |
no_license
|
manonr/covid-therapeutics
|
979810e5570daef5fa58ec1da1965d9ec36b5582
|
ef7e9f913678f9a2376dcc88546ff7abadf0eb43
|
refs/heads/main
| 2023-04-06T21:54:20.473008
| 2022-10-13T21:45:07
| 2022-10-13T21:45:07
| 549,894,855
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,597
|
r
|
script2_aafreq_perDrug_perVar_v4_refConsensus.R
|
# main analysis
# this script takes all the translated protein sequences;
# for each drug/gene combo of interest, the amino-acid frequencies are
# compared across treated and untreated patients.
# Prints one csv per combo examined
# and one summary csv with all significant combinations.
# M. Ragonnet
# 09/03/2022
library(ape)
library(phangorn)
library(bioseq)
library(tidyverse)
library(ggplot2)
library(ggpubr)
runDate <- "April12"
ndays <- 5 ########## how many days after treatment do you count sequences as post treatment?
subdir <- paste0(ndays,"dayCutoff")
# Machine-specific paths: the second assignment deliberately overrides the
# first (two different workstations); edit to match the local machine.
refDir <- "C:/Users/mr909/OneDrive/Projects/ncov/PHE/therapeutics/reference"
refDir <- "C:/Users/manon.ragonnet/Documents/Projects/therapeutics/reference"
rootDir <- paste0("C:/Users/mr909/OneDrive/Projects/ncov/PHE/therapeutics/", runDate)
rootDir <- paste0("C:/Users/manon.ragonnet/Documents/Projects/therapeutics/", runDate)
setwd(rootDir)
wDir <- paste0(rootDir, "/",subdir)
dir.create(subdir)
setwd(wDir)
# drugCombos: one row per drug/gene combination to analyse
drugCombos <- read.csv("../../drug_gene_combos.csv", stringsAsFactors = F)
lineages <- read.csv("../therapeutics_with_seq_lineages.csv", stringsAsFactors = F)
# Keep only "post" records more than ndays after treatment, plus all "pre" records.
lineages <- rbind(lineages[lineages$prepost=="post" & lineages$date_difference>ndays,], lineages[lineages$prepost=="pre",])
# Amino-acid alignment files: any file in rootDir whose name contains "aa".
aa_files <- dir(rootDir)[grep("aa", dir(rootDir))]
variants <- unique(lineages$variant)
# summary_output starts with one dummy row (dropped at the end) that fixes
# the column names/types for subsequent rbind() calls.
summary_output <- data.frame(gene="spike",variant="variant", treatment="treatment", pos=1, aminoacid="A", npost_uniqueseq=0,
npre_uniqueseq=0, proppost=0, proppre=0, p=1,npost_uniquePatient=0, npre_uniquePatient=0, p2=1, stringsAsFactors = FALSE)
summary_line <- 0
# Main analysis loop: for every drug/gene combo and every variant, compare
# per-position amino-acid frequencies between pre- and post-treatment
# sequences with Fisher's exact tests.
for (z in 1:length(drugCombos[,1])){
genename <- drugCombos$gene[z]
drug <- drugCombos$drug[z]
# Pick the alignment file for this gene (gene name followed by a dot).
aafile <- aa_files[grep(paste0(genename, "\\."), aa_files)]
aa_seq <- bioseq::read_fasta(paste0(rootDir, "/",aafile), type="AA")
# strip Windows carriage returns from the FASTA headers
names(aa_seq) <- gsub("\r", "", names(aa_seq))
# keep the first sequence for each distinct header
aa_seq_uniqueID <- aa_seq[match(unique(names(aa_seq)),names(aa_seq))]
for (var in variants){
# variant-specific reference/consensus sequence for this gene
refgenome <- bioseq::read_fasta(paste(refDir,var,aafile, sep="/"), type="AA")
ref_tibble <- tibble(label = names( refgenome), sequence = refgenome )
print(paste(drug, genename, var))
# per-combo output table; dummy first row fixes columns, overwritten below
output <- data.frame(gene="spike",variant="variant", treatment="treatment", pos=1, aminoacid="A", npost_uniqueseq=0,
npre_uniqueseq=0, proppost=0, proppre=0, p=1,npost_uniquePatient=0, npre_uniquePatient=0, p2=1, stringsAsFactors = FALSE)
line <- 0
# samples from patients on this drug with this variant
patients_var <- unique(lineages$central_sample_id[lineages$intervention==drug & lineages$variant==var])
drug_aa_seq <- aa_seq_uniqueID[unique(unlist(lapply(patients_var, function(x) {grep(x, names(aa_seq_uniqueID))})))]
fra_data <- tibble(label = names( drug_aa_seq ), sequence = drug_aa_seq )
# label each sequence as pre- or post-treatment via the lineage metadata
fra_data$treatment <- "unknown"
fra_data$treatment[match(lineages$central_sample_id[lineages$prepost=="post"], fra_data$label)] <- "post"
fra_data$treatment[match(lineages$central_sample_id[lineages$prepost=="pre"], fra_data$label)] <- "pre"
fra_data$uniqueID <- lineages$uniq_ID[match(fra_data$label, lineages$central_sample_id)]
print(table( fra_data$uniqueID,fra_data$treatment))
if(nrow(fra_data)>1){
# iterate over every amino-acid position of the alignment
for (i in 1:as.numeric(nchar(fra_data$sequence[1]))){
# print(i)
# contingency table: residue at position i (rows) x treatment (cols)
tab <- t(table(fra_data$treatment, unlist(lapply(fra_data$sequence, function(x) {strsplit(x, "")[[1]][i]}))))
# remove rows that are gaps (~), unknowns (X) or stop codons (*)
zig <- which(rownames(tab)=="~")
if(length(zig)>0){
tab <- tab[-zig,]
}
Xs <- which(rownames(tab)=="X")
if(length(Xs)>0){
tab <- tab[-Xs,]
}
stars <- which(rownames(tab)=="*")
if(length(stars)>0){
tab <- tab[-stars,]
}
#print( tab)
# only test polymorphic positions with at least one post-treatment count
if (length(tab)>2 ){
if(sum(tab[,1])>0){
fish <- fisher.test(tab,simulate.p.value=TRUE)
temptab <- cbind(prop.table(tab), Total = rowSums(prop.table(tab)))
#maxAA <- names(which(temptab[,3]==max(temptab[,3])))
# reference residue at this position (used to name the change, e.g. D614G)
refAA <- unlist(lapply(ref_tibble$sequence, function(x) {strsplit(x, "")[[1]][i]}))
if(fish$p.value<1){ ############### you can change p value here
#print(i)
dftemp <- data.frame(uniqueID=fra_data$uniqueID, treat=fra_data$treatment,residue=unlist(lapply(fra_data$sequence, function(x) {strsplit(x, "")[[1]][i]})))
# one output row per residue observed at this position
for (j in 1:length(tab[,1])){
line <- line+1
AAchange <- paste0(refAA, i, rownames(tab)[j])
# count the number of unique patients with each mutation (as well as number of sequences)
unique_pre <- length(unique(dftemp$uniqueID[dftemp$residue==rownames(tab)[j] & dftemp$treat=="pre"]))
unique_post <- length(unique(dftemp$uniqueID[dftemp$residue==rownames(tab)[j] & dftemp$treat=="post"]))
outputLine <- c(genename,var, drug, i, AAchange,tab[j,1],tab[j,2],
round(tab[j,1]/sum(tab[,1]),4), round(tab[j,2]/sum(tab[,2]),4),
round(fish$p.value,6), unique_post, unique_pre, "notyet")
output[line,] <- outputLine
}
# second Fisher test on unique-patient counts (not raw sequence counts)
lineEnd <- line
lineStart <- lineEnd-length(tab[,1])+1
tab2 <- matrix(as.numeric(c(output$npost_uniquePatient[lineStart:lineEnd], output$npre_uniquePatient[lineStart:lineEnd])),
nrow = length(tab[,1]), ncol=2,byrow = F)
fish2 <- fisher.test(tab2)
output$p2[lineStart:lineEnd] <- round(fish2$p.value,6)
if(fish2$p.value<0.01){ ############### you can change p value here
summary_output <- rbind(summary_output, output[lineStart:lineEnd,])
}
}
}
}
}
# one CSV per gene/drug/variant combination
write.csv(output, paste0(genename,"_", drug,"_",var, "_AA_changes_pre-post-treatment_", runDate,subdir, ".csv"), row.names = FALSE)
}
}
}
# drop the dummy first row before writing the significant-changes summary
summary_output <- summary_output[2:length(summary_output[,1]),]
write.csv(summary_output, paste0("significant_AA_changes_pre-post-treatment_", runDate,"_",subdir,".csv"), row.names = FALSE)
|
2fe709f6954a1abdf2a7ba4ff9bbe9f7c9bea8c9
|
bb7e36e775baf6daa4a63a9aaaad31f86c5ce827
|
/man/n_sent_id.Rd
|
00f26d014b9b1392be4207858298a564880e636a
|
[] |
no_license
|
leoluyi/EOLembrainToolbox
|
31135915bfa4feb9f243a4d6e7b82f09a98bf730
|
f9a57a0f5cf9d777a9c0a6b04342b1604db9f73c
|
refs/heads/master
| 2021-01-21T02:10:48.877362
| 2016-03-12T14:13:21
| 2016-03-12T14:13:21
| 38,605,581
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 539
|
rd
|
n_sent_id.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/n_sent_id.r
\name{n_sent_id}
\alias{n_sent_id}
\title{Count sent IDs from txt files}
\usage{
n_sent_id(sent_id_path, date_from = NULL, date_to = NULL)
}
\arguments{
\item{sent_id_path}{Directory contains .txt files of sent IDs,
of which file name contains date.}
\item{date_from}{Date begin.}
\item{date_to}{Date end.}
}
\description{
Count sent IDs from txt files
}
\examples{
n_sent_id("./exclude_id/", date_from = "2015-09-01", date_to = "2015-09-30")
}
|
00d6e3ee78cac431ddfee60e703b057450ed2a21
|
9aafde089eb3d8bba05aec912e61fbd9fb84bd49
|
/codeml_files/newick_trees_processed/7211_0/rinput.R
|
11ef8aef6e1903c084bf3d48e686cb3c74c1f859
|
[] |
no_license
|
DaniBoo/cyanobacteria_project
|
6a816bb0ccf285842b61bfd3612c176f5877a1fb
|
be08ff723284b0c38f9c758d3e250c664bbfbf3b
|
refs/heads/master
| 2021-01-25T05:28:00.686474
| 2013-03-23T15:09:39
| 2013-03-23T15:09:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 135
|
r
|
rinput.R
|
# Read a phylogenetic tree in Newick format, remove its root, and write the
# unrooted tree back to a new file (consumed downstream by codeml).
library(ape)
testtree <- read.tree("7211_0.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="7211_0_unrooted.txt")
|
63ff4cd8e4bedf48d967d9ec99aab0fbcc7d7c59
|
76464062b84e71f60939f2670290ca82dcda6ca9
|
/R/of_import_date.R
|
8eb76a214585559ee22cb206a892f5dac7506926
|
[] |
no_license
|
TealZee/openforms
|
8dbad9d5d3d3107b0ae9b66498bd2723b87eaa93
|
eb65b158553be494d17c9747665da2dd414fcabc
|
refs/heads/master
| 2020-04-07T18:16:03.087248
| 2019-02-19T16:31:45
| 2019-02-19T16:31:45
| 158,603,980
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,186
|
r
|
of_import_date.R
|
#' Imports form data from openforms.com
#'
#' This function returns form response data submitted on or after a given
#' date for a given form ID, fetched from the OpenForms v4 REST API.
#' @param formID The version ID of the form (Integer)
#' @param apiKey The API key for the form owner's account
#' @param startDate The start date of the query in "%Y-%m-%d %H:%M:%S" format
#' @return A data frame with one column per form field (named after the
#'   field), plus `Date` (submission time) and `ID` (receipt number).
#'   Empty (zero-row) if the form has no responses in the period.
#' @export
#' @examples
#' of_import_date(1000, apiKey, "2019-01-01 00:00:00")
of_import_date <- function(formID, apiKey, startDate) {
  # NOTE(review): changes a global option as a side effect without
  # restoring it on exit — confirm this is intended.
  options(stringsAsFactors = FALSE)
  ########## API CALL TO GET FORM METADATA ##########
  # GET FORM VERSION ID FROM RESPONSES API CALL
  apiMetadata <- httr::GET(paste("https://api.us.openforms.com/api/v4/forms/", formID,"?loadStructure=true", sep=""),
  httr::add_headers("accept" = "application/json", "X-API-KEY" = apiKey, "content-Type" = "application/json"))
  apiMetadata <- httr::content(apiMetadata)
  ########## PARSE JSON TO GET COLUMN NAMES AND CONTROL ID'S ##########
  # Flatten every field of every form section into one data frame
  # (allFields) holding, among others, each field's id and name.
  i <- 1
  for (i in i:length(apiMetadata$sections)) {
    j <- 1
    for (j in j: length(apiMetadata$sections[[i]]$fields)) {
      if (i == 1 && j == 1) {
        allFields <- as.data.frame(t(apiMetadata$sections[[i]]$fields[[j]]))
      } else {
        fields <- as.data.frame(t(apiMetadata$sections[[i]]$fields[[j]]))
        allFields <- rbind(allFields, fields)
      }
      j <- j + 1
    }
    i <- i + 1
  }
  # FORMAT START DATE: URL-encode spaces and colons for the query string.
  startDate <- format(as.POSIXct(startDate), "%Y-%m-%d %H:%M:%S")
  startDate <- gsub(" ", "%20", startDate)
  startDate <- gsub(":", "%3A", startDate)
  ########## PARSE DATA FROM API RESPONSE JSON INTO DATAFRAME ##########
  # First request (without answers) only determines totalItems/totalPages.
  apiResponse <- httr::GET(paste("http://api.us.openforms.com/api/v4/responses?formId=", formID, "&fromDateTime=", startDate,"&loadAnswers=false", sep=""),
  httr::add_headers("accept" = "application/json", "X-API-KEY" = apiKey, "content-Type" = "application/json"))
  apiResponse <- httr::content(apiResponse)
  if (apiResponse$totalItems > 0) {
    totalPages <- apiResponse$totalPages
    x <- 1
    # Page through the responses, 1000 records per request.
    for (x in x:totalPages) {
      ########## API CALL TO GET FORM RESPONSE DATA ##########
      apiResponse <- httr::GET(paste("http://api.us.openforms.com/api/v4/responses?formId=", formID,"&fromDateTime=", startDate,"&loadAnswers=true&pageSize=1000&page=", x, sep=""),
      httr::add_headers("accept" = "application/json", "X-API-KEY" = apiKey, "content-Type" = "application/json"))
      apiResponse <- httr::content(apiResponse)
      # PARSE RESPONSES FOR EACH API CALL
      i <- 1
      for (i in i:length(apiResponse$items)) {
        j<- 1
        # FORMAT EACH RESPONSE AND FORM FIELD CONTROL ID FROM INTO COLUMNS IN DATAFRAME
        # Columns are temporarily named by field id; renamed to the human
        # field name at the end of the function.
        for (j in j:length(apiResponse$items[[i]]$answers)) {
          if (j == 1) {
            questionsAnswers <- as.data.frame(t(apiResponse$items[[i]]$answers[[j]]))
            # Multi-value answers (e.g. checkbox fields) arrive in
            # `multiValues`; collapse them into one comma-separated string.
            if (is.null(questionsAnswers$value)) {
              if (as.character(questionsAnswers$multiValues) == "list(list())" || is.null(questionsAnswers$multiValues)) {
                questionsAnswers$value <- NA
                questionsAnswers$multiValues <- NULL
              } else {
                questionsAnswers$value <- paste(unlist(questionsAnswers$multiValues), collapse = ",")
                questionsAnswers$multiValues <- NULL
              }
            }
            questionsAnswers <- as.data.frame(questionsAnswers$value)
            names(questionsAnswers)[j] <- apiResponse$items[[i]]$answers[[j]]$fieldId
          } else {
            answers <- as.data.frame(t(apiResponse$items[[i]]$answers[[j]]))
            # CHECK FOR OPTIONAL FIELDS WITH NO RESPONSES AND SET RESPONSE TO NA
            if (is.null(answers$value)) {
              if (as.character(answers$multiValues) == "list(list())" || is.null(answers$multiValues)) {
                answers$value <- NA
                answers$multiValues <- NULL
              } else {
                answers$value <- paste(unlist(answers$multiValues), collapse = ",")
                answers$multiValues <- NULL
              }
            }
            answers <- as.data.frame(answers$value)
            names(answers) <- apiResponse$items[[i]]$answers[[j]]$fieldId
            questionsAnswers <- cbind(questionsAnswers, answers)
          }
          j <- j + 1
        }
        # Append submission timestamp and receipt number, then accumulate
        # this response as one row of the page's data frame.
        if (i == 1) {
          questionsAnswers$Date <- apiResponse$items[[i]]$submitDateTime
          questionsAnswers$ID <- apiResponse$items[[i]]$receiptNumber
          pageResponses <- questionsAnswers
        } else {
          questionsAnswers$Date <- apiResponse$items[[i]]$submitDateTime
          questionsAnswers$ID <- apiResponse$items[[i]]$receiptNumber
          names(pageResponses) = names(questionsAnswers)
          pageResponses <- rbind(pageResponses, questionsAnswers, make.row.names = TRUE, stringsAsFactors = FALSE)
        }
        i <- i + 1
      }
      if (x == 1) {
        allResponses <- pageResponses
      } else {
        allResponses <- rbind(pageResponses, allResponses, make.row.names = TRUE, stringsAsFactors = FALSE)
      }
      x <- x + 1
    }
    ########## REFORMAT DATES ##########
    # ISO 8601 "T" separator -> space, then parse as POSIXct.
    allResponses$Date <- as.POSIXct(gsub("T", " ", allResponses$Date))
    ########## SET RESPONSES DATAFRAME COLUMN NAMES TO FIELD NAMES IN OPENFORMS ##########
    matchColumns <- match(names(allResponses), allFields$id)
    matchColumns <- matchColumns[!is.na(matchColumns)]
    names(allResponses)[1:(length(names(allResponses))-2)] <- as.character(allFields$name)[matchColumns]
    # FIX COLUMNS WITH ONLY WHITESPACE IN NAMES IF ANY EXIST
    if (length(names(allResponses)[which(nchar(trimws(names(allResponses))) == 0)]) > 0) {
      names(allResponses)[which(nchar(trimws(names(allResponses))) == 0)] = c(paste("Unnamed Column", 1:length(which(nchar(trimws(names(allResponses))) == 0))))
    }
    print(allResponses)
  } else {
    # No responses: return an empty data frame with one column per
    # non-static form field, named after the field.
    allFields$name <- unlist(allFields$name)
    allFields$type <- unlist(allFields$type)
    allResponses <- data.frame(matrix(ncol = length(allFields$type[!grepl("Static", allFields$type)]), nrow = 0))
    names(allResponses) = allFields$name[!grepl("Static", allFields$type)]
    print(allResponses)
  }
}
|
8645820eb60cc1579e9b477f58d9ef52199312ac
|
b928b21a9550b9a2c5fec8a1ba0f8684d0ae91ba
|
/R/desire_individual.R
|
0f3612f5055046034c03895f5a836bb2492e427b
|
[] |
no_license
|
haleyeidem/integRATE
|
46db94f6bf28123ed6650afd044d123cddb9e8e6
|
78740446450fd8f704933942c218a79fa3feab77
|
refs/heads/master
| 2021-01-09T20:47:48.954149
| 2018-04-19T16:06:57
| 2018-04-19T16:06:57
| 75,875,579
| 1
| 1
| null | 2018-01-27T19:14:09
| 2016-12-07T21:05:41
|
R
|
UTF-8
|
R
| false
| false
| 8,237
|
r
|
desire_individual.R
|
#' Low, high, and extreme desirability functions
#'
#' These functions map numeric variables to a [0, 1] scale where low, high, or
#' extreme values are most desirable.
#'
#' @details
#' Cut points mark where the desirability curve changes: "low"/"high" use
#' cut1 and cut2, "extremes" uses all four. With cut_type "percentile" the
#' cuts are interpreted as quantile probabilities of `x`; with "none" the
#' observed min/max of `x` are used.
#'
#' @param x Vector of numeric values.
#' @param desire_type Class of desirability function to apply (low, high, or
#' extreme).
#' @param cut_type Class of cuts assigned to desirability function (numerical,
#' percentile, or none).
#' @param cut1,cut2,cut3,cut4 Cut points where the desirability function
#' changes.
#' @param min,max Minimum (default = 0) and maximum (default = 1) desirability
#' scores.
#' @param scale Controls shape of the desirability function. Larger values
#' correspond to more steep and strict curves whereas smaller values correspond
#' to more gradual and inclusive curves.
#' @return Returns a numeric vector of desirability scores.
#' @export
# some of the following code is based on https://github.com/stanlazic/desiR
desire_individual <- function(x, desire_type = desire.type, cut_type = cut.type, cut1, cut2, cut3, cut4, min = 0, max = 1, scale = 1){
  # Set desirability function: validate and normalize to "l"/"h"/"e".
  desire.type <- c("low", "l", "high", "h", "extremes", "e")
  if(!hasArg(desire_type)) stop("\ndesire_type should be one of the following: 'low', 'high' or 'extremes'\n\nfor more details see help page ?desire()")
  if(!is.element(desire_type, desire.type)) stop("\ndesire_type should be one of the following: 'low', 'l', 'high', 'h', 'extremes', 'e'")
  if(desire_type == "low") desire_type <- "l"
  if(desire_type == "high") desire_type <- "h"
  if(desire_type == "extremes") desire_type <- "e"
  # Set cut types: default "none" when absent; normalize to "num"/"per"/"no".
  cut.type <- c("numerical", "num", "percentile", "per", "none", "no")
  if(!hasArg(cut_type)) cut_type <- "none"
  if(!is.element(cut_type, cut.type)) stop("\ncut_type should be one of the following: 'numerical', 'num', 'percentile', 'per', 'none', 'no'")
  if(cut_type == "none") cut_type <- "no"
  if(cut_type == "numerical") cut_type <- "num"
  if(cut_type == "percentile") cut_type <- "per"
  # Check for appropriate min, max, and scale
  if(min < 0 | min > 1) stop("\nmin must be between zero and one\n")
  if(max < 0 | max > 1) stop("\nmax must be between zero and one\n")
  if(scale <= 0) stop("\nscale must be greater than zero\n")
  # Initialize vector of NAs
  y <- rep(NA,length(x))
  # nna is TRUE (scalar) when x has no NAs, otherwise a logical mask.
  # NOTE(review): when x contains NAs, indexing like y[x[nna] < cut1]
  # uses a mask shorter than y, which may misalign scores — verify
  # behavior with NA-containing input.
  if(all(nna <- !is.na(x))) nna <- TRUE # True if !NA
  switch(desire_type,
    # Low values are most desirable: 1 below cut1, 0 above cut2,
    # a power curve of exponent `scale` in between.
    l = {
      switch(cut_type,
        # Numerical cuts
        num = {
          if(cut1 >= cut2) stop("\ncut1 must be less than cut2\n")
          # Apply desirability function
          y <- ((x - cut2)/(cut1 - cut2))^scale
          # Override desirability score at cuts
          y[x[nna] < cut1] <- 1
          y[x[nna] > cut2] <- 0
        },
        # Percentile cuts
        per = {
          if(cut1 >= cut2) stop("\ncut1 must be less than cut2\n")
          # Calculate percentile cuts
          per1 <- quantile(x[nna],cut1)
          per2 <- quantile(x[nna],cut2)
          # Apply desirability function
          y <- ((x - per2)/(per1 - per2))^scale
          # Override desirability score at cuts
          y[x[nna] < per1] <- 1
          y[x[nna] > per2] <- 0
        },
        # No cuts: use the observed min and max of x
        no = {
          cut1 <- min(x[nna])
          cut2 <- max(x[nna])
          # Apply desirability function
          y <- ((x - cut2)/(cut1 - cut2))^scale
          # Override desirability score at cuts (min and max)
          y[x[nna] == cut1] <- 1
          y[x[nna] == cut2] <- 0
        }
      )
    },
    # High values are most desirable: mirror of the "l" case.
    h = {
      switch(cut_type,
        # Numerical cuts
        num = {
          if(cut1 >= cut2) stop("\ncut1 must be less than cut2\n")
          # Apply desirability function
          y <- ((x - cut1)/(cut2 - cut1))^scale
          # Override desirability score at cuts
          y[x[nna] < cut1] <- 0
          y[x[nna] > cut2] <- 1
        },
        # Percentile cuts
        per = {
          if(cut1 >= cut2) stop("\ncut1 must be less than cut2\n")
          # Calculate percentile cuts
          per1 <- quantile(x[nna],cut1)
          per2 <- quantile(x[nna],cut2)
          # Apply desirability function
          y <- ((x - per1)/(per2 - per1))^scale
          # Override desirability score at cuts
          y[x[nna] < per1] <- 0
          y[x[nna] > per2] <- 1
        },
        # No cuts
        no = {
          cut1 <- min(x[nna])
          cut2 <- max(x[nna])
          # Apply desirability function
          y <- ((x - cut1)/(cut2 - cut1))^scale
          # Override desirability score at cuts (min and max)
          y[x[nna] == cut1] <- 0
          y[x[nna] == cut2] <- 1
        }
      )
    },
    # Extreme values are most desirable: 1 outside [cut1, cut4],
    # 0 inside [cut2, cut3], power curves on the shoulders.
    e = {
      switch(cut_type,
        # Numerical cuts
        num = {
          if(cut2 >= cut3) stop("\ncut2 must be less than cut3\n")
          if(cut3 >= cut4) stop("\ncut3 must be less than cut4\n")
          for (i in 1:length(x)){
            if (is.na(x[i])) next
            # Apply desirability function
            if (x[i] > cut1 & x[i] < cut2) y[i] <- ((x[i] - cut2)/(cut1 - cut2))^scale
            if (x[i] > cut3 & x[i] < cut4) y[i] <- ((x[i] - cut3)/(cut4 - cut3))^scale
            # Override desirability score between and outside cuts
            if (x[i] <= cut1 | x[i] >= cut4) y[i] <- 1
            if (x[i] >= cut2 & x[i] <= cut3) y[i] <- 0
          }
        },
        # Percentile cuts
        per = {
          if(cut2 >= cut3) stop("\ncut2 must be less than cut3\n")
          if(cut3 >= cut4) stop("\ncut3 must be less than cut4\n")
          # Calculate percentile cuts
          per1 <- quantile(x[nna],cut1)
          per2 <- quantile(x[nna],cut2)
          per3 <- quantile(x[nna],cut3)
          per4 <- quantile(x[nna],cut4)
          for (i in 1:length(x)){
            if (is.na(x[i])) next
            # Apply desirability function
            if (x[i] > per1 & x[i] < per2) y[i] <- ((x[i] - per2)/(per1 - per2))^scale
            if (x[i] > per3 & x[i] < per4) y[i] <- ((x[i] - per3)/(per4 - per3))^scale
            # Override desirability score between and outside cuts
            if (x[i] <= per1 | x[i] >= per4) y[i] <- 1
            if (x[i] >= per2 & x[i] <= per3) y[i] <- 0
          }
        },
        # No cuts: outer cuts from the data, inner plateau collapsed at 0.
        no = {
          cut1 <- min(x[nna])
          cut4 <- max(x[nna])
          cut2 <- 0
          cut3 <- 0
          for (i in 1:length(x)){
            if (is.na(x[i])) next
            # Apply desirability function
            if (x[i] > cut1 & x[i] < cut2) y[i] <- ((x[i] - cut2)/(cut1 - cut2))^scale
            if (x[i] > cut3 & x[i] < cut4) y[i] <- ((x[i] - cut3)/(cut4 - cut3))^scale
            # Override desirability score between and outside cuts
            if (x[i] <= cut1 | x[i] >= cut4) y[i] <- 1
            if (x[i] >= cut2 & x[i] <= cut3) y[i] <- 0
          }
        }
      )
    }
  )
  # Rescale according min to max and return desirability score
  y <- (y * (max - min)) + min; return(y)
}
|
e9f7f6a17ba1be958766e399b7e081dc6638a246
|
1ae4868cf6bfedd4d334777f8068c9a3f8909071
|
/R/formula.censReg.R
|
eee59450dd6edfd5f856d4e1f1306f1ae6707235
|
[] |
no_license
|
cran/censReg
|
b0b5c5fde5279aa5ce810b626e7d3e8056e9ca91
|
d40196a7a63f66cb6bc87fa1ea05ad4c0199cb73
|
refs/heads/master
| 2022-09-06T04:49:25.358517
| 2022-08-07T05:20:02
| 2022-08-07T05:20:02
| 17,695,020
| 2
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 99
|
r
|
formula.censReg.R
|
# S3 formula() method for "censReg" objects: recover the model formula
# from the fitted object's terms.
formula.censReg <- function( x, ... ) {
  formula( terms( x ) )
}
|
cdc0e679748ed6c054b8aacd63561d7bb0b123c2
|
c8113b3977b82486643308229612d98e0e919399
|
/04_ExploratoryData_project2/plot2.R
|
4197be92bab920d655fd2e583c3bbe26fbae3cd4
|
[] |
no_license
|
anyacha/datasciencecoursera
|
2c2f83baebcee29267b85da6516bc88645494e8d
|
44d0f6bd87977a3d04589deb52ca96cc88928f05
|
refs/heads/master
| 2020-05-18T17:44:20.050944
| 2015-08-26T04:53:09
| 2015-08-26T04:53:09
| 38,330,200
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,437
|
r
|
plot2.R
|
# Exploratory Data Analysis (JHU Data Science, course 4) -- course project 2.
# Question 2: Have total emissions from PM2.5 decreased in Baltimore City,
# Maryland (fips == "24510") from 1999 to 2008?

## Step 0: point the session at the project folder.
## (The zip file is already downloaded and unpacked under ./data.)
setwd("~/Desktop/iSchool/Coursera/JohnsHopkinsU_DataScienceSpec/4ExploratoryDataAnalysis/assignments/CourseProject2")

## Step 1: read the emissions summary and the source classification table.
nei <- readRDS("./data/summarySCC_PM25.rds")
scc <- readRDS("./data/Source_Classification_Code.rds")

## Treat the county code and the year as categorical variables.
nei$fips <- as.factor(nei$fips)
nei$year <- as.factor(nei$year)

## Step 2: keep only Baltimore City rows, then total emissions per year.
baltimore <- nei[nei$fips == "24510", ]
baltimore_total_by_year <- lapply(split(baltimore$Emissions, baltimore$year), sum)

## Step 3: plot the yearly totals (custom x axis) and answer the question.
plot(names(baltimore_total_by_year), baltimore_total_by_year,
     type = "o", col = "blue", lwd = 2,
     main = "Baltimore: trends in total PM2.5 emission from 1999 to 2008",
     sub = "decrease in 2002, increase in 2005, and decrease in 2008",
     xlab = "Year", ylab = "Total emissions (in tons)", xaxt = 'n',
     ylim = range(1000:4000))
axis(1, at = c(1999, 2002, 2005, 2008))

## Step 4: write the figure to plot2.png.
dev.copy(png, file = "plot2.png", width = 900, height = 480)
dev.off()
|
4cb2bfaa65974cab1b1a5c6db00439346228d873
|
77bf6846a7b572eeac9fbbc49fc2eb687a336088
|
/Problem_1.R
|
104d801bb2b664daa47e898b14e334e959abd523
|
[] |
no_license
|
feb-uni-sofia/homework-1-r-basics-nvichev
|
8a0a6b990e54be053aac62ad5a026f084c9f931c
|
14bb26b2070e83d783426005658866c4188bcc6d
|
refs/heads/master
| 2021-04-12T10:25:50.471260
| 2018-03-21T17:41:53
| 2018-03-21T17:41:53
| 126,212,736
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 347
|
r
|
Problem_1.R
|
# a) A length-4 numeric vector.
x <- c(4, 1, 1, 4)
x
# b) A length-2 numeric vector.
y <- c(1, 4)
y
# c) The vectors differ in length, so R recycles the shorter one:
#    y is repeated to line up with the four elements of x.
x - y
# d) Concatenate the two vectors into one.
s <- c(x, y)
s
# e) Repeat the whole vector ten times and count the elements.
sReplicated <- rep(s, times = 10)
length(sReplicated)
# f) Repeat each element three times in place.
sRep_Each <- rep(s, each = 3)
sRep_Each
# g) The integers from 7 to 21, built two equivalent ways.
seq1 <- seq(from = 7, to = 21)
seq1
7:21
# h) Number of elements in the sequence.
length(seq1)
|
ea07bedba72b49c237fec71899dc0078d863387e
|
a87fbb4d8286a50ea6d36f1432b1b27c5f96085e
|
/vignettes/diabetes/src/population.R
|
361a48c7309df6169492b8383b85e5ff17824fa7
|
[] |
no_license
|
terourou/small-area-estimation
|
938908a28a0d87853368f11e4be51ad0b7e9eabf
|
935796305459a7d348134d5578a32f18768402cb
|
refs/heads/master
| 2023-07-03T08:19:35.748766
| 2021-08-11T04:58:00
| 2021-08-11T04:58:24
| 319,798,808
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,120
|
r
|
population.R
|
# Build a population Counts array (age x ethnicity x time) by combining a
# Maori-population CSV extract with a total-population CSV extract.
library(readr)
library(tidyr)
library(dplyr)
library(forcats)
library(dembase)
# Maori population by age group and year; the first 2 lines are skipped and
# only the first 15 data rows are read (skip/n_max trim the CSV's framing).
# NOTE(review): `X1` is the unnamed first column produced by read_csv --
# assumed to hold the year; confirm against the raw CSV.
maori <- read_csv("data/DPE479901_20210414_110618_25.csv",
                  skip = 2,
                  n_max = 15) %>%
  rename(time = X1) %>%
  pivot_longer(-time, names_to = "age", values_to = "maori")
# Total population, same layout (3 header lines skipped here).
total <- read_csv("data/DPE403903_20210414_110734_1.csv",
                  skip = 3,
                  n_max = 15) %>%
  rename(time = X1) %>%
  pivot_longer(-time, names_to = "age", values_to = "total")
# Join the two extracts, derive the non-Maori count as total minus Maori,
# reshape to long form with an ethnicity column, and cross-tabulate into a
# dembase Counts object. The column names "maori"/"nonmaori" become the
# ethnicity values and are recoded to display labels below.
population <- left_join(maori, total, by = c("time", "age")) %>%
  mutate(age = cleanAgeGroup(age)) %>%                 # dembase age-label cleanup
  mutate(time = as.integer(time) - 1L) %>% ## using mean year to June as proxy for 31 December count
  mutate(nonmaori = total - maori) %>%
  select(-total) %>%
  pivot_longer(cols = c(maori, nonmaori), names_to = "ethnicity", values_to = "count") %>%
  mutate(ethnicity = fct_recode(ethnicity, "Maori" = "maori", "Non-Maori" = "nonmaori")) %>%
  dtabs(count ~ age + ethnicity + time) %>%            # dembase cross-tabulation
  Counts(dimscales = c(time = "Points"))
# Persist the array for downstream scripts.
saveRDS(population,
        file = "out/population.rds")
|
8f7c6e534658bcb262231fd6d4895b20e99d0555
|
981ce555f51f0cf849d8ac56422d0f6528707e89
|
/05_Code/02_Analysis/01_Descriptive-Analysis/CER/A-01-04C_A1_Descriptive-Analysis_CER_Response-to-Temperature_Daily.R
|
5081cd6dac2492d794c0a9f872e9264be1173d92
|
[] |
no_license
|
JMJo321/Energy-Demand-Analysis
|
6059e52c177a133f912f5d77239adfeb02620c78
|
32873d78f73e66b2d14f9686739ddf61c4748b04
|
refs/heads/main
| 2023-09-02T13:20:11.807431
| 2021-07-28T01:16:01
| 2021-07-28T01:16:01
| 321,818,090
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 15,402
|
r
|
A-01-04C_A1_Descriptive-Analysis_CER_Response-to-Temperature_Daily.R
|
# < Description > *
# > Script Group Indicator Number and Name:
# # A-01, Descriptive Analysis
# #
# > Script Number(s):
# # A-01-04C_A1
# #
# > Purpose of the script(s):
# # Descriptive Analysis - Estimate the Treatment Impact on
# # Household Response to Temperature by using `hdd_all`
# ------------------------------------------------------------------------------
# Load required libraries
# ------------------------------------------------------------------------------
library(stringr)
library(zoo)
library(lfe)         # felm(): fixed-effects regressions used below
library(stargazer)   # regression tables
library(latex2exp)
library(ggplot2)
library(data.table)
# ------------------------------------------------------------------------------
# Set working directory, and run header script
# ------------------------------------------------------------------------------
# ------- Set project name -------
PROJ.NAME <- "Energy-Demand-Analysis"
# ------- Set working directory -------
PATH_PROJ <-
  paste("/Users/jmjo/Dropbox/00_JMJo/Projects", PROJ.NAME, sep = "/")
setwd(PATH_PROJ)
# ------- Run the header script -------
# NOTE(review): the header script presumably defines the path constants used
# below (PATH_DATA_INTERMEDIATE, PATH_CODE, PATH_NOTE), the `%>%` pipe, and
# the plot.save() helper used at the end of this script -- confirm against it.
PATH_HEADER <- paste0("05_Code/H-", PROJ.NAME, ".R")
source(PATH_HEADER)
# --------------------------------------------------
# Define path(s), parameter(s) and function(s)
# --------------------------------------------------
# ------- Define path(s) -------
# # 1. Path(s) from which Dataset(s)/Script(s) is(are) loaded
# # 1.1. For Metering Data
DIR_TO.LOAD_CER <- "CER"
FILE_TO.LOAD_CER_FOR.REGRESSION_ELECTRICITY <-
  "CER_DT-for-Regressions_Electricity.RData"
PATH_TO.LOAD_CER_METERING_ELECTRICITY <- paste(
  PATH_DATA_INTERMEDIATE,
  DIR_TO.LOAD_CER,
  FILE_TO.LOAD_CER_FOR.REGRESSION_ELECTRICITY,
  sep = "/"
)
# # 1.2. For R Script including Regression Models
FILE_TO.LOAD_CER_MODELS <- "M-Energy-Demand-Analysis_Regression-Models_CER.R"
PATH_TO.LOAD_CER_MODELS <- paste(
  PATH_CODE,
  FILE_TO.LOAD_CER_MODELS,
  sep = "/"
)
# # 2. Path(s) to which Plots will be stored
DIR_TO.SAVE_PLOT <- paste(
  PATH_NOTE, "07_CER-Trials", "02_Figures", "Descriptive-Analysis",
  sep = "/"
)
# ------- Define parameter(s) -------
# (Not Applicable)
# ------- Define function(s) -------
# (Not Applicable)
# ------------------------------------------------------------------------------
# Load Dataset(s) and/or Script(s)
# ------------------------------------------------------------------------------
# ------- Load Dataset(s) -------
# Loads `dt_for.reg` (used throughout the rest of the script) into the
# global environment.
load(PATH_TO.LOAD_CER_METERING_ELECTRICITY)
# ------- Load Script(s) -------
# Sources the `model_*` regression-formula objects referenced by felm() below.
source(PATH_TO.LOAD_CER_MODELS)
# ------------------------------------------------------------------------------
# Create DTs for Tables, Plots, or Regressions
# ------------------------------------------------------------------------------
# ------- Create DT(s) for Plots -------
# # 1. Create a DT that includes Household-level Daily Average Consumption
# # 1.1. Add a column showing ranges of HDDs, which will be used to aggregate
# #      consumption
# NOTE: data.table `:=` modifies dt_for.reg in place (no copy); every later
# use of dt_for.reg in this script sees the new `range_hdd` column.
dt_for.reg[
  ,
  range_hdd := cut(hdd_all, breaks = seq(0, 48, by = 1), include.lowest = TRUE)
]
# # 1.2. Create a DT by aggregating daily consumption
dt_avg.kwh_daily <-
  dt_for.reg[ # To obtain each household's daily consumption
    is_in.sample_incl.control == TRUE,
    lapply(.SD, sum, na.rm = TRUE), .SDcols = "kwh",
    by = .(id, date, group, period, range_hdd)
  ][ # To compute daily average consumption
    ,
    lapply(.SD, mean, na.rm = TRUE), .SDcols = "kwh",
    by = .(date, group, period, range_hdd)
  ]
# ## Note:
# ## Do NOT exclude `is_within.temperature.range == FALSE` because excluding
# ## observations meeting the condition distort average daily consumption.
# ## Excluding observations with `is_within.temperature.range == FALSE` will
# ## not cause any problem when I run regressions with a hourly-level sample.
# ------- Create DT(s) for Regressions -------
# # 1. Create a DT that includes Household-level Daily Average Consumption
# # 1.1. For DT including Observations of Control Group
# (Collapses half-hourly/hourly kWh to daily totals per household, keeping
#  the regressors needed by the incl.-control model formulas.)
dt_for.reg_daily_incl.control <- dt_for.reg[
  is_in.sample_incl.control == TRUE,
  lapply(.SD, sum, na.rm = TRUE), .SDcols = "kwh",
  by = .(
    date,
    id_in.factor,
    is_treated_r, is_treatment.period, treatment.and.post,
    mean.temp_all_f, hdd_all,
    day.of.week_in.factor, id.and.day.of.week_in.factor, month_in.factor
  )
]
# # 1.2. For DT excluding Observations of Control Group
dt_for.reg_daily_excl.control <- dt_for.reg[
  is_in.sample_excl.control == TRUE,
  lapply(.SD, sum, na.rm = TRUE), .SDcols = "kwh",
  by = .(
    date,
    id_in.factor,
    treatment.and.post,
    mean.temp_all_f, hdd_all,
    day.of.week_in.factor, id.and.day.of.week_in.factor, month_in.factor
  )
]
# ------------------------------------------------------------------------------
# Run Regressions
# ------------------------------------------------------------------------------
# All `model_*` formulas below come from the sourced models script
# (PATH_TO.LOAD_CER_MODELS); felm() is lfe's fixed-effects estimator.
# ------- Run Regressions with OLS Models -------
# # 1. Run Regressions with Day-level Data
# # 1.1. With a sample including the control group
result_ols_daily_incl.control_linear <- felm(
  data = dt_for.reg_daily_incl.control,
  formula = model_ols_daily_incl.control_linear
)
result_ols_daily_incl.control_quadratic <- felm(
  data = dt_for.reg_daily_incl.control,
  formula = model_ols_daily_incl.control_quadratic
)
# # 1.2. With a sample excluding the control group
result_ols_daily_excl.control_linear <- felm(
  data = dt_for.reg_daily_excl.control,
  formula = model_ols_daily_excl.control_linear
)
result_ols_daily_excl.control_quadratic <- felm(
  data = dt_for.reg_daily_excl.control,
  formula = model_ols_daily_excl.control_quadratic
)
# ------- Run Regressions with FEs Models -------
# # 1. Run Regressions with Day-level Data
# # 1.1. With a sample including the control group
result_fes_daily_incl.control_linear <- felm(
  data = dt_for.reg_daily_incl.control,
  formula = model_fes_daily_incl.control_linear
)
result_fes_daily_incl.control_linear_variation1 <- felm(
  data = dt_for.reg_daily_incl.control,
  formula = model_fes_daily_incl.control_linear_variation1
)
result_fes_daily_incl.control_quadratic <- felm(
  data = dt_for.reg_daily_incl.control,
  formula = model_fes_daily_incl.control_quadratic
)
result_fes_daily_incl.control_quadratic_variation1 <- felm(
  data = dt_for.reg_daily_incl.control,
  formula = model_fes_daily_incl.control_quadratic_variation1
)
# # 1.2. With a sample excluding the control group
result_fes_daily_excl.control_linear <- felm(
  data = dt_for.reg_daily_excl.control,
  formula = model_fes_daily_excl.control_linear
)
result_fes_daily_excl.control_quadratic <- felm(
  data = dt_for.reg_daily_excl.control,
  formula = model_fes_daily_excl.control_quadratic
)
# ------------------------------------------------------------------------------
# Create DTs from Regression Results
# ------------------------------------------------------------------------------
# NOTE(review): the four extract-coefficients blocks and the four simulation
# blocks below repeat the same pattern and could be factored into helper
# functions; left as-is to keep this review non-behavioral.
# ------- Create DTs from Regression Results with Daily Data -------
# # 1. Extract Estimates
# # 1.1. From results from FEs models
# # 1.1.1. From results from FEs models with the sample excluding control group
# # 1.1.1.1. Linear Model
dt_fes_daily_excl.control_linear <- summary(
  result_fes_daily_excl.control_linear, robust = TRUE
)$coefficients %>%
  data.table(., keep.rownames = TRUE)
names(dt_fes_daily_excl.control_linear) <-
  c("desc", "estimate", "se", "t.value", "prob_v.value")
# # 1.1.1.2. Quadratic Model
dt_fes_daily_excl.control_quadratic <- summary(
  result_fes_daily_excl.control_quadratic, robust = TRUE
)$coefficients %>%
  data.table(., keep.rownames = TRUE)
names(dt_fes_daily_excl.control_quadratic) <-
  c("desc", "estimate", "se", "t.value", "prob_v.value")
# # 1.1.2. From results from FEs models with the sample including control group
# # 1.1.2.1. Linear Model
dt_fes_daily_incl.control_linear <- summary(
  result_fes_daily_incl.control_linear, robust = TRUE
)$coefficients %>%
  data.table(., keep.rownames = TRUE)
names(dt_fes_daily_incl.control_linear) <-
  c("desc", "estimate", "se", "t.value", "prob_v.value")
# # 1.1.2.2. Quadratic Model
dt_fes_daily_incl.control_quadratic <- summary(
  result_fes_daily_incl.control_quadratic, robust = TRUE
)$coefficients %>%
  data.table(., keep.rownames = TRUE)
names(dt_fes_daily_incl.control_quadratic) <-
  c("desc", "estimate", "se", "t.value", "prob_v.value")
# # 2. Create DTs that include Simulation Results
# # Each simulation DT evaluates the estimated treatment response
# # (intercept shift + HDD interaction terms) on an HDD grid of 0..50.
# # 2.1. Simulation Results from FEs Models: Temperature Response
# # 2.1.1. From the sample excluding control group
# # 2.1.1.1. For Linear Model
dt_simulation_fes_daily_excl.control_linear <-
  data.table(hdd = seq(0, 50, by = 0.5)) %>%
  .[
    ,
    `:=` (
      model = "Linear",
      response = (
        dt_fes_daily_excl.control_linear[
          str_detect(desc, "^treatment.and.post")
        ]$estimate +
        dt_fes_daily_excl.control_linear[
          str_detect(desc, "^hdd_all:treatment.and.post")
        ]$estimate * hdd
      )
    )
  ]
# # 2.1.1.2. For Quadratic Model
dt_simulation_fes_daily_excl.control_quadratic <-
  data.table(hdd = seq(0, 50, by = 0.5)) %>%
  .[
    ,
    `:=` (
      model = "Quadratic",
      response = (
        dt_fes_daily_excl.control_quadratic[
          str_detect(desc, "^treatment.and.post")
        ]$estimate +
        dt_fes_daily_excl.control_quadratic[
          str_detect(desc, "^hdd_all:treatment.and.post")
        ]$estimate * hdd +
        dt_fes_daily_excl.control_quadratic[
          str_detect(desc, "^I.+treatment.and.postTRUE$")
        ]$estimate * hdd^2
      )
    )
  ]
# # 2.1.2. From the sample including control group
# # 2.1.2.1. For Linear Model
dt_simulation_fes_daily_incl.control_linear <-
  data.table(hdd = seq(0, 50, by = 0.5)) %>%
  .[
    ,
    `:=` (
      model = "Linear",
      response = (
        dt_fes_daily_incl.control_linear[
          str_detect(desc, "^treatment.and.post")
        ]$estimate +
        dt_fes_daily_incl.control_linear[
          str_detect(desc, "^hdd_all:treatment.and.post")
        ]$estimate * hdd
      )
    )
  ]
# # 2.1.2.2. For Quadratic Model
dt_simulation_fes_daily_incl.control_quadratic <-
  data.table(hdd = seq(0, 50, by = 0.5)) %>%
  .[
    ,
    `:=` (
      model = "Quadratic",
      response = (
        dt_fes_daily_incl.control_quadratic[
          str_detect(desc, "^treatment.and.post")
        ]$estimate +
        dt_fes_daily_incl.control_quadratic[
          str_detect(desc, "^hdd_all:treatment.and.post")
        ]$estimate * hdd +
        dt_fes_daily_incl.control_quadratic[
          str_detect(desc, "^I.+treatment.and.postTRUE$")
        ]$estimate * hdd^2
      )
    )
  ]
# # 2.1.3. Create DTs by combining DTs generated above
dt_simulation_fes_daily_excl.control <- rbind(
  dt_simulation_fes_daily_excl.control_linear,
  dt_simulation_fes_daily_excl.control_quadratic
)
dt_simulation_fes_daily_incl.control <- rbind(
  dt_simulation_fes_daily_incl.control_linear,
  dt_simulation_fes_daily_incl.control_quadratic
)
dt_simulation_fes_daily <- rbind(
  dt_simulation_fes_daily_excl.control[, category := "Excluding Control Group"],
  dt_simulation_fes_daily_incl.control[, category := "Including Control Group"]
)
# # 2.1.4. Modify the combined DT
# # 2.1.4.1. Convert data type from character to factor
# (Fixing the level order so facets plot "Including" above "Excluding".)
levels <- c("Including Control Group", "Excluding Control Group")
dt_simulation_fes_daily[, category := factor(category, levels = levels)]
# ------------------------------------------------------------------------------
# Make Table(s) from Regression Results
# ------------------------------------------------------------------------------
# ------- Make Table(s) from Regression Results -------
# # 1. Create objects that will be used to make regression table(s)
# Ten model columns: 4 from the excl.-control sample, 6 from the
# incl.-control sample (matching `column.separate` below).
list_results <- list(
  result_ols_daily_excl.control_linear,
  result_fes_daily_excl.control_linear,
  result_ols_daily_excl.control_quadratic,
  result_fes_daily_excl.control_quadratic,
  result_ols_daily_incl.control_linear,
  result_fes_daily_incl.control_linear,
  result_fes_daily_incl.control_linear_variation1,
  result_ols_daily_incl.control_quadratic,
  result_fes_daily_incl.control_quadratic,
  result_fes_daily_incl.control_quadratic_variation1
)
column.labels <- c(
  "Sample excluding Control Group", "Sample including Control Group"
)
column.separate <- c(4, 6)
covariate.labels <- c(
  "HDDs",
  "(HDDs)\\^2",
  "1[Treatment]",
  "1[Post]",
  "1[Treatment and Post]",
  "HDDs x 1[Treatment]",
  "(HDDs)\\^2 x 1[Treatment]",
  "HDDs x 1[Post]",
  "(HDDs)\\^2 x 1[Post]",
  "HDDs x 1[Treatment and Post]",
  "(HDDs)\\^2 x 1[Treatment and Post]",
  "(Constant)"
)
dep.var.labels <- "Daily Consumption (kWh per Day)"
# One Yes/No entry per model column, in list_results order.
add.lines <- list(
  c(
    "FEs: ID-by-Day of Week",
    "No", "Yes", "No", "Yes", "No", "Yes", "Yes", "No", "Yes", "Yes"
  ),
  c(
    "FEs: Month",
    "No", "Yes", "No", "Yes", "No", "Yes", "Yes", "No", "Yes", "Yes"
  )
)
# # 2. Print Table(s)
stargazer(
  list_results,
  type = "text",
  column.labels = column.labels,
  column.separate = column.separate,
  covariate.labels = covariate.labels,
  dep.var.labels = dep.var.labels,
  add.lines = add.lines
)
# ------------------------------------------------------------------------------
# Make Plots
# ------------------------------------------------------------------------------
# ------- Set Common Plot Options -------
plot.options <- list(
  theme_linedraw(),
  theme(strip.text = element_text(face = "bold"))
)
# ------- Create Plots for Descriptive Analysis -------
# # 1. Create a Plot that shows Simulation Results
plot_simulation_fes <-
  ggplot(data = dt_simulation_fes_daily) +
  geom_point(aes(x = hdd, y = response, color = model, shape = model)) +
  geom_line(aes(x = hdd, y = response, color = model, group = model)) +
  facet_grid(category ~ .) +
  scale_x_continuous(breaks = seq(0, 50, by = 5)) +
  scale_y_continuous(labels = scales::comma) +
  labs(
    x = "HDDs",
    y = "Response with respect to Treatment",
    color = "Models",
    shape = "Models"
  ) +
  plot.options
# # 2. Create a Plot that shows Daily Average Consumption
# NOTE: `range_hdd := factor(range_hdd)` mutates dt_avg.kwh_daily in place.
plot_avg.kwh_daily <-
  ggplot(data = dt_avg.kwh_daily[, range_hdd := factor(range_hdd)]) +
  geom_jitter(
    aes(x = range_hdd, y = kwh, color = period),
    alpha = 0.3
  ) +
  geom_smooth(
    aes(x = as.numeric(range_hdd), y = kwh, color = period),
    method = "loess", formula = y ~ x,
    alpha = 0.3
  ) +
  facet_grid(group ~ .) +
  scale_y_continuous(labels = scales::comma) +
  labs(
    x = "Ranges of HDDs",
    y = "Daily Average Consumption (kWh per Day)",
    color = "Periods"
  ) +
  plot.options +
  theme(axis.text.x = element_text(angle = 90, vjust = 0.5, hjust = 1))
# ------- Export Plots created above in PNG Format -------
# NOTE(review): plot.save() is not defined in this script -- presumably a
# helper from the sourced header script; confirm.
# # 1. For Simulation Results
# (Comment label corrected: this call saves plot_simulation_fes.)
plot.save(
  paste(
    DIR_TO.SAVE_PLOT,
    "CER_Simulation_Response-to-Temperature_Daily_Electricity_Using-HDD-All.png",
    sep = "/"
  ),
  plot_simulation_fes,
  width = 40, height = 30, units = "cm"
)
# # 2. For Daily Average Consumption
# (Comment label corrected: this call saves plot_avg.kwh_daily.)
plot.save(
  paste(
    DIR_TO.SAVE_PLOT,
    "CER_Daily-Average-Consumption_By-HDD_Electricity_Using-HDD-All.png",
    sep = "/"
  ),
  plot_avg.kwh_daily,
  width = 40, height = 30, units = "cm"
)
|
39e658cfaea4fdcfee1b94eb11af6b57eca49870
|
6fe23897d8599f4b6cdc31ea744734cf43ad7088
|
/src/05scripts/02R/InternshipReport/RCytoscapeClustersAbasy.R
|
41bdc95329ad3a5cf5cf4d62f20f26f9cece9910
|
[] |
no_license
|
dimagarcia/Framework
|
7011058ffbf291a83ef1fee14e80410564eefa3b
|
ec4547825755201b762f4623bf55ab36f6339734
|
refs/heads/master
| 2023-08-22T03:24:29.496278
| 2023-08-15T10:26:08
| 2023-08-15T10:26:08
| 132,790,549
| 1
| 0
| null | 2023-08-16T03:26:34
| 2018-05-09T17:22:39
|
JavaScript
|
UTF-8
|
R
| false
| false
| 1,819
|
r
|
RCytoscapeClustersAbasy.R
|
# Build a small directed graphNEL of three E. coli gene modules (34: lactose,
# 40: sodium/tartrate, 52: phosphorelay) and render it in Cytoscape via the
# RCytoscape RPC bridge.
library("RCytoscape")
# NOTE(review): `cy` is not defined in this script -- presumably an existing
# CytoscapeConnection from the session; confirm before running standalone.
pluginVersion (cy)
# Module 34
g <- new ('graphNEL', edgemode='directed')
g <- graph::addNode ('lacA', g)
g <- graph::addNode ('lacY', g)
g <- graph::addNode ('lacZ', g)
# Module 40
#g <- new ('graphNEL', edgemode='directed')
g <- graph::addNode ('ttdA', g)
g <- graph::addNode ('ttdB', g)
g <- graph::addNode ('ttdT', g)
# Module 52
#g <- new ('graphNEL', edgemode='directed')
g <- graph::addNode ('zraR', g)
g <- graph::addNode ('zraS', g)
# Open a Cytoscape window for the node-only graph, then display it.
cw <- new.CytoscapeWindow ('E. coli simulation - Modules: 34(Lactose), 40(Sodium), 52(Phosphorelay)', graph=g, overwriteWindow=TRUE)
displayGraph (cw)
# Re-fetch the graph from the window and declare the edge attributes that the
# edgeData() assignments below will populate.
g <- cw@graph
g <- initEdgeAttribute (graph=g, attribute.name='edgeType',attribute.type='char',default.value='regulates to')
g <- initEdgeAttribute (graph=g, attribute.name='weight',attribute.type='numeric',default.value='unspecified')
# Module 34: fully connect the three lac genes; all edges weighted 0.5.
g <- graph::addEdge ('lacA','lacY', g)
g <- graph::addEdge ('lacA','lacZ', g)
g <- graph::addEdge ('lacY','lacZ', g)
edgeData (g, 'lacA','lacY','weight') <- 0.5
edgeData (g, 'lacA','lacZ','weight') <- 0.5
edgeData (g, 'lacY','lacZ','weight') <- 0.5
# Module 40: same pattern for the ttd genes.
g <- graph::addEdge ('ttdA','ttdB', g)
g <- graph::addEdge ('ttdA','ttdT', g)
g <- graph::addEdge ('ttdB','ttdT', g)
edgeData (g, 'ttdA','ttdB','weight') <- 0.5
edgeData (g, 'ttdA','ttdT','weight') <- 0.5
edgeData (g, 'ttdB','ttdT','weight') <- 0.5
# Module 52: two nodes with reciprocal edges.
g <- graph::addEdge ('zraR','zraS', g)
g <- graph::addEdge ('zraS','zraR', g)
edgeData (g, 'zraR','zraS','weight') <- 0.5
edgeData (g, 'zraS','zraR','weight') <- 0.5
# Push the edge-annotated graph back to the window and re-render with a
# visual style, circular layout, and arrowheads.
cw@graph <- g
displayGraph (cw)
setVisualStyle(cw, 'Sample1')
layoutNetwork (cw, layout.name='degree-circle')
redraw (cw)
edges.of.interest = as.character (cy2.edge.names (cw@graph))
setEdgeTargetArrowShapeDirect (cw, edges.of.interest, 'Arrow')
redraw (cw)
|
61b16e63381e18da890be10637118a7a8bcbda75
|
4c16c3c020a4e421dccb55e9f2d8fb345a898182
|
/main_interval.R
|
14e09fc280f5a542cc659cc7b98c556d4a92997a
|
[] |
no_license
|
KNewhart/MP_ADPCA_Monitoring
|
b7f858a69fc6e5ad79b821993f99ebe8ee490e22
|
38dca7a1782af0f7449278b84cf7899fa9ab69c9
|
refs/heads/master
| 2020-03-27T01:33:51.556481
| 2018-08-27T22:51:03
| 2018-08-27T22:51:03
| 145,722,847
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,069
|
r
|
main_interval.R
|
### Preliminaries
# Clear global environment.
# NOTE(review): `rm(list = ls())` in scripts is discouraged, but it is kept
# here to preserve the script's existing operational behavior.
rm(list=ls())
# Load libraries
library(ADPCA)
# Set working directory (toggle between the remote and the on-site machine)
remote <- TRUE
if (remote) {
  setwd("C:/Users/Kate Newhart/odrive/Mines/Code/MP_ADPCA_Monitoring")
} else {
  setwd("C:/Users/SB-MBR/Desktop/R Code/MP_ADPCA_Monitoring")
}
# Load variables (presumably defines dataLocation, testingDay, varsBR/varsMT,
# stateVarsBR/stateVarsMT, faultsToTriggerAlarm -- confirm against vars.R)
source("vars.R")

### Compile and clean data
# loadandcleanDBF returns a dataframe with all days including column names
rawData <- loadandcleanDBF(dataLocation, testingDay, nDays = 1)
# Convert to xts and keep observations from the testing day onward
rawData <- xts(rawData[,-1], order.by = rawData[,1])
rawData <- rawData[paste(testingDay, "/", sep = "")]
# Subset data into bioreactor (BR) and membrane tank (MT) variables
dataBR <- rawData[, varsBR]
dataMT <- rawData[, varsMT]
# Create states
dataBR_ls <- stateGenerator(data = dataBR, stateVars = stateVarsBR, testingDay = testingDay, minObs = 1)
dataMT_ls <- stateGenerator(data = dataMT, stateVars = stateVarsMT, testingDay = testingDay, minObs = 1)
# Load training specs
load("trainingSpecs/trainingDataSS.R")
load("trainingSpecs/trainingDataBR.R")
load("trainingSpecs/trainingDataMT.R")

# Only include states with training data.
# Each trainingData*[[1]][[1]] element carries its state id in labelCol[1].
# (vapply over the list replaces the original `1:length(...)` index loops,
# which iterate over c(1, 0) -- and fail -- when the list is empty.)
states2keepBR <- vapply(
  trainingDataBR[[1]][[1]],
  function(dt) as.integer(dt$labelCol[1]),
  integer(1)
)
states2keepMT <- vapply(
  trainingDataMT[[1]][[1]],
  function(dt) as.integer(dt$labelCol[1]),
  integer(1)
)
# Keep only the state data sets whose label matches a trained state,
# preserving the original ordering: by states2keep order first, then by
# position within data*_ls.
filtered.dataBR_ls <- list()
for (state in states2keepBR) {
  for (i in seq_along(dataBR_ls)) {
    if (dataBR_ls[[i]]$labelCol[1] == state) {
      filtered.dataBR_ls <- c(filtered.dataBR_ls, list(dataBR_ls[[i]]))
    }
  }
}
filtered.dataMT_ls <- list()
for (state in states2keepMT) {
  for (i in seq_along(dataMT_ls)) {
    if (dataMT_ls[[i]]$labelCol[1] == state) {
      filtered.dataMT_ls <- c(filtered.dataMT_ls, list(dataMT_ls[[i]]))
    }
  }
}

# Test single-state (SS) model on the raw data
alarmDataSS <- testNewObs(data = rawData,
                          trainingSpecs = trainingDataSS,
                          testingDay = testingDay,
                          faultsToTriggerAlarm = faultsToTriggerAlarm)
# Test multistate models on the filtered per-state data
alarmDataBR <- multistate_test(data = filtered.dataBR_ls,
                               trainingSpec_ls = trainingDataBR[[2]][[1]],
                               testingDay = trainingDataBR[[3]],
                               faultsToTriggerAlarm = trainingDataBR[[4]])
alarmDataMT <- multistate_test(data = filtered.dataMT_ls,
                               trainingSpec_ls = trainingDataMT[[2]][[1]],
                               testingDay = trainingDataMT[[3]],
                               faultsToTriggerAlarm = trainingDataMT[[4]])
# Persist the alarm results for this testing day
write.csv(as.data.frame(alarmDataSS), file = paste("results/", testingDay, " alarmDataSS.csv", sep = ""))
write.csv(as.data.frame(alarmDataBR), file = paste("results/", testingDay, " alarmDataBR.csv", sep = ""))
write.csv(as.data.frame(alarmDataMT), file = paste("results/", testingDay, " alarmDataMT.csv", sep = ""))
|
d8bfece9c85527d18ef04b49c82452da3074cdb3
|
2624780e9ac235d2b08aa69b191033fe35cdc915
|
/man/janus.Rd
|
dd22e39f46a3ede5c1bd013d67a4c5efd6157b69
|
[] |
no_license
|
Sandy4321/janus
|
81c01fdc783252cff1d16cdd506bcaf1d0f22ca8
|
8dc36385a063de0e1efc0ed76bb00dccccd78012
|
refs/heads/master
| 2021-01-14T14:07:50.902289
| 2015-09-13T23:06:58
| 2015-09-13T23:06:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,072
|
rd
|
janus.Rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/janus.R
\name{janus}
\alias{janus}
\title{Constructor for janus object}
\usage{
janus(object, package, classifier, interface = c("formula", "default"),
constructed = TRUE)
}
\arguments{
\item{object}{A trained model object.}
\item{package}{Character string indicating package origin of classifier.}
\item{classifier}{Character string indicating the classifier used to train
the model in object.}
\item{interface}{String indicating whether the object was created using the
formula method interface or the default interface.}
\item{constructed}{Logical indicating whether this object was created using
the janus constructor.}
}
\value{
A janus object containing the trained model object with additional
metadata.
}
\description{
A constructor for creating a janus object. The principal argument is the
trained model object, which is packaged inside a janus object along with
metadata derived from the fitting process.
}
\author{
Alex Wollenschlaeger, \email{alexw@panix.com}
}
|
86853ac7b67f7b005723c9cd41eade25c6c44ba6
|
9cc7423f4a94698df5173188b63c313a7df99b0e
|
/R/analyze.anova.R
|
92bd7db05b014066faccd84080e80f180a70ad62
|
[
"MIT"
] |
permissive
|
HugoNjb/psycho.R
|
71a16406654b11007f0d2f84b8d36587c5c8caec
|
601eef008ec463040c68bf72ac1ed8d4a8f7751f
|
refs/heads/master
| 2020-03-27T01:24:23.389884
| 2018-07-19T13:08:53
| 2018-07-19T13:08:53
| 145,707,311
| 1
| 0
| null | 2018-08-22T12:39:27
| 2018-08-22T12:39:27
| null |
UTF-8
|
R
| false
| false
| 8,321
|
r
|
analyze.anova.R
|
#' Analyze aov and anova objects.
#'
#' Analyze aov and anova objects.
#'
#' @param x aov object.
#' @param effsize_rules Grid for effect size interpretation. See \link[=interpret_omega_sq]{interpret_omega_sq}.
#' @param ... Arguments passed to or from other methods.
#'
#' @return output
#'
#' @examples
#' \dontrun{
#' library(psycho)
#'
#' df <- psycho::affective
#'
#' x <- aov(df$Tolerating ~ df$Salary)
#' x <- aov(df$Tolerating ~ df$Salary * df$Sex)
#'
#' x <- anova(lm(df$Tolerating ~ df$Salary * df$Sex))
#'
#'
#' summary(analyze(x))
#' print(analyze(x))
#'
#' df <- psycho::emotion %>%
#'   mutate(Recall = ifelse(Recall == TRUE, 1, 0)) %>%
#'   group_by(Participant_ID, Emotion_Condition) %>%
#'   summarise(Recall = sum(Recall) / n())
#'
#' x <- aov(Recall ~ Emotion_Condition + Error(Participant_ID), data=df)
#' x <- anova(lmerTest::lmer(Recall ~ Emotion_Condition + (1|Participant_ID), data=df))
#' analyze(x)
#' summary(x)
#' }
#'
#'
#' @references
#' \itemize{
#'  \item{Levine, T. R., & Hullett, C. R. (2002). Eta squared, partial eta squared, and misreporting of effect size in communication research. Human Communication Research, 28(4), 612-625.}
#'  \item{Pierce, C. A., Block, R. A., & Aguinis, H. (2004). Cautionary note on reporting eta-squared values from multifactor ANOVA designs. Educational and psychological measurement, 64(6), 916-924.}
#' }
#'
#' @seealso http://imaging.mrc-cbu.cam.ac.uk/statswiki/FAQ/os2
#'
#' @author \href{https://dominiquemakowski.github.io/}{Dominique Makowski}
#'
#' @import broom
#'
#' @export
analyze.aov <- function(x, effsize_rules="field2013", ...) {
  # Dispatch: plain anova tables without a Residuals row are assumed to come
  # from lmerTest and are handled separately; repeated-measures aov objects
  # are reduced to their within-subject stratum.
  if (!"aov" %in% class(x)) {
    if (!"Residuals" %in% row.names(x)) {
      if (!is.null(x$Within)) {
        x <- x$Within
        message("(Repeated measures ANOVAs are bad, you should use mixed-models...)")
      } else {
        return(.analyze.anova_lmer(x))
      }
    }
  } else {
    if (!is.null(x$Within)) {
      x <- x$Within
      message("(Repeated measures ANOVAs are bad, you should use mixed-models...)")
    }
  }

  # Processing
  # -------------
  # Effect Size: omega_sq() warns when the design mixes within/between
  # variables; escalate that warning to a hard error with advice.
  omega <- tryCatch({
    omega_sq(x, partial = TRUE)
  }, warning = function(w) {
    stop("I believe there are within and between subjects variables that caused the error. You should REALLY use mixed-models.")
  })

  # Tidy the ANOVA table and attach the effect sizes plus their
  # interpretation.
  # FIX: the `effsize_rules` argument was previously ignored (the rules were
  # hard-coded to 'field2013'); interpolate it into the SE expression instead.
  # NOTE(review): mutate_()/rename_()/filter_() are deprecated dplyr verbs --
  # consider migrating to tidy-eval equivalents.
  all_values <- x %>%
    broom::tidy() %>%
    dplyr::full_join(data.frame("Omega" = omega) %>%
      tibble::rownames_to_column("term"), by = "term") %>%
    mutate_(
      "Effect_Size" = paste0(
        "interpret_omega_sq(Omega, rules = '", effsize_rules, "')"
      )
    ) %>%
    rename_(
      "Effect" = "term",
      "Sum_Squares" = "sumsq",
      "Mean_Square" = "meansq",
      "F" = "statistic",
      "p" = "p.value"
    )

  varnames <- all_values$Effect
  df_residuals <- all_values[all_values$Effect == "Residuals", ]$df

  # Build a per-effect list of statistics plus an APA-style text summary
  # (no text is generated for the Residuals row).
  values <- list()
  for (var in varnames) {
    values[[var]] <- list()
    current_values <- dplyr::filter_(all_values, "Effect == var")
    values[[var]]$df <- current_values$df
    values[[var]]$Sum_Squares <- current_values$Sum_Squares
    values[[var]]$Mean_Square <- current_values$Mean_Square
    values[[var]]$F <- current_values$F
    values[[var]]$p <- current_values$p
    values[[var]]$Omega <- current_values$Omega
    values[[var]]$Effect_Size <- current_values$Effect_Size

    if (var != "Residuals") {
      if (current_values$p < .05) {
        significance <- "significant"
      } else {
        significance <- "not significant"
      }

      if (grepl(":", var)) {
        effect <- "interaction between"
        varname <- stringr::str_replace_all(var, ":", " and ")
      } else {
        varname <- var
        effect <- "effect of"
      }

      values[[var]]$text <- paste0(
        "The ",
        effect,
        " ",
        varname,
        " is ",
        significance,
        " (F(",
        current_values$df,
        ", ",
        df_residuals,
        ") = ",
        format_digit(current_values$F),
        ", p ",
        format_p(current_values$p, stars = FALSE),
        ") and can be considered as ",
        current_values$Effect_Size,
        " (Partial Omega-squared = ",
        format_digit(current_values$Omega),
        ")."
      )
    }
  }

  # Summary
  # -------------
  summary <- all_values

  # Text
  # -------------
  text <- c()
  for (var in varnames[varnames != "Residuals"]) {
    text <- c(text, paste(" -", values[[var]]$text))
  }

  # Plot
  # -------------
  plot <- "Not available yet"

  output <- list(text = text, plot = plot, summary = summary, values = values)
  class(output) <- c("psychobject", "list")
  return(output)
}
# S3 method aliases: `anova` and `aovlist` objects are analyzed with the same
# routine as `aov` objects (analyze.aov itself dispatches on the table shape).
#' @export
analyze.anova <- analyze.aov

#' @export
analyze.aovlist <- analyze.aov
# Internal worker for anova tables produced by lmerTest::lmer fits.
# Produces the same psychobject structure as analyze.aov (text / plot /
# summary / values), but without effect sizes (omega squared is not computed
# for mixed models here).
#' @keywords internal
.analyze.anova_lmer <- function(x) {
  # lmerTest tables carry NumDF/DenDF columns; plain lme4 tables do not and
  # cannot be analyzed (no denominator degrees of freedom).
  if (!"NumDF" %in% colnames(x)) {
    stop("Cannot analyze the anova from lme4. Please refit the model using lmerTest.")
  }

  # Normalize the table to the column names used throughout psycho.
  # NOTE(review): rename_()/select_() are deprecated dplyr SE verbs.
  summary <- x %>%
    as.data.frame() %>%
    tibble::rownames_to_column("term") %>%
    rename_(
      "Effect" = "term",
      "df" = "NumDF",
      "df_Residuals" = "DenDF",
      "Sum_Squares" = "`Sum Sq`",
      "Mean_Square" = "`Mean Sq`",
      "F" = "`F value`",
      "p" = "`Pr(>F)`"
    ) %>%
    select_("Effect", "df", "df_Residuals", "Sum_Squares", "Mean_Square", "F", "p")

  varnames <- summary$Effect

  # Per-effect statistics plus an APA-style sentence for each effect.
  values <- list()
  for (var in varnames) {
    values[[var]] <- list()
    current_values <- dplyr::filter_(summary, "Effect == var")
    values[[var]]$df <- current_values$df
    values[[var]]$df_Residuals <- current_values$df_Residuals
    values[[var]]$Sum_Squares <- current_values$Sum_Squares
    values[[var]]$Mean_Square <- current_values$Mean_Square
    values[[var]]$F <- current_values$F
    values[[var]]$p <- current_values$p
    # values[[var]]$Omega <- current_values$Omega
    # values[[var]]$Effect_Size <- current_values$Effect_Size

    if (current_values$p < .05) {
      significance <- "significant"
    } else {
      significance <- "not significant"
    }

    # Interaction terms (a:b) are phrased differently from main effects.
    if (grepl(":", var)) {
      effect <- "interaction between"
      varname <- stringr::str_replace_all(var, ":", " and ")
    } else {
      varname <- var
      effect <- "effect of"
    }

    values[[var]]$text <- paste0(
      "The ",
      effect,
      " ",
      varname,
      " is ",
      significance,
      " (F(",
      current_values$df,
      ", ",
      format_digit(current_values$df_Residuals, 0),
      ") = ",
      format_digit(current_values$F),
      ", p ",
      format_p(current_values$p, stars = FALSE),
      ")."
    )
  }

  # Text
  # -------------
  text <- c()
  for (var in varnames[varnames != "Residuals"]) {
    text <- c(text, paste(" -", values[[var]]$text))
  }

  # Plot
  # -------------
  plot <- "Not available yet"

  output <- list(text = text, plot = plot, summary = summary, values = values)
  class(output) <- c("psychobject", "list")
  return(output)
}
#' Partial Omega Squared.
#'
#' Computes (partial) omega squared effect sizes for every term of an
#' analysis of variance.
#'
#' @param x An \code{aov} object, or an anova summary table (a data frame
#'   whose columns are Df, Sum Sq and Mean Sq, with the residuals in the
#'   last row).
#' @param partial Return partial omega squared (default) rather than plain
#'   omega squared.
#'
#' @return A named numeric vector with one value per model term (the
#'   residuals row is excluded).
#'
#' @examples
#' library(psycho)
#'
#' df <- psycho::affective
#'
#' x <- aov(df$Tolerating ~ df$Salary)
#' x <- aov(df$Tolerating ~ df$Salary * df$Sex)
#'
#' omega_sq(x)
#'
#' @seealso http://stats.stackexchange.com/a/126520
#'
#' @author Arnoud Plantinga
#' @export
omega_sq <- function(x, partial = TRUE) {
  if (inherits(x, "aov")) {
    summary_aov <- summary(x)[[1]]
  } else {
    summary_aov <- x
  }

  residRow <- nrow(summary_aov)       # last row holds the residuals
  effectRows <- seq_len(residRow - 1) # all other rows are model terms

  msError <- summary_aov[residRow, 3] # residual mean square
  nTotal <- sum(summary_aov$Df)

  dfEffects <- summary_aov[effectRows, 1]
  ssEffects <- summary_aov[effectRows, 2]
  msEffects <- summary_aov[effectRows, 3]

  # Total sum of squares (scalar). BUG FIX: the previous code replicated
  # this value exactly 3 times (`rep(sum(...), 3)`), which silently
  # mis-recycled and returned a wrong-length result whenever the model had
  # a number of terms other than 1 or 3.
  ssTotal <- sum(summary_aov[, 2])

  termNames <- trimws(rownames(summary_aov)[effectRows])

  # Plain omega squared per term.
  Omegas <- abs((ssEffects - dfEffects * msError) / (ssTotal + msError))
  names(Omegas) <- termNames

  # Partial omega squared per term.
  partOmegas <- abs((dfEffects * (msEffects - msError)) /
    (ssEffects + (nTotal - dfEffects) * msError))
  names(partOmegas) <- termNames

  if (partial) {
    partOmegas
  } else {
    Omegas
  }
}
|
df89cd16eb97299042f34eeac1ea278898b0f478
|
9ff1c5bb2148e0a9782bf3084817878f95f191d1
|
/scripts/ejercicio_012.r
|
0d3e0b1a34b505b16f61eb3cc75078531a70746b
|
[] |
no_license
|
mar71n/cursoR
|
95a1045f89e77cc6406bd834f3e1a5ff5e38a00c
|
fad97fccc114bb5b8cf367952eb64bc397ba6431
|
refs/heads/master
| 2021-01-16T18:30:01.702666
| 2013-07-18T17:19:35
| 2013-07-18T17:19:35
| 31,655,439
| 0
| 0
| null | null | null | null |
ISO-8859-1
|
R
| false
| false
| 1,193
|
r
|
ejercicio_012.r
|
# First, create three data vectors with disparate distributions:
discreta <- sample(1:10, 100, prob=seq(from=0.1,to=1.0, length.out=10), replace=TRUE)
exponencial <- rexp(1000)
bimodal <- c(rnorm(1000), rnorm(1000, mean=5, sd=2))
# The exercise consists of creating plots for the three vectors (including
# box plots, the histogram and the density) to see what they look like.
# Are they equally useful for the three kinds of data? Which plot best
# reflects each vector/distribution?
# Answer: both histograms and box-and-whisker plots give plenty of
# information; the exponential case shows up best in the histogram.
# 2x2 panel layout: three individual box plots plus a combined one, then
# (in a fresh layout) the three histograms.
layout(matrix(c(1,2,3,4),2,2,byrow=TRUE))
boxplot(discreta,main="Discreta")
boxplot(exponencial,main="exponencial")
boxplot(bimodal,main="bimodal")
boxplot(discreta,exponencial,bimodal,main="caja y bigotes",xlab="disc,expo,bimo",ylab="frecuencia")
hist(discreta,main="discreta",xlab="datos")
hist(exponencial,main="exponencial",xlab="datos")
hist(bimodal,main="bimodal",xlab="datos")
# plot(x, y, ...) seems better suited to representing one variable as a
# function of another, not distributions.
|
58326ae50d1f1401ff2539df75d33710d80ca54e
|
5a676f5a367775e242968a487b1a7940c550f374
|
/R/clean_profiles.R
|
6aa85d134c362092e2de104643f0dffec9bd8559
|
[
"MIT"
] |
permissive
|
fosterlab/PrInCE
|
a812897bd29383d84ad0ba7337a31d20b1c79b89
|
add96aad315861f5aad2079f291dee209e122729
|
refs/heads/master
| 2021-12-15T03:03:00.683090
| 2020-12-07T20:15:10
| 2020-12-07T20:15:10
| 109,034,214
| 6
| 5
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,527
|
r
|
clean_profiles.R
|
#' Preprocess a co-elution profile matrix
#'
#' Clean a matrix of co-elution/co-fractionation profiles by
#' (1) imputing single missing
#' values with the average of neighboring values, (2) replacing missing values
#' with random, near-zero noise, and (3) smoothing with a moving average
#' filter.
#'
#' @param profile_matrix a numeric matrix of co-elution profiles, with proteins
#' in rows, or a \code{\linkS4class{MSnSet}} object
#' @param impute_NA if true, impute single missing values with the average of
#' neighboring values
#' @param smooth if true, smooth the chromatogram with a moving average filter
#' @param smooth_width width of the moving average filter, in fractions
#' @param noise_floor mean value of the near-zero noise to add
#'
#' @return a cleaned matrix
#'
#' @examples
#' data(scott)
#' mat <- scott[c(1, 16), ]
#' mat_clean <- clean_profiles(mat)
#'
#' @importFrom MSnbase exprs
#' @importFrom Biobase exprs<-
#' @importFrom methods is
#'
#' @export
clean_profiles <- function(profile_matrix, impute_NA = TRUE, smooth = TRUE,
                           smooth_width = 4, noise_floor = 0.001) {
  # Unwrap MSnSet containers down to the raw expression matrix.
  if (is(profile_matrix, "MSnSet")) {
    profile_matrix <- exprs(profile_matrix)
  }
  # Clean each protein (row) independently; apply() returns the cleaned
  # profiles as columns, so transpose back to proteins-in-rows.
  cleaned <- apply(profile_matrix, 1, clean_profile,
                   impute_NA = impute_NA,
                   smooth = smooth,
                   smooth_width = smooth_width,
                   noise_floor = noise_floor)
  t(cleaned)
}
|
cf994cdf1b4897633f7c6d2e2bce3281e0890e6a
|
94eed3c3d82610194b5d5e683a9248a22487c5ac
|
/R/xml_html_scrape.R
|
2236bd603e1fabf2b4945c8abfa5d2d382dc45ef
|
[] |
no_license
|
euhkim/regression2000
|
2529dc1934c7a323892bb915f3b9fd99cf815f44
|
20cc832cb1e17faabc0de3e30190cee7f015eb46
|
refs/heads/master
| 2021-09-11T01:31:51.430346
| 2018-04-05T18:49:22
| 2018-04-05T18:49:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,587
|
r
|
xml_html_scrape.R
|
# BUG FIX: "ggplot" is not a CRAN package — the script loads ggplot2 below,
# so install ggplot2 instead.
# NOTE(review): installing unconditionally on every run is wasteful; guard
# with requireNamespace() if this script is run repeatedly.
install.packages(c("ggplot2","magrittr","lubridate","dplyr","glmnet"))
library(ggplot2); library(magrittr); library(lubridate) ; library(dplyr)
# extra functions
# Pull every whitespace-separated numeric token out of a character vector,
# in order of appearance, returned as a single numeric vector.
# Improvements over the original: tokens are coerced once instead of twice,
# NA-coercion warnings are suppressed, and the magrittr pipe is no longer
# required for this helper to work.
extract_numerics <- function(x){
  tokens <- strsplit(x, " ")
  values <- lapply(tokens, function(tok) {
    nums <- suppressWarnings(as.numeric(tok))  # non-numeric tokens -> NA
    nums[!is.na(nums)]                         # keep only real numbers
  })
  unlist(values)
}
# Difference between two dates x and y, expressed in days.
# NOTE(review): lubridate::days(x - y) treats the numeric difference as a
# day-count period, which is then coerced to seconds by as.numeric() and
# divided back by 60*60*24 — presumably to land on days again. Verify the
# intended units against the callers before reuse.
number_of_days <- function(x,y){
  c(lubridate::days(x-y) %>% as.numeric())/(60*60*24)
}
# Scrape the locally saved Meetup page: attendance, start dates, titles and
# comment counts for the first 26 event cards.
xml <- xml2::read_html("clds.html")
attendance <- rvest::html_text(rvest::html_nodes(xml,".avatarRow--attendingCount"))[1:26] %>% extract_numerics()
dates <- rvest::html_text(rvest::html_nodes(xml,".eventTimeDisplay-startDate"))[1:26]
title <-rvest::html_text(rvest::html_nodes(xml,".eventCardHead--title"))[1:26]
comments <- rvest::html_text(rvest::html_nodes(xml,".eventCard--expandedInfo-comments"))[1:26] %>% extract_numerics()
# Parse the display dates according to the page format, e.g.
# "Tuesday, March 5, 2019, 6:30 PM".
meetup_dates <- as.Date(dates,"%A, %B %d, %Y, %I:%M %p")
day <- strptime(dates,"%A, %B %d, %Y, %I:%M %p")
meetup_data <- data.frame("Meetup"=title,
"Date"=meetup_dates,
"Attendance"=attendance,
"WeekDay"=weekdays(meetup_dates),
"Comments"= comments)
# Membership history: cumulative member count per join date.
cd <- data.table::fread("clds.txt")
dates <- cd$JoinedGroup
tabs <- table(dates)
df <- data.frame("Date"=lubridate::ymd(names(tabs)),"Members"=as.numeric((tabs)))
df2 <- df[order(df$Date),]
df2$Members <- cumsum(df2$Members)
## let's now add some extra covariates
full_data <- dplyr::left_join(df2,meetup_data,by="Date")
# NOTE(review): this overwrites the membership input file "clds.txt" that
# was read above, so a second run of the script will misbehave — consider
# writing to a different output file.
write.csv(full_data,"clds.txt",row.names = FALSE)
|
7e987825ad8057a3e56591e204aa7f9d1f16f572
|
af11fe3ff3fec9f631df5d1bd10cd6b8dae32c89
|
/shiny observe.R
|
b0b3cbdfeed80504db4036f45f60b4ed26c8a8d9
|
[] |
no_license
|
y1220/R-practice
|
fc483bef6831fe37c7b22d5c10babaf53ae31772
|
b2fc05202b04e39c5b33b2d0bfa08b947fa0d605
|
refs/heads/main
| 2023-08-17T13:20:45.450404
| 2021-10-10T22:26:57
| 2021-10-10T22:26:57
| 397,967,259
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 383
|
r
|
shiny observe.R
|
library(shiny)

# Minimal UI: a single free-text input for the user's name.
ui <- fluidPage(
  textInput('name', 'Enter your name')
)

server <- function(input, output, session) {
  # CODE BELOW: Add an observer to display a notification
  # 'You have entered the name xxxx' where xxxx is the name
  # Fires on every change of input$name (i.e. every keystroke), because
  # observe() re-runs whenever a reactive input it reads changes.
  observe(
    showNotification(
      paste('You have entered the name ',input$name)
    )
  )
}

shinyApp(ui = ui, server = server)
|
4645e58ba6eef4c4365697644dab41b41b53e7c1
|
b9310268702ef141c4cd4e03e19c6a89682c1a69
|
/simulation_samplesize/create.dmps.norm.other.R
|
78e6b1e16a1f635e7603e47cbf6040eff916939b
|
[] |
no_license
|
Jfortin1/funnorm_repro
|
7b853da5e648a60e74b09e458fd88cc49efcf69b
|
b2cf7b2c907990fde2e204a1b6f9acc4acd6cccf
|
refs/heads/master
| 2021-01-20T13:48:09.063091
| 2015-03-27T16:06:20
| 2015-03-27T16:06:20
| 20,110,327
| 7
| 1
| null | 2015-03-27T16:06:20
| 2014-05-23T18:47:02
|
R
|
UTF-8
|
R
| false
| false
| 1,312
|
r
|
create.dmps.norm.other.R
|
# We will focus on the EBV dataset
# Command-line arguments: k indexes the sample size in n.vector,
# j is the bootstrap replicate index.
k=as.numeric(commandArgs(TRUE)[1])
j=as.numeric(commandArgs(TRUE)[2])

# Project directory layout.
# BUG FIX: this variable was misspelled "funnomDir", so every paste0()
# below referenced an undefined "funnormDir" and the script failed at startup.
funnormDir <- "/amber1/archive/sgseq/workspace/hansen_lab1/funnorm_repro"
rawDir <- paste0(funnormDir,"/raw_datasets")
disValDir <- paste0(funnormDir,"/dis_val_datasets")
designDir <- paste0(funnormDir,"/designs")
normDir <- paste0(funnormDir,"/norm_datasets")
scriptDir <- paste0(funnormDir,"/scripts")
sampleSizeDir <- paste0(funnormDir, "/simulation_samplesize")
dmpsDir <- paste0(sampleSizeDir,"/dmps_norm_other")
normDir3 <- paste0(sampleSizeDir,"/norm_other")

library(minfi)

# Restrict the Ontario-EBV design to the validation set.
setwd(designDir)
load("design_ontario_ebv.Rda")
design <- design_ontario_ebv
design <- design[design$set=="Validation",]

# Sample sizes explored in the simulation; k selects one.
n.vector <- c(10,20,30,50,80)
file=paste0("ontario_ebv_val_n_",n.vector[k],"_B_",j,".Rda")
setwd(normDir3)
load(file)
#quantile.norm, swan.norm, dasen.norm

# Two balanced phenotype groups of size n/2 each, named by sample.
n <- n.vector[k]
m <- n/2
pheno <- c(rep(1,m),rep(2,m))
names(pheno) <- colnames(quantile.norm)

# Creation of the dmps:
setwd(scriptDir)
source("returnDMPSFromNormMatrices.R")
setwd(dmpsDir)
norm.matrices <- list(quantile=quantile.norm, swan = swan.norm, dasen = dasen.norm)
dmps <- returnDmpsFromNormMatrices(normMatrices = norm.matrices, pheno = pheno)
save(dmps, file=paste0("dmps_ontario_ebv_val_n_",n.vector[k],"_B_",j,".Rda"))
|
38cdc69e0bd29af64a17dce29b8e5bfdf4621fea
|
378ce06964d8617d005de4f697685f93632ffcda
|
/Genetic_Chen2021.R
|
1f6f3edb5d05664f27e7063b06b166bcd2004d0a
|
[] |
no_license
|
YLCHEN1992/Genetic_Chen
|
3fb8db6655c1d23a43f65f7d1849d27a078e7d1c
|
803497b69bf8b97e191e5dbf48ed7cbab97296b1
|
refs/heads/main
| 2023-05-29T09:33:33.756531
| 2021-06-13T11:11:59
| 2021-06-13T11:11:59
| 372,094,221
| 0
| 0
| null | null | null | null |
GB18030
|
R
| false
| false
| 8,920
|
r
|
Genetic_Chen2021.R
|
# Need packages
library(corrplot)
library(ggplot2)
library(formattable)
library(reshape2)
# Public functions
# Matrix for linkage calculation
Dmdata=c(1,0,0,0,0.5,0,0.5,0,0,0,0.5,0.5,0,0,0,1,
0,0.5,0,0.5,0,1,0,0,0.5,0,0,0.5,0.25,0.25,0.25,0.25,0,0.5,0.5,0)
Dm=matrix(Dmdata,nrow=9,ncol=4,byrow=T)
colnames(Dm)=c("g11","g22","g12","g21")
rownames(Dm)=c("T1111","T1112","T1122","T2211","T2212","T2222","T1211","T1212","T1222")
# Linkage calculation
# Pairwise linkage disequilibrium between the two sites held in columns 2
# and 3 of `x` (column 1 = sample id). Returns c(|D'|, R2). Uses the
# module-level Dm matrix and genebf() for allele frequencies.
LDm <- function(x) {
  # Drop any sample with a missing call ("-") at either site.
  LD <- c()
  if (("-" %in% as.character(x[, 2])) | ("-" %in% as.character(x[, 3]))) {
    for (i in 2:3) {
      LD <- c(LD, which(as.character(x[, i]) == "-"))
    }
    x <- x[-unique(LD), ]
  }
  # Normalize genotype coding: "21" == "12"; anything unrecognized -> "12".
  x[x == "21"] <- "12"
  x[x != "11" & x != "12" & x != "22"] <- "12"

  # Accumulate expected two-locus gamete frequencies for every sample.
  T4 <- c()
  for (i in 1:nrow(x)) {
    key <- paste("T", as.character(x[i, 2]), as.character(x[i, 3]), sep = "")
    T4 <- c(T4, as.numeric(Dm[which(rownames(Dm) == key), ]))
  }
  T4M <- matrix(T4, ncol = 4, byrow = TRUE)
  g11 <- sum(T4M[, 1]) / nrow(T4M)
  g22 <- sum(T4M[, 2]) / nrow(T4M)
  g12 <- sum(T4M[, 3]) / nrow(T4M)
  g21 <- sum(T4M[, 4]) / nrow(T4M)
  D <- g11 * g22 - g12 * g21 # CORE EQUATION

  # Allele frequencies at each site (genebf returns f1 at [4], f2 at [5]).
  q1 <- genebf(as.character(x[, 2]))[4]
  p1 <- genebf(as.character(x[, 2]))[5]
  q2 <- genebf(as.character(x[, 3]))[4]
  p2 <- genebf(as.character(x[, 3]))[5]

  # Guard against division by zero when either site is monomorphic.
  # BUG FIX: the original condition `p1&p2&q1&q2==0` parsed as
  # `p1 & p2 & q1 & (q2 == 0)` and therefore almost never triggered,
  # producing NaN/Inf for monomorphic sites.
  if (p1 == 0 || p2 == 0 || q1 == 0 || q2 == 0) {
    R2 <- 0
    ZD <- D
  } else {
    if (D > 0) {
      ZD <- D / min(q1 * p2, q2 * p1)
    } else if (D < 0) {
      ZD <- D / min(q1 * p1, q2 * p2)
    } else {
      ZD <- D
    }
    R2 <- D^2 / (p1 * p2 * q1 * q2)
  }
  c(abs(ZD), R2)
}
# Genetic linkage map display
# Draws an upper-triangular heatmap of a pairwise LD matrix `x` (values
# expected in [0, 1]), annotated with the rounded values. `LD` is only used
# to label the colour legend ("D'" by default, "R2" for r-squared matrices).
LMAP=function(x,LD="D'"){
  library(ggplot2)
  library(reshape2)
  # Round for display and blank the lower triangle so only the upper
  # triangle (including the diagonal) is drawn.
  cormat=round(x,2)
  cormat[lower.tri(cormat)]=NA
  melted_cormat= melt(cormat,na.rm=TRUE)
  # Base heatmap: blue (0) -> white (0.5) -> red (1) gradient.
  ggheatmap=ggplot(melted_cormat, aes(Var2, Var1, fill = value))+
    geom_tile(color = "white")+
    scale_fill_gradient2(low = "blue", high = "red", mid = "white",
                         midpoint =0.5, limit = c(0,1), space = "Lab",
                         name=paste("Linkage Disequilibrium\n",as.character(LD),sep="")) +
    theme_minimal()+
    theme(axis.text.x = element_text(angle = 45,vjust = 1,size = 12,hjust = 1))+
    coord_fixed()
  # Overlay numeric labels, strip axis titles/grid for a clean tile plot,
  # and place the horizontal legend inside the plotting area.
  map=ggheatmap+
    geom_text(aes(Var2, Var1, label = value), color = "black", size = 10)+
    theme(
      axis.title.x = element_blank(),
      axis.title.y = element_blank(),
      panel.grid.major = element_blank(),
      panel.border = element_blank(),
      panel.background = element_blank(),
      axis.ticks = element_blank(),
      legend.justification = c(1, 0),
      legend.position = c(0.35, 0.8),
      legend.direction = "horizontal")+
    guides(fill = guide_colorbar(barwidth = 8, barheight = 1,
                                 title.position = "top", title.hjust = 0.5))
  map
}
# Per-site genotype/allele summary. Input: a character vector of genotype
# calls ("11"/"12"/"22", "-" = missing). Output: an 18-element numeric
# vector in this order: f11, f12, f22, f1, f2, chi-square, p-value,
# n11, n12, n22, n1, n2, n genotypes, n alleles, homozygosity,
# heterozygosity, effective number (1/Ho), PIC.
genebf <- function(x) {
  genotypes <- as.character(x)[x != "-"]              # drop missing calls
  alleles <- as.character(unlist(strsplit(genotypes, "")))
  geno_tab <- table(genotypes)
  allele_tab <- table(alleles)
  n_geno <- sum(geno_tab)
  n_allele <- sum(allele_tab)

  # Count for `key` in a table, treating an absent key as zero.
  tally <- function(tab, key) {
    count <- as.numeric(tab[key])
    if (is.na(count)) 0 else count
  }

  n11 <- tally(geno_tab, "11")
  n12 <- tally(geno_tab, "12")
  n22 <- tally(geno_tab, "22")
  n1 <- tally(allele_tab, "1")
  n2 <- tally(allele_tab, "2")

  # Genotype and allele frequencies.
  f11 <- n11 / n_geno
  f12 <- n12 / n_geno
  f22 <- n22 / n_geno
  f1 <- n1 / n_allele
  f2 <- n2 / n_allele

  # Observed vs Hardy-Weinberg expected genotype counts (order: 11, 12, 22).
  obs <- c(f11, f12, f22) * n_geno
  expct <- c(f1^2, 2 * f1 * f2, f2^2) * n_geno

  homozygosity <- (n11 + n22) / n_geno   # CORE EQUATION
  heterozygosity <- n12 / n_geno         # CORE EQUATION
  n_effective <- 1 / homozygosity        # CORE EQUATION
  pic <- 1 - f1^2 - f2^2 - 2 * f2^2 * f1^2 # CORE EQUATION

  # Chi-square with Yates continuity correction, df = 1, and its p-value.
  chi_sq <- sum((abs(obs - expct) - 0.5)^2 / expct)
  p_value <- 1 - pchisq(chi_sq, 1)

  c(f11, f12, f22, f1, f2, chi_sq, p_value,
    n11, n12, n22, n1, n2, n_geno, n_allele,
    homozygosity, heterozygosity, n_effective, pic)
}
# Main function 1 !!!
# Reads a genotype CSV (first column = sample id, remaining columns = sites
# coded "11"/"12"/"21"/"22"), computes per-site statistics via genebf(),
# writes them to ./Rgenetics, renders a formattable HTML table, and — when
# there is more than one site — computes pairwise linkage disequilibrium
# (D' and R2) via LDm() and saves the two heatmaps.
# NOTE(review): the file name is taken from the *unquoted symbol* passed in
# (deparse(substitute(gebd))), e.g. Genetic_Chen(mydata.csv).
Genetic_Chen=function(gebd){
  address=getwd()
  # Read and normalize file
  x=read.csv(deparse(substitute(gebd)))
  sites=ncol(x)-1
  frenq=c()
  # Recode: "21" == "12"; anything that is not a valid genotype -> "12".
  x[x=="21"]="12"
  x[x!="11"&x!="12"&x!="22"]="12"
  # Multiple rule
  # One genebf() call per site; each contributes 18 values that are
  # reshaped into one row per site below.
  for(i in 1:sites){
    assign(paste("site",i,sep=""),as.character(x[,i+1]))
    getn=paste("site",i,sep="")
    frenq=c(frenq,genebf(get(getn)))}
  Mfrenq=matrix(frenq,nrow=sites,ncol=18,byrow=T)
  # Values extract
  # Column order of Mfrenq follows genebf()'s return vector.
  gefbm=data.frame(Sitenames=as.character(colnames(x)[-1]),
                   FrequenceOF11=Mfrenq[,1],NumberOF11=Mfrenq[,8],
                   FrequenceOF12=Mfrenq[,2],NumberOF12=Mfrenq[,9],
                   FrequenceOF22=Mfrenq[,3],NumberOF22=Mfrenq[,10],
                   FrequenceOF1=Mfrenq[,4],NumberOF1=Mfrenq[,11],
                   FrequenceOF2=Mfrenq[,5],NumberOF2=Mfrenq[,12],
                   NumberOFSample=Mfrenq[,13],
                   NumberOFGene=Mfrenq[,14],
                   X_Statistics=Mfrenq[,6],P_Value=Mfrenq[,7],
                   Homozygosity=Mfrenq[,15],Heterozygosity=Mfrenq[,16],Ne=Mfrenq[,17],PIC=Mfrenq[,18])
  # Files save
  # Create ./Rgenetics if needed and write a timestamped CSV there.
  if (file.exists("./Rgenetics")==TRUE){cat("阁下目标文件夹 Rgenetics 已存在\n")}else{
    dir.create("./Rgenetics", recursive=TRUE)
    cat("目标文件夹 Rgenetics 已为阁下创建\n")}
  setwd("./Rgenetics")
  NAME=paste("阁下遗传统计已计算完成",gsub(":","_",Sys.time()),".csv")
  write.csv(gefbm,NAME,row.names=FALSE)
  cat("阁下基础遗传数据分析已完成,文件保存在",as.character(getwd()),"目录下\n")
  setwd(address)
  # Web display format
  # Rounded copy of the table with conditional formatting: p-values
  # coloured green (>.05) / red (<.05), frequencies highlighted.
  Wgefbm=cbind(Sitenames=gefbm[,1],round(gefbm[,-1],3))
  pp= formatter("span",
                style = x ~ style(
                  font.weight = "bold",
                  color = ifelse(x > 0.05, "Green", ifelse(x < 0.05, "Red", "black"))))
  nn=formatter("span",
               style =~style(
                 color ="grey",font.weight = "bold"))
  FQ=color_tile("MediumAquamarine","MediumAquamarine")
  webtable=formattable(Wgefbm, align =c("l",rep("c",17)),list('P_Value' =pp,
                                                              'Sitenames' =nn,
                                                              'FrequenceOF11'=FQ,
                                                              'FrequenceOF12'=FQ,
                                                              'FrequenceOF22'=FQ,
                                                              'FrequenceOF1'=FQ,
                                                              'FrequenceOF2'=FQ))
  # Jugement of linkage
  # Pairwise LD over all site pairs (diagonal forced to 1), then save the
  # D' and R2 heatmaps as timestamped PNGs in ./Rgenetics.
  if(sites>1){
    LDDM=matrix(0,nrow=sites,ncol=sites)
    LDRM=matrix(0,nrow=sites,ncol=sites)
    for(a in 2:ncol(x)){
      for(b in 2:ncol(x)){
        LDDM[a-1,b-1]=LDm(x[,c(1,a,b)])[1]
        LDRM[a-1,b-1]=LDm(x[,c(1,a,b)])[2]
        if(a==b){
          LDDM[a-1,b-1]=1
          LDRM[a-1,b-1]=1}}}
    colnames(LDDM)=as.character(colnames(x)[-1])
    rownames(LDDM)=as.character(colnames(x)[-1])
    colnames(LDRM)=as.character(colnames(x)[-1])
    rownames(LDRM)=as.character(colnames(x)[-1])
    mapLDDM=LMAP(LDDM)
    mapLDRM=LMAP(LDRM,LD="R2")
    if (file.exists("./Rgenetics")==TRUE){cat("阁下目标文件夹 Rgenetics 已存在\n")}else{
      dir.create("./Rgenetics", recursive=TRUE)
      cat("目标文件夹 Rgenetics 已为阁下创建\n")}
    setwd("./Rgenetics")
    gNAMED=paste("阁下遗传连锁图D绘制已完成",gsub(":","_",Sys.time()),".png")
    gNAMER=paste("阁下遗传连锁图R绘制已完成",gsub(":","_",Sys.time()),".png")
    ggsave(filename=gNAMED,mapLDDM,dpi=600,width=8,height=8)
    ggsave(filename=gNAMER,mapLDRM,dpi=600,width=8,height=8)
    cat("阁下连锁遗传数据分析已完成,文件保存在",as.character(getwd()),"目录下\n")
    setwd(address)}
  webtable}
# Search Haplotype 1 — expand each haplotype seed by appending allele codes
# "1" and "2", returning only the newly generated (one-character-longer)
# strings, in generation order.
rephap <- function(x) {
  extended <- unlist(lapply(x, function(hap) paste0(hap, 1:2)))
  setdiff(c(x, extended), x)
}
# Search Haplotype 2 — enumerate all 2^n haplotypes of length n over the
# alleles {"1", "2"} by repeatedly extending the single-locus seeds with
# rephap().
seedhap <- function(n) {
  pool <- c("1", "2")
  if (n == 1) {
    return(pool)
  }
  for (a in 1:(n - 1)) {
    pool <- rephap(pool)
  }
  pool
}
# Search Haplotype 3 — P(allele | genotype): probability that a gamete from
# genotype x carries allele y. Homozygotes transmit their allele with
# certainty; heterozygotes ("12") transmit either allele with probability 0.5.
pxy <- function(x, y) {
  if (x == "11" && y == "1") return(1)
  if (x == "11" && y == "2") return(0)
  if (x == "22" && y == "1") return(0)
  if (x == "22" && y == "2") return(1)
  if (x == "12" && (y == "1" || y == "2")) return(0.5)
  # BUG FIX: an unknown combination previously crashed with the cryptic
  # "object 'pr' not found"; fail with an informative message instead.
  stop("pxy(): unexpected genotype/allele combination: ", x, "/", y)
}
# Main function 2
# Estimates haplotype frequencies for all sites in a genotype CSV (same
# format and unquoted-symbol file-name convention as Genetic_Chen). For
# each candidate haplotype from seedhap(), the per-sample transmission
# probability is the product of pxy() over sites; frequencies are averaged
# over samples, saved as a timestamped CSV in ./Rgenetics, and returned as
# a formattable table.
HapChen=function(gebd){
  address=getwd()
  # Read and normalize file
  x=read.csv(deparse(substitute(gebd)))
  # Drop samples with a missing call ("-") at site 1 or 2, then recode
  # genotypes ("21" == "12"; anything unrecognized -> "12").
  LD=c()
  if(("-" %in% as.character(x[,2]))|("-" %in% as.character(x[,3]))){
    for(i in 2:3){LD=c(LD,which(as.character(x[,i])=="-"))}
    x=x[-unique(LD),]}
  x[x=="21"]="12"
  x[x!="11"&x!="12"&x!="22"]="12"
  sites=ncol(x)-1
  # Generate groups seeds
  nhap=length(seedhap(sites))
  pnxxs=c()
  # Calculate Haplotypes
  # For haplotype j and sample a: multiply the per-site transmission
  # probabilities pxy(genotype at site i, allele i of the haplotype).
  for(j in 1:nhap){
    pnxx=c()
    for(a in 1:nrow(x)){
      pn=c()
      px=""
      py=""
      for(i in 1:sites){
        # px = two-character genotype at site i (concatenated row sliced
        # in pairs); py = allele i of candidate haplotype j.
        px=substring(paste(as.character(x[a,2:ncol(x)]),collapse = ""),(i*2-1),i*2)
        py=substring(as.character(seedhap(sites)[j]),i,i)
        pn=c(pn,pxy(px,py))}
      pnxx=c(pnxx,prod(pn))} # product over sites = per-sample probability
    cat("统计单倍型",seedhap(sites)[j],"完成\n")
    pnxxs=c(pnxxs,(sum(pnxx)/nrow(x)))}
  # Haplotype Save
  mhapy=data.frame(Haplotype=seedhap(sites),FreHaplotype=round(pnxxs,3))
  if (file.exists("./Rgenetics")==TRUE){cat("阁下目标文件夹 Rgenetics 已存在\n")}else{
    dir.create("./Rgenetics", recursive=TRUE)
    cat("目标文件夹 Rgenetics 已为阁下创建\n")}
  setwd("./Rgenetics")
  NAME=paste("阁下单倍型-遗传统计已计算完成",gsub(":","_",Sys.time()),".csv")
  write.csv(mhapy,NAME,row.names=FALSE)
  cat("阁下遗传数据分析已完成,文件保存在",as.character(getwd()),"目录下\n")
  setwd(address)
  # Haplotype Show
  # NOTE(review): "Cornislk" looks like a typo for the CSS colour
  # "Cornsilk" — it is a runtime string, left unchanged here.
  pp= formatter("span",
                style = ~ style(
                  font.weight = "bold",
                  color ="Cornislk"))
  HYOtable=formattable(mhapy, align =c("l","c"),
                       list('FreHaplotype' =pp,'Haplotype' =pp))
  HYOtable}
|
310e967c4b13b0ae71c4ffd5ec790a99618909a5
|
ba47c8138302b941da39dac09cc5c20ab8d401cf
|
/R/zz-flow-code.R
|
f21ac0f3e1b98c67381d463a65b60d6888ff1ff5
|
[
"MIT"
] |
permissive
|
flow-r/flowr
|
b5b542b44d175af84840f88fed54d48db474c4fb
|
dabf9d0df4d580e45b758b4dd7f2346e76a63c3d
|
refs/heads/master
| 2023-03-22T23:14:02.563866
| 2021-03-10T15:43:53
| 2021-03-10T15:43:53
| 19,354,942
| 11
| 0
|
NOASSERTION
| 2021-02-28T04:35:36
| 2014-05-01T19:20:29
|
R
|
UTF-8
|
R
| false
| false
| 1,303
|
r
|
zz-flow-code.R
|
# nocov start
## some function to supplement the shiny GUI
# Dead example kept as documentation: the if (FALSE) guard means this block
# never executes; it sketches how a three-job flow object is assembled.
if(FALSE){
  qobj <- queue(platform = "lsf", queue = "normal")
  job1 <- job(name = "myjob1", q_obj = qobj)
  job2 <- job(name = "myjob2", q_obj = qobj)
  job3 <- job(name = "myjob3", q_obj = qobj, previous_job = c("myjob2", "myjob1"))
  fobj <- flow(name = "myflow", jobs = list(job1, job2, job3), desc="description")
  plot_flow(fobj)
  x <- fobj
}
### generate code from data
#' @title generate_flow_code
#' @description Generate R code that would recreate each job of a flow
#'   object (one `cmd_<job>` assignment and one `job()` call per job).
#' @param x flow object
#' @param ... currently ignored
#' @keywords internal
#' @examples
#' \dontrun{
#' generate_flow_code(x = x)
#' }
generate_flow_code <- function(x, ...){
  fobj <- x
  ## this would take in a flowmat and produce a code to generate it
  # Name of every job in the flow (slot "name" of each job object).
  jobnames <- sapply(fobj@jobs, slot, "name")
  code_jobs <- sapply(jobnames, function(j){
    # NOTE(review): ifelse() has a scalar condition here, so when
    # length(prev_jobs) > 1 only the FIRST previous job is kept, and a
    # single previous job (length 1) is replaced by "none". This looks like
    # it should test for "no previous job" instead — verify the intended
    # semantics before trusting the generated code.
    prev_jobs=fobj@jobs[[j]]@previous_job;prev_jobs <- ifelse(length(prev_jobs) > 1, prev_jobs, "none")
    cpu = fobj@jobs[[j]]@cpu;cmds=fobj@jobs[[j]]@cmds
    # Two generated lines per job: the command string and the job() call.
    code_cmd <- sprintf("cmd_%s <- '%s'", j, cmds)
    code_job <- sprintf("jobj_%s <- job(name = '%s', q_obj = qobj, previous_job = '%s', cpu = '%s', cmd=cmd_%s)",
                        j, j, prev_jobs, cpu, j)
    return(c(code_cmd, code_job))
  })
  return(code_jobs)
}
# nocov end
|
ec3c5dd29a2964099bfb53564694f24308ef341d
|
305b202e7360ccd04489bbc00e2b5ea2d1ca6f8f
|
/Concrete_Discretize.R
|
efcdffc7ec9a3ed0b38edda2df5e718dc06dbf63
|
[] |
no_license
|
rajivsam/Miscellaneous_R_Utility_Code
|
5749370be7371b8e317e9a1e7025e342bec280a4
|
f277b7fec9cb7b06547ba20d0fdbeae5f7bcc2ea
|
refs/heads/master
| 2021-01-10T22:05:40.986567
| 2015-07-20T09:43:17
| 2015-07-20T09:43:17
| 39,375,565
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,227
|
r
|
Concrete_Discretize.R
|
library(arules)

# Source data: concrete compressive-strength measurements.
fp <- "/home/admin123/homals_analysis/Concrete_Data.csv"
col.names <- c("Cement_Comp_1", "Blast_Furnace_Slag_Comp2",
               "Fly_Ash_Comp_3", "Water_Comp_4",
               "Superplasticizer_Comp_5",
               "Coarse_Aggregate_Comp_6",
               "Fine_Aggregate_Comp_7",
               "Age (day)",
               "Concrete_CS")
cdf <- read.csv(fp)
names(cdf) <- col.names

# We don't want the "Age" attribute because it is a count
cdf <- cdf[, -8]
col.names <- col.names[-8]

# Discretize every remaining column into 5 equal-width bins labelled
# "<column>#1" ... "<column>#5", stored under "<column>_D".
# FIX: the original grew d.col.names with `<<-` (superassignment misused at
# top level) inside the loop; precompute the names instead.
d.col.names <- paste(col.names, "D", sep = "_")
for (i in seq_along(col.names)) {
  the.labels <- paste(col.names[i], seq_len(5), sep = "#")
  # cdf[, i] still addresses the original i-th measurement column because
  # the new discretized columns are appended after it.
  cdf[, d.col.names[i]] <- discretize(cdf[, i], method = "interval",
                                      categories = 5, labels = the.labels)
}
cdf <- cdf[d.col.names]

# Write the discretized table and the list of discretized column names.
fp2 <- "/home/admin123/homals_analysis/Concrete_Data_Discretized.csv"
write.table(cdf, fp2, sep = ",", col.names = TRUE, row.names = FALSE)
fp3 <- "/home/admin123/homals_analysis/Concrete_Data_Discretized_RN.csv"
write.table(d.col.names, fp3, sep = ",", col.names = TRUE, row.names = FALSE)
|
9410b263b256f3b598ea8743be0ba4255b968e92
|
bb10ea2c03c9cd1a0d4458772ca2440f488b1008
|
/R/elastic-client.R
|
63ea7f42da31097dcccf8eb6a2cf45b8ad86b9b0
|
[
"GPL-3.0-only"
] |
permissive
|
Henning-Schulz/forecastic
|
1b375c95c77f3ceb88db049213b0762b5997c2c1
|
3f7517749b2701f7670e895568a2d85a84ff2a4f
|
refs/heads/master
| 2021-08-08T03:07:22.922224
| 2020-07-16T15:27:15
| 2020-07-16T15:27:19
| 202,161,301
| 0
| 0
|
Apache-2.0
| 2019-08-13T14:25:12
| 2019-08-13T14:25:12
| null |
UTF-8
|
R
| false
| false
| 3,476
|
r
|
elastic-client.R
|
# elastic-client.R
#' @author Henning Schulz
library(elasticsearchr)
library(tidyverse)
library(stringr)
#' Reads the intensities from the elasticsearch.
#' The result will be formatted as tibble with the following columns:
#' \code{timestamp} The timestamp in milliseconds
#' \code{intensity.<group>} The workload intensity (one column per group)
#' \code{<context_variable>} The values of a context variable (one column per variable / per value in the string case)
#' The tibble holds the data as they are in the elasticsearch, i.e., can contain \code{NA} and missing values.
#'
#' @param app_id The app-id to be used in the query.
#' @param tailoring the tailoring to be used in the query.
#' @param perspective Optional upper bound (inclusive) on the timestamp;
#'   when \code{NULL} (default), all records are fetched.
#'
#' @example read_intensities("my_app", "all")
read_intensities <- function(app_id, tailoring, perspective = NULL) {
  # Restrict to records up to `perspective` when given, otherwise match all.
  if (is.null(perspective)) {
    filtering_query <- query('{ "match_all": {} }')
  } else {
    filtering_query <- query(sprintf('{ "range": { "timestamp": { "lte": %s } } }', perspective))
  }

  raw_data <- elastic(cluster_url = str_c("http://", opt$elastic, ":9200"), index = str_c(app_id, ".", tailoring, ".intensity")) %search%
    filtering_query %>%
    as_tibble()

  # BUG FIX: the pipeline result was assigned to an unused local variable,
  # which made the function return its value invisibly; return it directly.
  raw_data %>%
    select(timestamp, starts_with("intensity")) %>%
    arrange(timestamp) %>%
    left_join(transform_context(raw_data), by = "timestamp") %>%
    arrange(timestamp)
}
#' When used with the elastic client, returns the list of groups.
#'
#' @param app_id The app-id to be used in the query.
#' @param tailoring the tailoring to be used in the query.
#'
#' @example elastic(cluster_url = "localhost:9200", index = "my_app.all.intensity") %info% list_intensity_groups("my_app", "all")
list_intensity_groups <- function(app_id, tailoring) {
  # Mapping endpoint of the per-app/per-tailoring intensity index.
  endpoint <- str_c("/", app_id, ".", tailoring, ".intensity/_mapping")

  # Extract the group names, i.e. the field names nested under the
  # "intensity" property of the index mapping.
  process_response <- function(response) {
    index_mapping <- httr::content(response, as = "parsed")
    names(index_mapping[[1]]$mappings$properties$intensity$properties)
  }

  # Classed list understood by elasticsearchr's %info% operator.
  structure(list("endpoint" = endpoint, "process_response" = process_response),
            class = c("elastic_info", "elastic_api", "elastic"))
}
#' Gets the latest timestamp stored in the elasticsearch for the passed app-id and tailoring.
#'
#' @param app_id The app-id to be used in the query.
#' @param tailoring the tailoring to be used in the query.
#'
#' @example get_latest_timestamp("my_app", "all")
get_latest_timestamp <- function(app_id, tailoring) {
  client <- elastic(cluster_url = str_c("http://", opt$elastic, ":9200"), index = str_c(app_id, ".", tailoring, ".intensity"))

  # Build a JSON list of the quoted intensity field names, e.g.
  # "intensity.g1", "intensity.g2", for use inside the painless script.
  intensity_fields <- client %info%
    list_intensity_groups(app_id, tailoring) %>%
    str_c("\"intensity.", ., "\"") %>%
    paste(collapse = ", ")

  # Query: documents with timestamp >= 1 that have at least one non-empty
  # intensity field (checked by the embedded painless script); aggregate
  # the maximum timestamp and return it.
  client %search%
    (
      query(sprintf('{
        "bool": {
          "filter": [
            { "range": { "timestamp": { "gte": 1 } } },
            {
              "script": {
                "script": {
                  "source": "for (field in params.fields) { if (doc[field].size() > 0) { return true } } return false",
                  "params": { "fields": [ %s ] },
                  "lang": "painless"
                }
              }
            }
          ]
        }
      }', intensity_fields), size = 0) +
        aggs('{
          "max_timestamp" : { "max" : { "field" : "timestamp" } }
        }')
    ) %>%
    .$value
}
|
07086415e3f85344d6e5e582c0be6527d681b8af
|
41b079970f142ed6439a07b896719718b1fb4fff
|
/R/Strategy.R
|
960995442b6034f0c24e0c39f24863fa2317c721
|
[] |
no_license
|
quantrocket/strategery
|
5e015e75d874c6ab16e767861e394350bd825055
|
a7b6aee04f3f95b71e44c2c9f3c9a76390c21e52
|
refs/heads/master
| 2021-01-18T07:36:31.291056
| 2014-06-17T21:54:30
| 2014-06-17T21:54:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 401
|
r
|
Strategy.R
|
# Placeholder constructor — currently an empty stub that returns NULL.
Strategy <- function(){}

#' Start strategy definition
#'
#' @export
# NOTE(review): the whole body is commented out, so calling this is a no-op
# returning NULL invisibly. The commented code suggests it was meant to
# create a "strategy" object and assign it into the global environment.
newStrategy <- function (name)
{
  # s <- list(name=name)
  # class(s) <- "strategy"
  # assign(name, s , envir=.GlobalEnv)
}

#' Save (persist) strategy definition
#'
#' @export
# NOTE(review): also a stub — the commented code hints at persisting the
# strategy by reassigning it under its name in the global environment.
saveStrategy <- function(
                         # envir=strategy$name
                         )
{
  # s <- get("strategy", envir=.GlobalEnv)
  #
  # assign(strategy$name, s , envir=.GlobalEnv)
}
|
7607c5bebfa73a1086f8cbb5a2f05618733389e0
|
4c7f27e57df28dcb83a714c8395cc019e694b03a
|
/Active.R
|
d0a59feed82139a185d85e20a4ab3e5a64cdf827
|
[] |
no_license
|
TylerShirley/Active_monitor
|
93abf8c5175c0c4a4f4268ef78d2ad2854f2dc4a
|
49b5ae28d4f782c0067e904dd384deaf74b2294d
|
refs/heads/master
| 2022-11-29T09:50:23.072544
| 2020-08-05T21:50:26
| 2020-08-05T21:50:26
| 283,050,658
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,241
|
r
|
Active.R
|
# Activity-monitor analysis: daily step totals, per-interval averages,
# NA imputation, and weekday vs weekend comparison.
activity_unclean <- read.csv("activity.csv")

# Load in ggplot2 and helpers.
library(ggplot2)
library(lubridate)
library(plyr)
library(dplyr)

# Remove the NA rows and convert the date column to Date type.
activity_unclean$date <- as.Date(as.character(activity_unclean$date))
activity <- na.omit(activity_unclean)
activity$day <- weekdays(activity$date)

# Total steps per day.
step_day <- aggregate(steps ~ date, activity, FUN = sum)

# Histogram of daily steps taken.
hist(step_day$steps,
     xlab = "average steps",
     main = "Histogram of Steps",
     col = "red")

# Mean and median steps per day (summary() reports both).
mean_median_steps <- summary(step_day$steps)

# Time series: average steps per 5-minute interval across days.
step_int <- aggregate(steps ~ interval, activity, FUN = mean)
tim_ser <- ggplot(step_int, aes(x = interval, y = steps)) +
  geom_line() +
  labs(title = "Average Steps Per Interval", x = "Interval", y = "Average Steps") +
  theme(plot.title = element_text(hjust = 0.5))

# Maximum average steps, and the interval where it occurs.
max_step <- max(step_int$steps)
biggest_interval <- step_int[which.max(step_int$steps), 1]

# Number of NA cells in the raw dataset.
sum_active_na <- sum(is.na(activity_unclean))

# Impute NAs with the per-interval mean.
df_active_na <- activity_unclean
active_na <- is.na(df_active_na$steps)
averages <- tapply(df_active_na$steps, df_active_na$interval, mean, na.rm = TRUE)
# NOTE(review): the result of this na.omit() is discarded — the line is a
# no-op as written.
na.omit(averages)
# NOTE(review): replace() recycles `averages` positionally over the NA
# slots; this is only correct if missing values span whole days in interval
# order — verify against the data before trusting the imputation.
df_active_na <- df_active_na %>%
  mutate(steps = replace(steps, active_na, averages))

# Daily totals after imputation, with their distribution, mean and median.
new_df_active <- tapply(df_active_na$steps, df_active_na$date,sum, na.rm = TRUE)
hist(new_df_active)
Mean_new = mean(new_df_active)
Median_new = median(new_df_active)

# Breakdown of weekday vs weekend average steps per interval.
df_active_na$day <- weekdays(df_active_na$date)
df_active_na$dow <- ifelse(df_active_na$day %in% c("Saturday", "Sunday"),
                           "Weekend",
                           "Weekday")
Weekend_int <- subset(df_active_na, df_active_na$dow == "Weekend")
Weekend_int <- aggregate(steps ~ interval, Weekend_int, FUN = mean)
Weekday_int<- subset(df_active_na, df_active_na$dow == "Weekday")
Weekday_int <- aggregate(steps ~ interval, Weekday_int, FUN = mean)

# Stacked line plots: weekday on top, weekend below.
par(mfrow = c(2,1))
plot(Weekday_int$interval, Weekday_int$steps, "l")
plot(Weekend_int$interval, Weekend_int$steps, "l")
|
1b6ffedd61e3b0a861cad1bbd2c6b775f9bb890b
|
4c4dc390167f4a6e77f2d0c1f53184efd632fab5
|
/R/plot_cal.R
|
c9de6d4b4b68b75281a6cfdd44749123cd933641
|
[] |
no_license
|
ck2136/PMMSKNN
|
bca3c01f6443d535a4e498270c2e56d7e22fb55c
|
f41a39493ea881b134e985e5bd7c26369c190726
|
refs/heads/master
| 2023-07-06T22:25:31.287256
| 2021-08-13T08:15:50
| 2021-08-13T08:15:50
| 186,530,969
| 0
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 15,601
|
r
|
plot_cal.R
|
#' Plot for statistical validity and calibration
#'
#' Creates two types of plots.
#' \enumerate{
#' \item \emph{Model performance plots} showing average bias,
#' coverage, 50 percent PI width (mean IQR diefference),
#' and a combined score of these statistics, at various
#' choices for the number of matches.
#' \item \emph{Calibration plots} showing the distribution of the
#' observed outcomes at several predicted values. Separate plots
#' are made for the training and test data.}
#'
#' @param plotobj - An object produced by \code{\link{loocv_function}}
#' @param test_proc - Preprocessed object from \code{\link{preproc}}
#' @param outcome - Name of the outcomes variable (type=string)
#' @param filt Logical (\code{TRUE/FALSE}) indicating whether or not to
#' filter the data in terms of performance values. This would be useful
#' if the user would want to exclude certain values in presenting the data
#' @param pred_sum - String value representing the summary used to depict
#' the predictions within the calibration. Usually \code{pred_sum = 'mean'}
#' or \code{pred_sum = 'median'} would be a good choice to depict the
#' summary statistic of predicted values across the deciles of observed values
#' @param obs_dist - String value representing the summary used to depict
#' the observed value within the calibration plot.
#' Usually \code{pred_sum = 'median'} woud be a good choice to depict the
#' deciles of observed values in the calibration plot.
#' @param loocv Logical indicating the type of plot:
#' Model performance plot (if \code{loocv = TRUE}, default),
#' or or calibration plot (if \code{loocv = FALSE}).
#' @param filter_exp - String. For filtering possible values of bias, precision, and coverage values that are out of range. (e.g. \code{"bias < 1.5"})
#' @param plot_cal_zscore - Logical (\code{TRUE/FALSE}) indicating whether to plot zscore calibration
#' @param wtotplot - Logical (\code{TRUE/FALSE}) indicating wehter to include a weighted total score plot
#' that indicates the optimal n match based on equally weighting bias, coverage and precision
#' @param plotvals - Logical (\code{TRUE/FALSE}) indicating whether to plot bias, coverage, and precision values onto the calibration plot
#' @param iqrfull - Dataframe containing gamlss predictions which triggers the plotting of reference model prediction on the same plot as that of the patient like me predictions.
#' @param bs Logical (\code{TRUE/FALSE}) indicating whether to plot brokenstick object.
#' @param \dots - For specifying plotting options.
#'
#' @return An object of class \code{ggplot} that outputs a calibration plot of observed vs. deciles of predicted values.
#'
#' @export
plot_cal <- function(plotobj,
                     test_proc=test_proc,
                     outcome = "tug",
                     filt=FALSE,
                     pred_sum="mean",
                     obs_dist="median",
                     #plot_by=seq(10,150,5),
                     loocv=TRUE,
                     filter_exp = NULL,
                     plot_cal_zscore=FALSE,
                     wtotplot=FALSE,
                     plotvals=FALSE,
                     iqrfull=NULL,
                     bs=FALSE,
                     ...) {
  # - - - - - - - - - - - - - - - - - - - - - - #
  # Instantiate all plot objects for viewing
  # - - - - - - - - - - - - - - - - - - - - - - #
  #-- NON CALIBRATION plots only
  # Two top-level modes:
  #   loocv = TRUE  -> performance diagnostics (RMSE, 50% coverage, precision)
  #                    plotted against the number of matches n, arranged with
  #                    cowplot::plot_grid().
  #   loocv = FALSE -> observed-vs-predicted calibration plots for the training
  #                    and testing sets, built by the package-internal plot_func().
  if(loocv){
    # - - - - - - - - - - - - - - - - - - - - - - #
    # RMSE/Coverage Plot function for test and train
    # - - - - - - - - - - - - - - - - - - - - - - #
    # For brokenstick object it's simple data manipulation of loocv_score
    if(bs){
      # Brokenstick fit: loocv_score is already a per-n performance table;
      # reshape columns rmse..prec into long (nearest_n, measure, value) form.
      tmp1 <- plotobj$loocv_score %>%
        tidyr::pivot_longer(.data$rmse:.data$prec, names_to = "measure") %>% rename(nearest_n = 1)
      perfdf <- plotobj$loocv_score
    } else {
      # Non-brokenstick: the n values are embedded in the names of loocv_res
      # (e.g. "nearest_10"), so extract the digits to recover them.
      nearest_n =as.numeric(regmatches(names(plotobj$loocv_res), regexpr("\\d+",names(plotobj$loocv_res))))
      perfdf <- loocv_perf(
        plotobj$loocv_res,
        outcome=outcome,
        nearest_n=nearest_n,
        perf_round_by=4
      )
      tmp1 <- perfdf %>%
        tidyr::pivot_longer(.data$rmse:.data$prec, names_to = "measure") %>% rename(nearest_n = 1)
    }
    # tmp1 <-listtodf(plotobj$loocv_res)
    # RMSE vs number of matches (0 reference line = no error).
    train_bias <- ggplot(tmp1 %>%
                           filter(
                             #abs(value) < 50,
                             #measure == 'bias' | measure == 'rmse' | measure == 'zscore')
                             #measure == 'bias' | measure == 'zscore')
                             .data$measure == 'rmse')
    ) +
      xlab("Matches (N)") + ylab("RMSE") +
      geom_point(aes(x=.data$nearest_n, y=.data$value, colour=.data$measure)) +
      #geom_smooth(aes(x=.data$nearest_n, y=.data$value, colour = .data$measure),
      #method="gam",formula = y ~ s(x, bs="cs", k=splinek ), se=FALSE) +
      theme_bw() + theme(legend.position="none", aspect.ratio = 1) +
      geom_hline(yintercept = 0)
    #annotate("text", x = median(tmp1$nearest_n), y = 0, vjust = -1, label = "0 Bias")
    # Coverage (Excluding extreme measures)
    # 50% interval coverage vs number of matches; 0.50 line marks nominal coverage.
    # y limits padded to 95%/105% of the observed range.
    train_cov <- ggplot(tmp1 %>%
                          filter(
                            #nearest_n > 10,
                            .data$measure == 'cov')
                        #measure == 'iqrcoverage' | measure == 'coverage95c' )
    ) +
      geom_point(aes(x=.data$nearest_n, y=.data$value), colour="blue") +
      #geom_smooth(aes(x=.data$nearest_n, y=.data$value), colour = "blue",
      #method="gam",formula = y ~ s(x, bs="cs", k=splinek ), se=FALSE) +
      xlab("Matches (N)") + ylab("Coverage (50%)") +
      ylim(min(tmp1 %>%
                 filter(.data$measure == 'cov') %>%
                 dplyr::select(.data$value) %>%
                 unlist %>%
                 as.vector) * 0.95 ,
           max(tmp1 %>%
                 filter(.data$measure == 'cov') %>%
                 dplyr::select(.data$value) %>%
                 unlist %>%
                 as.vector) * 1.05)+
      #ylim(0.3,1)+
      #scale_colour_manual(labels=c("95% IQR Coverage","50% IQR Coverage"), values=c("blue","red")) +
      scale_colour_manual(labels=c("50% IQR difference"), values=c("blue")) +
      theme_bw() + theme(legend.position="none", aspect.ratio = 1) +
      geom_hline(yintercept = 0.50)
    #annotate("text", x = median(tmp1$nearest_n),y = .50, vjust = -0.1, label = "Coverage")
    # - - - - - - - - - - - - - - - - - - - - - - #
    # Precision Plot: Mean IQR dif by Nearest N
    # - - - - - - - - - - - - - - - - - - - - - - #
    pppm <- ggplot(tmp1 %>%
                     filter(
                       #nearest_n > 10,
                       .data$measure == 'prec')
                   #measure == 'iqrcoverage' | measure == 'coverage95c' )
    ) +
      geom_point(aes(x=.data$nearest_n, y=.data$value), colour="green") +
      #geom_smooth(aes(x=.data$nearest_n, y=.data$meaniqrdif), colour="green",
      #method="gam",formula = y ~ s(x, bs="cs", k=splinek ), se=FALSE) +
      xlab("Matches (N)") + ylab("Mean IQR difference") +
      ylim(min(tmp1 %>%
                 filter(.data$measure == 'prec') %>%
                 dplyr::select(.data$value) %>%
                 unlist %>%
                 as.vector) * 0.95 ,
           max(tmp1 %>%
                 filter(.data$measure == 'prec') %>%
                 dplyr::select(.data$value) %>%
                 unlist %>%
                 as.vector) * 1.05)+
      #ylim(0.3,1)+
      #scale_colour_manual(labels=c("95% IQR Coverage","50% IQR Coverage"), values=c("blue","red")) +
      #scale_colour_manual(labels=c("50% IQR Coverage"), values=c("blue")) +
      theme_bw() + theme(legend.position="none", aspect.ratio = 1) +
      # Reference line at the worst (largest) observed precision value.
      geom_hline(yintercept = max(perfdf$prec))
    #annotate("text", x=median(ppdf_means$nearest_n), y = max(ppdf_means %>% dplyr::select(.data$meaniqrdif) %>% unlist %>% as.vector), vjust = -1, label = "Max IQR Difference")
    # - - - - - - - - - - - - - - - - - - - - - - #
    # Return plot objects
    # - - - - - - - - - - - - - - - - - - - - - - #
    if(wtotplot){
      # - - - - - - - - - - - - - - - - - - - - - - #
      # Weighted Total Score Plot included
      # - - - - - - - - - - - - - - - - - - - - - - #
      # loocvperf() combines bias/coverage/precision into one weighted score
      # per n, used to pick the optimal number of matches.
      wtspdf <- loocvperf(plotobj$loocv_res, test_proc$train_o)
      wtsp <- ggplot(wtspdf) +
        geom_point(aes(x=.data$nearest_n, y=.data$totscore)) +
        #geom_smooth(aes(x=.data$nearest_n, y=.data$totscore), method="gam",
        #formula = y ~ s(x, bs="cs", k=splinek ), se=FALSE) +
        xlab("Matches (N)") + ylab("Weighted Total Score") +
        theme_bw() + theme(legend.position="none", aspect.ratio = 1)
      return(plot_grid(train_bias, train_cov, pppm, wtsp, labels="AUTO", ncol=2))
    } else {
      # - - - - - - - - - - - - - - - - - - - - - - #
      # Weighted Total Score Plot Not included
      # - - - - - - - - - - - - - - - - - - - - - - #
      return(plot_grid(train_bias, train_cov, pppm, labels="AUTO", ncol=3))
    }
  } else {
    # Calibration mode: build train and test observed-vs-predicted plots.
    # NOTE(review): plot_func() appears to return a list where [[1]] is the
    # calibration plot, [[2]]/[[3]] its axis limits, [[5]]-[[7]] the zscore
    # variant, and $tp holds zscore/coverage/precision values — confirm
    # against plot_func()'s definition.
    print("creating training calibration plot")
    cptrainlist = plot_func(plotobj = plotobj,
                            test_proc = test_proc,
                            train=TRUE,
                            filt=filt,
                            iqrfull=iqrfull,
                            pred_sum=pred_sum,
                            obs_dist=obs_dist,
                            outcome=outcome
    )
    print("creating testing calibration plot")
    cptestlist = plot_func(plotobj = plotobj,
                           train=FALSE,
                           test_proc = test_proc,
                           filt=filt,
                           iqrfull=iqrfull,
                           pred_sum=pred_sum,
                           obs_dist=obs_dist,
                           outcome=outcome
    )
    if(plot_cal_zscore==FALSE){
      # Shared square axis limits across the train and test panels.
      minc <- floor(min(cptrainlist[[2]], cptestlist[[2]], na.rm=TRUE))
      maxc <- ceiling(max(cptrainlist[[3]], cptestlist[[3]], na.rm=TRUE))
      # PLOT BIAS, PRECISION, COVERAGE
      if(plotvals){
        #labels
        # Annotation strings are parsed as plotmath expressions by geom_text().
        train_zs_lab <- paste0("zscore == ", round(cptrainlist$tp$zscore,3))
        train_cov_lab <- paste0("coverage == ", round(cptrainlist$tp$coverage,3))
        train_prec_lab <- paste0("precision == ", round(cptrainlist$tp$precision,3))
        test_zs_lab <- paste0("zscore == ", round(cptestlist$tp$zscore, 3))
        test_cov_lab <- paste0("coverage == ", round(cptestlist$tp$coverage, 3))
        test_prec_lab <- paste0("precision == ", round(cptestlist$tp$precision,3))
        # Calibration plots
        cptrain <- cptrainlist[[1]] + xlim(minc, maxc) + ylim(minc,maxc) +
          #geom_text(aes(label=paste0(cptrainlist[[4]]), y=minc+(maxc-minc)/10+1, x=(minc+maxc)*0.6), parse= TRUE, color="red") +
          geom_text(aes(label=paste0(train_zs_lab), y=minc+(maxc-minc)/10+(maxc-minc)/10, x=(minc+maxc)*0.6), parse= TRUE, color="red") +
          geom_text(aes(label=paste0(train_cov_lab), y=minc+(maxc-minc)/10, x=(minc+maxc)*0.6), parse= TRUE, color="blue") +
          geom_text(aes(label=paste0(train_prec_lab), y=minc+(maxc-minc)/10-(maxc-minc)/10, x=(minc+maxc)*0.6), parse= TRUE, color="green")
        cptest <- cptestlist[[1]] + xlim(minc, maxc) + ylim(minc,maxc) +
          #geom_text(aes(label=paste0(cptestlist[[4]]), y=minc+(maxc-minc)/10+1, x=(minc+maxc)*0.6), parse= TRUE, color="red")+
          geom_text(aes(label=paste0(test_zs_lab), y=minc+(maxc-minc)/10+(maxc-minc)/10, x=(minc+maxc)*0.6), parse= TRUE, color="red") +
          geom_text(aes(label=paste0(test_cov_lab), y=minc+(maxc-minc)/10, x=(minc+maxc)*0.6), parse= TRUE, color="blue") +
          geom_text(aes(label=paste0(test_prec_lab), y=minc+(maxc-minc)/10-(maxc-minc)/10, x=(minc+maxc)*0.6), parse= TRUE, color="green")
        return(plot_grid(cptrain + theme(aspect.ratio = 1), cptest + theme(aspect.ratio = 1), labels = "AUTO", vjust = 3))
      } else {
        # NOTE(review): minc/maxc are recomputed here with the same expressions
        # as above — redundant but harmless.
        minc <- floor(min(cptrainlist[[2]], cptestlist[[2]], na.rm=TRUE))
        maxc <- ceiling(max(cptrainlist[[3]], cptestlist[[3]], na.rm=TRUE))
        cptrain <- cptrainlist[[1]] +
          xlim(minc, maxc) + ylim(minc,maxc) +
          theme(aspect.ratio = 1)
        #scale_colour_manual(name="", values=c("REF"="red", "PLM"="blue"),
        #guide = guide_legend(fill = NULL, colour=NULL))
        cptest <- cptestlist[[1]] +
          xlim(minc, maxc) + ylim(minc,maxc) +
          theme(aspect.ratio = 1)
        #scale_colour_manual(name="", values=c("REF"="red", "PLM"="blue"),
        #guide = guide_legend(fill = NULL, colour=NULL))
        cpfin <- plot_grid(cptrain + theme(legend.position = "none"),
                           cptest + theme(legend.position = "none"),
                           align = "vh"
        )
        #legend <- get_legend(cptrain)
        return(plot_grid(cpfin))
        #return(plot_grid(cpfin, legend, rel_widths=c(3,0.3)))
        #if(!is.null(iqrfull)){
        #cptrainref <- cptrainlist[[9]] + xlim(minc, maxc) + ylim(minc,maxc)
        #cptestref <- cptestlist[[9]] + xlim(minc, maxc) + ylim(minc,maxc)
        #return(plot_grid(cptrain + theme(aspect.ratio = 1),
        #cptest + theme(aspect.ratio = 1),
        #cptrainref + theme(aspect.ratio = 1),
        #cptestref + theme(aspect.ratio = 1),
        #labels = "AUTO", label_y = 3, ncol=2))
        #} else {
        #return(plot_grid(cptrain + theme(aspect.ratio = 1), cptest + theme(aspect.ratio = 1), labels = "AUTO", label_y = 3, ncol=2))
        #}
      }
    } else {
      # Zscore calibration variant: use elements [[5]]-[[7]] of the plot_func()
      # result instead of [[1]]-[[3]].
      minc <- floor(min(cptrainlist[[6]], cptestlist[[6]], na.rm=TRUE))
      maxc <- ceiling(max(cptrainlist[[7]], cptestlist[[7]], na.rm=TRUE))
      # Calibration plots
      cptrain <- cptrainlist[[5]] +
        xlim(minc, maxc) + ylim(minc,maxc) +
        theme(aspect.ratio = 1)
      #scale_colour_manual(name="", values=c("REF"="red", "PLM"="blue"),
      #guide = guide_legend(fill = NULL, colour=NULL))
      cptest <- cptestlist[[5]] +
        xlim(minc, maxc) + ylim(minc,maxc) +
        theme(aspect.ratio =1)
      #scale_colour_manual(name="", values=c("REF"="red", "PLM"="blue"),
      #guide = guide_legend(fill = NULL, colour=NULL))
      #legend <- get_legend(cptrain)
      cpfin <- plot_grid(cptrain + theme(legend.position = "none"),
                         cptest + theme(legend.position = "none"),
                         align="vh"
      )
      return(plot_grid(cpfin))
      #return(plot_grid(cpfin, legend, rel_widths=c(3,0.3)))
    }
  }
  #return(plot_grid(cptrain, cptest, train_bias, train_cov, ppp, pppm, labels = "AUTO"))
}
|
d53d98fbab23e593918150f5aa8c406b3a80d5d6
|
f9321d868b5249523c7ea88762dadd11f795952d
|
/R/wiggleplotr-package.r
|
8f7d1259fb963c41aa7e50b1ccb3f9add2dc43cb
|
[
"Apache-2.0"
] |
permissive
|
kauralasoo/wiggleplotr
|
34a630520714a1a19f50e65b1a17ddb05747345b
|
bcb4decc5d05b0296c74cb85c880d4122fffff65
|
refs/heads/master
| 2022-07-02T06:00:42.288050
| 2022-06-27T19:54:46
| 2022-06-27T19:54:46
| 26,833,955
| 32
| 19
|
Apache-2.0
| 2022-06-27T20:07:57
| 2014-11-18T22:52:41
|
R
|
UTF-8
|
R
| false
| false
| 844
|
r
|
wiggleplotr-package.r
|
#' wiggleplotr
#'
#' wiggleplotr package provides tools to visualise transcript annotations (\code{\link[wiggleplotr]{plotTranscripts}}) and plot
#' sequencing read coverage over annotated transcripts (\code{\link[wiggleplotr]{plotCoverage}}).
#'
#' You can also use covenient wrapper functions
#' (\code{\link[wiggleplotr]{plotTranscriptsFromEnsembldb}}), (\code{\link[wiggleplotr]{plotCoverageFromEnsembldb}}),
#' (\code{\link[wiggleplotr]{plotTranscriptsFromUCSC}}) and (\code{\link[wiggleplotr]{plotCoverageFromUCSC}}).
#'
#' To learn more about wiggleplotr, start with the vignette:
#' \code{browseVignettes(package = "wiggleplotr")}
#'
#' @name wiggleplotr
#' @docType package
#' @import ggplot2
#' @importFrom dplyr "%>%"
#' @importFrom dplyr "row_number"
# Declare column names used via non-standard evaluation in dplyr/ggplot2
# pipelines so that R CMD check does not report them as undefined globals.
utils::globalVariables(c("strand","gene_name","transcript_id", "tx_id"))
|
b438956bcd69b69e172aad89d915bbe2adef1686
|
ebe2d8990d3073a610e0dfbd26bfcb55e8b87cc2
|
/tabs/sobre.R
|
c66c7ffc8b70b875c61ccfeadf24ce78bc97e8e7
|
[] |
no_license
|
voronoys/voronoys-app
|
c7732c475199d82712aeab2b180112e27f5a12cf
|
cc6ccbe58e2732156e7358c3861e746803b1873c
|
refs/heads/master
| 2021-05-05T07:30:20.138526
| 2018-01-24T19:48:56
| 2018-01-24T19:48:56
| 118,813,762
| 5
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 532
|
r
|
sobre.R
|
# "Sobre" (About) tab: renders each contributor's description Rmd to HTML and
# embeds it, preceded by a line break, in the original display order.
sobre <- do.call(
  tabPanel,
  c(
    list(title = "Sobre", value = "sobre"),
    unlist(
      lapply(c("augusto", "douglas", "felipe", "gordoy", "luis"), function(pessoa) {
        list(br(), includeHTML(rmarkdown::render(paste0('descricoes/', pessoa, '.Rmd'))))
      }),
      recursive = FALSE
    )
  )
)
|
a5f75a58af3a5f63c836630f6c4a1022a47719d1
|
2b7bb0a817d293a007c1597b57ad9a083c4c614a
|
/R/calcTradeVolume.R
|
620d899dfdc021064a322d719c10abfee0ed07c7
|
[] |
no_license
|
helenristov/aCompiler
|
777585a77ada30fbbb750339fd28dfe439d0cf1e
|
cc0a0146c7dd20c17829190c9eac3e65ad71d940
|
refs/heads/master
| 2021-01-23T07:33:51.970028
| 2018-11-13T03:22:08
| 2018-11-13T03:22:08
| 102,508,631
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,941
|
r
|
calcTradeVolume.R
|
#'
#' Creates a time-series of the traded volumes for a given time period.
#'
#'@param data MDR or TRS data.
#'@param contract The contract that you are pulling data for.
#'@param lookBackPeriod A list containing the lookback units and size for a rolling volume calculation specified in secs,hours,day,weeks,months
#'
#'@return An xts object with columns \code{volume}, \code{bidvolume} and
#'  \code{askvolume} on a one-second grid, or aggregated by the lookback
#'  period when a \code{lookBackPeriod} list is supplied.
#'
#'@author Helen Ristov
#'
#'@export
#'
#'@import data.table
#'
calcTradeVolume <- function(data, contract, lookBackPeriod = 1) {
  # normalize to one second bars
  data <- align.time(data, 1)
  ## identify bid and ask volume
  # A trade at the best bid is counted as bid volume; at the best ask as ask
  # volume. Trades inside the spread fall into neither bucket.
  data$BidVolume <- ifelse(!is.na(data$TradedVolume) & data$TradedPrice == data$BestBid, data$TradedVolume, 0)
  data$AskVolume <- ifelse(!is.na(data$TradedVolume) & data$TradedPrice == data$BestAsk, data$TradedVolume, 0)
  combined <- data.table(index = as.POSIXct(index(data)),
                         TradedVolume = as.numeric(data[,'TradedVolume']),
                         BidVolume = as.numeric(data[,"BidVolume"]),
                         AskVolume = as.numeric(data[,"AskVolume"]))
  # Sum volumes within each one-second timestamp.
  combined <- combined[, list(volume = sum(TradedVolume, na.rm = TRUE), bidvolume = sum(BidVolume), askvolume = sum(AskVolume)), by = "index"]
  # Fill the series to a regular one-second grid, zeroing seconds with no trades.
  SquareTime <- seq(combined$index[1], combined$index[nrow(combined)], 1)
  final <- merge(as.xts(combined), SquareTime)
  final$volume[which(is.na(final$volume))] <- 0
  final$bidvolume[which(is.na(final$bidvolume))] <- 0
  final$askvolume[which(is.na(final$askvolume))] <- 0
  # determine rolling volumes over lookback period
  # NOTE(review): the default lookBackPeriod = 1 has length 1, so this branch
  # is skipped and the one-second series is returned unaggregated; aggregation
  # only happens when a list(units = ..., size = ...) is passed. Confirm this
  # is the intended default behavior.
  if(length(lookBackPeriod) != 1){
    ep <- endpoints(final, on = lookBackPeriod$units, k = lookBackPeriod$size)
    volume <- period.apply(final$volume, ep, FUN=sum)
    bidvolume <- period.apply(final$bidvolume, ep, FUN=sum)
    askvolume <- period.apply(final$askvolume, ep, FUN=sum)
    final <- merge(volume,bidvolume,askvolume)
    # Shift timestamps by one second so each bar is stamped at period end.
    index(final) <- index(final)+1
  }
  return(final)
}
|
bd0fba7a7ee8f5a990f01cc4bff0926a62ca2714
|
b79d9a843181d324ab1c61f24bb277b2265f7973
|
/allelic_sims/src/check_mcmc_convergence.R
|
1525e9994677822918e6f1f120887053aee15027
|
[] |
no_license
|
wf8/homeolog_phasing
|
5c27f48eb1efc1ce60f4b8f7664e4c498239d5bc
|
9b00fb3af9e555f1aa51d96b32b840903b317c54
|
refs/heads/master
| 2022-07-02T22:35:01.035237
| 2022-06-04T21:52:44
| 2022-06-04T21:52:44
| 193,846,433
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,124
|
r
|
check_mcmc_convergence.R
|
library(coda)
library(ggplot2)
# For each of 1000 simulation replicates, compute the effective sample size
# (ESS) of the posterior trace (post burn-in, iterations 500-1999) for the
# with-dummy and no-dummy runs, and append one CSV row per replicate.
# NOTE(review): ess.csv is opened with append=TRUE and never cleared, so
# re-running this script accumulates duplicate rows — delete the file first.
for (rep in 0:999) {
  line_out = paste0(rep)
  for (dir in c('output_w_dummy/', 'output_no_dummy/')) {
    line_out = paste0(line_out, ',')
    in_file = paste0(dir, rep, '/phasing.log')
    d = read.csv(in_file, sep='\t')
    # [[1]] drops the names attribute from coda::effectiveSize's result.
    ess = effectiveSize(d$Posterior[500:1999])[[1]]
    line_out = paste0(line_out, ess)
  }
  line_out = paste0(line_out, '\n')
  cat(line_out, file=paste0('ess.csv'), append=TRUE)
}
# Scatter the two ESS values per replicate; dashed lines at ESS = 200 mark the
# conventional convergence threshold, and annotations report the proportion of
# replicates below it for each condition.
d = read.csv('ess.csv', header=FALSE, col.names=c('rep', 'ESS_with_dummy' ,'ESS_no_dummy'))
p = ggplot(d) +
  geom_point(aes(x=ESS_no_dummy, y=ESS_with_dummy), alpha=0.5) +
  geom_hline(yintercept=200, linetype='dashed') +
  geom_vline(xintercept=200, linetype='dashed') +
  annotate('text', x=400, y=750, label=paste0('proportion not converged'), size=3) +
  annotate('text', x=400, y=700, label=paste0('no dummy: ', sum(d$ESS_no_dummy < 200)/1000), size=3) +
  annotate('text', x=400, y=650, label=paste0('w/ dummy: ',sum(d$ESS_with_dummy < 200)/1000), size=3) +
  theme_classic()
ggsave('MCMC_convergence.pdf', p, width=5, height=4)
|
007f676d2dbcba3fc1beea83047c791728f98084
|
4f240a9d013e25b3ba8c36da43818f48cdce835a
|
/ExttratTestCases.R
|
6bc2c06472d3ef94da69f66896b8e826a570bbf5
|
[] |
no_license
|
boazgiron2020/Rfiles
|
05167cd0415b7c8b9ac25351fcc1ca2c131e14a4
|
78d1da74f7f89de1e03ed016e58aa39474d00dff
|
refs/heads/master
| 2023-02-16T19:06:20.729814
| 2021-01-17T12:20:03
| 2021-01-17T12:20:03
| 330,382,541
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,466
|
r
|
ExttratTestCases.R
|
# Build a held-out test set for the HVPG >= 10 classification task from data
# pasted onto the Windows clipboard (tab-separated with a header row).
data = read.delim("clipboard",header = TRUE,stringsAsFactors = FALSE)
dim(data)
as.character(data$Pt)
data = data.frame(data)
colnames(data)
#"Pt","FINAL.HVPG"
# Keep the patient id, outcome, and four candidate predictor columns.
dataTest = data[,c("Pt","FINAL.HVPG","PDRdivPDRfitLength.30","Platelets.Clean","Creatinine","GGT")]
dataTest[,3:6] <- sapply(dataTest[,3:6],as.numeric)
# Mean-impute missing predictor values, column by column.
for(i in 3:ncol(dataTest)){
  dataTest[is.na(dataTest[,i]), i] <- mean(dataTest[,i], na.rm = TRUE)
}
# Binary outcome: 1 if final HVPG >= 10 mmHg, else 0.
dataTest$Result10 <- as.factor(ifelse(as.numeric(dataTest[,"FINAL.HVPG"]) >= 10,1,0))
dataTest = dataTest[!is.na(dataTest$Result10),]
Pt <- as.character(dataTest$Pt)
# Exclude patients already present in the training set.
# NOTE(review): data135 is not defined in this script — it must exist in the
# workspace (presumably the 135-patient training cohort); confirm before running.
dataTestF = dataTest[(!(Pt %in% as.character(data135$Pt))),]
dataTestF = dataTestF[!is.na(dataTestF$Result10),]
dim(dataTestF)
# Disabled scratch block (if(0) never executes): an earlier workflow that wrote
# the data to CSV and fit a random forest on the first 135 rows. Kept for
# reference only.
if(0){
  write.csv(dataTest,"C:/Temp/data192.csv")
  colnames(data)
  str(data)
  write.csv(data,"C:/Temp/data192.csv")
  colnames(data)
  data = read.csv("C:/Temp/data192.csv",header = TRUE , stringsAsFactors = FALSE,sep =",",na.strings = c("N/A","VALUE!"))
  datas = data.frame(data[,c("Platelets.Clean","LengthPDRdivCPDR.30","INR", "FINAL.HVPG" )])
  str(datas)
  datas[,4] = as.numeric(datas[,4])
  for(i in 1:ncol(datas)){
    datas[is.na(datas[,i]), i] <- mean(datas[,i], na.rm = TRUE)
  }
  ds <-datas[1:135,]
  ds$result <- factor(ifelse(ds$FINAL.HVPG >= 10,1,0))
  ds = data.frame(ds[,c("Platelets.Clean","LengthPDRdivCPDR.30","INR", "result" )])
  library(randomForest)
  rf = randomForest(result ~ . ,data = ds,ntree = 1000,mtry = 2)
  rf$confusion
}
|
14afd92313a522c81add9aa26caecb26833126a5
|
26aa1ab6322c64aa22b11d61ca872b4aac7a1bfd
|
/Create Variables/FULLVAL_cy.R
|
cad718a35d46d9987cf1234338bc72cff6bd8926
|
[] |
no_license
|
violet468118034/NY-Properties-Fraud-Analytics
|
b2a692bb7046ed79a82c0f42ba646292008d6045
|
1c0ff06f86cc439255f3d20c225272f0607d55dc
|
refs/heads/master
| 2021-01-23T03:54:05.673736
| 2017-03-25T05:54:09
| 2017-03-25T05:54:09
| 86,134,929
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,563
|
r
|
FULLVAL_cy.R
|
library(dplyr)
# Impute missing/zero FULLVAL, AVTOT and AVLAND for NY property records using
# group means within (TAXBIG, ZIP) strata.
# NOTE(review): ny and Cleandata_zy must already be in the workspace.
roger<- ny[,c(1:2,30:31,4,7,33,8,32,9:10,35,11:14,20:21,34)]
zy <- Cleandata_zy[,c(35:36)]
nynew <- cbind(roger,zy)
#nynew <- head(nynew,1000)
# FULLVAL
# Price per unit building volume among records with a non-zero FULLVAL,
# averaged within (TAXBIG, ZIP); missing group means fall back to the overall mean.
FV <- nynew%>%
  filter(FULLVAL!=0)%>%
  mutate(price = FULLVAL/BLDVOL)
FV2 <- FV%>%
  group_by(TAXBIG,ZIP)%>%
  summarise(Price = mean(price))
nynew$FULLVALold <- nynew$FULLVAL
nym <- merge(nynew,FV2,by.x=c("TAXBIG","ZIP"),by.y = c("TAXBIG","ZIP"),all.x = T)
pricemean <- mean(nym$Price,na.rm=T)
nym$Pricenew <- ifelse(is.na(nym$Price),pricemean,nym$Price)
nym$FULLVALnew <- nym$Pricenew*nym$BLDVOL
nym$FULLVAL <- ifelse(nym$FULLVALold==0 | is.na(nym$FULLVALold), nym$FULLVALnew,nym$FULLVALold)
# AVTOT
# Impute AVTOT from the group-mean AVTOT/FULLVAL ratio applied to the (now
# imputed) FULLVAL.
AVTOT <- nym%>%
  filter(AVTOT!=0)%>%
  mutate(ratio = AVTOT/FULLVAL)
AVTOT2 <- AVTOT%>%
  group_by(TAXBIG,ZIP)%>%
  summarise(Ratio = mean(ratio))
nym$AVTOTold <- nym$AVTOT
nym<- merge(nym,AVTOT2 , by.x=c("TAXBIG","ZIP"),by.y = c("TAXBIG","ZIP"),all.x = T)
nym$AVTOTnew <- nym$Ratio*nym$FULLVAL
nym$AVTOT <- ifelse(nym$AVTOTold==0, nym$AVTOTnew,nym$AVTOTold)
# AVLAND
# NOTE(review): unlike the AVTOT step, the AVLAND/AVTOT ratio is computed from
# nynew (pre-imputation) rather than nym — confirm this is intentional.
AVLAND <- nynew%>%
  filter(AVLAND!=0)%>%
  mutate(avratio = AVLAND/AVTOT)
AVLAND2 <- AVLAND%>%
  group_by(TAXBIG,ZIP)%>%
  summarise(Avratio = mean(avratio))
nym$AVLANDold <- nym$AVLAND
nym <- merge(nym,AVLAND2,by.x=c("TAXBIG","ZIP"),by.y = c("TAXBIG","ZIP"),all.x = T)
nym$AVLANDnew <- nym$Avratio*nym$AVTOT
nym$AVLAND <- ifelse(nym$AVLANDold==0, nym$AVLANDnew,nym$AVLANDold)
colnames(nym)
# Restore the original column order (merge moved the join keys first) and sort
# by record id before saving.
nyf <- nym[,c(3:10,1,2,11:21)]
nyf <- nyf%>%
  arrange(RECORD)
save(nyf, file = "FULLVAL.RData")
save.image()
|
daee6651bed197a2ed9b655b6523ecdafe495a6e
|
bc69cba0d813d0e7316589361ece90dae97ead70
|
/inst/modules/univariate_power_explorer/exports.R
|
f9e967a48d66770fbb59de27909e517db98445b2
|
[
"MIT"
] |
permissive
|
beauchamplab/ravebuiltins
|
04174f772d98bb99e51aa448dc042cca288bbd45
|
b44b718ce898c5e8e5b7153350627517723e3152
|
refs/heads/master
| 2023-03-15T19:03:16.725665
| 2022-10-06T12:12:29
| 2022-10-06T12:12:29
| 175,698,469
| 3
| 3
|
NOASSERTION
| 2022-10-06T12:12:30
| 2019-03-14T20:58:13
|
HTML
|
UTF-8
|
R
| false
| false
| 7,385
|
r
|
exports.R
|
# Reactive input/output handles supplied by the enclosing RAVE module runtime.
# Both assignments now use `<-` consistently (the original mixed `<-` and `=`
# at top level).
input <- getDefaultReactiveInput()
output <- getDefaultReactiveOutput()
# Render a 3D brain viewer colored by a per-electrode statistic.
# For each electrode, power is compared across condition groups: a t-test for
# two conditions (effect = difference of the two estimates), an F-test for
# more than two, or a one-sample t-test when only one condition is present.
# The statistic shown (b/t/p) is chosen by input$viewer_3d_type.
# NOTE(review): relies on module-level globals (subject, baseline_window,
# preload_info, input, rave_heat_map_colors) and helpers get_t/get_f defined
# elsewhere in the package.
power_3d_fun = function(brain){
  showNotification(p('Generating 3d viewer...'))
  # brain = rave::rave_brain2();
  brain$load_surfaces(subject = subject, surfaces = c('pial', 'white', 'smoothwm'))
  # Cache the per-trial power summary keyed on baseline window + preload state.
  dat = rave::cache(key = list(
    list(baseline_window, preload_info)
  ), val = get_summary())
  # for each electrode, we want to test the different conditions
  .FUN <- if(length(levels(dat$condition)) > 1) {
    if (length(levels(dat$condition)) == 2) {
      function(x) {
        res = get_t(power ~ condition, data=x)
        res = c(res[1] - res[2], res[3], res[4])
        res %>% set_names(c('b', 't', 'p'))
      }
    } else {
      function(x) {
        get_f(power ~ condition, data=x)
      }
    }
  } else {
    function(x) {
      get_t(x$power) %>% set_names(c('b', 't', 'p'))
    }
  }
  # Compute the selected statistic per electrode and push it into the brain model.
  values = sapply(unique(dat$elec), function(e){
    sub = dat[dat$elec == e, ]
    re = .FUN(sub)
    v = re[input$viewer_3d_type]
    brain$set_electrode_value(subject, e, v)
    return(v)
  })
  # Symmetric color scale centered at zero, spanning the largest |value|.
  brain$view(value_range = c(-1,1) * max(abs(values)),
             color_ramp = rave_heat_map_colors)
}
# Export functions
get_summary <- function() {
# here we just want an estimate of the power at each trial for each electrode
# get the labels for each trial
..g_index <- 1
GROUPS = lapply(GROUPS, function(g){
g$Trial_num = epoch_data$Trial[epoch_data$Condition %in% unlist(g$group_conditions)]
if(g$group_name == '') {
g$group_name <- LETTERS[..g_index]
..g_index <<- ..g_index + 1
}
return(g)
})
rm(..g_index)
tnum_by_condition <- sapply(GROUPS, function(g) {
list(g$Trial_num)
}) %>% set_names(sapply(GROUPS, '[[', 'group_name'))
all_trials <- unlist(tnum_by_condition)
# .bl_power <- cache(
# key = list(subject$id, preload_info$electrodes, baseline_window, preload_info),
# val = baseline(power, baseline_window[1], baseline_window[2], hybrid = FALSE, mem_optimize = FALSE)
# )
.bl_power <- baseline(power, baseline_window[1], baseline_window[2], hybrid = FALSE, mem_optimize = FALSE)
# subset out the trials, frequencies, and time rane
.power <- .bl_power$subset(Frequency = Frequency %within% FREQUENCY,
Time = Time %within% analysis_window,
Trial = Trial %in% all_trials, data_only = FALSE)
stimulus <- epoch_data$Condition[as.numeric(.power$dimnames$Trial)]
condition <- .power$dimnames$Trial %>% as.numeric %>% sapply(function(tnum) {
#ensure only one group is ever selected? or we could throw an error on length > 1
sapply(tnum_by_condition, `%in%`, x=tnum) %>% which %>% extract(1)
}) %>% names
# rutabaga over Freq and Time
# by_elec <- rutabaga::collapse(.power$data, keep=c(1,4)) / prod(.power$dim[2:3])
by_elec <- .power$collapse(keep = c(1,4), method = 'mean')
data.frame('subject_id' = subject$id,
'elec' = rep(preload_info$electrodes, each=length(condition)),
'trial' = rep(seq_along(condition), times=length(preload_info$electrodes)),
'condition' = rep(condition, length(preload_info$electrodes)),
'power' = c(by_elec)
)
}
# Save the per-trial power summary (see get_summary) as an RDS file.
# conn: output path/connection; when NA the file is written to dir/<lbl>.RDS.
# Creates the output directory when needed and invisibly returns the data so
# the call composes cleanly in pipelines.
export_stats = function(conn=NA, lbl='stat_out', dir, ...){
  out_dir <- dir #module_tools$get_subject_dirs()$module_data_dir %&% '/condition_explorer/'
  if(!dir.exists(out_dir)) {
    dir.create(out_dir, recursive = TRUE)
  }
  if(is.na(conn)) {
    fout <- out_dir %&% lbl %&% '.RDS'
  } else {
    fout <- conn #out_dir %&% conn
  }
  # run through all the active electrodes and get the data
  # out_data <- rave::lapply_async(electrodes, process_for_stats)
  out_data <- get_summary()
  saveRDS(out_data, file = fout)
  invisible(out_data)
}
# UI fragment: a download link that triggers the btn_graph_download handler below.
graph_export = function(){
  tagList(
    # actionLink(ns('btn_graph_export'), 'Export Graphs'),
    downloadLink(ns('btn_graph_download'), 'Download Graphs')
  )
}
# Download handler: renders each user-selected plot type to a timestamped PDF
# in a temp directory, then zips them all into the download.
# NOTE(review): plots_to_export and export_graphs are module-level names
# defined elsewhere; %&% is a string-concatenation operator from the package.
output$btn_graph_download <- downloadHandler(
  filename = function(...) {
    paste0('power_explorer_export',
           format(Sys.time(), "%b_%d_%Y_%H_%M_%S"), '.zip')
  },
  content = function(conn){
    tmp_dir = tempdir()
    # map the human names to the function names
    function_map <- list('Spectrogram' = 'heat_map_plot',
                         'By Trial Power' = 'by_trial_heat_map',
                         'Over Time Plot' = 'over_time_plot',
                         'Windowed Average' = 'windowed_comparison_plot')
    to_export <- function_map[plots_to_export]
    prefix <- sprintf('%s_%s_%s_', subject$subject_code, subject$project_name, format(Sys.time(), "%b_%d_%Y_%H_%M_%S"))
    fnames <- function_map[plots_to_export]
    tmp_files <- prefix %&% str_replace_all(names(fnames), ' ', '_') %&% '.pdf'
    # One export_graphs() call per (file, plot-function) pair.
    mapply(export_graphs, file.path(tmp_dir, tmp_files), fnames)
    # Zip from inside the temp dir so archive entries have flat paths;
    # on.exit restores the working directory afterwards.
    wd = getwd()
    on.exit({setwd(wd)})
    setwd(tmp_dir)
    zip(conn, files = tmp_files, flags='-r2X')
  }
)
# Render one plot type to a multi-page PDF (one page per electrode).
# conn: PDF output path. which_plot: which of the module's plot functions to
# call (validated by match.arg). Re-runs the power_explorer module with the
# current UI inputs for each electrode and invokes the requested plot function
# on the result. Progress is reported via a RAVE progress bar.
export_graphs <- function(conn=NA,
                          which_plot=c('heat_map_plot','by_trial_heat_map','over_time_plot', 'windowed_comparison_plot'), ...) {
  which_plot <- match.arg(which_plot)
  # Snapshot the current reactive inputs as a plain list.
  args = isolate(reactiveValuesToList(input))
  electrodes_loaded = preload_info$electrodes
  # check to see if we should loop over all electrodes or just the current electrode
  if(export_what == 'Current Selection') {
    electrodes_loaded <- ELECTRODE
  }
  progress = rave::progress('Rendering graphs for: ' %&% str_replace_all(which_plot, '_', ' '),
                            max = length(electrodes_loaded) + 1)
  on.exit({progress$close()}, add=TRUE)
  progress$inc(message = 'Initializing')
  .export_graph = function(){
    module = rave::get_module('ravebuiltins', 'power_explorer', local = TRUE)
    # Keep only the inputs the module function actually accepts.
    formal_names = names(formals(module))
    args = args[formal_names]
    names(args) = formal_names
    # having issues here with the size of the plots being too large for the font sizes
    # we can't (easily) change the cex being used by the plots. So maybe we can
    # just change the size of the output PDF. people can the resize
    # based on the number of groups we should scale the plots
    # Count groups with at least two conditions; used to scale the PDF size.
    ngroups = 0
    for(ii in seq_along(args$GROUPS)) {
      if(length(args$GROUPS[[ii]]$group_conditions)>1) {
        ngroups = ngroups+1
      }
    }
    w_scale = h_scale = 1
    if(which_plot == 'windowed_comparison_plot') {
      w_scale = ngroups / 2.25
    }
    if(which_plot %in% c('by_trial_heat_map', 'heat_map_plot')) {
      w_scale = ngroups*1.25
      h_scale = ngroups*1.05
    }
    .w <- round(9.75*w_scale,1)
    .h <- round(6.03*h_scale,1)
    pdf(conn, width = .w, height = .h, useDingbats = FALSE)
    # Close the PDF device when this inner function exits.
    on.exit(dev.off())
    for(e in electrodes_loaded){
      progress$inc(message = sprintf('Electrode %s', e))
      args[['ELECTRODE']] = e
      result = do.call(module, args)
      # The module result exposes its plot functions by name; calling one
      # draws the current page into the open PDF device.
      result[[which_plot]]()
    }
  }
  .export_graph()
  # showNotification(p('Export graph finished.'))
  #TODO check the variable export_per_electrode to see if we need to loop over electrodes and export
  # or if we want use just the current_electrodes and combine them
  #TODO need to scale all the fonts etc so things aren't too large for export
}
|
beed887fb8de8cebd5f304ec610bc9b2d45064b3
|
89c0336313978ced471600c3671e8c937d2fd347
|
/data/seqload.R
|
f0abc9d2ce8abad6e17d5cd5d77952f1434550d6
|
[] |
no_license
|
bax24/Deep_Learning
|
dd4b36d8713ed8f5f9296bd0a19ef648f167def2
|
98a56a70daf7ad929cf8d4e79a4c4586d5bfce54
|
refs/heads/main
| 2023-04-21T01:54:17.889020
| 2021-05-17T19:56:20
| 2021-05-17T19:56:20
| 366,088,600
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,593
|
r
|
seqload.R
|
# Loading the package and ENSEMBL database
# Retrieve peptide sequences for (a) annotated transcription factors and
# (b) a random sample of protein-coding genes, via Ensembl BioMart.
library(biomaRt)
mart<-useEnsembl(biomart="ensembl",dataset="hsapiens_gene_ensembl")
# Importing TF data
TF_IDs<-read.table("~/TFs_Ensembl_v_1.01.txt", quote="\"", comment.char="")
# Drop the first two entries (presumably header artifacts — TODO confirm).
TF_IDs<-as.vector(unlist(TF_IDs))[-c(1,2)]
TF_UNIPROT<-getBM(attributes=c('ensembl_gene_id','uniprotswissprot'),filters ='ensembl_gene_id',values = TF_IDs,mart = mart,useCache = FALSE)
# NOTE(review): [-3] drops the 3rd unique accession by position (presumably an
# empty string from unmapped genes) — fragile; verify against the actual data.
swiss_prot_ids<-unique(TF_UNIPROT$uniprotswissprot)[-3]
# Retrieving the sequences of TFs
all_seqs_TFs<-getSequence(id=swiss_prot_ids, type="uniprotswissprot", seqType="peptide", mart = mart)
# Importing random genes data
`%notin%` <- Negate(`%in%`)
DE<-read.csv("~/DE_results_TE_Hela_siEWS_vs_control.csv")
DE<-DE[DE$type=="protein_coding",]
# NOTE(review): sample() is unseeded, so the random gene set is not reproducible.
random_ids<-sample(DE$X,2000,replace=FALSE)
random_ids<-random_ids[random_ids %notin% TF_IDs]
RANDOM_UNIPROT<-getBM(attributes=c('ensembl_gene_id','uniprotswissprot'),filters ='ensembl_gene_id',values = random_ids,mart = mart,useCache = FALSE)
swiss_prot_random_ids<-unique(RANDOM_UNIPROT$uniprotswissprot)[-3]
# Retrieving random genes sequences
all_seqs_random<-getSequence(id=swiss_prot_random_ids, type="uniprotswissprot", seqType="peptide", mart = mart)
# Remove genes with 2 or more sequences
# Keep only the first row for each uniprotswissprot accession in v.
#
# v: a data.frame with (at least) a `uniprotswissprot` column.
# Returns v with every repeat occurrence of an accession removed, preserving
# the original row order.
#
# Bug fixed: the previous implementation built a vector of row positions to
# drop starting from `pos_dup <- 0` and then took `pos_dup[-1]`; when NO
# accession was duplicated this left `numeric(0)`, and `v[-numeric(0), ]`
# selects ZERO rows — i.e. the function returned an empty data frame instead
# of v unchanged. It also relied on as.numeric(rownames(v)) matching row
# positions, which breaks for non-default rownames. duplicated() is positional
# and handles both cases correctly.
remove_dup<-function(v)
{
  v[!duplicated(v$uniprotswissprot), ]
}
# De-duplicate to one peptide sequence per accession, then write FASTA files.
unique_seq_TFs<-remove_dup(all_seqs_TFs)
unique_seq_random<-remove_dup(all_seqs_random)
# Saving the data in FASTA files
library(seqinr)
write.fasta(as.list(unique_seq_TFs$peptide),unique_seq_TFs$uniprotswissprot,"TF_seqs.fasta")
write.fasta(as.list(unique_seq_random$peptide),unique_seq_random$uniprotswissprot,"random_seqs.fasta")
# Retrieving families data
library(readxl)
DatabaseExtract <- read_excel("C:/Users/loico/Downloads/DatabaseExtract_v_1.01 (1).xlsx")
DatabaseExtract<-as.matrix(DatabaseExtract)
# Keep rows flagged "Yes" in column 5 (presumably "is TF" — TODO confirm),
# retaining Ensembl id, HGNC symbol and DNA-binding domain columns.
DB2<-DatabaseExtract[which(DatabaseExtract[,5]=="Yes"),2:4]
colnames(DB2)<-c("ensembl_gene_id","HGNC","DBD")
# Merging with TF IDs
m_db<-merge(TF_UNIPROT,DB2,by="ensembl_gene_id")
# Keep (uniprot accession, DBD family), dropping rows with empty accessions.
m_db_uniprot<-m_db[,c(2,4)]
m_db_uniprot<-m_db_uniprot[which(m_db_uniprot[,1]!=""),]
# Saving data
write.table(m_db_uniprot,"families.txt",sep="\t",quote=FALSE,row.names = FALSE)
|
431b31a87df06920671e1b7cc9c58fc6b81b6210
|
bc31b76e986ec463ac938a29ada331d1c95b8c3e
|
/Kelsey/influenza_data/ncdetect_influenza_data_cleaning.R
|
e4c87fb17af5e72c97a6ced36acad3284f7d665f
|
[] |
no_license
|
kelseysumner/nc_detect_spatial_clustering
|
0e4d8f650a8c8227c065ad069d8365c3b96483d5
|
9be6f56e21707524db088affef3f7d31da94e314
|
refs/heads/master
| 2020-05-21T01:28:59.451952
| 2019-11-15T21:29:47
| 2019-11-15T21:29:47
| 185,856,395
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,866
|
r
|
ncdetect_influenza_data_cleaning.R
|
# ----------------------------------------- #
# NC DETECT Spatial Project #
# Zip Code Influenza Data #
# July 18, 2019 #
# K. Sumner #
# ----------------------------------------- #
# what this is doing:
# reading in the zip code level influenza data, merging with census data at the tract level
#### ------- load the libraries ---------- ####
# load in tidyverse and geospatial libraries (sf)
library(tidyverse)
library(GISTools)
library(rgdal)
library(foreign)
library(zipcode)
library(readxl)
library(sp)
library(lubridate)
#### -------- user setup ----------------- ####
# Pick the working directory from the logged-in user name (per-analyst paths).
if (str_detect(tolower(Sys.info()["user"]), "kelsey")) {
  wd = "C:\\Users\\kelseyms\\OneDrive - University of North Carolina at Chapel Hill\\nc_detect_one_drive\\Influenza Data"
} else if (str_detect(tolower(Sys.info()["user"]), "joyce")) {
  wd = "C:\\Users\\joyceyan\\University of North Carolina at Chapel Hill\\Sumner, Kelsey Marie - nc_detect_one_drive\\Influenza Data"
} else {
  print("Specify working directory")
}
#### -------- load in the data sets -------- ####
# set working directory
setwd(wd)
# read in the data sets
# first the cc and triage notes
# Drop the literal "NULL" ZIP rows, parse visit dates, and left-pad ZIPs to 5
# digits (leading zeros are lost when ZIPs are read as numbers).
data_ccandtriagenotes = read_csv("./ccandtriagenotes/ILIbyZIP_ccandtriagenotes.csv") %>%
  filter(ZIP != "NULL") %>%
  mutate(visitdate = mdy(visitdate), zip = str_pad(as.character(ZIP), width = 5, side = "left", pad = "0")) %>%
  dplyr::select(visitdate, Count = syndromecount, zip)
# then the cc only one
data_cconly = read_csv("./cc_only/ILIbyZIP_cconly.csv") %>%
  filter(ZIP != "NULL") %>%
  mutate(visitdate = mdy(visitdate), zip = str_pad(as.character(ZIP), width = 5, side = "left", pad = "0")) %>%
  dplyr::select(visitdate, Count = syndromecount, zip)
# look at quick summaries of both data sets
table(nchar(data_ccandtriagenotes$zip))
table(nchar(data_cconly$zip))
table(data_ccandtriagenotes$zip)
table(data_cconly$zip)
#### ----- add zip codes latitude and longitude coordinates for satscan ------- ####
# cc and triage notes data set
#add lat and long based on zip code matches - matches using zipcode package, remove nonmatches
data("zipcode")
# visitweek encodes the epi year and week as a single integer, e.g. 201705.
clean_data_ccandtriagenotes = data_ccandtriagenotes %>%
  left_join(zipcode, by = "zip") %>%
  filter_all(all_vars(!is.na(.))) %>%
  mutate(visitweek = epiyear(visitdate)*100 + epiweek(visitdate))
#write to csv for import into SaTScan as case and coordinates files
clean_data_ccandtriagenotes %>%
  write_csv("./ccandtriagenotes/clean_ILIbyZIP_ccandtriagenotes.csv")
# cc only data set
#add lat and long based on zip code matches - matches using zipcode package, remove nonmatches
data("zipcode")
clean_data_cconly = data_cconly %>%
  left_join(zipcode, by = "zip") %>%
  filter_all(all_vars(!is.na(.))) %>%
  mutate(visitweek = epiyear(visitdate)*100 + epiweek(visitdate))
#write to csv for import into SaTScan as case and coordinates files
clean_data_cconly %>%
  write_csv("./cc_only/clean_ILIbyZIP_cconly.csv")
#############################################################
#### ----- aggregate weekly counts (Sun-Sat)------ ####
# Retired weekly-aggregation variant, kept for reference.
# wk_data_ccandtriagenotes= data_ccandtriagenotes %>%
#   mutate(visitdate = mdy(visitdate)) %>%
#   mutate(visitweek = epiweek(visitdate)) %>%
#   group_by(zip, visitweek) %>%
#   summarize(Count = sum(Count)) %>%
#   left_join(zipcode, by = "zip") %>%
#   filter_all(all_vars(!is.na(.)))
#
# write_csv(wk_data_ccandtriagenotes, "./ccandtriagenotes/clean_weekly_ILIbyZIP_ccandtriagenotes.csv")
#
# wk_data_cconly = data_cconly %>%
#   mutate(visitweek = epiweek(mdy(visitdate))) %>%
#   group_by(zip, visitweek) %>%
#   summarize(Count = sum(Count)) %>%
#   left_join(zipcode, by = "zip") %>%
#   filter_all(all_vars(!is.na(.)))
#
# write_csv(wk_data_cconly, "./cc_only/clean_weekly_ILIbyZIP_cconly.csv")
|
2e133d84526a5b084e3384a10220fee43f6a68a5
|
a9564ad0510948b45d2e9e004978e1fdb363d0de
|
/global.R
|
9fc3f120b598f65ce595f0d7d49557c893e22ca5
|
[] |
no_license
|
fishsciences/juvenile-salmonid-habitat-calculator
|
fd0c67ccdacc2bd0c1e7a94d40822df4e4003a8c
|
4efa961d3138390117c3a89a4fdccac3b0e15b79
|
refs/heads/master
| 2020-06-01T06:57:31.698663
| 2019-06-10T20:22:36
| 2019-06-10T20:22:36
| 190,688,723
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 543
|
r
|
global.R
|
library(shiny)
library(shinythemes)
library(shinysense)
library(shinydashboard)
library(shinyWidgets)
library(dplyr)
library(ggplot2)
library(DT)
# Placeholder series over the supported fork-length range in mm; Value is
# presumably filled in later by the shiny app -- confirm against server code.
empty_data <- tibble(ForkLength_mm = 25:105, Value = NA)
# Smallest supported fork length (mm)
cutoff <- min(empty_data$ForkLength_mm)
calc_territory_size <- function(fl) {
  # Territory size from fork length, after Grant and Kramer (1990).
  # `fl` is fork length in mm; the allometric relationship is defined on cm,
  # and its output in m^2 is converted to hectares before returning.
  fl_cm <- fl / 10
  area_m2 <- 10^(2.61 * log10(fl_cm) - 2.83)
  area_m2 * 1e-4
}
round4dec <- function(x) {
  # Round `x` to 4 decimal places: scale up, round to integer, scale back.
  scaled <- round(x * 1e4)
  scaled / 1e4
}
|
29c92436122013fbe549532dc6859bda1757f0bd
|
1c1e3a18812b6c627d420e5ac898f86b1ec44bb4
|
/R/as_classification.R
|
a0d30ff8d928c2b27bb60672d80e7cb040d1da13
|
[] |
no_license
|
uRosConf/categorical
|
3e6ae2697e4d9733ae7a1b5baf7376bca6daec8c
|
ccff57be8024dc52d0e294f6c107311006e161b9
|
refs/heads/master
| 2020-03-28T10:37:34.337619
| 2018-09-14T06:01:16
| 2018-09-14T06:01:16
| 148,127,818
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,150
|
r
|
as_classification.R
|
#' Convert a data.frame to a classification object
#'
#' @param x the data.frame to convert. See details for the format of the
#'   data.frame.
#' @param compute_parent if \code{TRUE}, derive the parent column from the
#'   ids (a child's parent id is taken to be the level-above id that prefixes
#'   the child's id).
#' @param order selection (and ordering) of columns from \code{x}; the first
#'   four (three when \code{compute_parent = TRUE}) are interpreted as id,
#'   label, level and parent respectively.
#'
#' @details
#' The data.frame should contain the following columns:
#' \describe{
#'   \item{id}{the id of the category (cast to character)}
#'   \item{label}{the label of the category (cast to character)}
#'   \item{level}{the level of the classification in which the category belongs
#'     (should be integer)}
#'   \item{parent}{the id of the parent category (cast to character). Can be
#'     omitted when \code{compute_parent = FALSE}. Should contain missing values
#'     for categories in level 1 of the classification.}
#' }
#'
#' @return A list of data.frames, one per level, with class
#'   \code{"classification"}.
#'
#' @export
as_classification <- function(x, compute_parent = FALSE, order = 1:4) {
  # Put input data in the expected column order: id, label, level[, parent]
  if (compute_parent) order <- order[1:3]
  meta <- x[order]
  if (compute_parent) meta[[4]] <- character(nrow(meta))
  # Rename columns
  names(meta) <- c("id", "label", "level", "parent")
  # Coerce id, label and parent to character; level must already be integer
  for (col in c(1, 2, 4)) meta[[col]] <- as.character(meta[[col]])
  stopifnot(is.integer(meta$level))
  if (!all(unique(meta$level) == seq_len(max(meta$level))))
    stop("The levels should be numbered from 1 sequentially up.")
  # Check duplicated ids. Report the first duplicate deterministically:
  # sample(x, 1) on a length-1 character vector would sample from 1:x.
  if (any(duplicated(meta$id))) {
    dup <- meta$id[duplicated(meta$id)]
    stop(paste0("Duplicated id in dataframe. Example:", dup[1]))
  }
  # Compute the parent column BEFORE the completeness check below; running
  # the check first (as the original did) rejected every multi-level input
  # with compute_parent = TRUE because parent was still empty at that point.
  if (compute_parent) {
    for (i in seq_len(max(meta$level))) {
      # Ids within a level are assumed to share a common character width, so
      # a child's parent is the level-i id that prefixes the child's id.
      for (z in meta[meta$level == i, "id"]) {
        meta$parent[meta[, "level"] == i + 1 &
          substr(meta$id, 1, unique(nchar(meta[meta$level == i, "id"]))) == z] <- z
      }
    }
  }
  # Check the tree is complete: every non-leaf level must appear in the
  # parent column. seq_len() leaves this loop empty for single-level
  # classifications, where the original seq(1, max - 1) iterated over
  # c(1, 0) and wrongly rejected the input.
  for (i in seq_len(max(meta$level) - 1L)) {
    vals <- meta$id[meta$level == i]
    if (!any(vals %in% meta$parent)) {
      stop(paste("Level", i, "not in parent column"))
    }
  }
  # One data.frame per level, wrapped in the "classification" class
  meta <- split(meta, meta$level)
  structure(meta, class = "classification")
}
|
b7ff4468d5fafd2c43252b17ec5a420b7bf45095
|
f0489c47853fc78a49bfbc28ca3cf39798b17431
|
/man/NMFfitXn-class.Rd
|
699cf80db71571c9810dcf8b37836b3f7dcc9c0f
|
[] |
no_license
|
pooranis/NMF
|
a7de482922ea433a4d4037d817886ac39032018e
|
c9db15c9f54df320635066779ad1fb466bf73217
|
refs/heads/master
| 2021-01-17T17:11:00.727502
| 2019-06-26T07:00:09
| 2019-06-26T07:00:09
| 53,220,016
| 0
| 0
| null | 2016-03-05T19:46:24
| 2016-03-05T19:46:24
| null |
UTF-8
|
R
| false
| true
| 1,829
|
rd
|
NMFfitXn-class.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/NMFSet-class.R
\docType{class}
\name{NMFfitXn-class}
\alias{NMFfitXn-class}
\title{Structure for Storing All Fits from Multiple NMF Runs}
\description{
This class is used to return the result from a multiple run of a single NMF
algorithm performed with function \code{nmf} with option
\code{keep.all=TRUE} (cf. \code{\link{nmf}}).
}
\details{
It extends both classes \code{\linkS4class{NMFfitX}} and \code{list}, and
stores the result of each run (i.e. a \code{NMFfit} object) in its
\code{list} structure.
IMPORTANT NOTE: This class is designed to be \strong{read-only}, even though
all the \code{list}-methods can be used on its instances. Adding or removing
elements would most probably lead to incorrect results in subsequent calls.
Capability for concatenating and merging NMF results is for the moment only
used internally, and should be included and supported in the next release of
the package.
}
\section{Slots}{
\describe{
\item{\code{.Data}}{standard slot that contains the S3 \code{list} object data.
See R documentation on S3/S4 classes for more details (e.g., \code{\link{setOldClass}}).}
}}
\examples{
# generate a synthetic dataset with known classes
n <- 20; counts <- c(5, 2, 3);
V <- syntheticNMF(n, counts)
# get the class factor
groups <- V$pData$Group
# perform multiple runs of one algorithm, keeping all the fits
res <- nmf(V, 3, nrun=3, .options='k') # .options=list(keep.all=TRUE) also works
res
summary(res)
# get more info
summary(res, target=V, class=groups)
# compute/show computational times
runtime.all(res)
seqtime(res)
# plot the consensus matrix, computed on the fly
\dontrun{ consensusmap(res, annCol=groups) }
}
\seealso{
Other multipleNMF: \code{\link{NMFfitX-class}},
\code{\link{NMFfitX1-class}}
}
|
a017e8fa9dbfb7ecbc14f211ff5bf48cd2851cc8
|
cd27523fe71a6a3e5a48184a7a3efa3b492804f1
|
/MainLinearModel/MainLinearModel.r
|
dce697f6a7b1ac46d3cefa6916ec1dc523eb611b
|
[] |
no_license
|
SaraMWillis/OverlappingGenesProject
|
225602140bda2124dfa19be40a0ea7f96fbb2e20
|
5b3ba98a2414d929a146c0307a0335d844f71e8b
|
refs/heads/master
| 2023-02-15T00:50:28.062313
| 2021-01-08T17:06:19
| 2021-01-08T17:06:19
| 75,343,670
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,606
|
r
|
MainLinearModel.r
|
# Main Linear Model
# This is the linear model that was used to calculate the relative effect sizes of the various hypotheses
# The data this linear model was run on is included in the directory. To run it, specify the path to the
# directory on your computer to read in the dataframe.
library(MASS)
library(nlme)
library(lme4)
require(lmerTest)
library(optimx)
# The file that is needed to run this is located in this directory as FileForMainModel.csv
# The file that is needed to run this is located in this directory as FileForMainModel.csv
df <- read.csv(file = '', header = T)  # TODO: set `file` to the path of FileForMainModel.csv before running
head(df)
# below is used to determine the optimal value of lambda for a box cox transformation
bc <- boxcox(df$ISD~1, lambda = seq(.1,.7,0.01))
bc$x[which.max(bc$y)]  # lambda that maximizes the profile log-likelihood
# lambda has been rounded to 0.4 for our analyses
lambda = 0.4
# We define the box cox transformation (note: we only included the power and did not include the
# scalars for the transformation. This isn't important since they are only a scaling factor.)
bc.transform <- function(x, L) {
  # Box-Cox power transform, power term only. The usual additive/scaling
  # constants are deliberately omitted (they are only a linear rescaling and
  # do not affect the model fit).
  x^L
}
# The transformed ISD values are then saved as a new column in the dataframe
ISD.transform <- bc.transform(df$ISD,lambda)
df$ISD.transform<-ISD.transform
# A mixed linear model is then created using two random effects and two fixed effects.
# Designation (artificially-frameshifted non-overlapping controls, ancestral genes, novel genes) & Frame (+1 vs. +2) are the fixed effects
# Species and Homology group are the random effects (random intercepts only)
data.two.random.effects <- lmer(df$ISD.transform ~ df$Designation + df$Frame + (1|df$Species)+(1|df$HomologyGroup))
# The output from the linear model is then found using Summary
summary(data.two.random.effects)
|
c0dd61ed890396451f6d728b77d248a3a4593604
|
39c56797684d2ee5278ea31aa74c9a4c5ed00c66
|
/plot4.R
|
393ac71dfe9c59fc6aa3d29d89ba0cac884cda75
|
[] |
no_license
|
wtf13/Exploratory-Data-Analysis-Course-Project-1
|
5f265f4fb271e786a8213302d32ef9e96d5e4181
|
ce87057ad0692c950ad12fdc369c546cf07cafc3
|
refs/heads/master
| 2021-01-16T18:34:57.614661
| 2017-08-12T07:59:05
| 2017-08-12T07:59:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,239
|
r
|
plot4.R
|
# plot4.R -- 2x2 panel of household power-consumption plots for
# 2007-02-01 and 2007-02-02, written to plot4.png.
library(data.table)
library(lubridate)

# Read the full dataset, then keep only the two target dates.
# Fix: the original subset used dat$Dat, which only worked via partial name
# matching on data.frames; the column is named "Date".
dat <- fread("./household_power_consumption.txt")
dat <- dat[dat$Date %in% c("1/2/2007", "2/2/2007"), ]

# Combine Date and Time into a single POSIXct timestamp. Computed once here;
# the original recomputed the same column a second time after opening the
# graphics device.
dat$DateTime <- dmy_hms(paste(dat$Date, dat$Time))

# Numeric coercions (columns arrive as character -- presumably because of
# "?" missing-value markers in the raw file; verify against the source data)
dat$Sub_metering_1 <- as.numeric(dat$Sub_metering_1)
dat$Sub_metering_2 <- as.numeric(dat$Sub_metering_2)
dat$Sub_metering_3 <- as.numeric(dat$Sub_metering_3)
dat$Global_active_power <- as.numeric(dat$Global_active_power)
dat$Voltage <- as.numeric(dat$Voltage)
dat$Global_reactive_power <- as.numeric(dat$Global_reactive_power)

png("plot4.png", width = 480, height = 480)
par(mfrow = c(2, 2))

# Panel 1: global active power over time
plot(dat$DateTime, dat$Global_active_power, type = "l", xlab = "",
     ylab = "Global Active Power")
# Panel 2: voltage over time
plot(dat$DateTime, dat$Voltage, type = "l", xlab = "datetime",
     ylab = "Voltage")
# Panel 3: the three sub-metering series with a legend
plot(dat$DateTime, dat$Sub_metering_1, type = "l",
     ylab = "Energy sub metering", xlab = "")
lines(dat$DateTime, dat$Sub_metering_2, col = "red")
lines(dat$DateTime, dat$Sub_metering_3, col = "blue")
legend("topright", pch = "-", col = c("black", "red", "blue"),
       legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       bty = "n", cex = 0.75)
# Panel 4: global reactive power over time
plot(dat$DateTime, dat$Global_reactive_power, type = "l", xlab = "datetime",
     ylab = "Global_reactive_power")
dev.off()
|
36fc12d05f07765a2f3b27d5f9e8b08ca2c2f6c0
|
17d26d36ace79f115b603368d9f124eebbe52511
|
/charts.R
|
b1a3082e50a4407c7a7072a4be8b067aca4e27ed
|
[] |
no_license
|
philipbarrett/apxSignal
|
f5c7ffc2cb081a8f0e3dc9c3a2ffb717acf9077a
|
9f3ba25bd15e1b891c37e916255e8358e847363b
|
refs/heads/master
| 2021-05-31T23:39:02.127450
| 2016-03-18T04:41:19
| 2016-03-18T04:41:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 14,153
|
r
|
charts.R
|
####################################################################
## charts.R
## Script to create charts for the nonlinear filter problem
## Philip Barrett, Chicago 09mar2016
####################################################################
rm(list=ls())   # NOTE(review): clearing the workspace inside a script is discouraged
Rcpp::sourceCpp('momErr.cpp')   # presumably defines mu_sig2_update() used below -- confirm
library(filters)
library(scales)
library(MASS)
library(nleqslv)
## The plot of mu and sigma updated
# Grid of prior means, AR(1) persistence rho, and an innovation sd chosen so
# the stationary variance of the state is 1 (sig.eps^2 / (1 - rho^2) = 1).
XX <- seq( -4, 4, length.out = 401 )
rho <- .9
sig.eps <- sqrt( 1 - rho ^ 2 )
# Posterior (mu', sigma2') after observing y = 1 (ff) or y = 0 (gg), starting
# from the stationary prior at each grid point.
ff <- sapply( XX, mu_sig2_update, mu_sig2=c(0,sig.eps/sqrt(1-rho^2)), sig_eps=sig.eps, rho=rho, y=1 )
gg <- sapply( XX, mu_sig2_update, mu_sig2=c(0,sig.eps/sqrt(1-rho^2)), sig_eps=sig.eps, rho=rho, y=0 )
# Chart: updated mean mu' against the grid, for both signal values
pdf('/home/philip/Dropbox//2016/Research/thesis/charts/mu_prime.pdf')
plot( XX, ff[1,], ylim=c(-4,4), type='l', lwd=2, col='blue', xlab=expression(psi),
      ylab=expression(paste(mu, "'" ) ) )
lines( XX, gg[1,], ylim=c(-4,4), type='l', lwd=2, col='red' )
abline( h=0, lwd=.5 )
legend('topleft', c('y=1', 'y=0'), lwd=2, col=c('blue', 'red'), bty='n' )
dev.off()
# Chart: updated standard deviation against the grid
pdf('/home/philip/Dropbox//2016/Research/thesis/charts/sigma_prime.pdf')
plot( XX, sqrt( ff[2,] ), type='l', lwd=2, col='blue', xlab=expression(psi),
      ylab=expression(paste(sigma^2, "'" ) ) )
lines( XX, sqrt( gg[2,] ), type='l', lwd=2, col='red' )
legend('right', c('y=1', 'y=0'), lwd=2, col=c('blue', 'red'), bty='n' )
dev.off()
### Create the simulations for comparing the threshold and other filters ###
set.seed(654)
theta.hat <- 0
# The UKF parameters #
Q <- sig.eps^2   # state innovation variance
R <- .0 ^ 2      # measurement noise variance (zero: signal is deterministic given x)
f <- function(x) rho * x                          # state transition
g <- function(x) if( x > theta.hat ) 1 else 0     # threshold observation rule
# Create the simulation #
x.0 <- rnorm( 1, 0, sig.eps)
K <- 200
# The initial point and the length of the simulation
v.x <- c( ar1_sim( K, rho, sig.eps ) )
v.y <- 1 * ( v.x > theta.hat )
# Retained but disabled: a hand-rolled simulation loop replaced by ar1_sim()
# v.y <- v.x <- rep(0, K)
# v.x[1] <- x.0
# v.y[1] <- g( v.x[1] ) + rnorm( 1, 0, R )
# for( i in 2:K ){
# v.x[i] <- f( v.x[i-1] ) + rnorm( 1, 0, sqrt( Q ) )
# v.y[i] <- g( v.x[i] ) + rnorm( 1, 0, sqrt( R ) )
# } # Create the simulated state and signal
kappa <- 10
mu.sig2.0 <- c( 0, 1 )   # initial (mean, variance) for all filters
thresh.ukf <- ukf.compute( mu.sig2.0[1], mu.sig2.0[2] , v.y, f, g, Q, R, 1, alpha=1, kappa=kappa, quad = F )
# thresh.ukf.mc <- ukf.compute( mu.sig2.0[1], mu.sig2.0[2] , v.y, f, g, Q, R, 1, alpha=1, kappa=kappa, quad = F, n.mc=10000 )
thresh.ukf.quad <- ukf.compute( mu.sig2.0[1], mu.sig2.0[2] , v.y, f, g, Q, R, 1, alpha=1, kappa=kappa, quad = T )
# The UKF (using various integration rules)
thresh <- thresh_filter( mu.sig2.0, rep(0,K), sig.eps, rho, v.y )
thresh.gf <- gauss_filter( mu.sig2.0, rep(0,K), sig.eps, rho, v.y )
# The threshold filter
#### THIS CHART INCLUDED ####
# Chart: simulated state, threshold-filter mean with +/- 1 sd band, and signal
pdf('/home/philip/Dropbox//2016/Research/thesis/charts/dyn_thresh.pdf')
plot( c(1,K), range( c( v.x, thresh[,1] + sqrt(thresh[,2]),
                        thresh[,1] - sqrt(thresh[,2]) ) ), type='n', xlab='Period',
      ylab='x' )
points( 1:K, 1.02 * v.y - .01, pch=19, col=alpha('darkgreen', .5), cex=.5 )
lines( 1:K, thresh[-(K+1),1], col='blue', lwd=2 )
lines( 1:K, thresh[-(K+1),1] + sqrt(thresh[-(K+1),2]), col='blue', lty=2 )
lines( 1:K, thresh[-(K+1),1] - sqrt(thresh[-(K+1),2]), col='blue', lty=2 )
lines( 1:K, v.x, lwd=2 )
legend( 'topright', c( 'x', 'Threshold filter mean',
                       'Plus/minus one std dev', 'Signal' ),
        lwd=c(2,2,1,0), lty=c(1,1,2, NA), pch=c(NA,NA,NA,19), bty='n',
        col=c( 'black','blue', 'blue', alpha( 'darkgreen', .5) ))
abline( h=0, lwd=.5 )
dev.off()
# Fixed-point residual for the updating map: zero where (mu, sigma2) is a
# limit point of repeated updates under a constant signal y
mu.sig.bar.fun <- function( mu.sig.bar, y ){
  out <- mu.sig.bar - mu_sig2_update( mu.sig.bar, theta.hat, sig.eps, rho, y )
}
# Solve for the limit points under a constant y = 1 or y = 0 signal
mu.sig.bar.1 <- nleqslv( c( 1, .5 ), mu.sig.bar.fun, y=1 )
mu.sig.bar.0 <- nleqslv( c( 1, .5 ), mu.sig.bar.fun, y=0 )
# Chart: cross-section of visited (mean, variance) pairs and their limit points
pdf('/home/philip/Dropbox//2016/Research/thesis/charts/xsect_thresh.pdf')
plot( thresh[-(1:5),1], thresh[-(1:5),2], xlab=expression(mu), ylab=expression(sigma^2),
      pch=19, col='blue', cex=.5, xlim=c(-1,1), ylim=c(.2, .5) )
points( c( mu.sig.bar.1$x[1], mu.sig.bar.0$x[1] ),
        c( mu.sig.bar.1$x[2], mu.sig.bar.0$x[2] ), pch=19 )
legend( 'bottomright', c('Mean-variance pairs', 'Limit point'), pch=19,
        col=c('blue','black'), bty='n' )
dev.off()
# Chart: exact Gaussian filter vs threshold filter against the simulated state
pdf('/home/philip/Dropbox//2016/Research/thesis/charts/dyn_gf.pdf')
plot( c(1,K), range( c( v.x, thresh.gf[,1] + sqrt(thresh.gf[,2]),
                        thresh.gf[,1] - sqrt(thresh.gf[,2]) ) ), type='n', xlab='Period',
      ylab='x' )
lines( 1:K, thresh[-(K+1),1], col='blue', lwd=2 )
points( 1:K, 1.02 * v.y - .01, pch=19, col=alpha('darkgreen', .5), cex=.5 )
lines( 1:K, thresh.gf[-(K+1),1], col='red', lwd=2 )
lines( 1:K, thresh.gf[-(K+1),1] + sqrt(thresh.gf[-(K+1),2]), col='red', lty=2 )
lines( 1:K, thresh.gf[-(K+1),1] - sqrt(thresh.gf[-(K+1),2]), col='red', lty=2 )
lines( 1:K, v.x, lwd=2 )
legend( 'topright', c( 'x', 'Exact Gaussian filter mean',
                       'Plus/minus one std dev', 'Threshold filter mean', 'Signal' ),
        lwd=c(2,2,1,2,0), lty=c(1,1,2,1, NA), pch=c(NA,NA,NA,NA,19), bty='n',
        col=c( 'black','red', 'red', 'blue', alpha( 'darkgreen', .5) ))
abline( h=0, lwd=.5 )
dev.off()
# Chart: cross-section of the exact Gaussian filter's (mean, variance) pairs
pdf('/home/philip/Dropbox//2016/Research/thesis/charts/xsect_gf.pdf')
plot( thresh.gf[-(1:20),1], thresh.gf[-(1:20),2], xlab=expression(mu), ylab=expression(sigma^2),
      pch=19, col='red', cex=.5, xlim=c(-1,1), ylim=c(.2, .5) )
# points( c( mu.sig.bar.1$x[1], mu.sig.bar.0$x[1] ),
# c( mu.sig.bar.1$x[2], mu.sig.bar.0$x[2] ), pch=19 )
legend( 'bottomright', c('Mean-variance pairs', 'Limit point'), pch=19,
        col=c('red','black'), bty='n' )
dev.off()
# On-screen comparison of all three filters' predicted means
plot( c(1,K), range( c(thresh.ukf$m, v.x, thresh[,1]) ), type='n', xlab='Period',
      ylab='x' )
points( 1:K, 1.1 * sd(v.x) * ( 2*v.y-1 ), pch=19, col=alpha('darkgreen', .5), cex=.5 )
lines( 1:K, thresh.gf[-(K+1),1], col='red', lwd=2 )
lines( 1:K, thresh.ukf$m.pred[-(K+1)], col='red', lwd=1, lty=2 )
# lines( 1:K, thresh.ukf.mc$m.pred[-(K+1)], col='red', lwd=1, lty=3 )
# lines( 1:K, thresh.ukf.quad$m.pred[-(K+1)], col='red', lwd=1, lty=3 )
# First point is the period 0 predictor for period 1 => Last point predicts
# period K+1
lines( 1:K, thresh[-(K+1),1], col='blue', lwd=2 )
# Likewise
# lines( 1:K, thresh.ukf$m + sqrt( c( thresh.ukf$P.pred[-K] ) ), col='red', lwd=2, lty=2 )
# lines( 1:K, thresh.ukf$m - sqrt( c( thresh.ukf$P.pred[-K] ) ), col='red', lwd=2, lty=2 )
lines( 1:K, v.x, lwd=2 )
legend( 'bottomright', c( 'x', 'Threshold filter', 'Exact Gaussian Filter',
                          'Unscented Kalman Filter', 'Signal' ),
        lwd=c(2,2,2,1,1,0), lty=c(1,1,1,2,3, NA), pch=c(NA,NA,NA,NA,NA,19), bty='n',
        col=c( 'black','blue', 'red', 'red', 'red', alpha( 'darkgreen', .5) ))
abline( h=0, lwd=.5 )
# NOTE(review): both lines below plot the same series (thresh); the second
# was presumably meant to use thresh.gf -- confirm the intent.
plot( 1:K, sqrt(thresh[-(K+1),2]), type='l', lwd=2, col='red' )
lines( 1:K, sqrt(thresh[-(K+1),2]), type='l', lwd=2, col='blue' )
# Rolling root-mean-squared error of each filter's predicted mean
rmse <- sqrt( cumsum( ( v.x - thresh[-(K+1),1] ) ^ 2 ) / 1:K )
rmse.gf <- sqrt( cumsum( ( v.x - thresh.gf[-(K+1),1] ) ^ 2 ) / 1:K )
rmse.ukf <- sqrt( cumsum( ( v.x - thresh.ukf$m.pred[-(K+1)] ) ^ 2 ) / 1:K )
plot( c(1,K), range( rmse, rmse.gf ), type='n', xlab='Period', ylab='Rolling RMSE' )
lines( 1:K, rmse, col='blue', lwd=2)
lines( 1:K, rmse.gf, col='red', lwd=2 )
# Rolling bias of each filter's predicted mean
bias <- cumsum( ( v.x - thresh[-(K+1),1] ) ) / 1:K
bias.gf <- cumsum( ( v.x - thresh.gf[-(K+1),1] ) ) / 1:K
bias.ukf <- cumsum( v.x - thresh.ukf$m.pred[-(K+1)] ) / 1:K
plot( c(1,K), range( bias, bias.gf ), type='n', xlab='Period', ylab='Rolling bias' )
lines( 1:K, bias, col='blue', lwd=2)
lines( 1:K, bias.gf, col='red', lwd=2 )
abline( h=0, lwd=.5 )
#### Now generate a bunch of simulations and see the properties of the errors ###
set.seed(4321)
n.sim <- 100000   # number of simulated paths
n.pds <- 20       # periods per path
multi.x <- multi_ar1_sim( n.sim, n.pds, rho, 0, sig.eps )
multi.theta.hat <- 0.0 * multi.x # multi_norm_thresh( n.sim, n.pds, rho, sig.eps )
multi.y <- 1 * ( multi.x > multi.theta.hat )
# Run both filters over every simulated path
multi.thresh <- multi_thresh_filter( multi.x, multi.theta.hat, multi.y,
                                     c( 0, sig.eps^2 ), sig.eps, rho )
multi.gauss <- multi_gauss_filter( multi.x, multi.theta.hat, multi.y,
                                   c( 0, sig.eps^2 ), sig.eps, rho )
# Per-period bias / RMSE / MSE across simulations: threshold filter
err <- multi.thresh$mu[,-(n.pds+1)] - t( multi.x )
bias <- apply( err, 2, mean )
rmse <- apply( err, 2, sd )
mse <- apply( err, 2, var )
# ... and the exact Gaussian filter
err.gf <- multi.gauss$mu[,-(n.pds+1)] - t( multi.x )
bias.gf <- apply( err.gf, 2, mean )
rmse.gf <- apply( err.gf, 2, sd )
mse.gf <- apply( err.gf, 2, var )
sig.mean <- apply( sqrt( multi.thresh$sig2 ), 2, mean )
# Retained but disabled: per-path UKF (slow for n.sim = 100000)
# multi.thresh.ukf <- list( m.pred=0*multi.thresh$mu, P.pred=0*multi.thresh$sig2 )
# for( i in 1:n.sim ){
# temp <- ukf.compute( 0, sig.eps^2, multi.y[,i], f, g, Q, R, 1,
# alpha=1, kappa=kappa, quad = F )
# multi.thresh.ukf$m.pred[i,] <- temp$m.pred
# multi.thresh.ukf$P.pred[i,] <- temp$P.pred
# }
# err.ukf <- multi.thresh.ukf$m.pred[,-(n.pds+1)] - t( multi.x )
# bias.ukf <- apply( err.ukf, 2, mean )
# rmse.ukf <- apply( err.ukf, 2, sd )
# mse.ukf <- apply( err.gf, 2, var )
#### THIS CHART INCLUDED ####
# plot( 1:n.pds, rmse.ukf, col='red', lty=2, lwd=2, type='l', xlab='Periods', ylab='RMSE' )
plot( 1:n.pds, rmse.gf, col='red', lwd=2, type='l', xlab='Periods', ylab='RMSE' )
# lines( 1:n.pds, rmse.gf, col='red', lwd=2 )
lines( 1:n.pds, rmse, col='blue', lwd=2 )
# lines( 1:20, sqrt(apply(multi.gauss$sig2[,-(n.pds+1)],2,mean)), lty=2, col='red' )
# Compare the state's cross-sectional sd with each filter's total variance
# (mean posterior variance + variance of the posterior mean)
plot( 1:20, apply(multi.x, 1, sd), lwd=2, type='l', xlab='Periods',
      ylab='State sd' )
tot.var.thresh <- apply(multi.thresh$sig2[,-(n.pds+1)],2,mean) + apply(multi.thresh$mu[,-(n.pds+1)],2,var)
tot.var.gf <- apply(multi.gauss$sig2[,-(n.pds+1)],2,mean) + apply(multi.gauss$mu[,-(n.pds+1)],2,var)
lines( 1:20, sqrt(tot.var.thresh), lwd=2, col='blue' )
lines( 1:20, sqrt(tot.var.gf), lwd=2, col='red' )
legend( 'bottomright', c('State variance', 'Total variance: Threshold filter', 'Total variance: Exact Gaussian filter'),
        bty='n', lwd=2, col=c( 'black', 'blue', 'red' ) )
# Fraction of the state variance accounted for by each filter
plot( 1:20, 1 - tot.var.gf / apply(multi.x, 1, var), lwd=2, col='red', type='l' )
lines( 1:20, 1 - tot.var.thresh / apply(multi.x, 1, var), lwd=2, col='blue' )
#### NOW DO CONDITIONAL BIAS CHARTS ####
# One long simulated path; the first `burn` periods are discarded.
n.pds <- 100000
burn <- 1000
x.lr <- c( ar1_sim( n.pds + burn, rho, sig.eps ) )
# Long run x
theta.hat.lr <- 0 * x.lr
y.lr <- 1 * ( x.lr > theta.hat.lr )
# Create the filters
thresh.lr <- thresh_filter( c(0,sig.eps^2), theta.hat.lr, sig.eps, rho, y.lr )
thresh.lr.gf <- gauss_filter( c(0,sig.eps^2), theta.hat.lr, sig.eps, rho, y.lr )
thresh.lr.ukf <- ukf.compute( 0, sig.eps^2, y.lr, f, g, Q, R, 1, alpha=1, kappa=kappa )
# De-burn
thresh.lr <- thresh.lr[-(1:burn),]
thresh.lr.gf <- thresh.lr.gf[-(1:burn),]
m.thresh.lr.ukf <- cbind( thresh.lr.ukf$m.pred[-(1:burn)],
                          thresh.lr.ukf$P.pred[-(1:burn)] )
x.lr <- x.lr[-(1:burn)]
y.lr <- y.lr[-(1:burn)]
# Create the conditional biases (mean prediction error given the signal).
# Fix: bias.pos.lr.gf previously read from thresh.gf -- the K = 200 object
# from the short simulation above -- instead of thresh.lr.gf; with
# n.pds = 100000 that subscript is out of bounds for a 201-row matrix, and
# the wrong filter would have been evaluated in any case.
bias.pos.lr <- mean( thresh.lr[-(n.pds+1),1][y.lr==1] - x.lr[y.lr==1] )
bias.pos.lr.gf <- mean( thresh.lr.gf[-(n.pds+1),1][y.lr==1] - x.lr[y.lr==1] )
bias.pos.lr.ukf <- mean( m.thresh.lr.ukf[-(n.pds+1),1][y.lr==1] - x.lr[y.lr==1] )
bias.neg.lr <- mean( thresh.lr[-(n.pds+1),1][y.lr==0] - x.lr[y.lr==0] )
bias.neg.lr.gf <- mean( thresh.lr.gf[-(n.pds+1),1][y.lr==0] - x.lr[y.lr==0] )
bias.neg.lr.ukf <- mean( m.thresh.lr.ukf[-(n.pds+1),1][y.lr==0] - x.lr[y.lr==0] )
# Run length of the current streak of identical signals at each period
n.same <- sequence(rle(y.lr)$lengths)
# The number of identical signals
table( n.same, y.lr )
# Conditional bias by streak length ([-1] drops the streak-length-1 bin)
bias.p.seq <- c( by( thresh.lr[-(n.pds+1),1][y.lr==1] - x.lr[y.lr==1],
                     n.same[y.lr==1], mean ) )[-1]
bias.p.seq.gf <- c( by( thresh.lr.gf[-(n.pds+1),1][y.lr==1] - x.lr[y.lr==1],
                        n.same[y.lr==1], mean ) )[-1]
bias.p.seq.ukf <- c( by( m.thresh.lr.ukf[-(n.pds+1),1][y.lr==1] - x.lr[y.lr==1],
                         n.same[y.lr==1], mean ) )[-1]
bias.n.seq <- c( by( thresh.lr[-(n.pds+1),1][y.lr==0] - x.lr[y.lr==0],
                     n.same[y.lr==0], mean ) )[-1]
bias.n.seq.gf <- c( by( thresh.lr.gf[-(n.pds+1),1][y.lr==0] - x.lr[y.lr==0],
                        n.same[y.lr==0], mean ) )[-1]
bias.n.seq.ukf <- c( by( m.thresh.lr.ukf[-(n.pds+1),1][y.lr==0] - x.lr[y.lr==0],
                         n.same[y.lr==0], mean ) )[-1]
# Conditional RMSE by streak length
rmse.p.seq <- c( by( thresh.lr[-(n.pds+1),1][y.lr==1] - x.lr[y.lr==1],
                     n.same[y.lr==1], function(x) sqrt(mean(x^2)) ) )[-1]
rmse.p.seq.gf <- c( by( thresh.lr.gf[-(n.pds+1),1][y.lr==1] - x.lr[y.lr==1],
                        n.same[y.lr==1], function(x) sqrt(mean(x^2)) ) )[-1]
rmse.p.seq.ukf <- c( by( m.thresh.lr.ukf[-(n.pds+1),1][y.lr==1] - x.lr[y.lr==1],
                         n.same[y.lr==1], function(x) sqrt(mean(x^2)) ) )[-1]
rmse.n.seq <- c( by( thresh.lr[-(n.pds+1),1][y.lr==0] - x.lr[y.lr==0],
                     n.same[y.lr==0], function(x) sqrt(mean(x^2)) ) )[-1]
rmse.n.seq.gf <- c( by( thresh.lr.gf[-(n.pds+1),1][y.lr==0] - x.lr[y.lr==0],
                        n.same[y.lr==0], function(x) sqrt(mean(x^2)) ) )[-1]
rmse.n.seq.ukf <- c( by( m.thresh.lr.ukf[-(n.pds+1),1][y.lr==0] - x.lr[y.lr==0],
                         n.same[y.lr==0], function(x) sqrt(mean(x^2)) ) )[-1]
## Now plot them
plot( c(1,10), range( bias.p.seq[1:10], bias.p.seq.ukf[1:10],
                      bias.n.seq[1:10], bias.n.seq.ukf[1:10] ), type='n' )
lines( 1:10, bias.p.seq[1:10], lwd=2, col='blue' )
lines( 1:10, bias.n.seq[1:10], lwd=2, col='blue', lty=2 )
lines( 1:10, bias.p.seq.gf[1:10], lwd=2, col='red' )
lines( 1:10, bias.n.seq.gf[1:10], lwd=2, col='red', lty=2 )
lines( 1:10, bias.p.seq.ukf[1:10], col='red' )
lines( 1:10, bias.n.seq.ukf[1:10], col='red', lty=2 )
abline(h=0, lwd=.5)
plot( c(1,10), range( 0, rmse.p.seq[1:10], rmse.p.seq.ukf[1:10],
                      rmse.n.seq[1:10], rmse.n.seq.ukf[1:10] ), type='n' )
lines( 1:10, rmse.p.seq[1:10], lwd=2, col='blue' )
# lines( 1:10, rmse.n.seq[1:10], lwd=2, col='blue', lty=2 )
lines( 1:10, rmse.p.seq.ukf[1:10], lwd=2, col='red' )
# lines( 1:10, rmse.n.seq.ukf[1:10], lwd=2, col='red', lty=2 )
abline(h=0, lwd=.5)
|
cb64da91277a94bcbe9feac35959986f190be7e8
|
c7ef12b941afd9c9a73d2749091ec2f0c65820b4
|
/Titanic.R
|
9246f55286f282f4959ab097d5d9ac09900d78c8
|
[] |
no_license
|
rahulace/Predicting-Titanic-Survivors-Using-Title---new-approach-
|
9f673f69f8a93cb9546f1e0156d08d1079d0672e
|
e5ad406c61d4bc9d0153335c6b7840285793eac2
|
refs/heads/master
| 2021-09-13T18:38:49.454348
| 2018-05-03T06:56:56
| 2018-05-03T06:56:56
| 92,377,891
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,211
|
r
|
Titanic.R
|
library(ggplot2)
library(stringr)
# NOTE(review): setwd() in a script is discouraged -- prefer running from the
# project root with relative paths.
setwd("C:/Users/DELL/Desktop/Projects/Predicting-Titanic-Survivors-Using-Title---new-approach-")
# Loading raw data
train <- read.csv("train_titanic.csv")
test <- read.csv("test_titanic.csv")
# Adding "Survived" variable to the test set to allow combining the datasets.
# Fix: the original wrote rep("None", nrow=(test)), which passed `test` as an
# ignored named argument and relied on data.frame() recycling a length-1
# vector; nrow(test) is what was intended.
test.survived <- data.frame(Survived = rep("None", nrow(test)), test[,])
test.survived
data.combined <- rbind(train, test.survived)
# Data types of the combined dataset
str(data.combined)
# Converting categorical columns to factors
data.combined$Survived <- as.factor(data.combined$Survived)
data.combined$Pclass <- as.factor(data.combined$Pclass)
data.combined$Sex <- as.factor(data.combined$Sex)
# Survival counts ("None" marks test-set rows)
table(data.combined$Survived)
str(train)
# Survival rate as per class
train$Pclass <- as.factor(train$Pclass)
train$Survived <- as.factor(train$Survived)
ggplot(train, aes(x = Pclass, fill = Survived)) +
  geom_bar(width = 0.5) +
  xlab("Pclass") +
  ylab("Total count") +
  labs(fill = "Survived")
# Converting "Names" to character for string matching
train$Name <- as.character(train$Name)
# To check unique names in the combined data set
length(unique(as.character(data.combined$Name)))
# Get duplicate names and store them as a vector
dup.names <- as.character(data.combined[which(duplicated(as.character(data.combined$Name))), "Name"])
dup.names
# Inspect rows by title to check if title has any correlation with other variables
misses <- data.combined[which(str_detect(data.combined$Name, "Miss")),]
misses
mrses <- data.combined[which(str_detect(data.combined$Name, "Mrs")),]
mrses
mres <- data.combined[which(str_detect(data.combined$Name, "Mr")),]
mres
masters <- data.combined[which(str_detect(data.combined$Name, "Master")),]
masters
#Create function to extract titles
titlecreator <- function(Name) {
  # Map a raw passenger name to one of five title categories.
  # Order matters: "Miss", "Mrs" and "Master" are tested before "Mr" because
  # "Mrs" contains "Mr" as a substring.
  Name <- as.character(Name)
  contains <- function(pattern) length(grep(pattern, Name)) > 0
  if (contains("Miss")) return("Miss")
  if (contains("Mrs")) return("Mrs")
  if (contains("Master")) return("Master")
  if (contains("Mr")) return("Mr")
  "Other"
}
# Derive a Title column by applying titlecreator() to every passenger name.
# vapply() replaces the original grow-in-a-loop pattern (Titles <- c(Titles, ...)
# copies the whole vector on every iteration) and guarantees a character result.
Titles <- vapply(data.combined$Name, titlecreator, character(1), USE.NAMES = FALSE)
data.combined$Title <- as.factor(Titles)
# To check survival rate with titles, by class
ggplot(data.combined[1:891,], aes(x = Title, fill = Survived)) +
  geom_bar(width = 0.5) +
  facet_wrap(~Pclass) +
  ggtitle("Pclass") +
  xlab("Title") +
  ylab("Total count") +
  labs(fill = "Survived")
#Distribution of males and females in dataset
table(data.combined$Sex)
#Visualize 3 way relation between Sex, Class and survival rate
ggplot(data.combined[1:891,], aes(x = Sex, fill = Survived)) +
  geom_bar(width = 0.5) +
  facet_wrap(~Pclass) +
  ggtitle("Pclass") +
  xlab("Sex") +
  ylab("Total count") +
  labs(fill = "Survived")
#Females have higher survival rate than males
#Relation between Age, Sex, Class and survival rate
ggplot(data.combined[1:891,], aes(x = Age, fill = Survived)) +
  facet_wrap(~Sex + ~Pclass) +
  geom_histogram(binwidth = 10)+
  xlab("Age") +
  ylab("Total count")
#Distribution of age over entire dataset
summary(data.combined$Age)
#To see which title has maximum NA's in age
summary(misses$Age)
summary(masters$Age)
summary(mres$Age) #highest no. of NA's
summary(mrses$Age)
#Relation between Age and survival rate for titles = "misses" (train rows only)
ggplot(misses[misses$Survived != "None",], aes(x = Age, fill = Survived)) +
  facet_wrap(~Pclass) +
  geom_histogram(binwidth = 5)+
  xlab("Age") +
  ylab("Total count")
# Exploring SibSp variable (number of siblings/spouses aboard)
summary(data.combined$SibSp)
#Converting SibSp to factor
data.combined$SibSp <- as.factor(data.combined$SibSp)
#Relation between SibSp, Class, Title and survival rate
ggplot(data.combined[1:891,], aes(x = SibSp, fill = Survived)) +
  stat_count(width = 0.5) +
  facet_wrap(~Pclass + Title) +
  ggtitle("Pclass, Title" ) +
  xlab("SibSp") +
  ylab("Total count") +
  ylim(0,300) +
  labs(fill = "Survived")
#Title is definitely a strong predictor
# Exploring Parch variable (number of parents/children aboard)
summary(data.combined$Parch)
#Converting Parch to factor
data.combined$Parch <- as.factor(data.combined$Parch)
#Relation between Parch, Class, Title and survival rate
ggplot(data.combined[1:891,], aes(x = Parch, fill = Survived)) +
  stat_count(width = 0.5) +
  facet_wrap(~Pclass + Title) +
  ggtitle("Pclass, Title" ) +
  xlab("Parch") +
  ylab("Total count") +
  ylim(0,300) +
  labs(fill = "Survived")
#Creating a family size feature = SibSp + Parch + 1 (self)
temp.SibSp <- c(train$SibSp, test$SibSp)
temp.Parch <- c(train$Parch, test$Parch)
data.combined$family.size <- as.factor(temp.SibSp + temp.Parch + 1)
#Relation between Family Size, Class, Title and survival rate
ggplot(data.combined[1:891,], aes(x = family.size, fill = Survived)) +
  stat_count(width = 0.5) +
  facet_wrap(~Pclass + Title) +
  ggtitle("Pclass, Title" ) +
  xlab("Family Size") +
  ylab("Total count") +
  ylim(0,300) +
  labs(fill = "Survived")
# Exploring Fare variable
summary(data.combined$Fare)
str(data.combined$Fare)
#Visualizing fare
ggplot(data.combined, aes(x = Fare)) +
  geom_histogram(binwidth = 5) +
  ggtitle("Fare Distribution") +
  xlab("Fare") +
  ylab("Total Count") +
  ylim(0,300)
#Relation between Fare, Class, Title and survival rate
# NOTE(review): stat_count on a continuous Fare produces one bar per distinct
# value -- a binned histogram may have been intended; confirm.
ggplot(data.combined[1:891,], aes(x = Fare, fill = Survived)) +
  stat_count(width = 0.5) +
  facet_wrap(~Pclass + Title) +
  ggtitle("Pclass, Title" ) +
  xlab("Fare") +
  ylab("Total count") +
  ylim(0,300) +
  labs(fill = "Survived")
# Exploring Embarked variable (port of embarkation)
summary(data.combined$Embarked)
str(data.combined$Embarked)
#Relation between Embarked, Class, Title and survival rate
ggplot(data.combined[1:891,], aes(x = Embarked, fill = Survived)) +
  geom_bar() +
  facet_wrap(~Pclass + Title) +
  ggtitle("Pclass, Title" ) +
  xlab("Embarked") +
  ylab("Total count") +
  ylim(0,300) +
  labs(fill = "Survived")
######################################################################################
#Predictive Model
#Random Forest -- each model below is trained on the 891 labelled rows with a
#different feature subset; OOB accuracy is noted next to each fit.
library(randomForest)
#Model1 = Train set with only Pclass and Title
rf.train1 <- data.combined[1:891, c("Pclass", "Title")]
rf.label <- as.factor(train$Survived)
set.seed(1234)
rf.1 <- randomForest(x = rf.train1, y = rf.label, importance = T, ntree = 1000)
rf.1 # 79.01% accuracy
varImpPlot(rf.1) #Title is way stronger predictor than Pclass
#Model2 = Train set with only Pclass, SibSp and Title
rf.train2 <- data.combined[1:891, c("Pclass", "Title", "SibSp")]
set.seed(1234)
rf.2 <- randomForest(x = rf.train2, y = rf.label, importance = T, ntree = 1000)
rf.2 # 80.07% accuracy
varImpPlot(rf.2) #Title is way stronger predictor than Pclass
#Model3 = Train set with only Pclass, SibSp, Parch and Title
rf.train3 <- data.combined[1:891, c("Pclass", "Title", "SibSp", "Parch")]
set.seed(1234)
rf.3 <- randomForest(x = rf.train3, y = rf.label, importance = T, ntree = 1000)
rf.3 # 80.92% accuracy
varImpPlot(rf.3) #Title is way stronger predictor than Pclass
#Model4 = Train set with only Pclass, Family Size and Title
rf.train4 <- data.combined[1:891, c("Pclass", "Title", "family.size")]
set.seed(1234)
rf.4 <- randomForest(x = rf.train4, y = rf.label, importance = T, ntree = 1000)
rf.4 # 81.82% accuracy
varImpPlot(rf.4) #Family Size is a stronger predictor than Parch and SibSp
#Model5 = Train set with only Pclass, Family Size, Fare and Title
rf.train5 <- data.combined[1:891, c("Pclass", "Title", "family.size", "Fare")]
set.seed(1234)
rf.5 <- randomForest(x = rf.train5, y = rf.label, importance = T, ntree = 1000)
rf.5 # 83.39% accuracy
varImpPlot(rf.5) #Combination of Family Size and Fare brings better accuracy
#Model6 = Train set with only Pclass, Family Size, Fare, Embarked and Title
rf.train6 <- data.combined[1:891, c("Pclass", "Title", "family.size", "Fare", "Embarked")]
set.seed(1234)
rf.6 <- randomForest(x = rf.train6, y = rf.label, importance = T, ntree = 1000)
rf.6 # 81.37% accuracy
varImpPlot(rf.6) #Embarked brings down accuracy, hence not required
# Best Random Forest model is Model 5 - combination of Pclass, Family Size, Fare and Title
# (the original comment said Model 4, but listed Model 5's features and Model 5 has the best OOB accuracy)
# Our feature engineered variable "Title" is strongest predictor of Titanic Passengers survival rate!
21569854b8de6640e4dc07687beaabeeaaab9b4f
|
c70a288ec70b52086bacc4653c9433d8650dba5b
|
/Oregon_coho/code/Siletz_model_runs.R
|
237508acc7bff3e3484cf756e55ba922df62f2c1
|
[] |
no_license
|
merrillrudd/VAST_SN
|
b6ac33749f7788d3b3346b29ad04eefcb8cba418
|
0d7bdf06587528b9889f10e99dbaa3c8cb3ff629
|
refs/heads/master
| 2020-12-20T14:04:00.320406
| 2020-07-07T22:49:42
| 2020-07-07T22:49:42
| 236,097,842
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 116,794
|
r
|
Siletz_model_runs.R
|
# ---- Session setup for Siletz VAST stream-network model runs ----
# NOTE(review): rm(list=ls()) plus dev-machine absolute paths make this script
# non-portable; kept as-is since it is an interactive analysis script.
rm(list=ls())
#devtools::install_local( "C:/Users/James.Thorson/Desktop/Git/FishStatsUtils", force=TRUE, dep=FALSE )
# library(VAST)
# devtools::load_all("C:\\merrill\\FishStatsUtils")
devtools::load_all("C:\\merrill\\DHARMa\\DHARMa")
devtools::load_all("C://merrill/TMB_contrib_R/TMBhelper")
library(VAST)
devtools::load_all("C:\\merrill\\FishStatsUtils")
devtools::load_all("C:\\merrill\\VASTPlotUtils")
# sil_dir <- "~/Projects/Spatiotemporal/VAST_SN/Oregon_coho/Siletz"
sil_dir <- "C:/merrill/VAST_SN/Oregon_coho/Siletz"
# jim_dir <- "C:/Users/James.Thorson/Desktop/Work files/Collaborations/2018 -- Rudd stream network/2020-06-01"
# load(file.path(jim_dir, "general_inputs.Rdata"))
# Presumably provides Data_count, Network_sz, Network_sz_LL, X_gtp_all,
# X_itp_all, Xconfig_all2 and spawn_info, which are used below without
# being defined in this script -- TODO confirm contents of the .Rdata.
load(file.path(sil_dir, "general_inputs.Rdata"))
# path <- file.path(jim_dir, "V2")
#############
## Model "IID": IID structure on the second-linear-predictor spatial (Omega2)
## and spatiotemporal (Epsilon2) fields, positive-catch distribution
## ObsModel["PosDist"] = 5, landcover covariates supplied via X_gtp/X_itp.
#############
path <- file.path(sil_dir, "multivar_landcover_IID_dist5")
# unlink(path, TRUE)
dir.create(path, showWarnings = FALSE)
fig <- file.path(path, "figures")
dir.create(fig, showWarnings=FALSE)
# ignore <- file.copy(from = file.path(sil_dir, "VAST_v8_2_0.cpp"), to = path)
# ignore <- file.copy(from = file.path(sil_dir, "VAST_v8_2_0.dll"), to = path)
# ignore <- file.copy(from = file.path(sil_dir, "VAST_v8_2_0.o"), to = path)
Data <- Data_count
## turn on spatial and spatiotemporal effects
## two factors -- one for each category (spawners and juveniles)
FieldConfig = c("Omega1"=0, "Epsilon1"=0, "Omega2"="IID", "Epsilon2"="IID")
## random walk structure on temporal intercepts and spatiotemporal random effect
## not much information for juveniles, model needs a little more structure to converge
RhoConfig = c("Beta1"=3, "Beta2"=1, "Epsilon1"=0, "Epsilon2"=0)
ObsModel = c("PosDist"=5, "Link"=0)
## other options
OverdispersionConfig = c("Eta1"=0, "Eta2"=0)
Options = c("Calculate_Range"=1,
"Calculate_effective_area"=1)
## wrapper function to set up common settings
settings <- make_settings( Version = "VAST_v8_2_0",
n_x = nrow(Network_sz),
Region = "Stream_network",
FieldConfig=FieldConfig,
RhoConfig=RhoConfig,
OverdispersionConfig=OverdispersionConfig,
Options=Options,
ObsModel=ObsModel,
purpose = "index2",
fine_scale=FALSE,
bias.correct=FALSE)
settings$Method <- "Stream_network"
settings$grid_size_km <- 1
# compile model and check parameters (run_model = FALSE: build the TMB object only)
fit0 = fit_model( "settings"=settings,
"Lat_i"=Data[,"Lat"],
"Lon_i"=Data[,"Lon"],
"t_i"=Data[,'Year'],
"c_i"=as.numeric(Data[,"CategoryNum"]) - 1,
"b_i"=Data[,'Catch_KG'],
"a_i"=Data[,'AreaSwept_km2'],
working_dir=path,
input_grid=cbind("Lat"=Data[,"Lat"], "Lon"=Data[,"Lon"],"child_i"=Data[,"Knot"],"Area_km2"=Data[,"AreaSwept_km2"]),
Network_sz_LL=Network_sz_LL,
Network_sz = Network_sz,
run_model = FALSE,
X_gtp = X_gtp_all,
X_itp = X_itp_all,
Xconfig_zcp = Xconfig_all2,
# Q_ik = Q_ik,
test_fit = FALSE)
# CompileDir = jim_dir)
Par <- fit0$tmb_list$Parameters
Map <- fit0$tmb_list$Map
# first model run (getsd = FALSE) used to identify poorly-determined parameters
fit1 = fit_model( "settings"=settings,
"Lat_i"=Data[,"Lat"],
"Lon_i"=Data[,"Lon"],
"t_i"=Data[,'Year'],
"c_i"=as.numeric(Data[,"CategoryNum"]) - 1,
"b_i"=Data[,'Catch_KG'],
"a_i"=Data[,'AreaSwept_km2'],
"v_i"=Data[,'Vessel'],
working_dir = path,
input_grid=cbind("Lat"=Data[,"Lat"], "Lon"=Data[,"Lon"],"child_i"=Data[,"Knot"],"Area_km2"=Data[,"AreaSwept_km2"]),
Network_sz_LL=Network_sz_LL,
Network_sz = Network_sz,
Map = Map,
X_gtp = X_gtp_all,
X_itp = X_itp_all,
Xconfig_zcp = Xconfig_all2,
# Q_ik = Q_ik,
getsd=FALSE,
newtonsteps=0,
test_fit = FALSE)
# CompileDir = jim_dir)
check <- TMBhelper::Check_Identifiable(fit1$tmb_list$Obj)
Par <- fit1$ParHat
Map <- fit1$tmb_list$Map
# fix (NA) a subset of the logSigmaM variance parameters; remaining entries share levels
Map$logSigmaM = factor( c(1,NA,2,3,NA,NA) )
# Reduced model run, warm-started from fit1's parameter estimates
fit1 = fit_model( "settings"=settings,
"Lat_i"=Data[,"Lat"],
"Lon_i"=Data[,"Lon"],
"t_i"=Data[,'Year'],
"c_i"=as.numeric(Data[,"CategoryNum"]) - 1,
"b_i"=Data[,'Catch_KG'],
"a_i"=Data[,'AreaSwept_km2'],
"v_i"=Data[,'Vessel'],
working_dir = path,
input_grid=cbind("Lat"=Data[,"Lat"], "Lon"=Data[,"Lon"],"child_i"=Data[,"Knot"],"Area_km2"=Data[,"AreaSwept_km2"]),
Network_sz_LL=Network_sz_LL,
Network_sz = Network_sz,
Map = Map,
Parameters = Par,
X_gtp = X_gtp_all,
X_itp = X_itp_all,
Xconfig_zcp = Xconfig_all2,
# Q_ik = Q_ik,
getsd=FALSE,
newtonsteps=0,
test_fit = FALSE) #,
# CompileDir = jim_dir)
check <- TMBhelper::Check_Identifiable(fit1$tmb_list$Obj)
# Final run of the reduced model, this time with the SD report (getsd = TRUE)
fit = fit_model( "settings"=settings,
"Lat_i"=Data[,"Lat"],
"Lon_i"=Data[,"Lon"],
"t_i"=Data[,'Year'],
"c_i"=as.numeric(Data[,"CategoryNum"]) - 1,
"b_i"=Data[,'Catch_KG'],
"a_i"=Data[,'AreaSwept_km2'],
"v_i"=Data[,'Vessel'],
working_dir = path,
input_grid=cbind("Lat"=Data[,"Lat"], "Lon"=Data[,"Lon"],"child_i"=Data[,"Knot"],"Area_km2"=Data[,"AreaSwept_km2"]),
Network_sz_LL=Network_sz_LL,
Network_sz = Network_sz,
Map = Map,
Parameters = Par,
X_gtp = X_gtp_all,
X_itp = X_itp_all,
Xconfig_zcp = Xconfig_all2,
# Q_ik = Q_ik,
getsd=TRUE,
newtonsteps=0,
test_fit = FALSE) #,
# CompileDir = jim_dir)
## save model fit
saveRDS(fit, file.path(path, "Fit.rds"))
## load model fit
fit <- readRDS(file.path(path, "Fit.rds"))
# common log-density color scale for the density maps below
dens <- quantile(log(fit$Report$D_gcy))
VASTPlotUtils::plot_maps(plot_set = c(3), fit = fit, Sdreport = fit$parameter_estimates$SD, TmbData = fit$data_list, spatial_list = fit$spatial_list, DirName = fig, category_names = c("Spawners", "Juveniles"), cex = 0.5, Zlim = c(min(dens),max(dens)))
VASTPlotUtils::plot_maps(plot_set = c(7), fit = fit, Sdreport = fit$parameter_estimates$SD, TmbData = fit$data_list, spatial_list = fit$spatial_list, DirName = fig, category_names = c("Spawners", "Juveniles"), cex = 0.5)
VASTPlotUtils::plot_maps(plot_set = c(5), fit = fit, Sdreport = fit$parameter_estimates$SD, TmbData = fit$data_list, spatial_list = fit$spatial_list, DirName = fig, category_names = c("Spawners", "Juveniles"), cex = 0.5)
VASTPlotUtils::plot_maps(plot_set = c(3), fit = fit, Sdreport = fit$parameter_estimates$SD, TmbData = fit$data_list, spatial_list = fit$spatial_list, DirName = fig, category_names = c("Spawners", "Juveniles"), cex = 0.75, Panel = "Year", Zlim = c(min(dens),max(dens)))
## plot effective area occupied and center of gravity
VASTPlotUtils::plot_range_index(Report = fit$Report, TmbData = fit$data_list, Sdreport = fit$parameter_estimates$SD, Znames = colnames(fit$data_list$Z_xm), PlotDir = fig, Year_Set = fit$year_labels, use_biascorr = TRUE, category_names = c("Spawners", "Juveniles"))
VASTPlotUtils::plot_biomass_index(fit = fit, Sdreport = fit$parameter_estimates$SD, DirName = fig, category_names = c("Spawners", "Juveniles"), add = spawn_info, Plot_suffix = "Count", interval_width = 1.96)
# DHARMa-style residual diagnostics
dharmaRes = summary( fit, what="residuals")
png(file.path(fig, "DHARMa_res.png"), height = 600, width = 900)
plot(dharmaRes, quantreg = TRUE)
dev.off()
# Various potential plots
png(file.path(fig, "QQplot.png"), height = 600, width = 600)
plotQQunif(dharmaRes)
dev.off()
Plots = plot(fit,
working_dir=paste0(path,"/figures/"),
land_color=rgb(0,0,0,0),
quantreg=TRUE )
plotQQunif( Plots$dharmaRes )
#############
## Model "IID + RW Epsilon": same as the IID model above, except the
## spatiotemporal field Epsilon2 follows a random walk over time
## (RhoConfig["Epsilon2"] = 2).  (Original banner just said "IID".)
#############
path <- file.path(sil_dir, "multivar_landcover_IID_dist5_RWEps")
# unlink(path, TRUE)
dir.create(path, showWarnings = FALSE)
fig <- file.path(path, "figures")
dir.create(fig, showWarnings=FALSE)
# copy the pre-compiled TMB template into the run directory
ignore <- file.copy(from = file.path(sil_dir, "VAST_v8_2_0.cpp"), to = path)
ignore <- file.copy(from = file.path(sil_dir, "VAST_v8_2_0.dll"), to = path)
ignore <- file.copy(from = file.path(sil_dir, "VAST_v8_2_0.o"), to = path)
Data <- Data_count
## turn on spatial and spatiotemporal effects
## two factors -- one for each category (spawners and juveniles)
FieldConfig = c("Omega1"=0, "Epsilon1"=0, "Omega2"="IID", "Epsilon2"="IID")
## random walk structure on temporal intercepts and spatiotemporal random effect
## not much information for juveniles, model needs a little more structure to converge
RhoConfig = c("Beta1"=3, "Beta2"=1, "Epsilon1"=0, "Epsilon2"=2)
ObsModel = c("PosDist"=5, "Link"=0)
## other options
OverdispersionConfig = c("Eta1"=0, "Eta2"=0)
Options = c("Calculate_Range"=1,
"Calculate_effective_area"=1)
## wrapper function to set up common settings
settings <- make_settings( Version = "VAST_v8_2_0",
n_x = nrow(Network_sz),
Region = "Stream_network",
FieldConfig=FieldConfig,
RhoConfig=RhoConfig,
OverdispersionConfig=OverdispersionConfig,
Options=Options,
ObsModel=ObsModel,
purpose = "index2",
fine_scale=FALSE,
bias.correct=FALSE)
settings$Method <- "Stream_network"
settings$grid_size_km <- 1
# compile model and check parameters (run_model = FALSE: build the TMB object only)
fit0 = fit_model( "settings"=settings,
"Lat_i"=Data[,"Lat"],
"Lon_i"=Data[,"Lon"],
"t_i"=Data[,'Year'],
"c_i"=as.numeric(Data[,"CategoryNum"]) - 1,
"b_i"=Data[,'Catch_KG'],
"a_i"=Data[,'AreaSwept_km2'],
working_dir=path,
input_grid=cbind("Lat"=Data[,"Lat"], "Lon"=Data[,"Lon"],"child_i"=Data[,"Knot"],"Area_km2"=Data[,"AreaSwept_km2"]),
Network_sz_LL=Network_sz_LL,
Network_sz = Network_sz,
run_model = FALSE,
X_gtp = X_gtp_all,
X_itp = X_itp_all,
Xconfig_zcp = Xconfig_all2,
# Q_ik = Q_ik,
test_fit = FALSE)
# CompileDir = jim_dir)
Par <- fit0$tmb_list$Parameters
Map <- fit0$tmb_list$Map
# fix a subset of the variance parameters and one covariate-effect level
Map$logSigmaM <- factor(c(1,2,3,NA,NA,NA))
Map$gamma1_ctp[which(Map$gamma1_ctp == 2)] = NA
Map$gamma1_ctp <- factor(Map$gamma1_ctp)
# first model run (getsd = FALSE) to check identifiability
fit1 = fit_model( "settings"=settings,
"Lat_i"=Data[,"Lat"],
"Lon_i"=Data[,"Lon"],
"t_i"=Data[,'Year'],
"c_i"=as.numeric(Data[,"CategoryNum"]) - 1,
"b_i"=Data[,'Catch_KG'],
"a_i"=Data[,'AreaSwept_km2'],
"v_i"=Data[,'Vessel'],
working_dir = path,
input_grid=cbind("Lat"=Data[,"Lat"], "Lon"=Data[,"Lon"],"child_i"=Data[,"Knot"],"Area_km2"=Data[,"AreaSwept_km2"]),
Network_sz_LL=Network_sz_LL,
Network_sz = Network_sz,
Map = Map,
X_gtp = X_gtp_all,
X_itp = X_itp_all,
Xconfig_zcp = Xconfig_all2,
# Q_ik = Q_ik,
getsd=FALSE,
newtonsteps=0,
test_fit = FALSE)
# CompileDir = jim_dir)
check <- TMBhelper::Check_Identifiable(fit1$tmb_list$Obj)
# Final run of the reduced model: SD report on, 3 extra Newton steps for
# convergence, and the joint precision matrix requested.
fit = fit_model( "settings"=settings,
"Lat_i"=Data[,"Lat"],
"Lon_i"=Data[,"Lon"],
"t_i"=Data[,'Year'],
"c_i"=as.numeric(Data[,"CategoryNum"]) - 1,
"b_i"=Data[,'Catch_KG'],
"a_i"=Data[,'AreaSwept_km2'],
"v_i"=Data[,'Vessel'],
working_dir = path,
input_grid=cbind("Lat"=Data[,"Lat"], "Lon"=Data[,"Lon"],"child_i"=Data[,"Knot"],"Area_km2"=Data[,"AreaSwept_km2"]),
Network_sz_LL=Network_sz_LL,
Network_sz = Network_sz,
Map = Map,
Parameters = Par,
X_gtp = X_gtp_all,
X_itp = X_itp_all,
Xconfig_zcp = Xconfig_all2,
# Q_ik = Q_ik,
getsd=TRUE,
newtonsteps=3,
test_fit = FALSE,
# BUG FIX: was misspelled "getJointPrescision"; fit_model forwards extra
# arguments via ... to TMBhelper::fit_tmb, so the misspelled name was
# silently ignored and the joint precision matrix was never returned.
getJointPrecision = TRUE) #,
# CompileDir = jim_dir)
## save model fit
saveRDS(fit, file.path(path, "Fit.rds"))
## load model fit
fit <- readRDS(file.path(path, "Fit.rds"))
# common log-density color scale for the density maps below
dens <- quantile(log(fit$Report$D_gcy))
# NOTE(review): this standalone Zlim assignment is never used -- the
# plot_maps() calls below pass Zlim explicitly.
Zlim = c(min(dens),max(dens))
VASTPlotUtils::plot_maps(plot_set = c(3), fit = fit, Sdreport = fit$parameter_estimates$SD, TmbData = fit$data_list, spatial_list = fit$spatial_list, DirName = fig, category_names = c("Spawners", "Juveniles"), cex = 0.5, Zlim = c(min(dens),max(dens)))
VASTPlotUtils::plot_maps(plot_set = c(7), fit = fit, Sdreport = fit$parameter_estimates$SD, TmbData = fit$data_list, spatial_list = fit$spatial_list, DirName = fig, category_names = c("Spawners", "Juveniles"), cex = 0.5)
VASTPlotUtils::plot_maps(plot_set = c(5), fit = fit, Sdreport = fit$parameter_estimates$SD, TmbData = fit$data_list, spatial_list = fit$spatial_list, DirName = fig, category_names = c("Spawners", "Juveniles"), cex = 0.5)
VASTPlotUtils::plot_maps(plot_set = c(3), fit = fit, Sdreport = fit$parameter_estimates$SD, TmbData = fit$data_list, spatial_list = fit$spatial_list, DirName = fig, category_names = c("Spawners", "Juveniles"), cex = 0.75, Panel = "Year", Zlim = c(min(dens),max(dens)))
## plot effective area occupied and center of gravity
VASTPlotUtils::plot_range_index(Report = fit$Report, TmbData = fit$data_list, Sdreport = fit$parameter_estimates$SD, Znames = colnames(fit$data_list$Z_xm), PlotDir = fig, Year_Set = fit$year_labels, use_biascorr = TRUE, category_names = c("Spawners", "Juveniles"))
VASTPlotUtils::plot_biomass_index(fit = fit, Sdreport = fit$parameter_estimates$SD, DirName = fig, category_names = c("Spawners", "Juveniles"), add = spawn_info, Plot_suffix = "Count", interval_width = 1.96)
# DHARMa-style residual diagnostics
dharmaRes = summary( fit, what="residuals")
png(file.path(fig, "DHARMa_res.png"), height = 600, width = 900)
plot(dharmaRes, quantreg = TRUE)
dev.off()
# Various potential plots
png(file.path(fig, "QQplot.png"), height = 600, width = 600)
plotQQunif(dharmaRes)
dev.off()
Plots = plot(fit,
working_dir=paste0(path,"/figures/"),
land_color=rgb(0,0,0,0),
quantreg=TRUE )
plotQQunif( Plots$dharmaRes )
#################
## Model "Factor": factor-analysis structure with 2 factors on Omega2 and
## Epsilon2, positive-catch distribution ObsModel["PosDist"] = 5,
## landcover covariates.
#################
path <- file.path(sil_dir, "multivar_landcover_dist5")
# unlink(path, TRUE)
dir.create(path, showWarnings = FALSE)
# NOTE(review): setwd() in a script is fragile; working_dir is also passed
# to fit_model below, so the cwd change may be redundant.
setwd(path)
fig <- file.path(path, "figures")
dir.create(fig, showWarnings=FALSE)
#
ignore <- file.copy(from = file.path(sil_dir, "VAST_v8_2_0.cpp"), to = path)
ignore <- file.copy(from = file.path(sil_dir, "VAST_v8_2_0.dll"), to = path)
ignore <- file.copy(from = file.path(sil_dir, "VAST_v8_2_0.o"), to = path)
Data <- Data_count
## turn on spatial and spatiotemporal effects
## two factors -- one for each category (spawners and juveniles)
FieldConfig = c("Omega1"=0, "Epsilon1"=0, "Omega2"=2, "Epsilon2"=2)
## random walk structure on temporal intercepts and spatiotemporal random effect
## not much information for juveniles, model needs a little more structure to converge
RhoConfig = c("Beta1"=3, "Beta2"=1, "Epsilon1"=0, "Epsilon2"=0)
ObsModel = c("PosDist"=5, "Link"=0)
## other options
OverdispersionConfig = c("Eta1"=0, "Eta2"=0)
Options = c("Calculate_Range"=1,
"Calculate_effective_area"=1)
## wrapper function to set up common settings.
## BUG FIX: stray prose ("Variance parameters for juvenile positive catch
## rates, habitat covariate effects on zero-inflated probability (fixed to
## zero)") had been pasted into the argument list after ObsModel=ObsModel,
## making the call a parse error; it is preserved here as this comment.
settings <- make_settings( Version = "VAST_v8_2_0",
n_x = nrow(Network_sz),
Region = "Stream_network",
FieldConfig=FieldConfig,
RhoConfig=RhoConfig,
OverdispersionConfig=OverdispersionConfig,
Options=Options,
ObsModel=ObsModel,
purpose = "index2",
fine_scale=FALSE,
bias.correct=FALSE)
settings$Method <- "Stream_network"
settings$grid_size_km <- 1
# compile model and check parameters (run_model = FALSE: build the TMB object only)
fit0 = fit_model( "settings"=settings,
"Lat_i"=Data[,"Lat"],
"Lon_i"=Data[,"Lon"],
"t_i"=Data[,'Year'],
"c_i"=as.numeric(Data[,"CategoryNum"]) - 1,
"b_i"=Data[,'Catch_KG'],
"a_i"=Data[,'AreaSwept_km2'],
working_dir=path,
input_grid=cbind("Lat"=Data[,"Lat"], "Lon"=Data[,"Lon"],"child_i"=Data[,"Knot"],"Area_km2"=Data[,"AreaSwept_km2"]),
Network_sz_LL=Network_sz_LL,
Network_sz = Network_sz,
run_model = FALSE,
X_gtp = X_gtp_all,
X_itp = X_itp_all,
Xconfig_zcp = Xconfig_all2,
# Q_ik = Q_ik,
test_fit = FALSE)
# CompileDir = jim_dir)
Par <- fit0$tmb_list$Parameters
Map <- fit0$tmb_list$Map
# Map$beta1_ft <- factor(rep(NA, length(Map$beta1_ft)))
# Map$gamma1_ctp <- factor(rep(NA, length(Map$gamma1_ctp)))
# fix a subset of logSigmaM; collapse gamma1_ctp levels (drop level 1, relabel 2 -> 1)
Map$logSigmaM = factor( c(1,NA,2,NA,NA,NA) )
Map$gamma1_ctp[which(Map$gamma1_ctp == 1)] = NA
Map$gamma1_ctp[which(Map$gamma1_ctp == 2)] = 1
Map$gamma1_ctp <- factor(Map$gamma1_ctp)
# first model run (getsd = FALSE) to check identifiability
fit1 = fit_model( "settings"=settings,
"Lat_i"=Data[,"Lat"],
"Lon_i"=Data[,"Lon"],
"t_i"=Data[,'Year'],
"c_i"=as.numeric(Data[,"CategoryNum"]) - 1,
"b_i"=Data[,'Catch_KG'],
"a_i"=Data[,'AreaSwept_km2'],
"v_i"=Data[,'Vessel'],
working_dir = path,
input_grid=cbind("Lat"=Data[,"Lat"], "Lon"=Data[,"Lon"],"child_i"=Data[,"Knot"],"Area_km2"=Data[,"AreaSwept_km2"]),
Network_sz_LL=Network_sz_LL,
Network_sz = Network_sz,
Map = Map,
X_gtp = X_gtp_all,
X_itp = X_itp_all,
Xconfig_zcp = Xconfig_all2,
# Q_ik = Q_ik,
getsd=FALSE,
newtonsteps=0,
test_fit = FALSE)
# CompileDir = jim_dir)
check <- TMBhelper::Check_Identifiable(fit1$tmb_list$Obj)
# Reduced model run with SD report (getsd = TRUE)
fit = fit_model( "settings"=settings,
"Lat_i"=Data[,"Lat"],
"Lon_i"=Data[,"Lon"],
"t_i"=Data[,'Year'],
"c_i"=as.numeric(Data[,"CategoryNum"]) - 1,
"b_i"=Data[,'Catch_KG'],
"a_i"=Data[,'AreaSwept_km2'],
"v_i"=Data[,'Vessel'],
working_dir = path,
input_grid=cbind("Lat"=Data[,"Lat"], "Lon"=Data[,"Lon"],"child_i"=Data[,"Knot"],"Area_km2"=Data[,"AreaSwept_km2"]),
Network_sz_LL=Network_sz_LL,
Network_sz = Network_sz,
Map = Map,
Parameters = Par,
X_gtp = X_gtp_all,
X_itp = X_itp_all,
Xconfig_zcp = Xconfig_all2,
# Q_ik = Q_ik,
getsd=TRUE,
newtonsteps=0,
test_fit = FALSE) #,
# CompileDir = jim_dir)
## save model fit
saveRDS(fit, file.path(path, "Fit.rds"))
## load model fit
fit <- readRDS(file.path(path, "Fit.rds"))
# common log-density color scale for the density maps below
dens <- quantile(log(fit$Report$D_gcy))
VASTPlotUtils::plot_maps(plot_set = c(3), fit = fit, Sdreport = fit$parameter_estimates$SD, TmbData = fit$data_list, spatial_list = fit$spatial_list, DirName = fig, category_names = c("Spawners", "Juveniles"), cex = 0.5, Zlim = c(min(dens),max(dens)))
VASTPlotUtils::plot_maps(plot_set = c(7), fit = fit, Sdreport = fit$parameter_estimates$SD, TmbData = fit$data_list, spatial_list = fit$spatial_list, DirName = fig, category_names = c("Spawners", "Juveniles"), cex = 0.5)
VASTPlotUtils::plot_maps(plot_set = c(5), fit = fit, Sdreport = fit$parameter_estimates$SD, TmbData = fit$data_list, spatial_list = fit$spatial_list, DirName = fig, category_names = c("Spawners", "Juveniles"), cex = 0.5)
VASTPlotUtils::plot_maps(plot_set = c(3), fit = fit, Sdreport = fit$parameter_estimates$SD, TmbData = fit$data_list, spatial_list = fit$spatial_list, DirName = fig, category_names = c("Spawners", "Juveniles"), cex = 0.75, Panel = "Year", Zlim = c(min(dens),max(dens)))
## plot effective area occupied and center of gravity
VASTPlotUtils::plot_range_index(Report = fit$Report, TmbData = fit$data_list, Sdreport = fit$parameter_estimates$SD, Znames = colnames(fit$data_list$Z_xm), PlotDir = fig, Year_Set = fit$year_labels, use_biascorr = TRUE, category_names = c("Spawners", "Juveniles"))
VASTPlotUtils::plot_biomass_index(fit = fit, Sdreport = fit$parameter_estimates$SD, DirName = fig, category_names = c("Spawners", "Juveniles"), add = spawn_info, Plot_suffix = "Count", interval_width = 1.96)
# DHARMa-style residual diagnostics
dharmaRes = summary( fit, what="residuals")
png(file.path(fig, "DHARMa_res.png"), height = 600, width = 900)
plot(dharmaRes, quantreg = TRUE)
dev.off()
# Various potential plots
png(file.path(fig, "QQplot.png"), height = 600, width = 600)
plotQQunif(dharmaRes)
dev.off()
Plots = plot(fit,
working_dir=paste0(path,"/figures/"),
land_color=rgb(0,0,0,0),
quantreg=TRUE )
# Combined QQ + dispersion diagnostic panel
png(file.path(fig, "Diagnostic_figure.png"), height = 600, width = 1000)
par(mfrow = c(1,2))
plotQQunif(dharmaRes)
testDispersion(dharmaRes)
dev.off()
###########
## Model "Factor, dist 11": same factor structure as above but with
## positive-catch distribution ObsModel["PosDist"] = 11; no parameters
## are fixed via Map in this run.
###########
path <- file.path(sil_dir, "multivar_landcover_dist11")
# unlink(path, TRUE)
dir.create(path, showWarnings = FALSE)
setwd(path)
fig <- file.path(path, "figures")
dir.create(fig, showWarnings=FALSE)
#
ignore <- file.copy(from = file.path(sil_dir, "VAST_v8_2_0.cpp"), to = path)
ignore <- file.copy(from = file.path(sil_dir, "VAST_v8_2_0.dll"), to = path)
ignore <- file.copy(from = file.path(sil_dir, "VAST_v8_2_0.o"), to = path)
Data <- Data_count
## turn on spatial and spatiotemporal effects
## two factors -- one for each category (spawners and juveniles)
FieldConfig = c("Omega1"=0, "Epsilon1"=0, "Omega2"=2, "Epsilon2"=2)
## random walk structure on temporal intercepts and spatiotemporal random effect
## not much information for juveniles, model needs a little more structure to converge
RhoConfig = c("Beta1"=3, "Beta2"=1, "Epsilon1"=0, "Epsilon2"=0)
ObsModel = c("PosDist"=11, "Link"=0)
## other options
OverdispersionConfig = c("Eta1"=0, "Eta2"=0)
Options = c("Calculate_Range"=1,
"Calculate_effective_area"=1)
## wrapper function to set up common settings
settings <- make_settings( Version = "VAST_v8_2_0",
n_x = nrow(Network_sz),
Region = "Stream_network",
FieldConfig=FieldConfig,
RhoConfig=RhoConfig,
OverdispersionConfig=OverdispersionConfig,
Options=Options,
ObsModel=ObsModel,
purpose = "index2",
fine_scale=FALSE,
bias.correct=FALSE)
settings$Method <- "Stream_network"
settings$grid_size_km <- 1
# compile model and check parameters (run_model = FALSE: build the TMB object only)
fit0 = fit_model( "settings"=settings,
"Lat_i"=Data[,"Lat"],
"Lon_i"=Data[,"Lon"],
"t_i"=Data[,'Year'],
"c_i"=as.numeric(Data[,"CategoryNum"]) - 1,
"b_i"=Data[,'Catch_KG'],
"a_i"=Data[,'AreaSwept_km2'],
working_dir=path,
input_grid=cbind("Lat"=Data[,"Lat"], "Lon"=Data[,"Lon"],"child_i"=Data[,"Knot"],"Area_km2"=Data[,"AreaSwept_km2"]),
Network_sz_LL=Network_sz_LL,
Network_sz = Network_sz,
run_model = FALSE,
X_gtp = X_gtp_all,
X_itp = X_itp_all,
Xconfig_zcp = Xconfig_all2,
# Q_ik = Q_ik,
test_fit = FALSE)
# CompileDir = jim_dir)
Par <- fit0$tmb_list$Parameters
Map <- fit0$tmb_list$Map
# (no Map edits for this run -- all parameters left estimable)
# Map$beta1_ft <- factor(rep(NA, length(Map$beta1_ft)))
# Map$gamma1_ctp <- factor(rep(NA, length(Map$gamma1_ctp)))
# Map$logSigmaM = factor( c(1,NA,2,NA,NA,NA) )
# Map$gamma1_ctp[which(Map$gamma1_ctp == 1)] = NA
# Map$gamma1_ctp[which(Map$gamma1_ctp == 2)] = 1
# Map$gamma1_ctp <- factor(Map$gamma1_ctp)
# first model run (getsd = FALSE) to check identifiability
fit1 = fit_model( "settings"=settings,
"Lat_i"=Data[,"Lat"],
"Lon_i"=Data[,"Lon"],
"t_i"=Data[,'Year'],
"c_i"=as.numeric(Data[,"CategoryNum"]) - 1,
"b_i"=Data[,'Catch_KG'],
"a_i"=Data[,'AreaSwept_km2'],
"v_i"=Data[,'Vessel'],
working_dir = path,
input_grid=cbind("Lat"=Data[,"Lat"], "Lon"=Data[,"Lon"],"child_i"=Data[,"Knot"],"Area_km2"=Data[,"AreaSwept_km2"]),
Network_sz_LL=Network_sz_LL,
Network_sz = Network_sz,
Map = Map,
X_gtp = X_gtp_all,
X_itp = X_itp_all,
Xconfig_zcp = Xconfig_all2,
# Q_ik = Q_ik,
getsd=FALSE,
newtonsteps=0,
test_fit = FALSE)
# CompileDir = jim_dir)
check <- TMBhelper::Check_Identifiable(fit1$tmb_list$Obj)
# Reduced model run with SD report (getsd = TRUE)
fit = fit_model( "settings"=settings,
"Lat_i"=Data[,"Lat"],
"Lon_i"=Data[,"Lon"],
"t_i"=Data[,'Year'],
"c_i"=as.numeric(Data[,"CategoryNum"]) - 1,
"b_i"=Data[,'Catch_KG'],
"a_i"=Data[,'AreaSwept_km2'],
"v_i"=Data[,'Vessel'],
working_dir = path,
input_grid=cbind("Lat"=Data[,"Lat"], "Lon"=Data[,"Lon"],"child_i"=Data[,"Knot"],"Area_km2"=Data[,"AreaSwept_km2"]),
Network_sz_LL=Network_sz_LL,
Network_sz = Network_sz,
Map = Map,
Parameters = Par,
X_gtp = X_gtp_all,
X_itp = X_itp_all,
Xconfig_zcp = Xconfig_all2,
# Q_ik = Q_ik,
getsd=TRUE,
newtonsteps=0,
test_fit = FALSE) #,
# CompileDir = jim_dir)
## save model fit
saveRDS(fit, file.path(path, "Fit.rds"))
## load model fit
fit <- readRDS(file.path(path, "Fit.rds"))
# common log-density color scale for the density maps below
dens <- quantile(log(fit$Report$D_gcy))
VASTPlotUtils::plot_maps(plot_set = c(3), fit = fit, Sdreport = fit$parameter_estimates$SD, TmbData = fit$data_list, spatial_list = fit$spatial_list, DirName = fig, category_names = c("Spawners", "Juveniles"), cex = 0.5, Zlim = c(min(dens),max(dens)))
VASTPlotUtils::plot_maps(plot_set = c(7), fit = fit, Sdreport = fit$parameter_estimates$SD, TmbData = fit$data_list, spatial_list = fit$spatial_list, DirName = fig, category_names = c("Spawners", "Juveniles"), cex = 0.5)
VASTPlotUtils::plot_maps(plot_set = c(5), fit = fit, Sdreport = fit$parameter_estimates$SD, TmbData = fit$data_list, spatial_list = fit$spatial_list, DirName = fig, category_names = c("Spawners", "Juveniles"), cex = 0.5)
VASTPlotUtils::plot_maps(plot_set = c(3), fit = fit, Sdreport = fit$parameter_estimates$SD, TmbData = fit$data_list, spatial_list = fit$spatial_list, DirName = fig, category_names = c("Spawners", "Juveniles"), cex = 0.75, Panel = "Year", Zlim = c(min(dens),max(dens)))
## plot effective area occupied and center of gravity
VASTPlotUtils::plot_range_index(Report = fit$Report, TmbData = fit$data_list, Sdreport = fit$parameter_estimates$SD, Znames = colnames(fit$data_list$Z_xm), PlotDir = fig, Year_Set = fit$year_labels, use_biascorr = TRUE, category_names = c("Spawners", "Juveniles"))
VASTPlotUtils::plot_biomass_index(fit = fit, Sdreport = fit$parameter_estimates$SD, DirName = fig, category_names = c("Spawners", "Juveniles"), add = spawn_info, Plot_suffix = "Count", interval_width = 1.96)
# DHARMa-style residual diagnostics
dharmaRes = summary( fit, what="residuals")
png(file.path(fig, "DHARMa_res.png"), height = 600, width = 900)
plot(dharmaRes, quantreg = TRUE)
dev.off()
# Various potential plots
png(file.path(fig, "QQplot.png"), height = 600, width = 600)
plotQQunif(dharmaRes)
dev.off()
Plots = plot(fit,
working_dir=paste0(path,"/figures/"),
land_color=rgb(0,0,0,0),
quantreg=TRUE )
##################
## Model "Factor, RW Beta2": factor structure with dist 5, but temporal
## intercepts for the second linear predictor follow a random walk
## (RhoConfig["Beta2"] = 2 instead of 1).
##################
path <- file.path(sil_dir, "multivar_landcover_dist5_RW")
# unlink(path, TRUE)
dir.create(path, showWarnings = FALSE)
setwd(path)
fig <- file.path(path, "figures")
dir.create(fig, showWarnings=FALSE)
#
ignore <- file.copy(from = file.path(sil_dir, "VAST_v8_2_0.cpp"), to = path)
ignore <- file.copy(from = file.path(sil_dir, "VAST_v8_2_0.dll"), to = path)
ignore <- file.copy(from = file.path(sil_dir, "VAST_v8_2_0.o"), to = path)
Data <- Data_count
## turn on spatial and spatiotemporal effects
## two factors -- one for each category (spawners and juveniles)
FieldConfig = c("Omega1"=0, "Epsilon1"=0, "Omega2"=2, "Epsilon2"=2)
## random walk structure on temporal intercepts and spatiotemporal random effect
## not much information for juveniles, model needs a little more structure to converge
RhoConfig = c("Beta1"=3, "Beta2"=2, "Epsilon1"=0, "Epsilon2"=0)
ObsModel = c("PosDist"=5, "Link"=0)
## other options
OverdispersionConfig = c("Eta1"=0, "Eta2"=0)
Options = c("Calculate_Range"=1,
"Calculate_effective_area"=1)
## wrapper function to set up common settings
settings <- make_settings( Version = "VAST_v8_2_0",
n_x = nrow(Network_sz),
Region = "Stream_network",
FieldConfig=FieldConfig,
RhoConfig=RhoConfig,
OverdispersionConfig=OverdispersionConfig,
Options=Options,
ObsModel=ObsModel,
purpose = "index2",
fine_scale=FALSE,
bias.correct=FALSE)
settings$Method <- "Stream_network"
settings$grid_size_km <- 1
# compile model and check parameters (run_model = FALSE: build the TMB object only)
fit0 = fit_model( "settings"=settings,
"Lat_i"=Data[,"Lat"],
"Lon_i"=Data[,"Lon"],
"t_i"=Data[,'Year'],
"c_i"=as.numeric(Data[,"CategoryNum"]) - 1,
"b_i"=Data[,'Catch_KG'],
"a_i"=Data[,'AreaSwept_km2'],
working_dir=path,
input_grid=cbind("Lat"=Data[,"Lat"], "Lon"=Data[,"Lon"],"child_i"=Data[,"Knot"],"Area_km2"=Data[,"AreaSwept_km2"]),
Network_sz_LL=Network_sz_LL,
Network_sz = Network_sz,
run_model = FALSE,
X_gtp = X_gtp_all,
X_itp = X_itp_all,
Xconfig_zcp = Xconfig_all2,
# Q_ik = Q_ik,
test_fit = FALSE)
# CompileDir = jim_dir)
Par <- fit0$tmb_list$Parameters
Map <- fit0$tmb_list$Map
# Map$beta1_ft <- factor(rep(NA, length(Map$beta1_ft)))
# Map$gamma1_ctp <- factor(rep(NA, length(Map$gamma1_ctp)))
# fix a subset of logSigmaM; collapse gamma1_ctp levels (drop level 1, relabel 2 -> 1)
Map$logSigmaM = factor( c(1,NA,2,NA,NA,NA) )
Map$gamma1_ctp[which(Map$gamma1_ctp == 1)] = NA
Map$gamma1_ctp[which(Map$gamma1_ctp == 2)] = 1
Map$gamma1_ctp <- factor(Map$gamma1_ctp)
# first model run (getsd = FALSE) to check identifiability
fit1 = fit_model( "settings"=settings,
"Lat_i"=Data[,"Lat"],
"Lon_i"=Data[,"Lon"],
"t_i"=Data[,'Year'],
"c_i"=as.numeric(Data[,"CategoryNum"]) - 1,
"b_i"=Data[,'Catch_KG'],
"a_i"=Data[,'AreaSwept_km2'],
"v_i"=Data[,'Vessel'],
working_dir = path,
input_grid=cbind("Lat"=Data[,"Lat"], "Lon"=Data[,"Lon"],"child_i"=Data[,"Knot"],"Area_km2"=Data[,"AreaSwept_km2"]),
Network_sz_LL=Network_sz_LL,
Network_sz = Network_sz,
Map = Map,
X_gtp = X_gtp_all,
X_itp = X_itp_all,
Xconfig_zcp = Xconfig_all2,
# Q_ik = Q_ik,
getsd=FALSE,
newtonsteps=0,
test_fit = FALSE)
# CompileDir = jim_dir)
check <- TMBhelper::Check_Identifiable(fit1$tmb_list$Obj)
# Reduced model run with SD report and 3 extra Newton steps
fit = fit_model( "settings"=settings,
"Lat_i"=Data[,"Lat"],
"Lon_i"=Data[,"Lon"],
"t_i"=Data[,'Year'],
"c_i"=as.numeric(Data[,"CategoryNum"]) - 1,
"b_i"=Data[,'Catch_KG'],
"a_i"=Data[,'AreaSwept_km2'],
"v_i"=Data[,'Vessel'],
working_dir = path,
input_grid=cbind("Lat"=Data[,"Lat"], "Lon"=Data[,"Lon"],"child_i"=Data[,"Knot"],"Area_km2"=Data[,"AreaSwept_km2"]),
Network_sz_LL=Network_sz_LL,
Network_sz = Network_sz,
Map = Map,
Parameters = Par,
X_gtp = X_gtp_all,
X_itp = X_itp_all,
Xconfig_zcp = Xconfig_all2,
# Q_ik = Q_ik,
getsd=TRUE,
newtonsteps=3,
test_fit = FALSE) #,
# CompileDir = jim_dir)
## save model fit
saveRDS(fit, file.path(path, "Fit.rds"))
## load model fit
fit <- readRDS(file.path(path, "Fit.rds"))
# common log-density color scale for the density maps below
dens <- quantile(log(fit$Report$D_gcy))
VASTPlotUtils::plot_maps(plot_set = c(3), fit = fit, Sdreport = fit$parameter_estimates$SD, TmbData = fit$data_list, spatial_list = fit$spatial_list, DirName = fig, category_names = c("Spawners", "Juveniles"), cex = 0.5, Zlim = c(min(dens),max(dens)))
VASTPlotUtils::plot_maps(plot_set = c(7), fit = fit, Sdreport = fit$parameter_estimates$SD, TmbData = fit$data_list, spatial_list = fit$spatial_list, DirName = fig, category_names = c("Spawners", "Juveniles"), cex = 0.5)
VASTPlotUtils::plot_maps(plot_set = c(5), fit = fit, Sdreport = fit$parameter_estimates$SD, TmbData = fit$data_list, spatial_list = fit$spatial_list, DirName = fig, category_names = c("Spawners", "Juveniles"), cex = 0.5)
VASTPlotUtils::plot_maps(plot_set = c(3), fit = fit, Sdreport = fit$parameter_estimates$SD, TmbData = fit$data_list, spatial_list = fit$spatial_list, DirName = fig, category_names = c("Spawners", "Juveniles"), cex = 0.75, Panel = "Year", Zlim = c(min(dens),max(dens)))
## plot effective area occupied and center of gravity
VASTPlotUtils::plot_range_index(Report = fit$Report, TmbData = fit$data_list, Sdreport = fit$parameter_estimates$SD, Znames = colnames(fit$data_list$Z_xm), PlotDir = fig, Year_Set = fit$year_labels, use_biascorr = TRUE, category_names = c("Spawners", "Juveniles"))
VASTPlotUtils::plot_biomass_index(fit = fit, Sdreport = fit$parameter_estimates$SD, DirName = fig, category_names = c("Spawners", "Juveniles"), add = spawn_info, Plot_suffix = "Count", interval_width = 1.96)
# DHARMa-style residual diagnostics
dharmaRes = summary( fit, what="residuals")
png(file.path(fig, "DHARMa_res.png"), height = 600, width = 900)
plot(dharmaRes, quantreg = TRUE)
dev.off()
# Various potential plots
png(file.path(fig, "QQplot.png"), height = 600, width = 600)
plotQQunif(dharmaRes)
dev.off()
Plots = plot(fit,
working_dir=paste0(path,"/figures/"),
land_color=rgb(0,0,0,0),
quantreg=TRUE )
##################
## Model "Factor, RW Epsilon": factor structure with dist 5 and a random
## walk on the spatiotemporal field (RhoConfig["Epsilon2"] = 2).
## (The final fit1 call for this section continues past this point.)
##################
path <- file.path(sil_dir, "multivar_landcover_dist5_RWEps")
# unlink(path, TRUE)
dir.create(path, showWarnings = FALSE)
setwd(path)
fig <- file.path(path, "figures")
dir.create(fig, showWarnings=FALSE)
#
ignore <- file.copy(from = file.path(sil_dir, "VAST_v8_2_0.cpp"), to = path)
ignore <- file.copy(from = file.path(sil_dir, "VAST_v8_2_0.dll"), to = path)
ignore <- file.copy(from = file.path(sil_dir, "VAST_v8_2_0.o"), to = path)
Data <- Data_count
## turn on spatial and spatiotemporal effects
## two factors -- one for each category (spawners and juveniles)
FieldConfig = c("Omega1"=0, "Epsilon1"=0, "Omega2"=2, "Epsilon2"=2)
## random walk structure on temporal intercepts and spatiotemporal random effect
## not much information for juveniles, model needs a little more structure to converge
RhoConfig = c("Beta1"=3, "Beta2"=1, "Epsilon1"=0, "Epsilon2"=2)
ObsModel = c("PosDist"=5, "Link"=0)
## other options
OverdispersionConfig = c("Eta1"=0, "Eta2"=0)
Options = c("Calculate_Range"=1,
"Calculate_effective_area"=1)
## wrapper function to set up common settings
settings <- make_settings( Version = "VAST_v8_2_0",
n_x = nrow(Network_sz),
Region = "Stream_network",
FieldConfig=FieldConfig,
RhoConfig=RhoConfig,
OverdispersionConfig=OverdispersionConfig,
Options=Options,
ObsModel=ObsModel,
purpose = "index2",
fine_scale=FALSE,
bias.correct=FALSE)
settings$Method <- "Stream_network"
settings$grid_size_km <- 1
# compile model and check parameters (run_model = FALSE: build the TMB object only)
fit0 = fit_model( "settings"=settings,
"Lat_i"=Data[,"Lat"],
"Lon_i"=Data[,"Lon"],
"t_i"=Data[,'Year'],
"c_i"=as.numeric(Data[,"CategoryNum"]) - 1,
"b_i"=Data[,'Catch_KG'],
"a_i"=Data[,'AreaSwept_km2'],
working_dir=path,
input_grid=cbind("Lat"=Data[,"Lat"], "Lon"=Data[,"Lon"],"child_i"=Data[,"Knot"],"Area_km2"=Data[,"AreaSwept_km2"]),
Network_sz_LL=Network_sz_LL,
Network_sz = Network_sz,
run_model = FALSE,
X_gtp = X_gtp_all,
X_itp = X_itp_all,
Xconfig_zcp = Xconfig_all2,
# Q_ik = Q_ik,
test_fit = FALSE)
# CompileDir = jim_dir)
Par <- fit0$tmb_list$Parameters
Map <- fit0$tmb_list$Map
# Map$beta1_ft <- factor(rep(NA, length(Map$beta1_ft)))
# Map$gamma1_ctp <- factor(rep(NA, length(Map$gamma1_ctp)))
# fix a subset of logSigmaM; collapse gamma1_ctp levels (drop level 1, relabel 2 -> 1)
Map$logSigmaM = factor( c(1,NA,2,NA,NA,NA) )
Map$gamma1_ctp[which(Map$gamma1_ctp == 1)] = NA
Map$gamma1_ctp[which(Map$gamma1_ctp == 2)] = 1
Map$gamma1_ctp <- factor(Map$gamma1_ctp)
# first model run
fit1 = fit_model( "settings"=settings,
"Lat_i"=Data[,"Lat"],
"Lon_i"=Data[,"Lon"],
"t_i"=Data[,'Year'],
"c_i"=as.numeric(Data[,"CategoryNum"]) - 1,
"b_i"=Data[,'Catch_KG'],
"a_i"=Data[,'AreaSwept_km2'],
"v_i"=Data[,'Vessel'],
working_dir = path,
input_grid=cbind("Lat"=Data[,"Lat"], "Lon"=Data[,"Lon"],"child_i"=Data[,"Knot"],"Area_km2"=Data[,"AreaSwept_km2"]),
Network_sz_LL=Network_sz_LL,
Network_sz = Network_sz,
Map = Map,
X_gtp = X_gtp_all,
X_itp = X_itp_all,
Xconfig_zcp = Xconfig_all2,
# Q_ik = Q_ik,
getsd=FALSE,
newtonsteps=0,
test_fit = FALSE)
# CompileDir = jim_dir)
check <- TMBhelper::Check_Identifiable(fit1$tmb_list$Obj)
# Reduced model run
fit = fit_model( "settings"=settings,
"Lat_i"=Data[,"Lat"],
"Lon_i"=Data[,"Lon"],
"t_i"=Data[,'Year'],
"c_i"=as.numeric(Data[,"CategoryNum"]) - 1,
"b_i"=Data[,'Catch_KG'],
"a_i"=Data[,'AreaSwept_km2'],
"v_i"=Data[,'Vessel'],
working_dir = path,
input_grid=cbind("Lat"=Data[,"Lat"], "Lon"=Data[,"Lon"],"child_i"=Data[,"Knot"],"Area_km2"=Data[,"AreaSwept_km2"]),
Network_sz_LL=Network_sz_LL,
Network_sz = Network_sz,
Map = Map,
Parameters = Par,
X_gtp = X_gtp_all,
X_itp = X_itp_all,
Xconfig_zcp = Xconfig_all2,
# Q_ik = Q_ik,
getsd=TRUE,
newtonsteps=3,
test_fit = FALSE) #,
# CompileDir = jim_dir)
## save model fit
saveRDS(fit, file.path(path, "Fit.rds"))
## load model fit
fit <- readRDS(file.path(path, "Fit.rds"))
## Convenience handles for the plotting calls below.
Report <- fit$Report
Sdreport <- fit$parameter_estimates$SD
TmbData <- fit$data_list
# BUG FIX: the original also ran `Data <- fit$data_list` here, clobbering the
# observation data frame (set from Data_count at the top of this section) with
# the TMB input list; the residual map below then called dplyr::mutate() on a
# plain list and failed.  `TmbData` already aliases fit$data_list, so the
# redundant assignment is dropped and `Data` keeps the observation rows.
ParHat <- fit$ParHat
## Shared colour-scale limits (log density) for the density maps.
dens <- quantile(log(fit$Report$D_gcy))
VASTPlotUtils::plot_maps(plot_set = c(3), fit = fit, Sdreport = fit$parameter_estimates$SD, TmbData = fit$data_list, spatial_list = fit$spatial_list, DirName = fig, category_names = c("Spawners", "Juveniles"), cex = 0.5, Zlim = c(min(dens),max(dens)))
VASTPlotUtils::plot_maps(plot_set = c(7), fit = fit, Sdreport = fit$parameter_estimates$SD, TmbData = fit$data_list, spatial_list = fit$spatial_list, DirName = fig, category_names = c("Spawners", "Juveniles"), cex = 0.5)
VASTPlotUtils::plot_maps(plot_set = c(5), fit = fit, Sdreport = fit$parameter_estimates$SD, TmbData = fit$data_list, spatial_list = fit$spatial_list, DirName = fig, category_names = c("Spawners", "Juveniles"), cex = 0.5)
VASTPlotUtils::plot_maps(plot_set = c(14), fit = fit, Sdreport = fit$parameter_estimates$SD, TmbData = fit$data_list, spatial_list = fit$spatial_list, DirName = fig, category_names = c("Spawners", "Juveniles"), cex = 0.5, Zlim = c(min(dens),max(dens)))
VASTPlotUtils::plot_maps(plot_set = c(3), fit = fit, Sdreport = fit$parameter_estimates$SD, TmbData = fit$data_list, spatial_list = fit$spatial_list, DirName = fig, category_names = c("Spawners", "Juveniles"), cex = 0.75, Panel = "Year", Zlim = c(min(dens),max(dens)))
## plot effective area occupied and center of gravity
VASTPlotUtils::plot_range_index(Report = fit$Report, TmbData = fit$data_list, Sdreport = fit$parameter_estimates$SD, Znames = colnames(fit$data_list$Z_xm), PlotDir = fig, Year_Set = fit$year_labels, use_biascorr = TRUE, category_names = c("Spawners", "Juveniles"))
VASTPlotUtils::plot_biomass_index(fit = fit, Sdreport = fit$parameter_estimates$SD, DirName = fig, category_names = c("Spawners", "Juveniles"), add = spawn_info, Plot_suffix = "Count", interval_width = 1.96)
## DHARMa scaled residuals, mapped onto observation locations.
dharmaRes = summary( fit, what="residuals")
Res <- Data %>% mutate(Residuals = dharmaRes$scaledResiduals)
p <- ggplot(Res) +
  geom_point(data = Network_sz_LL, aes(x = Lon, y = Lat), color = "gray", alpha = 0.5, cex = 0.5) +
  geom_point(aes(x = Lon, y = Lat, fill = Residuals, shape = Category), cex = 3, alpha = 0.8) +
  scale_fill_distiller(palette = "Spectral") +
  scale_shape_manual(values = c(24,21)) +
  xlab("Longitude") + ylab("Latitude") +
  facet_wrap(~Year) +
  theme_bw(base_size = 14)
ggsave(file.path(fig, "Scaled_residuals_on_map.png"), p, height = 10, width = 12)
png(file.path(fig, "DHARMa_res.png"), height = 600, width = 900)
plot(dharmaRes, quantreg = TRUE)
dev.off()
# Various potential plots
png(file.path(fig, "QQplot.png"), height = 600, width = 600)
plotQQunif(dharmaRes)
dev.off()
hist(dharmaRes)
png(file.path(fig, "Hist.png"), height = 600, width = 600)
testDispersion(dharmaRes)
dev.off()
png(file.path(fig, "Diagnostic_figure.png"), height = 600, width = 1000)
par(mfrow = c(1,2))
plotQQunif(dharmaRes)
testDispersion(dharmaRes)
dev.off()
## Default VAST plot suite.
Plots = plot(fit,
             working_dir=paste0(path,"/figures/"),
             land_color=rgb(0,0,0,0),
             quantreg=TRUE )
##################
## Model run "multivar_dist5_RWEps": same multivariate RW-epsilon model as the
## previous section but WITHOUT land-cover covariates (X_* arguments are
## commented out in the fit_model() calls).
path <- file.path(sil_dir, "multivar_dist5_RWEps")
# unlink(path, TRUE)
dir.create(path, showWarnings = FALSE)
setwd(path)
fig <- file.path(path, "figures")
dir.create(fig, showWarnings=FALSE)
#
## Reuse the pre-compiled TMB template (no recompilation).
ignore <- file.copy(from = file.path(sil_dir, "VAST_v8_2_0.cpp"), to = path)
ignore <- file.copy(from = file.path(sil_dir, "VAST_v8_2_0.dll"), to = path)
ignore <- file.copy(from = file.path(sil_dir, "VAST_v8_2_0.o"), to = path)
Data <- Data_count
## turn on spatial and spatiotemporal effects
## two factors -- one for each category (spawners and juveniles)
FieldConfig = c("Omega1"=0, "Epsilon1"=0, "Omega2"=2, "Epsilon2"=2)
## random walk structure on temporal intercepts and spatiotemporal random effect
## not much information for juveniles, model needs a little more structure to converge
RhoConfig = c("Beta1"=3, "Beta2"=1, "Epsilon1"=0, "Epsilon2"=2)
ObsModel = c("PosDist"=5, "Link"=0)
## other options
OverdispersionConfig = c("Eta1"=0, "Eta2"=0)
Options = c("Calculate_Range"=1,
            "Calculate_effective_area"=1)
## wrapper function to set up common settings
settings <- make_settings( Version = "VAST_v8_2_0",
                           n_x = nrow(Network_sz),
                           Region = "Stream_network",
                           FieldConfig=FieldConfig,
                           RhoConfig=RhoConfig,
                           OverdispersionConfig=OverdispersionConfig,
                           Options=Options,
                           ObsModel=ObsModel,
                           purpose = "index2",
                           fine_scale=FALSE,
                           bias.correct=FALSE)
## Stream-network overrides applied after make_settings().
settings$Method <- "Stream_network"
settings$grid_size_km <- 1
# compile model and check parameters
## run_model = FALSE: build the TMB object only, to extract Parameters/Map.
fit0 = fit_model( "settings"=settings,
                  "Lat_i"=Data[,"Lat"],
                  "Lon_i"=Data[,"Lon"],
                  "t_i"=Data[,'Year'],
                  "c_i"=as.numeric(Data[,"CategoryNum"]) - 1,
                  "b_i"=Data[,'Catch_KG'],
                  "a_i"=Data[,'AreaSwept_km2'],
                  working_dir=path,
                  input_grid=cbind("Lat"=Data[,"Lat"], "Lon"=Data[,"Lon"],"child_i"=Data[,"Knot"],"Area_km2"=Data[,"AreaSwept_km2"]),
                  Network_sz_LL=Network_sz_LL,
                  Network_sz = Network_sz,
                  run_model = FALSE,
                  # X_gtp = X_gtp_all,
                  # X_itp = X_itp_all,
                  # Xconfig_zcp = Xconfig_all2,
                  # Q_ik = Q_ik,
                  test_fit = FALSE)
                  # CompileDir = jim_dir)
## Extract the default parameter list and TMB map from the compiled object.
Par <- fit0$tmb_list$Parameters
Map <- fit0$tmb_list$Map
# Map$beta1_ft <- factor(rep(NA, length(Map$beta1_ft)))
# Map$gamma1_ctp <- factor(rep(NA, length(Map$gamma1_ctp)))
## Hold every logSigmaM entry fixed (NA in a TMB map = not estimated).
## BUG FIX: the original line was missing the closing parenthesis of factor(),
## which made the script un-parseable from this point onward.
Map$logSigmaM <- factor(rep(NA, length(Map$logSigmaM))) #factor( c(1,NA,2,NA,NA,NA) )
# Map$gamma1_ctp[which(Map$gamma1_ctp == 1)] = NA
# Map$gamma1_ctp[which(Map$gamma1_ctp == 2)] = 1
# Map$gamma1_ctp <- factor(Map$gamma1_ctp)
# first model run
## Cheap pass (no SDs, no Newton steps) used only to screen identifiability.
fit1 = fit_model( "settings"=settings,
                  "Lat_i"=Data[,"Lat"],
                  "Lon_i"=Data[,"Lon"],
                  "t_i"=Data[,'Year'],
                  "c_i"=as.numeric(Data[,"CategoryNum"]) - 1,
                  "b_i"=Data[,'Catch_KG'],
                  "a_i"=Data[,'AreaSwept_km2'],
                  "v_i"=Data[,'Vessel'],
                  working_dir = path,
                  input_grid=cbind("Lat"=Data[,"Lat"], "Lon"=Data[,"Lon"],"child_i"=Data[,"Knot"],"Area_km2"=Data[,"AreaSwept_km2"]),
                  Network_sz_LL=Network_sz_LL,
                  Network_sz = Network_sz,
                  Map = Map,
                  # X_gtp = X_gtp_all,
                  # X_itp = X_itp_all,
                  # Xconfig_zcp = Xconfig_all2,
                  # # Q_ik = Q_ik,
                  getsd=FALSE,
                  newtonsteps=0,
                  test_fit = FALSE)
                  # CompileDir = jim_dir)
check <- TMBhelper::Check_Identifiable(fit1$tmb_list$Obj)
# Reduced model run
## Final estimation with standard errors and 3 Newton steps.
fit = fit_model( "settings"=settings,
                 "Lat_i"=Data[,"Lat"],
                 "Lon_i"=Data[,"Lon"],
                 "t_i"=Data[,'Year'],
                 "c_i"=as.numeric(Data[,"CategoryNum"]) - 1,
                 "b_i"=Data[,'Catch_KG'],
                 "a_i"=Data[,'AreaSwept_km2'],
                 "v_i"=Data[,'Vessel'],
                 working_dir = path,
                 input_grid=cbind("Lat"=Data[,"Lat"], "Lon"=Data[,"Lon"],"child_i"=Data[,"Knot"],"Area_km2"=Data[,"AreaSwept_km2"]),
                 Network_sz_LL=Network_sz_LL,
                 Network_sz = Network_sz,
                 Map = Map,
                 Parameters = Par,
                 # X_gtp = X_gtp_all,
                 # X_itp = X_itp_all,
                 # Xconfig_zcp = Xconfig_all2,
                 # Q_ik = Q_ik,
                 getsd=TRUE,
                 newtonsteps=3,
                 test_fit = FALSE) #,
                 # CompileDir = jim_dir)
## save model fit
saveRDS(fit, file.path(path, "Fit.rds"))
## load model fit
fit <- readRDS(file.path(path, "Fit.rds"))
## Convenience handles for plotting.
Report <- fit$Report
Sdreport <- fit$parameter_estimates$SD
TmbData <- fit$data_list
# NOTE(review): this overwrites the observation data frame `Data` with the TMB
# data list; it is not used again in this section, but it would break any
# later data-frame use of `Data` — confirm intent.
Data <- fit$data_list
ParHat <- fit$ParHat
## Shared colour-scale limits (log density) for the density maps.
dens <- quantile(log(fit$Report$D_gcy))
VASTPlotUtils::plot_maps(plot_set = c(3), fit = fit, Sdreport = fit$parameter_estimates$SD, TmbData = fit$data_list, spatial_list = fit$spatial_list, DirName = fig, category_names = c("Spawners", "Juveniles"), cex = 0.5, Zlim = c(min(dens),max(dens)))
VASTPlotUtils::plot_maps(plot_set = c(7), fit = fit, Sdreport = fit$parameter_estimates$SD, TmbData = fit$data_list, spatial_list = fit$spatial_list, DirName = fig, category_names = c("Spawners", "Juveniles"), cex = 0.5)
VASTPlotUtils::plot_maps(plot_set = c(5), fit = fit, Sdreport = fit$parameter_estimates$SD, TmbData = fit$data_list, spatial_list = fit$spatial_list, DirName = fig, category_names = c("Spawners", "Juveniles"), cex = 0.5)
VASTPlotUtils::plot_maps(plot_set = c(14), fit = fit, Sdreport = fit$parameter_estimates$SD, TmbData = fit$data_list, spatial_list = fit$spatial_list, DirName = fig, category_names = c("Spawners", "Juveniles"), cex = 0.5, Zlim = c(min(dens),max(dens)))
VASTPlotUtils::plot_maps(plot_set = c(3), fit = fit, Sdreport = fit$parameter_estimates$SD, TmbData = fit$data_list, spatial_list = fit$spatial_list, DirName = fig, category_names = c("Spawners", "Juveniles"), cex = 0.75, Panel = "Year", Zlim = c(min(dens),max(dens)))
## plot effective area occupied and center of gravity
VASTPlotUtils::plot_range_index(Report = fit$Report, TmbData = fit$data_list, Sdreport = fit$parameter_estimates$SD, Znames = colnames(fit$data_list$Z_xm), PlotDir = fig, Year_Set = fit$year_labels, use_biascorr = TRUE, category_names = c("Spawners", "Juveniles"))
VASTPlotUtils::plot_biomass_index(fit = fit, Sdreport = fit$parameter_estimates$SD, DirName = fig, category_names = c("Spawners", "Juveniles"), add = spawn_info, Plot_suffix = "Count", interval_width = 1.96)
## DHARMa residual diagnostics.
dharmaRes = summary( fit, what="residuals")
png(file.path(fig, "DHARMa_res.png"), height = 600, width = 900)
plot(dharmaRes, quantreg = TRUE)
dev.off()
# Various potential plots
png(file.path(fig, "QQplot.png"), height = 600, width = 600)
plotQQunif(dharmaRes)
dev.off()
hist(dharmaRes)
png(file.path(fig, "Hist.png"), height = 600, width = 600)
testDispersion(dharmaRes)
dev.off()
png(file.path(fig, "Diagnostic_figure.png"), height = 600, width = 1000)
par(mfrow = c(1,2))
plotQQunif(dharmaRes)
testDispersion(dharmaRes)
dev.off()
## Default VAST plot suite.
Plots = plot(fit,
             working_dir=paste0(path,"/figures/"),
             land_color=rgb(0,0,0,0),
             quantreg=TRUE )
##################
## Model run "juveniles_landcover_dist5_RWEps": juveniles only (single
## category, c_i = 0 for all rows), land-cover covariates (X_*_juv), ObsModel
## PosDist = 5, RW spatiotemporal structure.
path <- file.path(sil_dir, "juveniles_landcover_dist5_RWEps")
# unlink(path, TRUE)
dir.create(path, showWarnings = FALSE)
setwd(path)
fig <- file.path(path, "figures")
dir.create(fig, showWarnings=FALSE)
#
## Reuse the pre-compiled TMB template (no recompilation).
ignore <- file.copy(from = file.path(sil_dir, "VAST_v8_2_0.cpp"), to = path)
ignore <- file.copy(from = file.path(sil_dir, "VAST_v8_2_0.dll"), to = path)
ignore <- file.copy(from = file.path(sil_dir, "VAST_v8_2_0.o"), to = path)
Data <- Data_count_juv
## turn on spatial and spatiotemporal effects
## two factors -- one for each category (spawners and juveniles)
FieldConfig = c("Omega1"=0, "Epsilon1"=0, "Omega2"=1, "Epsilon2"=1)
## random walk structure on temporal intercepts and spatiotemporal random effect
## not much information for juveniles, model needs a little more structure to converge
RhoConfig = c("Beta1"=3, "Beta2"=1, "Epsilon1"=0, "Epsilon2"=2)
ObsModel = c("PosDist"=5, "Link"=0)
## other options
OverdispersionConfig = c("Eta1"=0, "Eta2"=0)
Options = c("Calculate_Range"=1,
            "Calculate_effective_area"=1)
## wrapper function to set up common settings
settings <- make_settings( Version = "VAST_v8_2_0",
                           n_x = nrow(Network_sz),
                           Region = "Stream_network",
                           FieldConfig=FieldConfig,
                           RhoConfig=RhoConfig,
                           OverdispersionConfig=OverdispersionConfig,
                           Options=Options,
                           ObsModel=ObsModel,
                           purpose = "index2",
                           fine_scale=FALSE,
                           bias.correct=FALSE)
## Stream-network overrides applied after make_settings().
settings$Method <- "Stream_network"
settings$grid_size_km <- 1
# compile model and check parameters
## run_model = FALSE: build the TMB object only, to extract Parameters/Map.
fit0 = fit_model( "settings"=settings,
                  "Lat_i"=Data[,"Lat"],
                  "Lon_i"=Data[,"Lon"],
                  "t_i"=Data[,'Year'],
                  "c_i"=rep(0,nrow(Data)),
                  "b_i"=Data[,'Catch_KG'],
                  "a_i"=Data[,'AreaSwept_km2'],
                  working_dir=path,
                  input_grid=cbind("Lat"=Data[,"Lat"], "Lon"=Data[,"Lon"],"child_i"=Data[,"Knot"],"Area_km2"=Data[,"AreaSwept_km2"]),
                  Network_sz_LL=Network_sz_LL,
                  Network_sz = Network_sz,
                  run_model = FALSE,
                  X_gtp = X_gtp_juv,
                  X_itp = X_itp_juv,
                  Xconfig_zcp = Xconfig_juv2,
                  # Q_ik = Q_ik,
                  test_fit = FALSE)
                  # CompileDir = jim_dir)
Par <- fit0$tmb_list$Parameters
Map <- fit0$tmb_list$Map
# Map$beta1_ft <- factor(rep(NA, length(Map$beta1_ft)))
# Map$gamma1_ctp <- factor(rep(NA, length(Map$gamma1_ctp)))
# Map$logSigmaM = factor( c(1,NA,2,NA,NA,NA) )
## Estimate only the first logSigmaM entry; NA entries stay fixed.
Map$logSigmaM <- factor(c(1,NA,NA))
# Map$gamma1_ctp[which(Map$gamma1_ctp == 1)] = NA
# Map$gamma1_ctp[which(Map$gamma1_ctp == 2)] = 1
# Map$gamma1_ctp <- factor(Map$gamma1_ctp)
# first model run
## Cheap pass (no SDs, no Newton steps) used only to screen identifiability.
fit1 = fit_model( "settings"=settings,
                  "Lat_i"=Data[,"Lat"],
                  "Lon_i"=Data[,"Lon"],
                  "t_i"=Data[,'Year'],
                  "c_i"=rep(0,nrow(Data)),
                  "b_i"=Data[,'Catch_KG'],
                  "a_i"=Data[,'AreaSwept_km2'],
                  "v_i"=Data[,'Vessel'],
                  working_dir = path,
                  input_grid=cbind("Lat"=Data[,"Lat"], "Lon"=Data[,"Lon"],"child_i"=Data[,"Knot"],"Area_km2"=Data[,"AreaSwept_km2"]),
                  Network_sz_LL=Network_sz_LL,
                  Network_sz = Network_sz,
                  Map = Map,
                  X_gtp = X_gtp_juv,
                  X_itp = X_itp_juv,
                  Xconfig_zcp = Xconfig_juv2,
                  # Q_ik = Q_ik,
                  getsd=FALSE,
                  newtonsteps=0,
                  test_fit = FALSE)
                  # CompileDir = jim_dir)
check <- TMBhelper::Check_Identifiable(fit1$tmb_list$Obj)
# Reduced model run
## Final estimation with standard errors and 3 Newton steps.
fit = fit_model( "settings"=settings,
                 "Lat_i"=Data[,"Lat"],
                 "Lon_i"=Data[,"Lon"],
                 "t_i"=Data[,'Year'],
                 "c_i"=rep(0,nrow(Data)),
                 "b_i"=Data[,'Catch_KG'],
                 "a_i"=Data[,'AreaSwept_km2'],
                 "v_i"=Data[,'Vessel'],
                 working_dir = path,
                 input_grid=cbind("Lat"=Data[,"Lat"], "Lon"=Data[,"Lon"],"child_i"=Data[,"Knot"],"Area_km2"=Data[,"AreaSwept_km2"]),
                 Network_sz_LL=Network_sz_LL,
                 Network_sz = Network_sz,
                 Map = Map,
                 Parameters = Par,
                 X_gtp = X_gtp_juv,
                 X_itp = X_itp_juv,
                 Xconfig_zcp = Xconfig_juv2,
                 # Q_ik = Q_ik,
                 getsd=TRUE,
                 newtonsteps=3,
                 test_fit = FALSE) #,
                 # CompileDir = jim_dir)
## save model fit
saveRDS(fit, file.path(path, "Fit.rds"))
## load model fit
fit <- readRDS(file.path(path, "Fit.rds"))
## Shared colour-scale limits (log density) for the density maps.
dens <- quantile(log(fit$Report$D_gcy))
VASTPlotUtils::plot_maps(plot_set = c(3), fit = fit, Sdreport = fit$parameter_estimates$SD, TmbData = fit$data_list, spatial_list = fit$spatial_list, DirName = fig, category_names = c("Juveniles"), cex = 0.5, Zlim = c(min(dens),max(dens)))
VASTPlotUtils::plot_maps(plot_set = c(7), fit = fit, Sdreport = fit$parameter_estimates$SD, TmbData = fit$data_list, spatial_list = fit$spatial_list, DirName = fig, category_names = c("Juveniles"), cex = 0.5)
VASTPlotUtils::plot_maps(plot_set = c(5), fit = fit, Sdreport = fit$parameter_estimates$SD, TmbData = fit$data_list, spatial_list = fit$spatial_list, DirName = fig, category_names = c("Juveniles"), cex = 0.5)
VASTPlotUtils::plot_maps(plot_set = c(3), fit = fit, Sdreport = fit$parameter_estimates$SD, TmbData = fit$data_list, spatial_list = fit$spatial_list, DirName = fig, category_names = c("Juveniles"), cex = 0.75, Panel = "Year", Zlim = c(min(dens),max(dens)))
## plot effective area occupied and center of gravity
VASTPlotUtils::plot_range_index(Report = fit$Report, TmbData = fit$data_list, Sdreport = fit$parameter_estimates$SD, Znames = colnames(fit$data_list$Z_xm), PlotDir = fig, Year_Set = fit$year_labels, use_biascorr = TRUE, category_names = c("Juveniles"))
VASTPlotUtils::plot_biomass_index(fit = fit, Sdreport = fit$parameter_estimates$SD, DirName = fig, category_names = c("Juveniles"), add = spawn_info, Plot_suffix = "Count", interval_width = 1.96)
## DHARMa scaled residuals mapped onto observation locations (Data is still
## the juvenile observation data frame here).
dharmaRes = summary( fit, what="residuals")
Res <- Data %>% mutate(Residuals = dharmaRes$scaledResiduals)
p <- ggplot(Res) +
  geom_point(data = Network_sz_LL, aes(x = Lon, y = Lat), color = "gray", alpha = 0.5, cex = 0.5) +
  geom_point(aes(x = Lon, y = Lat, fill = Residuals), cex = 3, pch = 24, alpha = 0.8) +
  scale_fill_distiller(palette = "Spectral") +
  # scale_shape_manual(values = c(24,21)) +
  xlab("Longitude") + ylab("Latitude") +
  facet_wrap(~Year) +
  theme_bw(base_size = 14)
ggsave(file.path(fig, "Scaled_residuals_on_map.png"), p, height = 10, width = 12)
png(file.path(fig, "DHARMa_res.png"), height = 600, width = 900)
plot(dharmaRes, quantreg = TRUE)
dev.off()
# Various potential plots
png(file.path(fig, "QQplot.png"), height = 600, width = 600)
plotQQunif(dharmaRes)
dev.off()
hist(dharmaRes)
png(file.path(fig, "Hist.png"), height = 600, width = 600)
testDispersion(dharmaRes)
dev.off()
png(file.path(fig, "Diagnostic_figure.png"), height = 600, width = 1000)
par(mfrow = c(1,2))
plotQQunif(dharmaRes)
testDispersion(dharmaRes)
dev.off()
## Default VAST plot suite.
Plots = plot(fit,
             working_dir=paste0(path,"/figures/"),
             land_color=rgb(0,0,0,0),
             quantreg=TRUE )
##################
## Model run "multivar_landcover_dist5_v2": multivariate with land-cover
## covariates, ObsModel PosDist = 5, but Epsilon2 autocorrelation off
## (RhoConfig Epsilon2 = 0) and a reduced covariate map (first 10 shared
## gamma1 effects fixed).
path <- file.path(sil_dir, "multivar_landcover_dist5_v2")
# unlink(path, TRUE)
dir.create(path, showWarnings = FALSE)
setwd(path)
fig <- file.path(path, "figures")
dir.create(fig, showWarnings=FALSE)
#
## Reuse the pre-compiled TMB template (no recompilation).
ignore <- file.copy(from = file.path(sil_dir, "VAST_v8_2_0.cpp"), to = path)
ignore <- file.copy(from = file.path(sil_dir, "VAST_v8_2_0.dll"), to = path)
ignore <- file.copy(from = file.path(sil_dir, "VAST_v8_2_0.o"), to = path)
Data <- Data_count
## turn on spatial and spatiotemporal effects
## two factors -- one for each category (spawners and juveniles)
FieldConfig = c("Omega1"=0, "Epsilon1"=0, "Omega2"=2, "Epsilon2"=2)
## random walk structure on temporal intercepts and spatiotemporal random effect
## not much information for juveniles, model needs a little more structure to converge
RhoConfig = c("Beta1"=3, "Beta2"=1, "Epsilon1"=0, "Epsilon2"=0)
ObsModel = c("PosDist"=5, "Link"=0)
## other options
OverdispersionConfig = c("Eta1"=0, "Eta2"=0)
Options = c("Calculate_Range"=1,
            "Calculate_effective_area"=1)
## wrapper function to set up common settings
settings <- make_settings( Version = "VAST_v8_2_0",
                           n_x = nrow(Network_sz),
                           Region = "Stream_network",
                           FieldConfig=FieldConfig,
                           RhoConfig=RhoConfig,
                           OverdispersionConfig=OverdispersionConfig,
                           Options=Options,
                           ObsModel=ObsModel,
                           purpose = "index2",
                           fine_scale=FALSE,
                           bias.correct=FALSE)
## Stream-network overrides applied after make_settings().
settings$Method <- "Stream_network"
settings$grid_size_km <- 1
# compile model and check parameters
## run_model = FALSE: build the TMB object only, to extract Parameters/Map.
fit0 = fit_model( "settings"=settings,
                  "Lat_i"=Data[,"Lat"],
                  "Lon_i"=Data[,"Lon"],
                  "t_i"=Data[,'Year'],
                  "c_i"=as.numeric(Data[,"CategoryNum"]) - 1,
                  "b_i"=Data[,'Catch_KG'],
                  "a_i"=Data[,'AreaSwept_km2'],
                  working_dir=path,
                  input_grid=cbind("Lat"=Data[,"Lat"], "Lon"=Data[,"Lon"],"child_i"=Data[,"Knot"],"Area_km2"=Data[,"AreaSwept_km2"]),
                  Network_sz_LL=Network_sz_LL,
                  Network_sz = Network_sz,
                  run_model = FALSE,
                  X_gtp = X_gtp_all,
                  X_itp = X_itp_all,
                  Xconfig_zcp = Xconfig_all2,
                  # Q_ik = Q_ik,
                  test_fit = FALSE)
                  # CompileDir = jim_dir)
Par <- fit0$tmb_list$Parameters
Map <- fit0$tmb_list$Map
# Map$beta1_ft <- factor(rep(NA, length(Map$beta1_ft)))
# Map$gamma1_ctp <- factor(rep(NA, length(Map$gamma1_ctp)))
## Fix a subset of observation-variance parameters and turn off the first 10
## gamma1 covariate effects in map level 1 (NA = not estimated).
Map$logSigmaM = factor( c(1,NA,2,NA,NA,NA) )
Map$gamma1_ctp[which(Map$gamma1_ctp == 1)[1:10]] = NA
# Map$gamma1_ctp[which(Map$gamma1_ctp == 2)] = 1
Map$gamma1_ctp <- factor(Map$gamma1_ctp)
# first model run
## Cheap pass (no SDs, no Newton steps) used only to screen identifiability.
fit1 = fit_model( "settings"=settings,
                  "Lat_i"=Data[,"Lat"],
                  "Lon_i"=Data[,"Lon"],
                  "t_i"=Data[,'Year'],
                  "c_i"=as.numeric(Data[,"CategoryNum"]) - 1,
                  "b_i"=Data[,'Catch_KG'],
                  "a_i"=Data[,'AreaSwept_km2'],
                  "v_i"=Data[,'Vessel'],
                  working_dir = path,
                  input_grid=cbind("Lat"=Data[,"Lat"], "Lon"=Data[,"Lon"],"child_i"=Data[,"Knot"],"Area_km2"=Data[,"AreaSwept_km2"]),
                  Network_sz_LL=Network_sz_LL,
                  Network_sz = Network_sz,
                  Map = Map,
                  X_gtp = X_gtp_all,
                  X_itp = X_itp_all,
                  Xconfig_zcp = Xconfig_all2,
                  # Q_ik = Q_ik,
                  getsd=FALSE,
                  newtonsteps=0,
                  test_fit = FALSE)
                  # CompileDir = jim_dir)
check <- TMBhelper::Check_Identifiable(fit1$tmb_list$Obj)
# Reduced model run
## Final estimation with SDs, warm-started from the fit1 optimum via
## optimize_args (instead of extra Newton steps).
fit = fit_model( "settings"=settings,
                 "Lat_i"=Data[,"Lat"],
                 "Lon_i"=Data[,"Lon"],
                 "t_i"=Data[,'Year'],
                 "c_i"=as.numeric(Data[,"CategoryNum"]) - 1,
                 "b_i"=Data[,'Catch_KG'],
                 "a_i"=Data[,'AreaSwept_km2'],
                 "v_i"=Data[,'Vessel'],
                 working_dir = path,
                 input_grid=cbind("Lat"=Data[,"Lat"], "Lon"=Data[,"Lon"],"child_i"=Data[,"Knot"],"Area_km2"=Data[,"AreaSwept_km2"]),
                 Network_sz_LL=Network_sz_LL,
                 Network_sz = Network_sz,
                 Map = Map,
                 Parameters = Par,
                 X_gtp = X_gtp_all,
                 X_itp = X_itp_all,
                 Xconfig_zcp = Xconfig_all2,
                 # Q_ik = Q_ik,
                 getsd=TRUE,
                 test_fit = FALSE,
                 optimize_args = list(startpar = fit1$parameter_estimates$par)) #,
                 # CompileDir = jim_dir)
## save model fit
saveRDS(fit, file.path(path, "Fit.rds"))
## load model fit
fit <- readRDS(file.path(path, "Fit.rds"))
## Shared colour-scale limits (log density) for the density maps.
dens <- quantile(log(fit$Report$D_gcy))
VASTPlotUtils::plot_maps(plot_set = c(3), fit = fit, Sdreport = fit$parameter_estimates$SD, TmbData = fit$data_list, spatial_list = fit$spatial_list, DirName = fig, category_names = c("Spawners", "Juveniles"), cex = 0.5, Zlim = c(min(dens),max(dens)))
VASTPlotUtils::plot_maps(plot_set = c(7), fit = fit, Sdreport = fit$parameter_estimates$SD, TmbData = fit$data_list, spatial_list = fit$spatial_list, DirName = fig, category_names = c("Spawners", "Juveniles"), cex = 0.5)
VASTPlotUtils::plot_maps(plot_set = c(5), fit = fit, Sdreport = fit$parameter_estimates$SD, TmbData = fit$data_list, spatial_list = fit$spatial_list, DirName = fig, category_names = c("Spawners", "Juveniles"), cex = 0.5)
VASTPlotUtils::plot_maps(plot_set = c(3), fit = fit, Sdreport = fit$parameter_estimates$SD, TmbData = fit$data_list, spatial_list = fit$spatial_list, DirName = fig, category_names = c("Spawners", "Juveniles"), cex = 0.75, Panel = "Year", Zlim = c(min(dens),max(dens)))
## plot effective area occupied and center of gravity
VASTPlotUtils::plot_range_index(Report = fit$Report, TmbData = fit$data_list, Sdreport = fit$parameter_estimates$SD, Znames = colnames(fit$data_list$Z_xm), PlotDir = fig, Year_Set = fit$year_labels, use_biascorr = TRUE, category_names = c("Spawners", "Juveniles"))
VASTPlotUtils::plot_biomass_index(fit = fit, Sdreport = fit$parameter_estimates$SD, DirName = fig, category_names = c("Spawners", "Juveniles"), add = spawn_info, Plot_suffix = "Count", interval_width = 1.96)
## DHARMa residual diagnostics.
dharmaRes = summary( fit, what="residuals")
png(file.path(fig, "DHARMa_res.png"), height = 600, width = 900)
plot(dharmaRes, quantreg = TRUE)
dev.off()
# Various potential plots
png(file.path(fig, "QQplot.png"), height = 600, width = 600)
plotQQunif(dharmaRes)
dev.off()
## Default VAST plot suite.
Plots = plot(fit,
             working_dir=paste0(path,"/figures/"),
             land_color=rgb(0,0,0,0),
             quantreg=TRUE )
##################
## Model run "multivar_landcover_dist7": same multivariate land-cover model
## but with ObsModel PosDist = 7 and the default parameter map (all custom
## Map edits commented out).
path <- file.path(sil_dir, "multivar_landcover_dist7")
# unlink(path, TRUE)
dir.create(path, showWarnings = FALSE)
setwd(path)
fig <- file.path(path, "figures")
dir.create(fig, showWarnings=FALSE)
#
## Reuse the pre-compiled TMB template (no recompilation).
ignore <- file.copy(from = file.path(sil_dir, "VAST_v8_2_0.cpp"), to = path)
ignore <- file.copy(from = file.path(sil_dir, "VAST_v8_2_0.dll"), to = path)
ignore <- file.copy(from = file.path(sil_dir, "VAST_v8_2_0.o"), to = path)
Data <- Data_count
## turn on spatial and spatiotemporal effects
## two factors -- one for each category (spawners and juveniles)
FieldConfig = c("Omega1"=0, "Epsilon1"=0, "Omega2"=2, "Epsilon2"=2)
## random walk structure on temporal intercepts and spatiotemporal random effect
## not much information for juveniles, model needs a little more structure to converge
RhoConfig = c("Beta1"=3, "Beta2"=1, "Epsilon1"=0, "Epsilon2"=0)
ObsModel = c("PosDist"=7, "Link"=0)
## other options
OverdispersionConfig = c("Eta1"=0, "Eta2"=0)
Options = c("Calculate_Range"=1,
            "Calculate_effective_area"=1)
## wrapper function to set up common settings
settings <- make_settings( Version = "VAST_v8_2_0",
                           n_x = nrow(Network_sz),
                           Region = "Stream_network",
                           FieldConfig=FieldConfig,
                           RhoConfig=RhoConfig,
                           OverdispersionConfig=OverdispersionConfig,
                           Options=Options,
                           ObsModel=ObsModel,
                           purpose = "index2",
                           fine_scale=FALSE,
                           bias.correct=FALSE)
## Stream-network overrides applied after make_settings().
settings$Method <- "Stream_network"
settings$grid_size_km <- 1
# compile model and check parameters
## run_model = FALSE: build the TMB object only, to extract Parameters/Map.
fit0 = fit_model( "settings"=settings,
                  "Lat_i"=Data[,"Lat"],
                  "Lon_i"=Data[,"Lon"],
                  "t_i"=Data[,'Year'],
                  "c_i"=as.numeric(Data[,"CategoryNum"]) - 1,
                  "b_i"=Data[,'Catch_KG'],
                  "a_i"=Data[,'AreaSwept_km2'],
                  working_dir=path,
                  input_grid=cbind("Lat"=Data[,"Lat"], "Lon"=Data[,"Lon"],"child_i"=Data[,"Knot"],"Area_km2"=Data[,"AreaSwept_km2"]),
                  Network_sz_LL=Network_sz_LL,
                  Network_sz = Network_sz,
                  run_model = FALSE,
                  X_gtp = X_gtp_all,
                  X_itp = X_itp_all,
                  Xconfig_zcp = Xconfig_all2,
                  # Q_ik = Q_ik,
                  test_fit = FALSE)
                  # CompileDir = jim_dir)
## Default Parameters/Map are used unmodified for this run.
Par <- fit0$tmb_list$Parameters
Map <- fit0$tmb_list$Map
# Map$beta1_ft <- factor(rep(NA, length(Map$beta1_ft)))
# Map$gamma1_ctp <- factor(rep(NA, length(Map$gamma1_ctp)))
# Map$logSigmaM = factor( c(1,NA,2,NA,NA,NA) )
# Map$gamma1_ctp[which(Map$gamma1_ctp == 1)[1]] = NA
# # Map$gamma1_ctp[which(Map$gamma1_ctp == 2)] = 1
# Map$gamma1_ctp <- factor(Map$gamma1_ctp)
# first model run
## Cheap pass (no SDs, no Newton steps) used only to screen identifiability.
fit1 = fit_model( "settings"=settings,
                  "Lat_i"=Data[,"Lat"],
                  "Lon_i"=Data[,"Lon"],
                  "t_i"=Data[,'Year'],
                  "c_i"=as.numeric(Data[,"CategoryNum"]) - 1,
                  "b_i"=Data[,'Catch_KG'],
                  "a_i"=Data[,'AreaSwept_km2'],
                  "v_i"=Data[,'Vessel'],
                  working_dir = path,
                  input_grid=cbind("Lat"=Data[,"Lat"], "Lon"=Data[,"Lon"],"child_i"=Data[,"Knot"],"Area_km2"=Data[,"AreaSwept_km2"]),
                  Network_sz_LL=Network_sz_LL,
                  Network_sz = Network_sz,
                  Map = Map,
                  X_gtp = X_gtp_all,
                  X_itp = X_itp_all,
                  Xconfig_zcp = Xconfig_all2,
                  # Q_ik = Q_ik,
                  getsd=FALSE,
                  newtonsteps=0,
                  test_fit = FALSE)
                  # CompileDir = jim_dir)
check <- TMBhelper::Check_Identifiable(fit1$tmb_list$Obj)
# Reduced model run
## Final estimation with SDs, warm-started from the fit1 optimum.
fit = fit_model( "settings"=settings,
                 "Lat_i"=Data[,"Lat"],
                 "Lon_i"=Data[,"Lon"],
                 "t_i"=Data[,'Year'],
                 "c_i"=as.numeric(Data[,"CategoryNum"]) - 1,
                 "b_i"=Data[,'Catch_KG'],
                 "a_i"=Data[,'AreaSwept_km2'],
                 "v_i"=Data[,'Vessel'],
                 working_dir = path,
                 input_grid=cbind("Lat"=Data[,"Lat"], "Lon"=Data[,"Lon"],"child_i"=Data[,"Knot"],"Area_km2"=Data[,"AreaSwept_km2"]),
                 Network_sz_LL=Network_sz_LL,
                 Network_sz = Network_sz,
                 Map = Map,
                 Parameters = Par,
                 X_gtp = X_gtp_all,
                 X_itp = X_itp_all,
                 Xconfig_zcp = Xconfig_all2,
                 # Q_ik = Q_ik,
                 getsd=TRUE,
                 test_fit = FALSE,
                 optimize_args = list(startpar = fit1$parameter_estimates$par)) #,
                 # CompileDir = jim_dir)
## save model fit
saveRDS(fit, file.path(path, "Fit.rds"))
## load model fit
fit <- readRDS(file.path(path, "Fit.rds"))
## Shared colour-scale limits (log density) for the density maps.
dens <- quantile(log(fit$Report$D_gcy))
VASTPlotUtils::plot_maps(plot_set = c(3), fit = fit, Sdreport = fit$parameter_estimates$SD, TmbData = fit$data_list, spatial_list = fit$spatial_list, DirName = fig, category_names = c("Spawners", "Juveniles"), cex = 0.5, Zlim = c(min(dens),max(dens)))
VASTPlotUtils::plot_maps(plot_set = c(7), fit = fit, Sdreport = fit$parameter_estimates$SD, TmbData = fit$data_list, spatial_list = fit$spatial_list, DirName = fig, category_names = c("Spawners", "Juveniles"), cex = 0.5)
VASTPlotUtils::plot_maps(plot_set = c(5), fit = fit, Sdreport = fit$parameter_estimates$SD, TmbData = fit$data_list, spatial_list = fit$spatial_list, DirName = fig, category_names = c("Spawners", "Juveniles"), cex = 0.5)
VASTPlotUtils::plot_maps(plot_set = c(3), fit = fit, Sdreport = fit$parameter_estimates$SD, TmbData = fit$data_list, spatial_list = fit$spatial_list, DirName = fig, category_names = c("Spawners", "Juveniles"), cex = 0.75, Panel = "Year", Zlim = c(min(dens),max(dens)))
## plot effective area occupied and center of gravity
VASTPlotUtils::plot_range_index(Report = fit$Report, TmbData = fit$data_list, Sdreport = fit$parameter_estimates$SD, Znames = colnames(fit$data_list$Z_xm), PlotDir = fig, Year_Set = fit$year_labels, use_biascorr = TRUE, category_names = c("Spawners", "Juveniles"))
VASTPlotUtils::plot_biomass_index(fit = fit, Sdreport = fit$parameter_estimates$SD, DirName = fig, category_names = c("Spawners", "Juveniles"), add = spawn_info, Plot_suffix = "Count", interval_width = 1.96)
## DHARMa residual diagnostics.
dharmaRes = summary( fit, what="residuals")
png(file.path(fig, "DHARMa_res.png"), height = 600, width = 900)
plot(dharmaRes, quantreg = TRUE)
dev.off()
# Various potential plots
png(file.path(fig, "QQplot.png"), height = 600, width = 600)
plotQQunif(dharmaRes)
dev.off()
## Default VAST plot suite.
Plots = plot(fit,
             working_dir=paste0(path,"/figures/"),
             land_color=rgb(0,0,0,0),
             quantreg=TRUE )
##################
## Model run: "multivar_landcover_dist2".
## Same three-stage pattern as the other sections: (1) fit0 with run_model=FALSE
## to build the TMB object and get Parameters/Map, (2) fit1 quick run without SD
## to check identifiability, (3) final fit with SD reporting, then figures.
## NOTE(review): relies on objects created earlier in the file (sil_dir,
## Data_dens, Network_sz, Network_sz_LL, X_gtp_all, X_itp_all, Xconfig_all2,
## spawn_info) -- confirm they are in the workspace before running this section.
path <- file.path(sil_dir, "multivar_landcover_dist2")
# unlink(path, TRUE)
dir.create(path, showWarnings = FALSE)
setwd(path)
fig <- file.path(path, "figures")
dir.create(fig, showWarnings=FALSE)
#
## Reuse the already-compiled VAST template rather than recompiling
ignore <- file.copy(from = file.path(sil_dir, "VAST_v8_2_0.cpp"), to = path)
ignore <- file.copy(from = file.path(sil_dir, "VAST_v8_2_0.dll"), to = path)
ignore <- file.copy(from = file.path(sil_dir, "VAST_v8_2_0.o"), to = path)
Data <- Data_dens
## turn on spatial and spatiotemporal effects
## two factors -- one for each category (spawners and juveniles)
FieldConfig = c("Omega1"=2, "Epsilon1"=2, "Omega2"=2, "Epsilon2"=2)
## random walk structure on temporal intercepts and spatiotemporal random effect
## not much information for juveniles, model needs a little more structure to converge
RhoConfig = c("Beta1"=1, "Beta2"=1, "Epsilon1"=0, "Epsilon2"=0)
## ObsModel codes are passed through to VAST; PosDist=2 here (vs 5 elsewhere
## in this file) is the distinguishing choice of this run -- see VAST docs
ObsModel = c("PosDist"=2, "Link"=0)
## other options
OverdispersionConfig = c("Eta1"=0, "Eta2"=0)
Options = c("Calculate_Range"=1,
"Calculate_effective_area"=1)
## wrapper function to set up common settings
settings <- make_settings( Version = "VAST_v8_2_0",
n_x = nrow(Network_sz),
Region = "Stream_network",
FieldConfig=FieldConfig,
RhoConfig=RhoConfig,
OverdispersionConfig=OverdispersionConfig,
Options=Options,
ObsModel=ObsModel,
purpose = "index2",
fine_scale=FALSE,
bias.correct=FALSE)
settings$Method <- "Stream_network"
settings$grid_size_km <- 1
# compile model and check parameters
fit0 = fit_model( "settings"=settings,
"Lat_i"=Data[,"Lat"],
"Lon_i"=Data[,"Lon"],
"t_i"=Data[,'Year'],
"c_i"=as.numeric(Data[,"CategoryNum"]) - 1,
"b_i"=Data[,'Catch_KG'],
"a_i"=Data[,'AreaSwept_km2'],
working_dir=path,
input_grid=cbind("Lat"=Data[,"Lat"], "Lon"=Data[,"Lon"],"child_i"=Data[,"Knot"],"Area_km2"=Data[,"AreaSwept_km2"]),
Network_sz_LL=Network_sz_LL,
Network_sz = Network_sz,
run_model = FALSE,
X_gtp = X_gtp_all,
X_itp = X_itp_all,
Xconfig_zcp = Xconfig_all2,
# Q_ik = Q_ik,
test_fit = FALSE)
# CompileDir = jim_dir)
Par <- fit0$tmb_list$Parameters
Map <- fit0$tmb_list$Map
# Map$beta1_ft <- factor(rep(NA, length(Map$beta1_ft)))
# Map$gamma1_ctp <- factor(rep(NA, length(Map$gamma1_ctp)))
## Estimate only the first observation-SD parameter; fix the rest
Map$logSigmaM = factor( c(1,NA,NA,NA,NA,NA) )
# Map$gamma1_ctp[which(Map$gamma1_ctp == 1)[1]] = NA
# # Map$gamma1_ctp[which(Map$gamma1_ctp == 2)] = 1
# Map$gamma1_ctp <- factor(Map$gamma1_ctp)
# first model run
fit1 = fit_model( "settings"=settings,
"Lat_i"=Data[,"Lat"],
"Lon_i"=Data[,"Lon"],
"t_i"=Data[,'Year'],
"c_i"=as.numeric(Data[,"CategoryNum"]) - 1,
"b_i"=Data[,'Catch_KG'],
"a_i"=Data[,'AreaSwept_km2'],
"v_i"=Data[,'Vessel'],
working_dir = path,
input_grid=cbind("Lat"=Data[,"Lat"], "Lon"=Data[,"Lon"],"child_i"=Data[,"Knot"],"Area_km2"=Data[,"AreaSwept_km2"]),
Network_sz_LL=Network_sz_LL,
Network_sz = Network_sz,
Map = Map,
X_gtp = X_gtp_all,
X_itp = X_itp_all,
Xconfig_zcp = Xconfig_all2,
# Q_ik = Q_ik,
getsd=FALSE,
newtonsteps=0,
test_fit = FALSE)
# CompileDir = jim_dir)
check <- TMBhelper::Check_Identifiable(fit1$tmb_list$Obj)
# Reduced model run
## Final run with SD report, starting from fit1's converged parameters
fit = fit_model( "settings"=settings,
"Lat_i"=Data[,"Lat"],
"Lon_i"=Data[,"Lon"],
"t_i"=Data[,'Year'],
"c_i"=as.numeric(Data[,"CategoryNum"]) - 1,
"b_i"=Data[,'Catch_KG'],
"a_i"=Data[,'AreaSwept_km2'],
"v_i"=Data[,'Vessel'],
working_dir = path,
input_grid=cbind("Lat"=Data[,"Lat"], "Lon"=Data[,"Lon"],"child_i"=Data[,"Knot"],"Area_km2"=Data[,"AreaSwept_km2"]),
Network_sz_LL=Network_sz_LL,
Network_sz = Network_sz,
Map = Map,
Parameters = Par,
X_gtp = X_gtp_all,
X_itp = X_itp_all,
Xconfig_zcp = Xconfig_all2,
# Q_ik = Q_ik,
getsd=TRUE,
test_fit = FALSE,
optimize_args = list(startpar = fit1$parameter_estimates$par)) #,
# CompileDir = jim_dir)
## save model fit
saveRDS(fit, file.path(path, "Fit.rds"))
## load model fit
fit <- readRDS(file.path(path, "Fit.rds"))
## Shared color limits across maps taken from the log-density quantiles
dens <- quantile(log(fit$Report$D_gcy))
VASTPlotUtils::plot_maps(plot_set = c(3), fit = fit, Sdreport = fit$parameter_estimates$SD, TmbData = fit$data_list, spatial_list = fit$spatial_list, DirName = fig, category_names = c("Spawners", "Juveniles"), cex = 0.5, Zlim = c(min(dens),max(dens)))
VASTPlotUtils::plot_maps(plot_set = c(7), fit = fit, Sdreport = fit$parameter_estimates$SD, TmbData = fit$data_list, spatial_list = fit$spatial_list, DirName = fig, category_names = c("Spawners", "Juveniles"), cex = 0.5)
VASTPlotUtils::plot_maps(plot_set = c(5), fit = fit, Sdreport = fit$parameter_estimates$SD, TmbData = fit$data_list, spatial_list = fit$spatial_list, DirName = fig, category_names = c("Spawners", "Juveniles"), cex = 0.5)
VASTPlotUtils::plot_maps(plot_set = c(3), fit = fit, Sdreport = fit$parameter_estimates$SD, TmbData = fit$data_list, spatial_list = fit$spatial_list, DirName = fig, category_names = c("Spawners", "Juveniles"), cex = 0.75, Panel = "Year", Zlim = c(min(dens),max(dens)))
## plot effective area occupied and center of gravity
VASTPlotUtils::plot_range_index(Report = fit$Report, TmbData = fit$data_list, Sdreport = fit$parameter_estimates$SD, Znames = colnames(fit$data_list$Z_xm), PlotDir = fig, Year_Set = fit$year_labels, use_biascorr = TRUE, category_names = c("Spawners", "Juveniles"))
VASTPlotUtils::plot_biomass_index(fit = fit, Sdreport = fit$parameter_estimates$SD, DirName = fig, category_names = c("Spawners", "Juveniles"), add = spawn_info, Plot_suffix = "Count", interval_width = 1.96)
dharmaRes = summary( fit, what="residuals")
png(file.path(fig, "DHARMa_res.png"), height = 600, width = 900)
plot(dharmaRes, quantreg = TRUE)
dev.off()
# Various potential plots
png(file.path(fig, "QQplot.png"), height = 600, width = 600)
plotQQunif(dharmaRes)
dev.off()
Plots = plot(fit,
working_dir=paste0(path,"/figures/"),
land_color=rgb(0,0,0,0),
quantreg=TRUE )
### habsurvey
## Model run: "multivar_habsurvey_dist5". Uses the count data (Data_count) and
## a third covariate configuration (Xconfig_all3); first linear predictor's
## spatial/spatiotemporal effects are switched off (Omega1/Epsilon1 = 0).
## Same three-stage fit0/fit1/fit pattern as the other model sections.
path <- file.path(sil_dir, "multivar_habsurvey_dist5")
# unlink(path, TRUE)
dir.create(path, showWarnings = FALSE)
setwd(path)
fig <- file.path(path, "figures")
dir.create(fig, showWarnings=FALSE)
#
## Reuse the already-compiled VAST template
ignore <- file.copy(from = file.path(sil_dir, "VAST_v8_2_0.cpp"), to = path)
ignore <- file.copy(from = file.path(sil_dir, "VAST_v8_2_0.dll"), to = path)
ignore <- file.copy(from = file.path(sil_dir, "VAST_v8_2_0.o"), to = path)
Data <- Data_count
## turn on spatial and spatiotemporal effects
## two factors -- one for each category (spawners and juveniles)
FieldConfig = c("Omega1"=0, "Epsilon1"=0, "Omega2"=2, "Epsilon2"=2)
## random walk structure on temporal intercepts and spatiotemporal random effect
## not much information for juveniles, model needs a little more structure to converge
RhoConfig = c("Beta1"=3, "Beta2"=1, "Epsilon1"=0, "Epsilon2"=0)
ObsModel = c("PosDist"=5, "Link"=0)
## other options
OverdispersionConfig = c("Eta1"=0, "Eta2"=0)
Options = c("Calculate_Range"=1,
"Calculate_effective_area"=1)
## wrapper function to set up common settings
settings <- make_settings( Version = "VAST_v8_2_0",
n_x = nrow(Network_sz),
Region = "Stream_network",
FieldConfig=FieldConfig,
RhoConfig=RhoConfig,
OverdispersionConfig=OverdispersionConfig,
Options=Options,
ObsModel=ObsModel,
purpose = "index2",
fine_scale=FALSE,
bias.correct=FALSE)
settings$Method <- "Stream_network"
settings$grid_size_km <- 1
# compile model and check parameters
fit0 = fit_model( "settings"=settings,
"Lat_i"=Data[,"Lat"],
"Lon_i"=Data[,"Lon"],
"t_i"=Data[,'Year'],
"c_i"=as.numeric(Data[,"CategoryNum"]) - 1,
"b_i"=Data[,'Catch_KG'],
"a_i"=Data[,'AreaSwept_km2'],
working_dir=path,
input_grid=cbind("Lat"=Data[,"Lat"], "Lon"=Data[,"Lon"],"child_i"=Data[,"Knot"],"Area_km2"=Data[,"AreaSwept_km2"]),
Network_sz_LL=Network_sz_LL,
Network_sz = Network_sz,
run_model = FALSE,
X_gtp = X_gtp_all,
X_itp = X_itp_all,
Xconfig_zcp = Xconfig_all3,
# Q_ik = Q_ik,
test_fit = FALSE)
# CompileDir = jim_dir)
Par <- fit0$tmb_list$Parameters
Map <- fit0$tmb_list$Map
# Map$beta1_ft <- factor(rep(NA, length(Map$beta1_ft)))
# Map$gamma1_ctp <- factor(rep(NA, length(Map$gamma1_ctp)))
## Fix all observation-SD parameters and all first-predictor covariate effects
Map$logSigmaM = factor( c(NA,NA,NA,NA,NA,NA) )
Map$gamma1_ctp <- factor(rep(NA,length(Map$gamma1_ctp)))
# first model run
fit1 = fit_model( "settings"=settings,
"Lat_i"=Data[,"Lat"],
"Lon_i"=Data[,"Lon"],
"t_i"=Data[,'Year'],
"c_i"=as.numeric(Data[,"CategoryNum"]) - 1,
"b_i"=Data[,'Catch_KG'],
"a_i"=Data[,'AreaSwept_km2'],
"v_i"=Data[,'Vessel'],
working_dir = path,
input_grid=cbind("Lat"=Data[,"Lat"], "Lon"=Data[,"Lon"],"child_i"=Data[,"Knot"],"Area_km2"=Data[,"AreaSwept_km2"]),
Network_sz_LL=Network_sz_LL,
Network_sz = Network_sz,
Map = Map,
X_gtp = X_gtp_all,
X_itp = X_itp_all,
Xconfig_zcp = Xconfig_all3,
# Q_ik = Q_ik,
getsd=FALSE,
newtonsteps=0,
test_fit = FALSE)
# CompileDir = jim_dir)
check <- TMBhelper::Check_Identifiable(fit1$tmb_list$Obj)
# Reduced model run
## NOTE(review): unlike the other sections, this final fit does not restart
## from fit1's parameters (no optimize_args) -- confirm that is intentional.
fit = fit_model( "settings"=settings,
"Lat_i"=Data[,"Lat"],
"Lon_i"=Data[,"Lon"],
"t_i"=Data[,'Year'],
"c_i"=as.numeric(Data[,"CategoryNum"]) - 1,
"b_i"=Data[,'Catch_KG'],
"a_i"=Data[,'AreaSwept_km2'],
"v_i"=Data[,'Vessel'],
working_dir = path,
input_grid=cbind("Lat"=Data[,"Lat"], "Lon"=Data[,"Lon"],"child_i"=Data[,"Knot"],"Area_km2"=Data[,"AreaSwept_km2"]),
Network_sz_LL=Network_sz_LL,
Network_sz = Network_sz,
Map = Map,
Parameters = Par,
X_gtp = X_gtp_all,
X_itp = X_itp_all,
Xconfig_zcp = Xconfig_all3,
# Q_ik = Q_ik,
getsd=TRUE,
newtonsteps=0,
test_fit = FALSE) #,
# CompileDir = jim_dir)
## save model fit
saveRDS(fit, file.path(path, "Fit.rds"))
## load model fit
fit <- readRDS(file.path(path, "Fit.rds"))
## Shared color limits across maps from the log-density quantiles
dens <- quantile(log(fit$Report$D_gcy))
VASTPlotUtils::plot_maps(plot_set = c(3), fit = fit, Sdreport = fit$parameter_estimates$SD, TmbData = fit$data_list, spatial_list = fit$spatial_list, DirName = fig, category_names = c("Spawners", "Juveniles"), cex = 0.5, Zlim = c(min(dens),max(dens)))
VASTPlotUtils::plot_maps(plot_set = c(7), fit = fit, Sdreport = fit$parameter_estimates$SD, TmbData = fit$data_list, spatial_list = fit$spatial_list, DirName = fig, category_names = c("Spawners", "Juveniles"), cex = 0.5)
VASTPlotUtils::plot_maps(plot_set = c(5), fit = fit, Sdreport = fit$parameter_estimates$SD, TmbData = fit$data_list, spatial_list = fit$spatial_list, DirName = fig, category_names = c("Spawners", "Juveniles"), cex = 0.5)
VASTPlotUtils::plot_maps(plot_set = c(3), fit = fit, Sdreport = fit$parameter_estimates$SD, TmbData = fit$data_list, spatial_list = fit$spatial_list, DirName = fig, category_names = c("Spawners", "Juveniles"), cex = 0.75, Panel = "Year", Zlim = c(min(dens),max(dens)))
## plot effective area occupied and center of gravity
VASTPlotUtils::plot_range_index(Report = fit$Report, TmbData = fit$data_list, Sdreport = fit$parameter_estimates$SD, Znames = colnames(fit$data_list$Z_xm), PlotDir = fig, Year_Set = fit$year_labels, use_biascorr = TRUE, category_names = c("Spawners", "Juveniles"))
VASTPlotUtils::plot_biomass_index(fit = fit, Sdreport = fit$parameter_estimates$SD, DirName = fig, category_names = c("Spawners", "Juveniles"), add = spawn_info, Plot_suffix = "Count", interval_width = 1.96)
dharmaRes = summary( fit, what="residuals")
png(file.path(fig, "DHARMa_res.png"), height = 600, width = 900)
plot(dharmaRes, quantreg = TRUE)
dev.off()
# Various potential plots
png(file.path(fig, "QQplot.png"), height = 600, width = 600)
plotQQunif(dharmaRes)
dev.off()
Plots = plot(fit,
working_dir=paste0(path,"/figures/"),
land_color=rgb(0,0,0,0),
quantreg=TRUE )
## Model-selection table: read each saved fit and compare AIC across runs.
## Reads <sil_dir>/<model>/Fit.rds for every candidate model, extracts its AIC,
## and prints the table sorted by delta-AIC (0 = best-supported model).
df <- data.frame("Model" = c("multivar_landcover_dist11",
"multivar_landcover_dist5",
"multivar_landcover_IID_dist5",
"multivar_landcover_dist7",
"multivar_landcover_dist5_RW",
"multivar_landcover_dist5_RWEps",
"multivar_landcover_dist5_v2",
"multivar_landcover_dist2",
"multivar_habsurvey_dist5"))
## Extract AIC from each saved fit. vapply (rather than a grow-in-loop or
## sapply) guarantees a numeric result of the right length; the original
## `df$AIC <- NULL` before the loop was a no-op and has been dropped.
df$AIC <- vapply(seq_len(nrow(df)), function(i) {
res <- readRDS(file.path(sil_dir, df[i, "Model"], "Fit.rds"))
as.numeric(res$parameter_estimates$AIC)
}, numeric(1))
## Delta-AIC relative to the best model; vectorized subtraction replaces the
## original per-row sapply, which recomputed min(AIC) for every row.
df$dAIC <- df$AIC - min(df$AIC)
df[order(df$dAIC), ]
## remove last year of juveniles
#################
## Factor
##################
## Model run: "multivar_landcover_dist5_RWEps_rm" -- sensitivity run that drops
## the 2017 juvenile observations (and the matching covariate rows) before
## fitting. Epsilon2 gets a random-walk structure (RhoConfig Epsilon2 = 2).
## Same three-stage fit0/fit1/fit pattern as the other model sections.
path <- file.path(sil_dir, "multivar_landcover_dist5_RWEps_rm")
# unlink(path, TRUE)
dir.create(path, showWarnings = FALSE)
setwd(path)
fig <- file.path(path, "figures")
dir.create(fig, showWarnings=FALSE)
#
## Reuse the already-compiled VAST template
ignore <- file.copy(from = file.path(sil_dir, "VAST_v8_2_0.cpp"), to = path)
ignore <- file.copy(from = file.path(sil_dir, "VAST_v8_2_0.dll"), to = path)
ignore <- file.copy(from = file.path(sil_dir, "VAST_v8_2_0.o"), to = path)
## Drop 2017 juveniles from both the data and the observation-level covariates
index <- which(Data_count$Category == "Juveniles" & Data_count$Year == 2017)
Data <- Data_count[-index,]
X_itp_inp <- X_itp_all[-index,,]
## turn on spatial and spatiotemporal effects
## two factors -- one for each category (spawners and juveniles)
FieldConfig = c("Omega1"=0, "Epsilon1"=0, "Omega2"=2, "Epsilon2"=2)
## random walk structure on temporal intercepts and spatiotemporal random effect
## not much information for juveniles, model needs a little more structure to converge
RhoConfig = c("Beta1"=3, "Beta2"=1, "Epsilon1"=0, "Epsilon2"=2)
ObsModel = c("PosDist"=5, "Link"=0)
## other options
OverdispersionConfig = c("Eta1"=0, "Eta2"=0)
Options = c("Calculate_Range"=1,
"Calculate_effective_area"=1)
## wrapper function to set up common settings
settings <- make_settings( Version = "VAST_v8_2_0",
n_x = nrow(Network_sz),
Region = "Stream_network",
FieldConfig=FieldConfig,
RhoConfig=RhoConfig,
OverdispersionConfig=OverdispersionConfig,
Options=Options,
ObsModel=ObsModel,
purpose = "index2",
fine_scale=FALSE,
bias.correct=FALSE)
settings$Method <- "Stream_network"
settings$grid_size_km <- 1
# compile model and check parameters
fit0 = fit_model( "settings"=settings,
"Lat_i"=Data[,"Lat"],
"Lon_i"=Data[,"Lon"],
"t_i"=Data[,'Year'],
"c_i"=as.numeric(Data[,"CategoryNum"]) - 1,
"b_i"=Data[,'Catch_KG'],
"a_i"=Data[,'AreaSwept_km2'],
working_dir=path,
input_grid=cbind("Lat"=Data[,"Lat"], "Lon"=Data[,"Lon"],"child_i"=Data[,"Knot"],"Area_km2"=Data[,"AreaSwept_km2"]),
Network_sz_LL=Network_sz_LL,
Network_sz = Network_sz,
run_model = FALSE,
X_gtp = X_gtp_all,
X_itp = X_itp_inp,
Xconfig_zcp = Xconfig_all2,
# Q_ik = Q_ik,
test_fit = FALSE)
# CompileDir = jim_dir)
Par <- fit0$tmb_list$Parameters
Map <- fit0$tmb_list$Map
# Map$beta1_ft <- factor(rep(NA, length(Map$beta1_ft)))
# Map$gamma1_ctp <- factor(rep(NA, length(Map$gamma1_ctp)))
## Fix all observation-SD parameters and all first-predictor covariate effects
Map$logSigmaM = factor( c(NA,NA,NA,NA,NA,NA) )
Map$gamma1_ctp <- factor(rep(NA, length(Map$gamma1_ctp)))
# Map$gamma1_ctp[which(Map$gamma1_ctp == 1)] = NA
# Map$gamma1_ctp[which(Map$gamma1_ctp == 2)] = 1
# Map$gamma1_ctp <- factor(Map$gamma1_ctp)
# first model run
fit1 = fit_model( "settings"=settings,
"Lat_i"=Data[,"Lat"],
"Lon_i"=Data[,"Lon"],
"t_i"=Data[,'Year'],
"c_i"=as.numeric(Data[,"CategoryNum"]) - 1,
"b_i"=Data[,'Catch_KG'],
"a_i"=Data[,'AreaSwept_km2'],
"v_i"=Data[,'Vessel'],
working_dir = path,
input_grid=cbind("Lat"=Data[,"Lat"], "Lon"=Data[,"Lon"],"child_i"=Data[,"Knot"],"Area_km2"=Data[,"AreaSwept_km2"]),
Network_sz_LL=Network_sz_LL,
Network_sz = Network_sz,
Map = Map,
X_gtp = X_gtp_all,
X_itp = X_itp_inp,
Xconfig_zcp = Xconfig_all2,
# Q_ik = Q_ik,
getsd=FALSE,
newtonsteps=0,
test_fit = FALSE)
# CompileDir = jim_dir)
check <- TMBhelper::Check_Identifiable(fit1$tmb_list$Obj)
# Reduced model run
## Final run with SD report and extra Newton steps, restarting from fit1
fit = fit_model( "settings"=settings,
"Lat_i"=Data[,"Lat"],
"Lon_i"=Data[,"Lon"],
"t_i"=Data[,'Year'],
"c_i"=as.numeric(Data[,"CategoryNum"]) - 1,
"b_i"=Data[,'Catch_KG'],
"a_i"=Data[,'AreaSwept_km2'],
"v_i"=Data[,'Vessel'],
working_dir = path,
input_grid=cbind("Lat"=Data[,"Lat"], "Lon"=Data[,"Lon"],"child_i"=Data[,"Knot"],"Area_km2"=Data[,"AreaSwept_km2"]),
Network_sz_LL=Network_sz_LL,
Network_sz = Network_sz,
Map = Map,
Parameters = Par,
X_gtp = X_gtp_all,
X_itp = X_itp_inp,
Xconfig_zcp = Xconfig_all2,
# Q_ik = Q_ik,
getsd=TRUE,
newtonsteps=3,
test_fit = FALSE,
optimize_args = list(startpar = fit1$parameter_estimates$par)) #,
# CompileDir = jim_dir)
## save model fit
saveRDS(fit, file.path(path, "Fit.rds"))
## load model fit
fit <- readRDS(file.path(path, "Fit.rds"))
## Shared color limits across maps from the log-density quantiles
dens <- quantile(log(fit$Report$D_gcy))
VASTPlotUtils::plot_maps(plot_set = c(3), fit = fit, Sdreport = fit$parameter_estimates$SD, TmbData = fit$data_list, spatial_list = fit$spatial_list, DirName = fig, category_names = c("Spawners", "Juveniles"), cex = 0.5, Zlim = c(min(dens),max(dens)))
VASTPlotUtils::plot_maps(plot_set = c(7), fit = fit, Sdreport = fit$parameter_estimates$SD, TmbData = fit$data_list, spatial_list = fit$spatial_list, DirName = fig, category_names = c("Spawners", "Juveniles"), cex = 0.5)
VASTPlotUtils::plot_maps(plot_set = c(5), fit = fit, Sdreport = fit$parameter_estimates$SD, TmbData = fit$data_list, spatial_list = fit$spatial_list, DirName = fig, category_names = c("Spawners", "Juveniles"), cex = 0.5)
VASTPlotUtils::plot_maps(plot_set = c(3), fit = fit, Sdreport = fit$parameter_estimates$SD, TmbData = fit$data_list, spatial_list = fit$spatial_list, DirName = fig, category_names = c("Spawners", "Juveniles"), cex = 0.75, Panel = "Year", Zlim = c(min(dens),max(dens)))
## plot effective area occupied and center of gravity
VASTPlotUtils::plot_range_index(Report = fit$Report, TmbData = fit$data_list, Sdreport = fit$parameter_estimates$SD, Znames = colnames(fit$data_list$Z_xm), PlotDir = fig, Year_Set = fit$year_labels, use_biascorr = TRUE, category_names = c("Spawners", "Juveniles"))
VASTPlotUtils::plot_biomass_index(fit = fit, Sdreport = fit$parameter_estimates$SD, DirName = fig, category_names = c("Spawners", "Juveniles"), add = spawn_info, Plot_suffix = "Count", interval_width = 1.96)
dharmaRes = summary( fit, what="residuals")
png(file.path(fig, "DHARMa_res.png"), height = 600, width = 900)
plot(dharmaRes, quantreg = TRUE)
dev.off()
# Various potential plots
png(file.path(fig, "QQplot.png"), height = 600, width = 600)
plotQQunif(dharmaRes)
dev.off()
Plots = plot(fit,
working_dir=paste0(path,"/figures/"),
land_color=rgb(0,0,0,0),
quantreg=TRUE )
######################################
### Manuscript figures
######################################
## Network
## Base map of the stream network nodes, colored/sized by the Network column
net <- ggplot(Network_sz_LL_info, aes(x = Lon, y = Lat)) +
geom_point(aes(fill = Network, cex = Network), color = gray(0.9), pch = 21, alpha = 0.6) +
xlab("Longitude") + ylab("Latitude") +
scale_fill_manual(values = c("gray", "goldenrod")) +
# guides(fill = guide_legend(title = "")) +
theme_bw(base_size = 14)
## locations for arrows
## For each node, look up its parent's coordinates so a segment can be drawn
## from child to parent; nodes whose parent is absent get NA endpoints.
l2 <- lapply(1:nrow(Network_sz_LL), function(x){
parent <- Network_sz_LL$parent_s[x]
find <- Network_sz_LL %>% filter(child_s == parent)
if(nrow(find)>0) out <- cbind.data.frame(Network_sz_LL[x,], 'Lon2'=find$Lon, 'Lat2'=find$Lat)
if(nrow(find)==0) out <- cbind.data.frame(Network_sz_LL[x,], 'Lon2'=NA, 'Lat2'=NA)
return(out)
})
l2 <- do.call(rbind, l2)
## Overlay directed arrows showing the child -> parent network structure
net <- net + geom_segment(data=l2, aes(x = Lon,y = Lat, xend = Lon2, yend = Lat2), arrow=arrow(length=unit(0.2,"cm")), col="gray")
ggsave(file.path(fig_dir, "Network.png"), net, width = 8, height = 6)
## network with observations
net_wObs <- net +
geom_point(data = Data_dens, aes(color = Category), cex = 2) +
scale_color_brewer(palette = "Set1") +
guides(color = guide_legend(title = "Survey location"))
ggsave(file.path(fig_dir, "Survey_locs.png"), net_wObs, height = 6, width = 8)
## Observed-density maps by year: spawners only, juveniles only, and combined.
colors <- RColorBrewer::brewer.pal(3, "Set1")
pspawn <- ggplot() +
geom_point(data = Network_sz_LL, aes(x = Lon, y = Lat), color = "gray", cex = 1, alpha = 0.5) +
geom_point(data = Data_dens %>% filter(Category == "Spawners"), aes(x = Lon, y = Lat, size = Catch_KG, color = Category), alpha = 0.6) +
facet_wrap(.~Year) +
scale_color_manual(values = colors[2]) +
theme_bw(base_size = 14) +
ggtitle("Observed spawner density") +
guides(size = guide_legend(title = "Coho per km"), color = FALSE)
ggsave(file.path(fig_dir, "Spawner_Density_byYear.png"), pspawn, height = 12, width = 14)
pjuv <- ggplot() +
geom_point(data = Network_sz_LL, aes(x = Lon, y = Lat), color = "gray", cex = 1, alpha = 0.5) +
geom_point(data = Data_dens %>% filter(Category == "Juveniles"), aes(x = Lon, y = Lat, size = Catch_KG, color = Category), alpha = 0.6) +
facet_wrap(.~Year) +
scale_color_manual(values = colors[1]) +
theme_bw(base_size = 14) +
ggtitle("Observed juvenile density") +
guides(size = guide_legend(title = "Coho per km"), color = FALSE)
ggsave(file.path(fig_dir, "Juvenile_Density_byYear.png"), pjuv, height = 12, width = 14)
## Both categories on one faceted map; x-axis breaks thinned to 3 quantiles
pobs <- ggplot() +
geom_point(data = Network_sz_LL, aes(x = Lon, y = Lat), color = "gray", cex = 1, alpha = 0.5) +
geom_point(data = Data_dens, aes(x = Lon, y = Lat, size = Catch_KG, color = Category), alpha = 0.6) +
facet_wrap(.~Year) +
scale_color_brewer(palette = "Set1") +
theme_bw(base_size = 14) +
ggtitle("Observed density") +
guides(size = guide_legend(title = "Coho per km")) +
scale_x_continuous(breaks = as.numeric(quantile(round(Data_dens$Lon,1),prob=c(0.05,0.5,0.99)))) +
xlab("Longitude") + ylab("Latitude")
ggsave(file.path(fig_dir, "Observed_density_byYear.png"), pobs, height = 9, width = 10)
### Results
## Load the three fits to compare: the multivariate factor model, the
## category-independent model, and the juvenile-survey-only model.
library(tidyverse)
base <- readRDS(file.path(sil_dir, "multivar_landcover_dist5_RWEps", "Fit.rds"))
iid <- readRDS(file.path(sil_dir, "multivar_landcover_IID_dist5", "Fit.rds"))
juv <- readRDS(file.path(sil_dir, "juveniles_landcover_dist5_RWEps", "Fit.rds"))
## compare maps
## Long-format log-density (node x category x year) for each model, joined to
## node lat/lon so the three models can be mapped side by side.
dens_byModel <- lapply(1:3, function(x){
if(x == 1){
Report <- base$Report
year_labels = base$year_labels
years_to_plot = base$years_to_plot
spatial_list <- base$spatial_list
name <- "Multivariate factor analysis"
}
if(x == 2){
Report <- iid$Report
year_labels = iid$year_labels
years_to_plot = iid$years_to_plot
spatial_list <- iid$spatial_list
name <- "Independent"
}
if(x == 3){
Report <- juv$Report
year_labels = juv$year_labels
years_to_plot = juv$years_to_plot
spatial_list <- juv$spatial_list
name <- "Juvenile survey only"
}
Array_xct = log(Report$D_gcy)
## Juvenile-only fit has a single category; the others have two
if(x %in% c(1:2)) dimnames(Array_xct) <- list(Node = 1:dim(Array_xct)[1], Category = c("Spawners","Juveniles"), Year = year_labels)
if(x == 3) dimnames(Array_xct) <- list(Node = 1:dim(Array_xct)[1], Category = c("Juveniles"), Year = year_labels)
xct <- reshape2::melt(Array_xct) %>% mutate(Model = name)
xctll <- full_join(xct, cbind.data.frame("Node" = 1:spatial_list$n_g,spatial_list$latlon_g))
return(xctll)
})
dens <- do.call(rbind, dens_byModel)
plot_dens <- dens #%>% filter(Year %in% c(1997,2007,2017))
plot_dens$value <- as.numeric(plot_dens$value)
## Predicted log-density maps: per-category maps for the base model, then
## faceted comparisons across models/years.
p <- ggplot(plot_dens %>% filter(Model == "Multivariate factor analysis") %>% filter(Category == "Spawners")) +
geom_point(aes(x = Lon, y = Lat, color = value), cex = 1.5, alpha = 0.75) +
scale_color_distiller(palette = "Spectral") +
# scale_color_viridis_c() +
facet_wrap(Year ~ .) +
xlab("Longitude") + ylab("Latitude") +
guides(color=guide_colourbar(title="log(Coho per km)")) +
ggtitle("Spawner log-density") +
theme_bw(base_size = 14)
ggsave(file.path(fig_dir, "Spawner_density_base.png"), p, height = 12, width = 15)
p <- ggplot(plot_dens %>% filter(Model == "Multivariate factor analysis") %>% filter(Category == "Juveniles")) +
geom_point(aes(x = Lon, y = Lat, color = value), cex = 1.5, alpha = 0.75) +
scale_color_distiller(palette = "Spectral") +
facet_wrap(Year ~ .) +
xlab("Longitude") + ylab("Latitude") +
guides(color=guide_colourbar(title="log(Coho per km)")) +
ggtitle("Juvenile log-density") +
theme_bw(base_size = 14)
ggsave(file.path(fig_dir, "Juvenile_density_base.png"), p, height = 12, width = 15)
## Overlay of both categories for a subset of years (every 5th year)
plot_both <- plot_dens %>% filter(Model == "Multivariate factor analysis") %>% filter(Year %in% seq(1997,2017,by=5))
p <- ggplot(plot_both) +
geom_point(data= plot_both %>% filter(Category == "Juveniles"), aes(x = Lon, y = Lat, color = value), cex = 2.5, alpha = 0.75) +
geom_point(data = plot_both %>% filter(Category == "Spawners"), aes(x = Lon, y = Lat, fill = value), cex = 3, alpha = 0.75, pch = 21, color = "white") +
scale_color_distiller(palette = "Spectral") +
scale_fill_distiller(palette = "Spectral") +
facet_grid(Year ~ Category) +
xlab("Longitude") + ylab("Latitude") +
guides(color=guide_colourbar(title="Juveniles"), fill=guide_colourbar(title="Spawners")) +
theme_bw(base_size = 14)
ggsave(file.path(fig_dir, "Density_sub_base.png"), p, height = 15, width = 10)
## Full year x category grid for the base model
plot_both <- plot_dens %>% filter(Model == "Multivariate factor analysis")
p <- ggplot(plot_both) +
geom_point(aes(x = Lon, y = Lat, color = value), alpha = 0.75) +
# geom_point(data = hab_df %>% filter(variable == "land_cover") %>% filter(grepl("Developed", value)), aes(x = Lon, y = Lat), pch = 1, stroke = 1.2) +
# geom_point(data = Data_dens, aes(x = Lon, y = Lat), alpha = 0.75, pch = 1, stroke = 1.2) +
# geom_point(data= plot_both %>% filter(Category == "Juveniles"), aes(x = Lon, y = Lat, color = value), cex = 1, alpha = 0.75) +
# geom_point(data = plot_both %>% filter(Category == "Spawners"), aes(x = Lon, y = Lat, fill = value), cex = 1.5, alpha = 0.75, pch = 21) +
scale_color_distiller(palette = "Spectral") +
scale_fill_distiller(palette = "Spectral") +
facet_wrap(Year ~ Category, ncol = 6) +
xlab("Longitude") + ylab("Latitude") +
guides(color=guide_colourbar(title="log(Coho per km)")) +
theme_bw(base_size = 14)
ggsave(file.path(fig_dir, "Density_compare_base.png"), p, height = 18, width = 15)
## Same grid for the independent-category model
plot_both <- plot_dens %>% filter(Model == "Independent")
p <- ggplot(plot_both) +
geom_point(aes(x = Lon, y = Lat, color = value), alpha = 0.75) +
# geom_point(data = Data_dens, aes(x = Lon, y = Lat), alpha = 0.75, pch = 1, stroke = 1.2) +
# geom_point(data= plot_both %>% filter(Category == "Juveniles"), aes(x = Lon, y = Lat, color = value), cex = 1, alpha = 0.75) +
# geom_point(data = plot_both %>% filter(Category == "Spawners"), aes(x = Lon, y = Lat, fill = value), cex = 1.5, alpha = 0.75, pch = 21) +
scale_color_distiller(palette = "Spectral") +
scale_fill_distiller(palette = "Spectral") +
facet_wrap(Year ~ Category, ncol = 6) +
xlab("Longitude") + ylab("Latitude") +
guides(color=guide_colourbar(title="log(Coho per km)")) +
theme_bw(base_size = 14)
ggsave(file.path(fig_dir, "Density_compare_IID.png"), p, height = 18, width = 15)
## Base vs juvenile-only comparison for three representative years
plot_both <- plot_dens %>% filter(Model != "Independent") %>% filter(Year %in% c(1997,2005,2017))
p <- ggplot(plot_both) +
geom_point(aes(x = Lon, y = Lat, color = value), cex = 3, alpha = 0.75) +
# geom_point(data = Data_dens, aes(x = Lon, y = Lat), alpha = 0.75, pch = 1, stroke = 1.2) +
# geom_point(data= plot_both %>% filter(Category == "Juveniles"), aes(x = Lon, y = Lat, color = value), cex = 1, alpha = 0.75) +
# geom_point(data = plot_both %>% filter(Category == "Spawners"), aes(x = Lon, y = Lat, fill = value), cex = 1.5, alpha = 0.75, pch = 21) +
scale_color_distiller(palette = "Spectral") +
scale_fill_distiller(palette = "Spectral") +
facet_grid(Year ~ Model) +
xlab("Longitude") + ylab("Latitude") +
guides(color=guide_colourbar(title="log(Coho per km)")) +
theme_bw(base_size = 14)
ggsave(file.path(fig_dir, "Density_compare_Juv.png"), p, height = 18, width = 15)
## covariate impact
## Second-predictor covariate effect (eta2) for the base model, melted to long
## format and joined to node coordinates (same structure as dens_byModel).
covar <- lapply(1, function(x){
if(x == 1){
Report <- base$Report
year_labels = base$year_labels
years_to_plot = base$years_to_plot
spatial_list <- base$spatial_list
name <- "Multivariate factor analysis"
}
Array_xct = Report$eta2_gct
dimnames(Array_xct) <- list(Node = 1:dim(Array_xct)[1], Category = c("Spawners","Juveniles"), Year = year_labels)
xct <- reshape2::melt(Array_xct) %>% mutate(Model = name)
xctll <- full_join(xct, cbind.data.frame("Node" = 1:spatial_list$n_g,spatial_list$latlon_g))
return(xctll)
})
covar <- do.call(rbind, covar)
## Map juvenile covariate impact (last year) against the land-cover layer
hab_sub <- hab_df %>% filter(variable == 'land_cover')
covar_sub <- covar %>%
filter(Model == "Multivariate factor analysis") %>%
filter(Category == "Juveniles") %>% filter(Year == max(Year)) %>%
rename(child_s = Node) %>%
select(-c(Category, Year, Lat, Lon)) %>%
rename(Impact = value)
hab_plot <- full_join(hab_sub, covar_sub)
library(ggthemes)
## Point size encodes the (negated) covariate impact; fill encodes land cover
p <- ggplot(hab_plot) +
geom_point(aes(x = Lon, y = Lat, fill = value, size = -Impact), pch = 21, alpha = 0.75) +
scale_fill_brewer(palette = "Set1") +
xlab("Longitude") + ylab("Latitude") +
guides(fill=guide_legend(title="Land cover"), size=guide_legend(title = "Juvenile covariate impact")) +
theme_bw(base_size = 14)
ggsave(file.path(fig_dir, "Juv_covar_base.png"), p, height = 5, width = 8)
## epsilon
## Spatiotemporal random effect (Epsilon2) per model/node/category/year, in
## long format joined to node lat/lon -- mirrors dens_byModel above.
## BUG FIX: the x == 3 ("Juvenile survey only") branch previously copied from
## `iid` instead of `juv`, silently duplicating model 2's results; it now pulls
## from `juv`, consistent with dens_byModel and eao_byModel. The dimnames call
## is also made conditional, since the juvenile-only fit has a single category.
eps_byModel <- lapply(1:3, function(x){
if(x == 1){
Report <- base$Report
year_labels = base$year_labels
years_to_plot = base$years_to_plot
spatial_list <- base$spatial_list
name <- "Multivariate factor analysis"
}
if(x == 2){
Report <- iid$Report
year_labels = iid$year_labels
years_to_plot = iid$years_to_plot
spatial_list <- iid$spatial_list
name <- "Independent"
}
if(x == 3){
Report <- juv$Report
year_labels = juv$year_labels
years_to_plot = juv$years_to_plot
spatial_list <- juv$spatial_list
name <- "Juvenile survey only"
}
Array_xct = Report$Epsilon2_gct
## Juvenile-only fit has a single category; the others have two
if(x %in% c(1:2)) dimnames(Array_xct) <- list(Node = 1:dim(Array_xct)[1], Category = c("Spawners","Juveniles"), Year = year_labels)
if(x == 3) dimnames(Array_xct) <- list(Node = 1:dim(Array_xct)[1], Category = c("Juveniles"), Year = year_labels)
xct <- reshape2::melt(Array_xct) %>% mutate(Model = name)
xctll <- full_join(xct, cbind.data.frame("Node" = 1:spatial_list$n_g,spatial_list$latlon_g))
return(xctll)
})
eps <- do.call(rbind, eps_byModel)
## Epsilon maps: magnitude of spatiotemporal variation (abs(value)) by
## category, year, and model -- same figure layouts as the density maps.
plot_eps <- eps #%>% filter(Year %in% c(1997,2007,2017))
plot_eps$value <- as.numeric(plot_eps$value)
p <- ggplot(plot_eps %>% filter(Model == "Multivariate factor analysis") %>% filter(Category == "Spawners")) +
geom_point(aes(x = Lon, y = Lat, color = abs(value)), cex = 1.5, alpha = 0.75) +
scale_color_distiller(palette = "Spectral") +
# scale_color_viridis_c() +
facet_wrap(Year ~ .) +
xlab("Longitude") + ylab("Latitude") +
guides(color=guide_colourbar(title="Variation")) +
ggtitle("Spawner spatiotemporal variation in abundance-density") +
theme_bw(base_size = 14)
ggsave(file.path(fig_dir, "Spawner_epsilon_base.png"), p, height = 12, width = 15)
p <- ggplot(plot_eps %>% filter(Model == "Multivariate factor analysis") %>% filter(Category == "Juveniles")) +
geom_point(aes(x = Lon, y = Lat, color = abs(value)), cex = 1.5, alpha = 0.75) +
scale_color_distiller(palette = "Spectral") +
facet_wrap(Year ~ .) +
xlab("Longitude") + ylab("Latitude") +
guides(color=guide_colourbar(title="Variation")) +
ggtitle("Juvenile spatiotemporal variation in abundance-density") +
theme_bw(base_size = 14)
ggsave(file.path(fig_dir, "Juvenile_epsilon_base.png"), p, height = 12, width = 15)
## Overlay of both categories for every 5th year
plot_both <- plot_eps %>% filter(Model == "Multivariate factor analysis") %>% filter(Year %in% seq(1997,2017,by=5))
p <- ggplot(plot_both) +
geom_point(data= plot_both %>% filter(Category == "Juveniles"), aes(x = Lon, y = Lat, color = abs(value)), cex = 2.5, alpha = 0.75) +
geom_point(data = plot_both %>% filter(Category == "Spawners"), aes(x = Lon, y = Lat, fill = abs(value)), cex = 3, alpha = 0.75, pch = 21, color = "white") +
scale_color_distiller(palette = "Spectral") +
scale_fill_distiller(palette = "Spectral") +
facet_grid(Year ~ Category) +
xlab("Longitude") + ylab("Latitude") +
guides(color=guide_colourbar(title="Juveniles"), fill=guide_colourbar(title="Spawners")) +
theme_bw(base_size = 14)
ggsave(file.path(fig_dir, "Epsilon_sub_base.png"), p, height = 15, width = 10)
## Full year x category grid with survey locations overlaid as open circles
plot_both <- plot_eps %>% filter(Model == "Multivariate factor analysis")
p <- ggplot(plot_both) +
geom_point(aes(x = Lon, y = Lat, color = abs(value)), alpha = 0.75) +
geom_point(data = Data_dens, aes(x = Lon, y = Lat), alpha = 0.75, pch = 1, stroke = 1.2) +
# geom_point(data= plot_both %>% filter(Category == "Juveniles"), aes(x = Lon, y = Lat, color = value), cex = 1, alpha = 0.75) +
# geom_point(data = plot_both %>% filter(Category == "Spawners"), aes(x = Lon, y = Lat, fill = value), cex = 1.5, alpha = 0.75, pch = 21) +
scale_color_distiller(palette = "Spectral") +
scale_fill_distiller(palette = "Spectral") +
facet_wrap(Year ~ Category, ncol = 6) +
xlab("Longitude") + ylab("Latitude") +
guides(color=guide_colourbar(title="Variation")) +
theme_bw(base_size = 14)
ggsave(file.path(fig_dir, "Epsilon_compare_base.png"), p, height = 18, width = 15)
## Base vs juvenile-only comparison for three representative years
plot_both <- plot_eps %>% filter(Model != "Independent") %>% filter(Year %in% c(1997,2005,2017))
p <- ggplot(plot_both) +
geom_point(aes(x = Lon, y = Lat, color = abs(value)), cex = 3, alpha = 0.75) +
# geom_point(data = Data_dens, aes(x = Lon, y = Lat), alpha = 0.75, pch = 1, stroke = 1.2) +
# geom_point(data= plot_both %>% filter(Category == "Juveniles"), aes(x = Lon, y = Lat, color = value), cex = 1, alpha = 0.75) +
# geom_point(data = plot_both %>% filter(Category == "Spawners"), aes(x = Lon, y = Lat, fill = value), cex = 1.5, alpha = 0.75, pch = 21) +
scale_color_distiller(palette = "Spectral") +
scale_fill_distiller(palette = "Spectral") +
facet_grid(Year ~ Model) +
xlab("Longitude") + ylab("Latitude") +
guides(color=guide_colourbar(title="Variation")) +
theme_bw(base_size = 14)
ggsave(file.path(fig_dir, "Epsilon_compare_Juv.png"), p, height = 18, width = 15)
## effective area occupied
## Extract (log) effective area occupied per model/category/year from each
## fit's TMB sdreport. Uses bias-corrected estimates when present in the
## sdreport, otherwise falls back to the uncorrected column.
## FIX: the original initialized SD_effective_area_ctl / SD_log_effective_area_ctl
## twice with the identical expression; the redundant duplicate is removed
## (behavior unchanged).
eao_byModel <- lapply(1:3, function(x){
if(x == 1){
SD <- TMB::summary.sdreport(base$parameter_estimates$SD)
TmbData <- base$data_list
year_labels = base$year_labels
years_to_plot = base$years_to_plot
spatial_list <- base$spatial_list
name <- "Multivariate factor analysis"
}
if(x == 2){
SD <- TMB::summary.sdreport(iid$parameter_estimates$SD)
TmbData <- iid$data_list
year_labels = iid$year_labels
years_to_plot = iid$years_to_plot
spatial_list <- iid$spatial_list
name <- "Independent"
}
if(x == 3){
SD <- TMB::summary.sdreport(juv$parameter_estimates$SD)
TmbData <- juv$data_list
year_labels = juv$year_labels
years_to_plot = juv$years_to_plot
spatial_list <- juv$spatial_list
name <- "Juvenile survey only"
}
EffectiveName = "effective_area_cyl"
use_biascorr = TRUE
## Containers (n_c x n_t x n_l x 2) for estimate and standard error,
## on the natural and log scales
SD_effective_area_ctl = SD_log_effective_area_ctl = array( NA, dim=c(unlist(TmbData[c('n_c','n_t','n_l')]),2), dimnames=list(NULL,NULL,NULL,c('Estimate','Std. Error')) )
# Effective area (natural scale)
if( use_biascorr==TRUE && "unbiased"%in%names(SD) ){
SD_effective_area_ctl[] = SD[which(rownames(SD)==EffectiveName),c('Est. (bias.correct)','Std. Error')]
}
if( !any(is.na(SD_effective_area_ctl)) ){
message("Using bias-corrected estimates for effective area occupied (natural scale)...")
}else{
message("Not using bias-corrected estimates for effective area occupied (natural scale)...")
SD_effective_area_ctl[] = SD[which(rownames(SD)==EffectiveName),c('Estimate','Std. Error')]
}
# Log-Effective area
if( use_biascorr==TRUE && "unbiased"%in%names(SD) ){
SD_log_effective_area_ctl[] = SD[which(rownames(SD)==paste0("log_",EffectiveName)),c('Est. (bias.correct)','Std. Error')]
}
if( !any(is.na(SD_log_effective_area_ctl)) ){
message("Using bias-corrected estimates for effective area occupied (log scale)...")
}else{
message("Not using bias-corrected estimates for effective area occupied (log scale)...")
SD_log_effective_area_ctl[] = SD[which(rownames(SD)==paste0("log_",EffectiveName)),c('Estimate','Std. Error')]
}
## Reshape log-scale estimate and SE to long format and join them
Index_ctl=array(SD_log_effective_area_ctl[,,,'Estimate'],dim(SD_log_effective_area_ctl)[1:3])
if(x %in% c(1:2)) dimnames(Index_ctl) <- list(Category = c("Spawners","Juveniles"), Year = year_labels, Stratum = NA)
if(x==3)dimnames(Index_ctl) <- list(Category = c("Juveniles"), Year = year_labels, Stratum = NA)
sd_Index_ctl=array(SD_log_effective_area_ctl[,,,'Std. Error'],dim(SD_log_effective_area_ctl)[1:3])
if(x %in% c(1:2)) dimnames(sd_Index_ctl) <- list(Category = c("Spawners","Juveniles"), Year = year_labels, Stratum = NA)
if(x == 3) dimnames(sd_Index_ctl) <- list(Category = c("Juveniles"), Year = year_labels, Stratum = NA)
df1 <- reshape2::melt(Index_ctl) %>% rename("Estimate" = value)
df2 <- reshape2::melt(sd_Index_ctl) %>% rename("SD" = value)
df <- full_join(df1, df2) %>% mutate(Model = name)
return(df)
})
eao <- do.call(rbind, eao_byModel)
## Effective area occupied over time with ~95% ribbons (Estimate +/- 1.96*SD),
## one panel per category, one color per model.
## NOTE(review): Estimate/SD here are on the log scale (from eao_byModel) while
## the axis is labeled in km^2 -- confirm the intended scale.
p <- ggplot(eao) +
geom_ribbon(aes(x = Year, ymin = Estimate - 1.96*SD, ymax = Estimate + 1.96*SD, fill = Model), alpha = 0.25) +
# geom_point(aes(x = Year, y = Estimate, color = Model), cex = 3) +
geom_line(aes(x = Year, y = Estimate, color = Model), lwd = 2) +
coord_cartesian(ylim = c(0,max(eao$Estimate + 1.96 * eao$SD)*1.01)) +
facet_grid(~Category) +
ylab("Effective area occupied (km^2)") +
theme_bw(base_size = 14) +
scale_color_brewer(palette = "Set1") +
scale_fill_brewer(palette = "Set1")
ggsave(file.path(fig_dir, "Compare_effective_area_occupied.png"), p, height = 6, width = 14)
|
f0bf4d88df0f203bebff8a66f876e23f1673d6f8
|
a85179c4ed324de28a0648ac43d43113c7b44e94
|
/Exercises/Week 8 Solutions - Blank.R
|
bfba5eceb166aac407afc8304541175f42bf450c
|
[] |
no_license
|
aringhosh/SFUStat452
|
226fe5ed46fb027dd1f472149c3e9091545dd8a6
|
83662344fef43601f38e997015f0ba3db90afd74
|
refs/heads/master
| 2020-03-22T01:23:04.377827
| 2017-12-04T09:08:54
| 2017-12-04T09:08:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 561
|
r
|
Week 8 Solutions - Blank.R
|
# Week 8 exercise skeleton: PCA / principal components regression on the
# Pima diabetes data. Only the data import is implemented; the remaining
# comments are placeholders for steps the student is expected to fill in.
#Import Data
Diab <- read.csv("Data/pima-diabetes.csv")
head(Diab,n=3)
summary(Diab)
#Remove "missing" values
#Remove Outcome
#Extract predictors and response
#Compute the mean and variance of each predictor
#Compute principal components of the predictors
#Compute principal components of the standardized predictors
#Plot both PCAs
#Center and scale the predictors
#Confirm that we standardized correctly
##########################################
### Fit a PCR model to predict Glucose ###
##########################################
# pls provides pcr() for principal components regression.
library(pls)
cb5408ed58d4f31aaf4d7d92539c50e450d26140
|
ebd6f68d47e192da7f81c528312358cfe8052c8d
|
/swig/Examples/test-suite/r/funcptr_runme.R
|
c6127ef68d570219dbe43122c9908cfd0a257f84
|
[
"LicenseRef-scancode-swig",
"GPL-3.0-or-later",
"LicenseRef-scancode-unknown-license-reference",
"GPL-3.0-only",
"Apache-2.0"
] |
permissive
|
inishchith/DeepSpeech
|
965ad34d69eb4d150ddf996d30d02a1b29c97d25
|
dcb7c716bc794d7690d96ed40179ed1996968a41
|
refs/heads/master
| 2021-01-16T16:16:05.282278
| 2020-05-19T08:00:33
| 2020-05-19T08:00:33
| 243,180,319
| 1
| 0
|
Apache-2.0
| 2020-02-26T05:54:51
| 2020-02-26T05:54:50
| null |
UTF-8
|
R
| false
| false
| 276
|
r
|
funcptr_runme.R
|
# SWIG test-suite runner for function-pointer wrapping.
# The test-suite directory is passed as the first trailing command-line arg.
clargs <- commandArgs(trailing=TRUE)
source(file.path(clargs[1], "unittest.R"))
# Load the compiled shared library and the SWIG-generated R bindings.
dyn.load(paste("funcptr", .Platform$dynlib.ext, sep=""))
source("funcptr.R")
cacheMetaData(1)
# do_op(x, y, op) applies the wrapped C function pointer `op` to x and y.
unittest(do_op(1, 3, add), 4)
unittest(do_op(2, 3, multiply), 6)
# funcvar() returns the current value of a C function-pointer variable;
# expected result 5 implies it points at addition here.
unittest(do_op(2, 3, funcvar()), 5)
|
360e26b80a3f37bba5fc310e513489a4a87263a0
|
e2a5cdf2dcbd788ac7c091897b5a027a809c302a
|
/R/pumpCase.R
|
5aeb66f2419a47e6c768cdcaa3c9ad13e2b49ea5
|
[] |
no_license
|
lindbrook/cholera
|
3d20a0b76f9f347d7df3eae158bc8a357639d607
|
71daf0de6bb3fbf7b5383ddd187d67e4916cdc51
|
refs/heads/master
| 2023-09-01T01:44:16.249497
| 2023-09-01T00:32:33
| 2023-09-01T00:32:33
| 67,840,885
| 138
| 13
| null | 2023-09-14T21:36:08
| 2016-09-10T00:19:31
|
R
|
UTF-8
|
R
| false
| false
| 1,414
|
r
|
pumpCase.R
|
#' Extract numeric case IDs by pump neighborhood.
#'
#' @param x An object created by \code{neighborhoodEuclidean()}, \code{neighborhoodVoronoi()} or \code{neighborhoodWalking()}.
#' @param case Character. "address" or "fatality"
#' @return An R list of numeric ID of cases by pump neighborhoods.
#' @export
#' @examples
#' \dontrun{
#' pumpCase(neighborhoodEuclidean())
#' pumpCase(neighborhoodVoronoi())
#' pumpCase(neighborhoodWalking())
#' }
pumpCase <- function(x, case) UseMethod("pumpCase", x)
# Fallback for objects of unrecognized class: nothing to extract.
pumpCase.default <- function(x, case) NULL
#' @export
pumpCase.euclidean <- function(x, case = "address") {
  # One list element per pump, in ascending pump order; each element holds
  # the anchor case IDs whose nearest pump (by Euclidean distance) is that pump.
  pump.ids <- sort(unique(x$nearest.pump))
  case.list <- vector("list", length(pump.ids))
  for (i in seq_along(pump.ids)) {
    case.list[[i]] <- x$anchors[x$nearest.pump == pump.ids[i]]
  }
  # Name elements "p<pump id>", e.g. "p7".
  stats::setNames(case.list, paste0("p", pump.ids))
}
#' @export
pumpCase.voronoi <- function(x, case = "address") {
  # x$statistic.data holds, per pump, a 0/1 membership flag for each case.
  flags <- x$statistic.data
  if (x$case.location == "address") {
    # Observed-case IDs from the orthogonal-projection records.
    lapply(flags, function(f) cholera::ortho.proj$case[f == 1])
  } else if (x$case.location == "anchor") {
    # Anchor (address-level) case IDs.
    lapply(flags, function(f) cholera::fatalities.address$anchor[f == 1])
  }
}
#' @export
pumpCase.walking <- function(x, case = "address") {
  # Validate the selector up front instead of falling through to an else.
  if (!case %in% c("address", "fatality")) {
    stop('case must either be "address" or "fatality"')
  }
  if (case == "address") {
    # Anchor-level case IDs are precomputed on the neighborhood object.
    return(x$cases)
  }
  # case == "fatality": expand each anchor to every fatality registered
  # at that address via the anchor/case lookup table.
  lapply(x$cases, function(ids) {
    cholera::anchor.case[cholera::anchor.case$anchor %in% ids, "case"]
  })
}
|
f5956fa6d01f43427bf5821ca75f655ba3992dfc
|
8c5693b89a888992fe71d6d351699f1c429130ef
|
/p2.R
|
f83b0adaff68b78492089106b58ee7bb13a0cfbb
|
[] |
no_license
|
Aprajita177/RepData_PeerAssessment1
|
edac552779eb47bf62317cd646ad0df386e463dd
|
edfdf955138a4b3a17e736d2468172de81f8d1a2
|
refs/heads/master
| 2021-03-21T04:14:46.563895
| 2020-03-14T15:31:57
| 2020-03-14T15:31:57
| 247,262,618
| 0
| 0
| null | 2020-03-14T11:20:47
| 2020-03-14T11:20:47
| null |
UTF-8
|
R
| false
| false
| 570
|
r
|
p2.R
|
# Reproducible Research assignment: average steps per 5-minute interval,
# split weekday vs. weekend, from the activity-monitoring data set.
# NOTE(review): hard-coded absolute path kept for compatibility with the
# author's environment; a relative path would be more portable.
setwd("C:/Users/MAHE/Documents/RepData_PeerAssessment1")
data <- read.csv("activity.csv")
data$date <- as.Date(data$date)
# Total steps per day (computed but not used below; kept as in the original).
# Bug fix: the column is named "steps" -- the original "data$step" only
# resolved via R's fragile partial matching on `$`.
sum_step <- aggregate(data$steps, by = list(data$date), FUN = sum, na.rm = TRUE)
library(ggplot2)
# Classify each record as weekday or weekend.
data$days <- tolower(weekdays(data$date))
data$day_type <- ifelse(data$days == "saturday" | data$days == "sunday", "weekend", "weekday")
# Mean steps per interval within each day type.
avg_step <- aggregate(data$steps, by = list(data$interval, data$day_type), FUN = mean, na.rm = TRUE)
colnames(avg_step) <- c("interval", "day_type", "steps")
# Time-series panel plot: one facet per day type.
ggplot(avg_step, aes(x = interval, y = steps)) + geom_line() + facet_wrap(~day_type)
|
f271366c52468ae40dad44203df24a5d4af0374c
|
5e613fdaaf680b7220a9331133d79a7dcbca8acd
|
/R/deps/taxize-master/man/getcredibilityratingfromtsn.Rd
|
73f6f195bf30b2bedbfc35dd4e23a09640f34d9f
|
[
"MIT"
] |
permissive
|
hmarx/Alpine-Sky-Islands
|
df0fd965ca4e1d4e3071aa9362ee615a5510175d
|
72ab7d914fea6c76c9ae105e042e11088a9be87f
|
refs/heads/master
| 2021-05-01T02:44:59.818086
| 2017-08-08T15:02:45
| 2017-08-08T15:02:45
| 39,544,747
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 533
|
rd
|
getcredibilityratingfromtsn.Rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/itis.R
\name{getcredibilityratingfromtsn}
\alias{getcredibilityratingfromtsn}
\title{Get credibility rating from tsn}
\usage{
getcredibilityratingfromtsn(tsn, ...)
}
\arguments{
\item{tsn}{TSN for a taxonomic group (numeric)}
\item{...}{optional additional curl options (debugging tools mostly)}
}
\description{
Get credibility rating from tsn
}
\examples{
\dontrun{
getcredibilityratingfromtsn(526852, config=timeout(4))
}
}
\keyword{internal}
|
781300aa18fa960da25900c0f4ecc59b752fa483
|
025649ef7dc50a16f28653ab419fd4ac95d6ae9b
|
/man/insertSpreadAddin.Rd
|
1e8450e3941eaaa49e0ba95e54e35bfa81a361e1
|
[] |
no_license
|
kendonB/typeless
|
0541fea0397e9b17b9afa33f6e2bc28837f83313
|
e1ed4ee2155954725164b165017385788b17a228
|
refs/heads/master
| 2021-01-19T22:21:33.495455
| 2017-04-19T23:31:55
| 2017-04-19T23:31:55
| 88,799,884
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 304
|
rd
|
insertSpreadAddin.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/addin_defs.R
\name{insertSpreadAddin}
\alias{insertSpreadAddin}
\title{Insert spread.}
\usage{
insertSpreadAddin()
}
\description{
Call this function as an addin to insert \code{spread(} at the cursor position.
}
|
3e0e233b2d0292725af64872edd9c0c177e09832
|
6878c8d13df01ce2670c80818239d08845394a5b
|
/my proj.R
|
e34482f1496289a8d4b07823f80507a2a5214409
|
[] |
no_license
|
anu7991/new
|
68148ec86b3de4a8247d6258a8a4c42d3c4d8a10
|
5c1c99c917191e96788f275615e54af1cf7164ff
|
refs/heads/master
| 2022-10-14T05:32:34.370942
| 2020-05-29T04:42:27
| 2020-05-29T04:42:27
| 264,513,240
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,676
|
r
|
my proj.R
|
# Logistic-regression analysis of the Pima diabetes data set:
# recode zero-coded measurements as missing, impute with medians, explore,
# then fit and evaluate two logistic-regression models on held-out data.

# Packages ----
library(tidyverse)
library(ggplot2)
library(dplyr)
# NOTE(review): install.packages() inside an analysis script re-installs on
# every run; normally done once interactively. Kept to preserve behavior.
install.packages("DataExplorer")
library(DataExplorer)
library(caret)
install.packages("caTools")
library(caTools)

# Load data ----
db <- read_csv("C:/Users/gadda/Downloads/ml-latest-small/ml-latest-small/diabetes.csv")

# Zero encodes "missing" in several measurement columns; recode to NA,
# then impute each column with its median.
db <- db %>%
  mutate(Insulin = replace(Insulin, Insulin == "0", NA))
is.na(db$Insulin)
db$Insulin
db <- db %>% mutate(Insulin = replace(Insulin, is.na(Insulin), median(Insulin, na.rm = TRUE)))
db <- db %>% mutate(BloodPressure = replace(BloodPressure, BloodPressure == "0", NA))
db <- db %>% mutate(BloodPressure = replace(BloodPressure, is.na(BloodPressure), median(BloodPressure, na.rm = TRUE)))
db <- db %>% mutate(SkinThickness = replace(SkinThickness, SkinThickness == "0", NA))
db <- db %>% mutate(SkinThickness = replace(SkinThickness, is.na(SkinThickness), median(SkinThickness, na.rm = TRUE)))
glimpse(db)

# Check the distributions ----
ggplot(db, aes(x = SkinThickness)) + geom_histogram(binwidth = 0.25)
db %>% count(SkinThickness)
db <- db %>% mutate(Glucose = replace(Glucose, Glucose == "0", NA))
db <- db %>% mutate(Glucose = replace(Glucose, is.na(Glucose), median(Glucose, na.rm = TRUE)))
glimpse(db)
ggplot(db, aes(x = Glucose)) + geom_histogram(binwidth = 0.25)
ggplot(db, aes(x = Outcome, y = Glucose)) + geom_point()
db$Outcome <- as.factor(db$Outcome)
# Bug fix: "continous" was misspelled (plot_correlation would reject it),
# and a stray positional argument ('Review.Date', apparently copied from
# another project) was removed.
plot_correlation(db, type = "continuous")
create_report(db)

# Model 1: four predictors most correlated with Outcome ----
db1 <- db %>% select(Glucose, BMI, Age, Pregnancies, Outcome)
str(db1)
set.seed(100)
traindataindex <- createDataPartition(db1$Outcome, p = 0.8, list = FALSE)
train1 <- db1[traindataindex, ]
test1 <- db1[-traindataindex, ]
r <- glm(Outcome ~ Glucose + BMI + Age + Pregnancies, data = train1, family = "binomial")
summary(r)
# Predict on the held-out set and compute classification accuracy.
pred <- predict(r, newdata = test1, type = "response")
pred
y_pred_num <- ifelse(pred > 0.5, 1, 0)
y_pred <- factor(y_pred_num, levels = c(0, 1))
# Bug fix: accuracy must compare predictions with the *test* labels. The
# original used train1$Outcome -- wrong set and wrong length, so the
# comparison silently recycled and the reported accuracy was meaningless.
y_act <- test1$Outcome
mean(y_pred == y_act)

# Model 2: all predictors ----
str(db)
set.seed(100)
sample <- sample.split(db$Outcome, SplitRatio = 0.75)
trainingData <- subset(db, sample == TRUE)
testData <- subset(db, sample == FALSE)
logmod <- glm(Outcome ~ Pregnancies + Glucose + BloodPressure + SkinThickness + BMI + Age + DiabetesPedigreeFunction + Insulin, data = trainingData, family = binomial)
summary(logmod)
pred1 <- predict(logmod, newdata = testData, type = "response")
pred1
x_pred_num <- ifelse(pred1 > 0.5, 1, 0)
x_pred <- factor(x_pred_num, levels = c(0, 1))
# Bug fix: same issue as above -- compare with the test labels.
x_act <- testData$Outcome
mean(x_pred == x_act)
|
b2a13c45c7139861b22f58ca0c370039c824fc42
|
b32dd1f1c3b674c1c558570dd0319590694dee34
|
/man/skew.Rd
|
247c6a5fc30c8bd8b250f4e15d037baacff9f367
|
[] |
no_license
|
cran/valmetrics
|
1595ca14df527d868302c7105861b94a49599986
|
9964419ce0f640ce71fe2ff7dbe8d0c1048350be
|
refs/heads/master
| 2023-02-21T04:20:10.619811
| 2021-01-13T14:30:02
| 2021-01-13T14:30:02
| 334,226,965
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 836
|
rd
|
skew.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/skew.R
\name{skew}
\alias{skew}
\title{skew}
\usage{
skew(o, p)
}
\arguments{
\item{o}{A numeric vector. Observed values.}
\item{p}{A numeric vector. Predicted values.}
}
\value{
Skewness of residuals.
}
\description{
Calculates the Skewness of residuals from observed and
predicted values.
}
\details{
Interpretation: smaller is better.
}
\examples{
obs<-c(1:10)
pred<-c(1, 1 ,3, 2, 4, 5, 6, 8, 7, 10)
skew(o=obs, p=pred)
}
\references{
Piikki K., Wetterlind J., Soderstrom M., Stenberg B. (2021). Perspectives
on validation in digital soil mapping of continuous attributes. A review.
Soil Use and Management. \doi{10.1111/sum.12694}
}
\author{
Kristin Piikki, Johanna Wetterlind, Mats Soderstrom and Bo Stenberg,
E-mail: \email{kristin.piikki@slu.se}
}
|
d268294dc0dd5057df66406a5d6380e3937ee427
|
10c2bc2f0ba9dacf702b373bc5f8b57d6f42a0f4
|
/bin/degs_pbmc_prediction.R
|
60606bc08ea2b2831d1906c908b5f64833df7513
|
[] |
no_license
|
powellgenomicslab/SingleCell_Prediction
|
930a18575cae78282675d1be79844f529926b9d5
|
3935dee4cd1b811201a25c6403a6ae5be99f4ac4
|
refs/heads/master
| 2021-03-22T03:28:50.418324
| 2019-10-14T01:18:59
| 2019-10-14T01:18:59
| 88,580,986
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,997
|
r
|
degs_pbmc_prediction.R
|
# Train a classifier on differentially-expressed genes (DEGs) to predict a
# PBMC cell type, then evaluate it on a held-out split with an ROC curve.
# Usage: Rscript degs_pbmc_prediction.R <seed> <positiveClass> <mlMethod>
# Set up command-line arguments -------------------------------------------
args <- commandArgs(trailingOnly = TRUE)
seedPart <- args[1]
positiveClass <- args[2]
mlMethod <- args[3]
# Strip "+" from class labels (e.g. "CD4+") so the name is file/column safe.
positiveClassFormat <- gsub("\\+", "", positiveClass)
# Load libraries ----------------------------------------------------------
library("here")
library("dplyr")
library("caret")
library("pROC")
source(here("bin/degs_prediction.R"))
# Read data ---------------------------------------------------------------
dirData <- paste0("degs_", positiveClass, "_boot-seed_", seedPart)
features <- readRDS(here(file.path("results", "2018-03-27_pbmc_degs_feature-selection", dirData, "degsRes.RDS")))
# Create results directory ------------------------------------------------
newDir <- here(file.path("results", "2018-03-27_pbmc_degs_prediction", paste0("degs_", positiveClass, "_boot-seed_", seedPart, "_", mlMethod)))
dir.create(newDir)
# Read data ---------------------------------------------------------------
pbmc <- readRDS(here("data/pbmc3k_filtered_gene_bc_matrices/pbmc3k_final_list.Rda"))
# Binary outcome: the positive class vs. everything else ("other").
pbmc$meta.data %>%
mutate(cellType = if_else(cell.type == positiveClass, positiveClassFormat, "other")) %>%
mutate(cellType = factor(cellType, levels = c(positiveClassFormat, "other"))) -> expMetadata
rownames(expMetadata) <- rownames(pbmc$meta.data)
# Set up general variables ------------------------------------------------
probPart <- 0.5
phenoVar <- "cellType"
# Get expression data and metadata ----------------------------------------
# Cells as rows, genes as columns; log2(x + 1) transform.
expData <- pbmc$data %>% Matrix::t() %>% as.matrix()
expData <- log2(expData + 1)
if(!all(rownames(expData) == rownames(expMetadata))){
stop("Expression data and metadata are not ordered by cell id")
}
# NOTE(review): seedPart comes from commandArgs() and is character;
# set.seed() expects an integer seed -- confirm this does not error here.
set.seed(seedPart)
trainIndex <- createDataPartition(expMetadata[[phenoVar]], p = probPart, list = FALSE, times = 1)
expTrain <- expData[trainIndex, ]
expTrainMeta <- expMetadata[trainIndex, ]
expTest <- expData[-trainIndex, ]
expTestMeta <- expMetadata[-trainIndex, ]
dataSummary <- capture.output(cat(sprintf("Number of genes: %i\nNumber of cells: %i\n", ncol(expData), nrow(expData))))
# NOTE(review): unusual argument order -- the file path is matched to `con`
# positionally because `text=` is named; works, but writeLines(text, con)
# would be clearer.
writeLines(file.path(newDir, "expData_summary.txt"), text = dataSummary, sep = "\n")
# Train model -------------------------------------------------------------
# trainDEGModel/degPredict are defined in bin/degs_prediction.R (sourced above).
trainedModel <- trainDEGModel(expTrain, expMetadata = expTrainMeta, method = mlMethod, features = features, pVar = phenoVar,
                              positiveClass = positiveClassFormat, seed = 66)
saveRDS(trainedModel, file = file.path(newDir, "trained_model.RDS"))
# Perform prediction in new dataset ---------------------------------------
predictions <- degPredict(features, expTest, trainedModel)
saveRDS(predictions, file = file.path(newDir, "predictions.RDS"))
rocRes <- roc(response = expTestMeta[[phenoVar]],
              predictor = predictions[[positiveClassFormat]],
              levels = trainedModel$levels)
saveRDS(rocRes, file = file.path(newDir, "roc.RDS"))
|
bea046b520355f56d5a4fde600695c045252a5e9
|
bfd694d3d822703e057aba2cfb2714fbefd85f83
|
/st_events_generation.R
|
a79c187c435192d60a10f6b3eb632e2f40c8d003
|
[] |
no_license
|
RFASilva/simulateddatasets
|
64b63b57643c41205b5c3d3327f6f14f6363553f
|
aab258a6f0b1ad5bd790f834dad4b2d436b5154c
|
refs/heads/master
| 2021-07-20T12:27:32.657467
| 2020-03-29T18:03:20
| 2020-03-29T18:03:20
| 89,979,035
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 14,184
|
r
|
st_events_generation.R
|
# Generation of datasets
# Simulates spatio-temporal point patterns (cluster, homogeneous, contagious
# / inhibition, and log-Gaussian Cox processes) with stpp, writes them as
# x,y,t CSVs, and plots them in a space-time cube with rgl::plot3d.
# NOTE(review): `usaboundaries` (the spatial region polygon) is not defined
# in this file -- presumably created elsewhere; confirm before running.
# Time regions use minutes: 525600 = 365 * 24 * 60, i.e. one year.
library(data.table)
library("stpp")
library("rgl")
library("lgcp")
library("sf")
# Cluster Process Daily (dispersion: 1 spatial unit, 1440 min = 1 day)
pcp1 <- rpcp(nparents = 100, mc = 500, npoints = 30000, s.region = usaboundaries, t.region = c(1, 525600), discrete.time = TRUE, replace=FALSE, cluster = c("normal", "exponential"), dispersion = c(1, 1440) )
write.table(cbind(pcp1$xyt[, 1:2], trunc(pcp1$xyt[, 3])), file = "poisson_cluster_process_Daily.csv",sep = ",", row.names = F, col.names=T)
# Cluster Process Weekly
pcp1 <- rpcp(nparents = 50, mc = 1000, npoints = 30000, s.region = usaboundaries, t.region = c(1, 525600), discrete.time = TRUE, replace=FALSE, cluster = c("uniform", "uniform"), dispersion = c(4, 10800) )
write.table(cbind(pcp1$xyt[, 1:2], trunc(pcp1$xyt[, 3])), file = "poisson_cluster_process_Weekly2.csv",sep = ",", row.names = F, col.names=T)
# Cluster Process Weekly
pcp1 <- rpcp(nparents = 50, mc = 1000, npoints = 30000, s.region = usaboundaries, t.region = c(1, 525600), discrete.time = TRUE, replace=FALSE, cluster = c("uniform", "uniform"), dispersion = c(0.034, 10800) )
write.table(cbind(pcp1$xyt[, 1:2], trunc(pcp1$xyt[, 3])), file = "poisson_cluster_process_Weekly3.csv",sep = ",", row.names = F, col.names=T)
# Plot data in a Space-time Cube
pcp1 <- cbind(pcp1$xyt[, 1:2], pcp1$xyt[, 3])
nbcol = 100
color = rev(rainbow(nbcol, start = 0/6, end = 4/6))
zcol = cut(pcp1[,3], nbcol)
plot3d(pcp1[,1], pcp1[,2], pcp1[,3], xlab = "longitude", ylab= "latitude", zlab ="time", col =color[zcol])
# Homogenous Process
hpp1 <- rpp(npoints=30000, s.region = usaboundaries, t.region = c(1, 525600), discrete.time = TRUE, replace = TRUE)
write.table(cbind(hpp1$xyt[, 1:2], trunc(hpp1$xyt[, 3])), file = "homogenous_process.csv",sep = ",", row.names = F, col.names=T)
# Plot data in a Space-time Cube
hpp1 <- cbind(hpp1$xyt[, 1:2], hpp1$xyt[, 3])
nbcol = 100
color = rev(rainbow(nbcol, start = 0/6, end = 4/6))
zcol = cut(hpp1[,3], nbcol)
plot3d(hpp1[,1], hpp1[,2], hpp1[,3], xlab = "longitude", ylab= "latitude", zlab ="time", col =color[zcol])
# Mixed Cluster and Homogenous Process (cluster pattern plus uniform noise)
pcp_mixed <- rpcp(nparents = 50, mc = 1000, npoints = 30000, s.region = usaboundaries, t.region = c(1, 525600), discrete.time = TRUE, replace=FALSE, cluster = c("uniform", "uniform"), dispersion = c(1, 1440) )
hpp_mixed <- rpp(npoints=5000, s.region = usaboundaries, t.region = c(1, 525600), discrete.time = TRUE, replace = TRUE)
process_mixed <- rbind(pcp_mixed$xyt, hpp_mixed$xyt)
write.table(cbind(process_mixed[, 1:2], trunc(process_mixed[, 3])), file = "poisson_cluster_process_daily_noise.csv",sep = ",", row.names = F, col.names=T)
# Plot data in a Space-time Cube
process_mixed <- cbind(process_mixed[, 1:2], process_mixed[, 3])
nbcol = 100
color = rev(rainbow(nbcol, start = 0/6, end = 4/6))
zcol = cut(process_mixed[,3], nbcol)
plot3d(process_mixed[,1], process_mixed[,2], process_mixed[,3], xlab = "longitude", ylab= "latitude", zlab ="time", col =color[zcol])
# CONTAGIOUS PROCESSES
# rinter() with inhibition=FALSE simulates a contagious (attraction) process.
cont2 <- rinter(npoints=3000,
                s.region = usaboundaries,
                t.region = c(1, 525600),
                discrete.time = TRUE,
                thetas=0, deltas=1,
                replace = TRUE,
                thetat=0, deltat=10080,
                recent=10, inhibition=FALSE)
write.table(cbind(cont2$xyt[, 1:2], trunc(cont2$xyt[, 3])), file = "contagious_2.csv",sep = ",", row.names = F, col.names=T)
cont2 <- rinter(npoints=3000,
                s.region = usaboundaries,
                t.region = c(1, 525600),
                discrete.time = TRUE,
                thetas=0, deltas=5,
                replace = TRUE,
                thetat=0, deltat=26280,
                recent=1, inhibition=FALSE)
cont3 <- rinter(npoints=5000,
                s.region = usaboundaries,
                t.region = c(1, 525600),
                discrete.time = TRUE,
                thetas=0, deltas=0.02,
                replace = TRUE,
                thetat=0, deltat=1440,
                recent=1, inhibition=FALSE)
cont3teste <- cbind(cont3$xyt[, 1:2], cont3$xyt[, 3])
nbcol = 100
color = rev(rainbow(nbcol, start = 0/6, end = 4/6))
zcol = cut(cont3teste[,3], nbcol)
plot3d(cont3teste[,1], cont3teste[,2], cont3teste[,3], xlab = "longitude", ylab= "latitude", zlab ="time", col =color[zcol])
write.table(cbind(cont3$xyt[, 1:2], trunc(cont3$xyt[, 3])), file = "contagious_3.csv",sep = ",", row.names = F, col.names=T)
# Log-Gaussian Cox Point Patterns
lgcp4 <- rlgcp(npoints =12000,
               s.region = usaboundaries,
               discrete.time = TRUE,
               scale=c(0.02, 1),
               t.region=c(0,365),
               nx = 20, ny = 20, nt = 365, separable = FALSE,
               model = "cesare", param = c(1, 1, 3, 1, 1, 2), var.grf =1, mean.grf = 20)
lgcp4 <- rlgcp(npoints =5000,
               s.region = usaboundaries,
               discrete.time = TRUE,
               scale=c(1, 10),
               t.region=c(0,365),
               nx = 50, ny = 50, nt = 175, separable = FALSE,
               model = "gneiting", param = c(1, 1, 1, 1, 1, 2), var.grf = 32, mean.grf = 20)
lgcp4 <- rlgcp(npoints = 10000,
               s.region = usaboundaries,
               nx = 50, ny = 50, nt = 50, separable = FALSE,
               model = "gneiting", param = c(1, 1, 1, 1, 1, 2), var.grf = -3, mean.grf = 1)
lgcp4 <- rlgcp(npoints = 10000,
               scale = c(5, 5),
               nx = 60, ny = 60, nt = 50, separable = FALSE,
               model = "gneiting", param = c(1, 1, 1, 1, 1, 2), var.grf = -3, mean.grf = 1)
lgcp4teste <- cbind(lgcp4$xyt[, 1:2], lgcp4$xyt[, 3])
nbcol = 100
color = rev(rainbow(nbcol, start = 0/6, end = 4/6))
zcol = cut(lgcp4teste[,3], nbcol)
plot3d(lgcp4teste[,1], lgcp4teste[,2], lgcp4teste[,3], xlab = "longitude", ylab= "latitude", zlab ="time", col =color[zcol])
hppbla <- rpp(npoints=5000, s.region = usaboundaries, t.region = c(1, 365), discrete.time = TRUE, replace = TRUE)
write.table(cbind(hppbla$xyt[, 1:2], trunc(hppbla$xyt[, 3])), file = "homogenous_process_log.csv",sep = ",", row.names = F, col.names=T)
# Rescale normalized time back to (roughly) days before writing.
write.table(cbind(lgcp4$xyt[, 1:2], ceiling(lgcp4$xyt[, 3]*365000 /1000)), file = "log_gaussian_test.csv",sep = ",", row.names = F, col.names=T)
# Sum the intensity surface over time slices and show it as an image,
# then animate the simulated points on top.
N <- lgcp4$Lambda[,,1]
for(j in 2:(dim(lgcp4$Lambda)[3])){N <- N + lgcp4$Lambda[, , j]}
image(N, col = grey((1000:1) / 1000)) ; box()
animation(lgcp4$xyt, cex = 0.8, runtime = 10, add = TRUE,
          prevalent = "orange")
write.table(cbind(lgcp4$xyt[, 1:2], trunc(lgcp4$xyt[, 3])), file = "log_gaussian_cox_process3.csv",sep = ",", row.names = F, col.names=T)
lgcp4 <- rlgcp(npoints =10000,
               s.region = usaboundaries,
               discrete.time = TRUE,
               scale=c(20, 365),
               t.region=c(0,730),
               nx = 20, ny = 20, nt = 730, separable = FALSE,
               model = "gneiting", param = c(1, 1, 1, 1, 1, 2), var.grf =5, mean.grf = 20)
write.table(cbind(lgcp4$xyt[, 1:2], trunc(lgcp4$xyt[, 3])), file = "log_gaussian_cox_process3.csv",sep = ",", row.names = F, col.names=T)
lgcp4teste <- cbind(lgcp4$xyt[, 1:2], lgcp4$xyt[, 3])
nbcol = 100
color = rev(rainbow(nbcol, start = 0/6, end = 4/6))
zcol = cut(lgcp4teste[,3], nbcol)
plot3d(lgcp4teste[,1], lgcp4teste[,2], lgcp4teste[,3], xlab = "longitude", ylab= "latitude", zlab ="time", col =color[zcol])
write.table(cbind(lgcp4$xyt[, 1:2], trunc(lgcp4$xyt[, 3])), file = "log_gaussian_cox_process.csv",sep = ",", row.names = F, col.names=T)
# Just tests
# Exploratory parameter sweeps below; results are mostly plotted, not saved.
lbd <- function(x,y,t,a) {exp(-4*y) * exp(-2*t)}
pcp_lbda <- rpcp(nparents = 50, mc = 1000,
                 npoints = 30000, s.region = usaboundaries,
                 t.region = c(1, 525600), discrete.time = TRUE,
                 replace=FALSE, cluster = "uniform", lambda = lbd,
                 dispersion = c(4, 1440) )
# This one creates groups in only part of the United States, which may be
# what I want -- it can also be mixed with noise.
lbda <- function(x,y,t){ 10 }
pcp2teste <- rpcp(nparents=30, npoints=30000,
                  s.region = usaboundaries,
                  t.region = c(1, 525600),
                  discrete.time = TRUE,
                  dispersion = c(2, 1440),
                  cluster = "exponential",
                  )
# Plot data in a Space-time Cube
pcpteste <- cbind(pcp2teste$xyt[, 1:2], pcp2teste$xyt[, 3])
nbcol = 100
color = rev(rainbow(nbcol, start = 0/6, end = 4/6))
zcol = cut(pcpteste[,3], nbcol)
plot3d(pcpteste[,1], pcpteste[,2], pcpteste[,3], xlab = "longitude", ylab= "latitude", zlab ="time", col =color[zcol])
# TESTS FOR THE CONTAGIOUS PROCESS
bla <- rinter(npoints=250, recent=1,
              deltas=7.5, deltat=10,
              inhibition=FALSE)
data(northcumbria)
cont1 <- rinter(npoints=2500, s.region=northcumbria, t.region=c(1,200),
                thetas=0, deltas=5000, thetat=0, deltat=10, recent=1, inhibition=FALSE)
# 1 day of inhibition
cont1 <- rinter(npoints=2500,
                s.region = usaboundaries,
                t.region = c(1, 525600),
                discrete.time = TRUE,
                thetas=0, deltas=2,
                thetat=0, deltat=1440,
                recent=1, inhibition=FALSE)
cont2 <- rinter(npoints=250,
                s.region = usaboundaries,
                t.region = c(1, 300),
                discrete.time = TRUE,
                thetas=0, deltas=1,
                replace = TRUE,
                thetat=0, deltat=30,
                recent=1, inhibition=FALSE)
cont2 <- rinter(npoints=50000,
                s.region = usaboundaries,
                t.region = c(1, 525600),
                discrete.time = TRUE,
                thetas=0, deltas=2,
                replace = TRUE,
                thetat=0, deltat=10080,
                recent=1, inhibition=FALSE)
cont3 <- rinter(npoints=5000,
                s.region = usaboundaries,
                t.region = c(1, 525600),
                discrete.time = TRUE,
                thetas=0, deltas=0.5,
                replace = TRUE,
                thetat=0, deltat=10080,
                recent=1, inhibition=FALSE)
write.table(cbind(cont3$xyt[, 1:2], trunc(cont3$xyt[, 3])), file = "contagious_3.csv",sep = ",", row.names = F, col.names=T)
# Plot data in a Space-time Cube
contteste <- cbind(cont3$xyt[, 1:2], cont3$xyt[, 3])
nbcol = 100
color = rev(rainbow(nbcol, start = 0/6, end = 4/6))
zcol = cut(contteste[,3], nbcol)
plot3d(contteste[,1], contteste[,2], contteste[,3], xlab = "longitude", ylab= "latitude", zlab ="time", col =color[zcol])
data(northcumbria)
cont1 = rinter(npoints=250, s.region=northcumbria, t.region=c(1,200),
               thetas=0, deltas=5000, thetat=0, deltat=10, recent=1, inhibition=FALSE)
lgcp1 <- rlgcp(npoints=3000,
               separable=TRUE,
               model="exponential", param=c(1,1,1,1,1,2), var.grf = 2, mean.grf = -0.5 * 2)
lgcp2 <- rlgcp(npoints=200,
               s.region = usaboundaries,
               t.region = c(1, 365),
               separable=TRUE,
               model="exponential", param=c(0.1,0.1,0.1,0.1,0.1,0.2), var.grf=0.02, mean.grf=-0.04)
lgcp4 <- rlgcp(npoints=2000,
               s.region=northcumbria, t.region=c(1,400),
               scale=c(1000, 400),
               discrete.time = TRUE,
               nx=50, ny=50, nt=50,
               separable=TRUE,
               model="exponential", param=c(0.01,0.01,0.01,0.01,0.01,0.02), var.grf=1, mean.grf=0)
lgcp4 <- rlgcp(npoints =12000,
               s.region = usaboundaries,
               t.region=c(0,1),
               nx = 50, ny = 50, nt = 50, separable = FALSE,
               model = "gneiting", param = c(1, 1, 1, 1, 1, 2), var.grf =0.25, mean.grf = 0)
lgcp4 <- rlgcp(npoints = 200, nx = 50, ny = 50, nt = 50, separable = FALSE,
               model = "gneiting", param = c(1, 1, 1, 1, 1, 2), var.grf = 1, mean.grf = 0)
lgcp4 <- rlgcp(npoints =12000,
               s.region = usaboundaries,
               scale=c(0.02, 1),
               t.region=c(0,365),
               nx = 20, ny = 20, nt = 365, separable = FALSE,
               model = "cesare", param = c(1, 1, 3, 1, 1, 2), var.grf =1, mean.grf = 20)
N <- lgcp4$Lambda[,,1]
for(j in 2:(dim(lgcp4$Lambda)[3])){N <- N + lgcp4$Lambda[, , j]}
image(N, col = grey((1000:1) / 1000)) ; box()
animation(lgcp4$xyt, cex = 0.8, runtime = 10, add = TRUE,
          prevalent = "orange")
lgcp1 <- rlgcp(npoints = 8000, nx = 50, ny = 50, nt = 50, separable = TRUE,
               model = "exponential", param = c(1, 1, 1, 1, 1, 2), var.grf =2, mean.grf = -0.5*2)
lgcp1teste <- cbind(lgcp1$xyt[, 1:2], lgcp1$xyt[, 3])
nbcol = 100
color = rev(rainbow(nbcol, start = 0/6, end = 4/6))
zcol = cut(lgcp1teste[,3], nbcol)
plot3d(lgcp1teste[,1], lgcp1teste[,2], lgcp1teste[,3], xlab = "longitude", ylab= "latitude", zlab ="time", col =color[zcol])
lgcp4 <- rlgcp(npoints =10000,
               s.region = usaboundaries,
               discrete.time = TRUE,
               scale=c(0.02, 365),
               t.region=c(0,730),
               nx = 20, ny = 20, nt = 730, separable = FALSE,
               model = "gneiting", param = c(1, 1, 1, 1, 1, 2), var.grf =5, mean.grf = 20)
lgcp4 <- rlgcp(npoints =10000,
               s.region = usaboundaries,
               discrete.time = TRUE,
               scale=c(20, 365),
               t.region=c(0,730),
               nx = 20, ny = 20, nt = 730, separable = FALSE,
               model = "gneiting", param = c(1, 1, 1, 1, 1, 2), var.grf =5, mean.grf = 20)
lgcp4teste <- cbind(lgcp4$xyt[, 1:2], lgcp4$xyt[, 3])
nbcol = 100
color = rev(rainbow(nbcol, start = 0/6, end = 4/6))
zcol = cut(lgcp4teste[,3], nbcol)
plot3d(lgcp4teste[,1], lgcp4teste[,2], lgcp4teste[,3], xlab = "longitude", ylab= "latitude", zlab ="time", col =color[zcol])
N <- lgcp4$Lambda[,,1]
for(j in 2:(dim(lgcp4$Lambda)[3])){N <- N + lgcp4$Lambda[, , j]}
image(N, col = grey((1000:1) / 1000)) ; box()
animation(lgcp4$xyt, cex = 0.8, runtime = 10, add = TRUE,
          prevalent = "orange")
|
b8071d9f229439a1e1c22f6fdd4c14ac681474a9
|
47e6293d178771302b133e6c1b2c89f64e218dc1
|
/man/tidy_cdm.Rd
|
ae6f5ba6c48a26d91bb701434d1ea1997658c5dc
|
[] |
no_license
|
fkeck/flexitarian
|
8a0e876aa1c57dada4d4dba8acef7fac990862f6
|
3da0dae2477994f0c407b40de1daf07e20239125
|
refs/heads/master
| 2022-08-21T08:46:52.230224
| 2022-08-15T08:53:01
| 2022-08-15T08:53:01
| 202,161,521
| 4
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 694
|
rd
|
tidy_cdm.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/functions.R
\name{tidy_cdm}
\alias{tidy_cdm}
\title{Community data matrix to tibble}
\usage{
tidy_cdm(x, row.name = "SITE", key.name = "TAXON", value.name = "COUNT")
}
\arguments{
\item{x}{a community matrix or dataframe.}
\item{row.name}{name of the column where row names are transfered.}
\item{key.name}{name of the key column.}
\item{value.name}{name of the value column.}
}
\value{
a tibble.
}
\description{
Community data matrix to tibble
}
\examples{
x <- matrix(rpois(100, 10), nrow = 10)
rownames(x) <- paste("Site", 1:10)
colnames(x) <- paste("Species", LETTERS[1:10])
x
x_tidy <- tidy_cdm(x)
x_tidy
}
|
184f073e3fb2a75b6ec51056e2923b401c143b93
|
3222354e788f13415b26bd31861b899e37812eb4
|
/partialSCRIPT.R
|
e53897e9ce1321cbe116535bb478283a54e4a389
|
[] |
no_license
|
stevenyuser/eyewearanalysis
|
b70ed731d5e64b4006573fa970334e06059b1927
|
7f3e9420a061e9a6a1d76d82215dc5848313b9ca
|
refs/heads/main
| 2023-08-10T06:48:33.020060
| 2021-09-07T03:24:27
| 2021-09-07T03:24:27
| 378,267,533
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,650
|
r
|
partialSCRIPT.R
|
# patternize workflow: align RGB color patterns across specimen images using
# landmarks, sum them into a heatmap, and compute pattern areas.
# load patternize
library(patternize)
# List with samples -- reduced list for testing
IDlist <- c('LabA3',
            'LabB3',
            'ReadingA2',
            'ReadingB3')
# landmark list
prepath <- 'landmarks/landmarks_jpg'
extension <- '_landmarks.txt'
landmarkList <- makeList(IDlist, 'landmark', prepath, extension)
# image list (note: prepath/extension are reused/overwritten here)
prepath <- 'images/Edit1_Enhanced'
extension <- '.jpg'
imageList <- makeList(IDlist, 'image', prepath, extension)
# align color patterns
# Target RGB color to extract; LabA3 is the reference shape for alignment.
RGB <- c(208, 99, 0)
rasterList_lanRGB <- patLanRGB(imageList, landmarkList, RGB, transformRef = 'LabA3', resampleFactor = 1,
colOffset = 0.01, crop = TRUE, res = 300, adjustCoords = TRUE, plot = 'stack')
# sum color patterns
summedRaster <- sumRaster(rasterList_lanRGB, IDlist, 'RGB')
# Cartoon outline/vein files used as the plotting backdrop.
outline <- read.table('cartoon/LabA3_outline.txt', header = F)
lines <- list.files(path = 'cartoon', pattern = 'LabA3_vein', full.names = T)
colfunc <- c("black","lightblue","blue","green", "yellow","red")
plotHeat(summedRaster = summedRaster, IDlist, plotCartoon = T, refShape = 'target', outline, lines, landmarkList,
cartoonID = 'LabA3', cartoonFill = T, cartoonOrder = 'under', colpalette = colfunc)
plotHeat(summedRaster, IDlist, plotCartoon = F, refShape = 'target', outline = outline,
lines = lines, landList = landmarkList,
imageList = imageList, cartoonID = 'LabA3', cartoonFill = 'red', cartoonOrder = 'under',
colpalette = colfunc)
# Relative area of the extracted pattern per specimen.
area <- patArea(rasterList_lanRGB, IDlist, refShape = 'target', type = 'RGB', outline = outline, imageList =
imageList, cartoonID = 'LabA3')
|
233c866d0d03fafca524f997c1fa234abcbc8acc
|
9836f08434e08bcd1abf0cb001b217fe3ef01188
|
/cachematrix.R
|
35e6c233a685083527edea964bd581f737d627fc
|
[] |
no_license
|
ivwolfman/ProgrammingAssignment2
|
ada2c366b59d377c2d953e56e772395a32636e07
|
5b917629c7bcabce13063cdbbe3eb5d319dc9f38
|
refs/heads/master
| 2021-01-17T18:02:44.660160
| 2014-12-21T05:41:10
| 2014-12-21T05:41:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,481
|
r
|
cachematrix.R
|
## makeCacheMatrix: build a "cache-aware matrix" -- a list of closures that
## share an environment holding a matrix and (lazily) its inverse.
##
## Returned methods:
## - setMatrix(y): store a new matrix and invalidate any cached inverse
## - getMatrix(): return the currently stored matrix
## - setInverse(inv): store the inverse (normally called by cacheSolve)
## - getInverse(): return the cached inverse, or NULL if not yet computed
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  setMatrix <- function(new_matrix) {
    # Replace the matrix and drop the stale inverse.
    x <<- new_matrix
    cached_inverse <<- NULL
  }
  getMatrix <- function() {
    x
  }
  setInverse <- function(value) {
    cached_inverse <<- value
  }
  getInverse <- function() {
    cached_inverse
  }
  list(
    setMatrix = setMatrix,
    getMatrix = getMatrix,
    setInverse = setInverse,
    getInverse = getInverse
  )
}
## cacheSolve: return the inverse of the matrix held by a makeCacheMatrix
## object `x`. Equivalent to solve(x$getMatrix(), ...), but reuses the
## cached inverse on repeated calls (emitting "getting cached data").
cacheSolve <- function(x, ...) {
  cached <- x$getInverse()
  if (is.null(cached)) {
    # Cache miss: compute, store for next time, and return.
    inverse <- solve(x$getMatrix(), ...)
    x$setInverse(inverse)
    inverse
  } else {
    message("getting cached data")
    cached
  }
}
|
44fe5cdcb828fb8d747d0fdea4acecab77ae91fc
|
89806ba41093b9fc3fc96d3cd70c4fd45598af2b
|
/survey-svystandardize.R
|
a7ed36c665f060fc3f41a86d33eae63e0e2cc9c9
|
[] |
no_license
|
yikeshu0611/Survey-data-analysis
|
90ddefab7582b14f4ec19ed82bf985d4003b89d5
|
ff81a2417e68ae2e3501ff3e515695dfc0b7901e
|
refs/heads/master
| 2023-03-15T21:57:11.273703
| 2019-08-26T14:53:42
| 2019-08-26T14:53:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,158
|
r
|
survey-svystandardize.R
|
# This code expands on the example provided in documentation to
# survey::svystandardize() by replicating all point estimates in NCHS Data Brief No. 92,
# April 2012, # "Total and High-density Lipoprotein Cholesterol in Adults: National
# Health and Nutrition Examination Survey, 2009-2010"
# http://www.cdc.gov/nchs/data/databriefs/db92.htm
# http://www.cdc.gov/nchs/data/databriefs/db92.pdf
# Replicating age-adjusted estimates in Figure 1
# http://www.cdc.gov/nchs/data/databriefs/db92_fig1.png
# As noted in documentation, standard errors do not exactly match NCHS estimates
# Michael Laviolette PhD MPH, statman54@gmail.com
library(dplyr)
library(srvyr)
library(survey)
data(nhanes)
# convert variables of interest to factor
nhanes <- nhanes %>%
# code variables to factors
# race: 1 = Hispanic, 2 = non-Hispanic white, 3 = non-Hispanic black,
# 4 = other
# RIAGENDR (gender): 1 = male, 2 = female
# HI_CHOL (high cholesterol): 1 = Yes, 0 = No
mutate(race = factor(race, 1:4,
c("Hispanic", "Non-Hispanic white",
"Non-Hispanic black", "Other")),
RIAGENDR = factor(RIAGENDR, 1:2, c("Men", "Women")),
# indicator for high cholesterol
HI_CHOL = factor(HI_CHOL, 1:0, c("Yes", "No")),
# this is to have a variable with same value throughout;
# needed to standardize over entire sample
all_adults = 1)
# create survey design object
design <- as_survey_design(nhanes, ids = SDMVPSU, strata = SDMVSTRA,
weights = WTMEC2YR, nest = TRUE)
# function to compute estimates of high cholesterol for age 20+, standardized
# by age groups
# single argument is subpopulation over which standardization occurs, as string
getPrevalence <- function(over) {
group_vars <- syms(over)
svystandardize(design, by = ~ agecat, over = make.formula(over),
# using NCHS standard population for ages 6-19, 20-39,
# 40-59, 60+
population = c(55901, 77670, 72816, 45364),
# only HI_CHOL has missing values
excluding.missing = ~ HI_CHOL) %>%
filter(agecat != "(0,19]") %>%
group_by(!!!group_vars) %>%
summarize(pct = survey_mean(HI_CHOL == "Yes", na.rm = TRUE)) %>%
mutate_at("pct", function(x) round(100 * x, 1)) %>%
mutate_at("pct_se", function(x) round(100 * x, 3))
}
# Both sexes, all race and ethnicity groups (that is, all adults age 20+)
# CDC prevalence: 13.4
getPrevalence("all_adults")
# By sex, all race-ethnicity groups
# Men 12.2
# Women 14.3
getPrevalence("RIAGENDR")
# By race-ethnicity group, both sexes
# Hispanic Non-Hispanic white Non-Hispanic black
# Total 14.5 13.5 10.3
getPrevalence("race")
# By race-ethnicity group and sex
# Hispanic Non-Hispanic white Non-Hispanic black
# Men 15.4 11.4 10.2
# Women 13.2 15.4 10.3
getPrevalence(c("race", "RIAGENDR"))
### END
|
146b52aa29ddc347af13e8db5bfa364e27c1d09c
|
88c18faabe83ce2c3a07a13791b3e6026619518f
|
/R/gwas_random_snps_chipseq.R
|
10a88fd550a8d90a0adcc45a3f087a4f7fc01501
|
[] |
no_license
|
sq-96/heart_atlas
|
fd98edc9b305f1ab6fa5d327fe9c9034f4c1114b
|
3deed4c3d382072ccfd78d43459d1b53d93eff3f
|
refs/heads/master
| 2023-06-25T13:26:05.273996
| 2021-07-29T20:35:20
| 2021-07-29T20:35:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,998
|
r
|
gwas_random_snps_chipseq.R
|
# Compare fine-mapped AFib GWAS SNPs in open chromatin (high PIP vs.
# size-matched random low-PIP sets) against heart ChIP-seq annotations,
# then plot the overlap proportions with 95% CI error bars.
source('R/analysis_utils.R')
#hg38
# Cell-type differentially accessible peak calls (ArchR output).
markers <- readRDS('/project2/gca/aselewa/heart_atlas_project/ArchR/ArchR_heart_latest_noAtrium/PeakCalls/DA_MARKERS_FDRP_1_log2FC_1.rds')
cm.markers <- hg38ToHg19(markers$Cardiomyocyte)
# NOTE(review): `peak.set` is not defined in this file — presumably created
# by R/analysis_utils.R; confirm.
peak.set.hg19 <- hg38ToHg19(peak.set)
seqlevelsStyle(peak.set.hg19) <- "NCBI"
#hg19
finemap.res <- readRDS('GWAS/finemapping/aFib_Finemapped_GeneMapped_ActivePromoter_07222021.gr.rds')
# keep one record per SNP
finemap.res <- finemap.res[!duplicated(finemap.res$snp),]
# split into high-confidence (PIP > 0.5) and background (PIP < 0.01) SNPs
high.pip.snps <- finemap.res[finemap.res$pip>0.5,]
low.pip.snps <- finemap.res[finemap.res$pip<0.01,]
high.pip.snps.gr <- GRanges(seqnames = high.pip.snps$chr,
                            ranges = IRanges(start = high.pip.snps$pos, end = high.pip.snps$pos),
                            snp = high.pip.snps$snp)
low.pip.snps.gr <- GRanges(seqnames = low.pip.snps$chr,
                           ranges = IRanges(start = low.pip.snps$pos, end = low.pip.snps$pos),
                           snp = low.pip.snps$snp)
# load all SNPs with MAF > 5%
dbsnp150 <- rtracklayer::import('/project2/xinhe/shared_data/dbsnp150/dbsnp_150_maf05_snpsOnly.vcf.gz') #hg19
dbsnp150.gr <- SummarizedExperiment::rowRanges(dbsnp150)
dbsnp150.gr$SNP_id <- rownames(VariantAnnotation::info(dbsnp150))
seqlevelsStyle(dbsnp150.gr) <- 'UCSC'
# NOTE(review): dbsnp150.gr is built but never used below — confirm whether
# the random sets were meant to be drawn from it instead of low-PIP SNPs.
#overlap random snps with OCRs
# restrict both SNP sets to those falling inside open chromatin regions
high.pip.snps.gr <- subsetByOverlaps(high.pip.snps.gr, peak.set.hg19)
low.pip.snps.gr <- subsetByOverlaps(low.pip.snps.gr, peak.set.hg19)
# draw 15 random low-PIP subsets, each size-matched to the high-PIP set
nreps <- 15
snp.list <- list()
for(i in 1:nreps){
  snp.list[[i]] <- low.pip.snps.gr[sample(1:length(low.pip.snps.gr), size = length(high.pip.snps.gr), replace = F),]
}
# heart ChIP-seq annotations: FOG2/GATA4/TBX5 (lifted from mm10) and H3K27ac
fgt.chip <- readr::read_tsv('ENCODE/FGT_ChIP_lifted_from_mm10.bed', col_names = F)
fgt.chip.gr <- GRanges(seqnames = sub('chr','',fgt.chip$X1), ranges = IRanges(start = fgt.chip$X2, end = fgt.chip$X3), type=fgt.chip$X4)
h3k <- readr::read_tsv('ENCODE/H3k27ac_gwas_hg19/hg19_mapped/H3K27ac_heart_concat.bed', col_names = F)
h3k.gr <- GRanges(seqnames = sub('chr','',h3k$X1), ranges = IRanges(start = h3k$X2, end = h3k$X3))
encode.gr <- list("Fog/Gata4/Tbx5"=fgt.chip.gr, "H3k27ac"=h3k.gr)
# proportion of SNPs in each set overlapping each annotation
high.pip.overlaps <- join_overlap_list(gr.list = encode.gr, X = high.pip.snps.gr)
random.overlaps <- lapply(snp.list, function(x){join_overlap_list(gr.list = encode.gr, X = x)})
high.pip.overlaps.prop <- lapply(high.pip.overlaps, function(x){length(unique(x$snp))/length(high.pip.snps.gr)})
random.overlaps.prop <- unlist(lapply(random.overlaps, function(x){
  sapply(x, function(y){length(unique(y$snp))/length(snp.list[[1]])})
}))
# mean and spread of the random-set proportions
# NOTE(review): the sqrt() denominator is the number of sampled SNPs
# (length(snp.list[[1]])); a standard error across replicates would divide
# by sqrt(nreps) instead — confirm intent.
mean.fgt.random.prop <- mean(random.overlaps.prop[names(random.overlaps.prop)=="Fog/Gata4/Tbx5"])
sd.fgt.random.prop <- sd(random.overlaps.prop[names(random.overlaps.prop)=="Fog/Gata4/Tbx5"])/sqrt(length(snp.list[[1]]))
mean.h3.random.prop <- mean(random.overlaps.prop[names(random.overlaps.prop)=="H3k27ac"])
sd.h3.random.prop <- sd(random.overlaps.prop[names(random.overlaps.prop)=="H3k27ac"])/sqrt(length(snp.list[[1]]))
# assemble plotting frame: observed high-PIP proportions + random-set means
chipseq.df <- data.frame(props = c(high.pip.overlaps.prop$`Fog/Gata4/Tbx5`, high.pip.overlaps.prop$H3k27ac,
                                   mean.fgt.random.prop, mean.h3.random.prop),
                         type = rep(c("Fog/Gata4/Tbx5","H3k27ac"), 2),
                         SNPs = rep(c("GWAS SNPs in OCRs (PIP > 0.5)", "GWAS SNPs in OCRs (PIP < 0.01)"), each = 2),
                         sd = c(NA, NA, sd.fgt.random.prop, sd.h3.random.prop))
chipseq.df$SNPs <- factor(chipseq.df$SNPs, levels = c("GWAS SNPs in OCRs (PIP > 0.5)", "GWAS SNPs in OCRs (PIP < 0.01)"))
# grouped bar plot; error bars (±1.96*sd) only exist for the random sets
pdf('ChIP_seq_PIP50_overlap.pdf', width=8, height=6)
ggplot(chipseq.df, aes(x=type, y=props, fill=SNPs)) +
    geom_bar(stat='identity', position='dodge') +
    ggClean() +
    ylab('Proportion of SNPs') +
    xlab('') +
    scale_fill_brewer(palette = 'Set2') +
    geom_errorbar(aes(ymin=props-(1.96*sd), ymax=props+(1.96*sd)), width=0.1,
                  position=position_dodge(.9)) + coord_cartesian(ylim = c(0, 1))
dev.off()
|
356128e32a715a65ea5b61968b01ca6068bb0904
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/agRee/examples/agree.sdd.Rd.R
|
6036bfa550218d7613f1fbf8a14aedba5bcbfad5
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 187
|
r
|
agree.sdd.Rd.R
|
# Example from the agRee package: smallest detectable difference for the
# cerebellum measurements in the petVT test-retest data set.
library(agRee)
### Name: agree.sdd
### Title: Smallest Detectable Difference
### Aliases: agree.sdd
### Keywords: htest
### ** Examples
data(petVT)
agree.sdd(petVT$cerebellum)
|
ea23cbe5351eb03451a8e4116c0c326e59c1566f
|
ab01b36a70413e220cd9a95e756c22c2f9b9b602
|
/rprog_data_ProgAssignment3-data/rankhospital.R
|
02f183170e73220472a3db7bb75fe35dedf6b23d
|
[] |
no_license
|
TiagoDinisFonseca/DataScience
|
3af4ba1a9eec3ad14a173f418145bed7e8ef3feb
|
4a045cdfac4d6573248a28d3510b4cef1530c16f
|
refs/heads/master
| 2020-04-02T10:36:30.587951
| 2014-07-04T19:30:46
| 2014-07-11T00:20:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,009
|
r
|
rankhospital.R
|
# Rank hospitals within a state by 30-day mortality for a given outcome.
#
# state:   two-letter state abbreviation (case-insensitive).
# outcome: one of "heart attack", "heart failure", "pneumonia"
#          (case-insensitive).
# num:     rank to return — "best", "worst", or a positive integer.
# Returns the hospital name (column 2) at that rank, ties broken
# alphabetically by name; NA if num exceeds the number of ranked hospitals.
rankhospital <- function(state, outcome, num = "best"){
  # everything read as character; mortality rates converted below
  outcomedata <- read.csv("outcome-of-care-measures.csv", colClasses = "character")
  # Normalize state once so validation and filtering agree.
  # (Bug fix: validation used toupper(state) but the row filter compared the
  # raw value, so a lowercase input passed validation yet matched no rows.)
  state <- toupper(state)
  # get the list of states and test if state is in
  states <- unique(outcomedata[, 7])
  if (!(state %in% states)) {
    stop("invalid state")
  }
  # map outcome to its 30-day mortality column
  i <- switch(tolower(outcome),
              "heart attack"  = 11,
              "heart failure" = 17,
              "pneumonia"     = 23,
              stop("invalid outcome"))
  # keep rows for the state with a parseable mortality rate
  # ("Not Available" becomes NA under as.numeric, hence suppressWarnings)
  rate <- suppressWarnings(as.numeric(outcomedata[, i]))
  tmp <- outcomedata[which(outcomedata[, 7] == state & !is.na(rate)), ]
  # translate "best"/"worst" into numeric ranks
  if (num == "best") {
    num <- 1
  } else if (num == "worst") {
    num <- nrow(tmp)
  } else if (num > nrow(tmp)) {
    return(NA)
  }
  # order by rate, ties broken by hospital name (column 2)
  result <- tmp[order(suppressWarnings(as.numeric(tmp[, i])), tmp[, 2]), 2]
  result[num]
}
|
07bcee19999784cd11bd41e34ec5ebcc5d6bee07
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/GWmodel/examples/LondonHP.Rd.R
|
cf8fb17ca5de028f0fa9515f032e4c90dba4a19b
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 270
|
r
|
LondonHP.Rd.R
|
# Example from the GWmodel package: plot London borough boundaries with the
# house-price sample points overlaid.
library(GWmodel)
### Name: LondonHP
### Title: London house price data set (SpatialPointsDataFrame)
### Aliases: LondonHP londonhp
### Keywords: data,house price
### ** Examples
data(LondonHP)
data(LondonBorough)
ls()
plot(londonborough)
plot(londonhp, add=TRUE)
|
6651ffd62b5443c1560dfbbef9e2040933a32732
|
d9a4dce87b2975f3242e722955e69e221057b034
|
/R/make_sets.R
|
07eae3af43f5455534d742ba38aa4223178c5a27
|
[] |
no_license
|
JoeLugo-zz/Classification
|
63d4e19b91546712b2d7e8155ed4b92ab73b5215
|
4772d055c177c7c6797d5cec50d6df15b7e80000
|
refs/heads/master
| 2022-11-04T08:12:39.271258
| 2017-02-01T04:02:19
| 2017-02-01T04:02:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,022
|
r
|
make_sets.R
|
# Build four train/test data-set variants with progressively fewer columns.
#
# train.df: training frame; must contain obs, startdate, enddate, id and an
#           `interesting` outcome column (integer/factor codes).
# test.df:  test frame with the same bookkeeping columns (keeps obs).
# Returns list(train_list, test_list), each a list of 4 data frames:
#   1: all columns, complete rows only
#   2: income columns removed
#   3: income + demographic columns removed
#   4: as 3, plus the *_mean survey-experience columns removed
# Train variants drop the obs column; test variants keep obs and are made
# disjoint: each observation lands in the first variant it completes.
MakeSets <- function(train.df, test.df) {
  # column groups shared by the four variants
  income_cols <- c("gross_monthly_income_imputed", "net_monthly_income_imputed",
                   "gross_monthly_income_cat", "net_monthly_income_cat",
                   "gross_household_income", "net_household_income")
  demo_cols <- c("gender", "position", "year_birth", "age_member", "age_cat",
                 "age_head", "num_members", "num_children", "partner",
                 "civil_status", "dom_sit", "dwell_type", "urban_char", "occ",
                 income_cols,
                 "edu", "edu_diploma", "edu_cat", "is_member", "recruitment",
                 "origin", "have_simPC")
  mean_cols <- c("interesting_mean", "enjoy_mean", "difficult_mean",
                 "thinking_mean", "clear_mean")
  drops <- list(character(0), income_cols, demo_cols, c(demo_cols, mean_cols))
  # drop bookkeeping columns not used for modelling
  for (col in c("startdate", "enddate", "id")) {
    train.df[[col]] <- NULL
    test.df[[col]] <- NULL
  }
  # recode the outcome as a factor "interesting<k>" (k = integer code)
  interesting <- factor(paste0("interesting", as.integer(train.df$interesting)))
  train.df$interesting <- NULL
  train.df <- cbind(train.df, interesting)
  # training variants: drop columns, keep complete rows, remove obs
  train_list <- lapply(drops, function(drop_cols) {
    out <- train.df[, !(names(train.df) %in% drop_cols), drop = FALSE]
    out <- out[complete.cases(out), ]
    out$obs <- NULL
    out
  })
  # test variants: as above, but keep obs and exclude observations already
  # claimed by an earlier (wider) variant
  test_list <- vector("list", length(drops))
  seen_obs <- NULL
  for (k in seq_along(drops)) {
    out <- test.df[, !(names(test.df) %in% drops[[k]]), drop = FALSE]
    out <- out[complete.cases(out), ]
    # Bug fix: filter on this variant's own obs column. The original used
    # test.df$obs for variant 4, whose length differs after complete.cases,
    # so the logical index was misaligned/recycled over the wrong rows.
    out <- out[!(out$obs %in% seen_obs), ]
    seen_obs <- unique(c(seen_obs, out$obs))
    test_list[[k]] <- out
  }
  return(list(train_list, test_list))
}
# Build the four variants for both the split data and the full data.
# (newtrain/newtest and train_data/test_data are created elsewhere — confirm.)
all_sets_split <- MakeSets(newtrain, newtest)
train_list_split <- all_sets_split[[1]]
test_list_split <- all_sets_split[[2]]
all_sets <- MakeSets(train_data, test_data)
train_list <- all_sets[[1]]
test_list <- all_sets[[2]]
|
c7e181136a2613b2616a75ce9a976475b72ebc36
|
079e516d033cb06871432f77e6a44d8d6d4b145d
|
/R/SaveProps.R
|
57611c8111ce5332c9c74441bf83579f0edae7e2
|
[] |
no_license
|
willhonaker/R2Adobe
|
6fee15236440f6332a92c9c101bd7f49930e5986
|
7aae4cefadb1109cdc0c018469d3bcdd3ccddea1
|
refs/heads/master
| 2021-11-23T18:05:58.210442
| 2021-11-22T22:45:45
| 2021-11-22T22:45:45
| 204,045,985
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,291
|
r
|
SaveProps.R
|
#' @title Save Prop
#'
#' @details Enables/disables/updates a prop for a selection of RSIDs (bulk version coming soon).
#'
#' @description Enables/disables/updates a prop for a selection of RSIDs (bulk version coming soon).
#'
#' @param id Prop identifier, e.g. \code{"prop10"}.
#' @param name Display name for the prop.
#' @param description Free-text description of the prop.
#' @param rsids Character vector of report suite IDs the prop is saved to.
#' @param enabled \code{"true"}/\code{"false"}: whether the prop is enabled.
#' @param pathing_enabled \code{"true"}/\code{"false"}: enable pathing for the prop.
#' @param list_enabled \code{"true"}/\code{"false"}: treat the prop as a list prop.
#' @param participation_enabled \code{"true"}/\code{"false"}: enable participation.
#' @param verbosity Logical; passed through to \code{JWTPost} as
#'   \code{verbose_output}.
#'
#' @return Message indicating that the prop was saved.
#'
#' @importFrom jsonlite fromJSON
#'
#' @export
#'
#' @examples \dontrun{
#' SaveProps(id = "prop10",
#'           name = "My Cool Prop (c10)",
#'           description = "[Information about your prop here.]",
#'           rsids = c("myrisd1", "myrsid2", "myrsid3"))
#' }
#'
SaveProps <- function(id,
                      name,
                      description,
                      rsids,
                      enabled = "true",
                      pathing_enabled = "true",
                      list_enabled = "false",
                      participation_enabled = "false",
                      verbosity = FALSE){
  # NOTE(review): the body uses jsonlite::toJSON()/unbox(), but only fromJSON
  # is declared via @importFrom — confirm the NAMESPACE covers them.
  # Settings serialized into the request body.
  prop_info <- list(id = id,
                    name = name,
                    description = description,
                    enabled = enabled,
                    pathing_enabled = pathing_enabled,
                    list_enabled = list_enabled,
                    participation_enabled = participation_enabled,
                    # NOTE(review): this field ends up in the API payload and is
                    # hard-coded FALSE, ignoring the `verbosity` argument —
                    # confirm whether it belongs in the payload at all.
                    verbosity = FALSE)
  # Wrap the settings in a one-row data frame so toJSON() yields the
  # {"props": [...], "rsid_list": [...]} shape; the rsid list is written as a
  # placeholder string and substituted into the JSON text below.
  prop_info_df <- data.frame(props = c(''))
  prop_info_df$props <- list(data.frame(prop_info))
  prop_info_df$rsid_list <- list("the_rsid_list_goes_here")
  prop_query <- toJSON(unbox(prop_info_df), pretty=TRUE)
  prop_query <- gsub("the_rsid_list_goes_here", paste(rsids, collapse = '","'), prop_query)
  # POST to the Adobe Analytics 1.4 admin API
  # (JWTPost is defined elsewhere in this package)
  readable_response <- JWTPost("https://api.omniture.com/admin/1.4/rest/?method=ReportSuite.SaveProps",
                               accept_header = "application/json",
                               content_type_header = "application/json",
                               body = prop_query,
                               verbose_output = verbosity)
  if(readable_response == TRUE){
    message("Successfully saved prop.")
  } else {
    message(readable_response) ## Is this possible?
    message("Prop not updated! Check query and try again.")
  }
}
|
675e5da463a299ff8e5fd78e269fd6084f99ae45
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/quickpsy/examples/plotpar.Rd.R
|
20fba15cca752a51da7b72629c6c82e85ff74f76
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 460
|
r
|
plotpar.Rd.R
|
# Example from the quickpsy package: fit psychometric functions to the MPDiR
# Vernier data and plot the fitted parameters in several panel/colour layouts.
library(quickpsy)
### Name: plotpar
### Title: Plot the values of the parameters
### Aliases: plotpar
### ** Examples
library(MPDiR) # contains the Vernier data
# B = 10 bootstrap samples (small, to keep the example fast)
fit <- quickpsy(Vernier, Phaseshift, NumUpward, N,
                grouping = .(Direction, WaveForm, TempFreq), B = 10)
plotpar(fit)
plotpar(fit, x = WaveForm)
plotpar(fit, xpanel = Direction)
plotpar(fit, color = Direction)
plotpar(fit, color = Direction, ypanel = WaveForm, geom = 'point')
|
652add1d13b6015b25583746783df5f0e324b583
|
5c5567ac9cef11a37dce1fdcce451d128d688e21
|
/Machine Learning with R (4th Ed.)/Chapter 12/Chapter_12.R
|
ddb1b3c250d3df710f3ff38ea968d8b096ede387
|
[] |
no_license
|
dataspelunking/MLwR
|
177dc5ef7c025e1c8b08fb65a1ccc98a9d48dddc
|
478dbf1e348d834b30fe0bfee130fec3c8f4bce1
|
refs/heads/master
| 2023-06-08T18:12:20.269578
| 2023-05-29T18:46:10
| 2023-05-29T18:46:10
| 27,689,480
| 243
| 478
| null | 2018-03-25T15:21:53
| 2014-12-07T23:48:51
|
R
|
UTF-8
|
R
| false
| false
| 5,276
|
r
|
Chapter_12.R
|
##### Chapter 12: Advanced Data Preparation --------------------
## Exploring R's tidyverse ----
library(tidyverse) # load all tidyverse packages
# convert the Titanic training dataset into a tibble
library(tibble) # not necessary if tidyverse is already loaded
titanic_csv <- read.csv("titanic_train.csv")
titanic_tbl <- as_tibble(titanic_csv)
titanic_tbl
# read the titanic training dataset using readr
library(readr) # not necessary if tidyverse is already loaded
titanic_train <- read_csv("titanic_train.csv")
# read the titanic training dataset using readxl
# (note: this overwrites the readr version of titanic_train read just above)
library(readxl)
titanic_train <- read_excel("titanic_train.xlsx")
# preparing and piping data with dplyr
library(dplyr)
# filter for female rows only
titanic_train |> filter(Sex == "female")
# select only name, sex, and age columns
titanic_train |> select(Name, Sex, Age)
# combine multiple dplyr verbs and save output to a tibble
titanic_women <- titanic_train |>
  filter(Sex == "female") |>
  select(Name, Sex, Age) |>
  arrange(Name)
# create a new feature indicating elderly age
titanic_train |>
  mutate(elderly = if_else(Age >= 65, 1, 0))
# create multiple features within the same mutate command
titanic_train |>
  mutate(
    elderly = if_else(Age >= 65, 1, 0),
    child = if_else(Age < 18, 1, 0)
  )
# compute survival rate by gender
titanic_train |>
  group_by(Sex) |>
  summarize(survival_rate = mean(Survived))
# compute average survival rate for children vs. non-children
titanic_train |>
  filter(!is.na(Age)) |>
  mutate(child = if_else(Age < 18, 1, 0)) |>
  group_by(child) |>
  summarize(survival_rate = mean(Survived))
# transform the dataset and pipe into a decision tree
# (the `data = _` placeholder requires the native pipe of R >= 4.2)
library(rpart)
m_titanic <- titanic_train |>
  filter(!is.na(Age)) |>
  mutate(AgeGroup = if_else(Age < 18, "Child", "Adult")) |>
  select(Survived, Pclass, Sex, AgeGroup) |>
  rpart(formula = Survived ~ ., data = _)
library(rpart.plot)
rpart.plot(m_titanic)
## Transforming text with stringr ----
library(readr)
titanic_train <- read_csv("titanic_train.csv")
library(stringr)
# examine cabin prefix code
# (the deck letter is the first character of the Cabin string)
titanic_train <- titanic_train |>
  mutate(CabinCode = str_sub(Cabin, start = 1, end = 1))
# compare cabin prefix to passenger class
table(titanic_train$Pclass, titanic_train$CabinCode,
      useNA = "ifany")
# plot of survival probability by cabin code
library(ggplot2)
titanic_train |> ggplot() +
  geom_bar(aes(x = CabinCode, y = Survived),
           stat = "summary", fun = "mean") +
  ggtitle("Titanic Survival Rate by Cabin Code")
# look at the first few passenger names
head(titanic_train$Name)
# create a title / salutation feature
titanic_train <- titanic_train |>
  # use regular expressions to find the characters between the comma and period
  # NOTE(review): [A-z] also matches the six ASCII characters between 'Z' and
  # 'a' (e.g. '[', '_'); [A-Za-z] would be stricter — confirm intent.
  mutate(Title = str_extract(Name, ", [A-z]+\\."))
# look at the first few examples
head(titanic_train$Title)
# clean up the title feature
# (strip the leading ", " and trailing "." captured by the regex)
titanic_train <- titanic_train |>
  mutate(Title = str_replace_all(Title, "[, \\.]", ""))
# examine output
table(titanic_train$Title)
# group titles into related categories
titanic_train <- titanic_train |>
  mutate(TitleGroup = recode(Title,
    # the first few stay the same
    "Mr" = "Mr", "Mrs" = "Mrs", "Master" = "Master",
    "Miss" = "Miss",
    # combine variants of "Miss"
    "Ms" = "Miss", "Mlle" = "Miss", "Mme" = "Miss",
    # anything else will be "Other"
    .missing = "Other",
    .default = "Other"
    )
  )
# examine output
table(titanic_train$TitleGroup)
# plot of survival probability by title group
library(ggplot2)
titanic_train |> ggplot() +
  geom_bar(aes(x = TitleGroup, y = Survived),
           stat = "summary", fun = "mean") +
  ggtitle("Titanic Survival Rate by Salutation")
## Cleaning dates with lubridate ----
library(lubridate)
# reading in Machine Learning with R publication dates in different formats
mdy(c("October 25, 2013", "10/25/2013"))
dmy(c("25 October 2013", "25.10.13"))
ymd("2013-10-25")
# construct MLwR publication dates
MLwR_1stEd <- mdy("October 25, 2013")
MLwR_2ndEd <- mdy("July 31, 2015")
MLwR_3rdEd <- mdy("April 15, 2019")
# compute differences (returns a difftime object)
MLwR_2ndEd - MLwR_1stEd
MLwR_3rdEd - MLwR_2ndEd
# convert the differences to durations
as.duration(MLwR_2ndEd - MLwR_1stEd)
as.duration(MLwR_3rdEd - MLwR_2ndEd)
# convert the duration to years
# (dyears() with no argument is the duration of one standard 365.25-day year)
dyears()
as.duration(MLwR_2ndEd - MLwR_1stEd) / dyears()
as.duration(MLwR_3rdEd - MLwR_2ndEd) / dyears()
# easier-to-remember version of the above:
time_length(MLwR_2ndEd - MLwR_1stEd, unit = "years")
time_length(MLwR_3rdEd - MLwR_2ndEd, unit = "years")
# compute age (in duration)
USA_DOB <- mdy("July 4, 1776") # USA's Date of Birth
time_length(mdy("July 3 2023") - USA_DOB, unit = "years")
time_length(mdy("July 5 2023") - USA_DOB, unit = "years")
# compute age (using intervals)
# (intervals respect calendar boundaries, unlike fixed-length durations)
interval(USA_DOB, mdy("July 3 2023")) / years()
interval(USA_DOB, mdy("July 5 2023")) / years()
# compute age (using integer divison)
USA_DOB %--% mdy("July 3 2023") %/% years()
USA_DOB %--% mdy("July 5 2023") %/% years()
# function to compute calendar age
# (whole calendar years elapsed from birthdate to today)
age <- function(birthdate) {
  birthdate %--% today() %/% years()
}
# compute age of celebrities
age(mdy("Jan 12, 1964")) # Jeff Bezos
age(mdy("June 28, 1971")) # Elon Musk
age(mdy("Oct 28, 1955")) # Bill Gates
|
0c949f3e96fd4e8b2c8bea34eedff208984201e6
|
435accdd6071c18f2ff67edc0675abe5b38edf8e
|
/7. MFM_R1/source code/dataframe.R
|
ab702017ee5edcc4ec146b1a0c4a7bb7d62d85c7
|
[] |
no_license
|
ardyadipta/melek-for-member
|
b0a4220a3c2ef99c0b29eabfb9b6de3cb5217266
|
61185eeb1c2dc06cffd627a90b051c87398e9aff
|
refs/heads/master
| 2020-03-19T09:06:18.645385
| 2018-05-18T06:01:21
| 2018-05-18T06:01:21
| 136,260,310
| 1
| 1
| null | 2018-06-06T02:26:58
| 2018-06-06T02:26:58
| null |
UTF-8
|
R
| false
| false
| 387
|
r
|
dataframe.R
|
# Small demo script: building a data frame and basic indexing/subsetting.
# (Cleanup: the original used `=` for top-level assignment, T/F literals, and
# bound a global `names` variable that shadowed base::names — all removed.)
name_vec <- c("Ani", "Ana", "Budi", "Asep", "Udin") # person names
ages <- sample(20:30, 5)                            # five random ages in 20..30
gender <- c("F", "F", "M", "M", "M")
work <- c(TRUE, FALSE, TRUE, FALSE, FALSE)          # currently working?
# keep strings as character, not factors; column is still named "names"
bio <- data.frame(names = name_vec, ages = ages, gender = gender, work = work,
                  stringsAsFactors = FALSE)
bio
str(bio)
# slicing
bio
bio[, 1]        # first column as a vector
bio[1, ]        # first row
bio[, 1:3]      # first three columns
bio[, "names"]  # column by name
bio$ages > 25   # logical mask
bio[(bio$ages > 25 & bio$gender == "M") | bio$ages < 26, ]
# Bug fix: the original called subset(df, bio$ages > 25), but `df` was never
# defined; subset() should be applied to `bio` itself.
subset(bio, ages > 25)
?subset
|
60f4ea2158db72dc17f63e23b423497b0b3a2560
|
1a4ed96bc9e61c559b593bcee4fa673951ef7a2c
|
/man/copy_labels.Rd
|
a740089726b855a0d4ca442d895a9183e4d3bc53
|
[] |
no_license
|
henrydoth/labelled
|
b22d66bd584ff2726549ea454306aea67c5d21c4
|
b1cc1acf7e0054bc202fb2bef81d6622654c7b88
|
refs/heads/main
| 2023-08-23T02:33:40.363484
| 2021-11-02T08:32:09
| 2021-11-02T08:32:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,867
|
rd
|
copy_labels.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/copy_labels.R
\name{copy_labels}
\alias{copy_labels}
\alias{copy_labels_from}
\title{Copy variable and value labels and SPSS-style missing values}
\usage{
copy_labels(from, to, .strict = TRUE)
copy_labels_from(to, from, .strict = TRUE)
}
\arguments{
\item{from}{A vector or a data.frame (or tibble) to copy labels from.}
\item{to}{A vector or data.frame (or tibble) to copy labels to.}
\item{.strict}{When \code{from} is a labelled vector, \code{to} have to be of the same
type (numeric or character) in order to copy value labels and SPSS-style
missing values. If this is not the case and \code{.strict = TRUE}, an error
will be produced. If \code{.strict = FALSE}, only variable label will be
copied.}
}
\description{
This function copies variable and value labels (including missing values)
from one vector to another or from one data frame to another data frame.
For data frame, labels are copied according to variable names, and only
if variables are the same type in both data frames.
}
\details{
Some base \R functions like \code{\link[base:subset]{base::subset()}} drop variable and
value labels attached to a variable. \code{copy_labels} could be used
to restore these attributes.
\code{copy_labels_from} is intended to be used with \pkg{dplyr} syntax,
see examples.
}
\examples{
library(dplyr)
df <- tibble(
id = 1:3,
happy = factor(c('yes', 'no', 'yes')),
gender = labelled(c(1, 1, 2), c(female = 1, male = 2))
) \%>\%
set_variable_labels(
id = "Individual ID",
happy = "Are you happy?",
gender = "Gender of respondent"
)
var_label(df)
fdf <- df \%>\% filter(id < 3)
var_label(fdf) # some variable labels have been lost
fdf <- fdf \%>\% copy_labels_from(df)
var_label(fdf)
# Alternative syntax
fdf <- subset(df, id < 3)
fdf <- copy_labels(from = df, to = fdf)
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.