blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
cc366a01e3d9dd711c1de1ddbcffd36ea94d9e69
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/sensors4plumes/examples/polygrid2grid.Rd.R
|
e06da7662a76a1388e6f6cbed0de4d57f0a6fa16
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 604
|
r
|
polygrid2grid.Rd.R
|
## Extracted example script for sensors4plumes::polygrid2grid():
## converts a SpatialPolygridDataFrame into a SpatialGridDataFrame
## and into a GeoTIFF file on disk.
library(sensors4plumes)
### Name: polygrid2grid
### Title: Coerce SpatialPolygridDataFrame into SpatialGridDataFrame and
### geoTiff file
### Aliases: polygrid2grid
### ** Examples
data(SPolygridDF)
# return SpatialGridDataFrame
SGridDF1 = polygrid2grid(SPolygridDF, zcol = "b")
# generate geoTiff
polygrid2grid(SPolygridDF, returnSGDF = FALSE,
geoTiffPath = "SPolygridDF1")
# plot SpatialGridDataFrame
spplot(SGridDF1)
# view geoTiff with functions from 'raster' and delete it
# NOTE(review): brick()/plot() below come from the 'raster' package --
# presumably attached as a sensors4plumes dependency; confirm before
# running this example standalone.
SGridDF2 = brick("SPolygridDF1.tif")
plot(SGridDF2)
rm(SGridDF2)
file.remove("SPolygridDF1.tif")
|
fd5729c5be5b9ea385af9a0937a933b4b94b2052
|
3c5d52b699e6645cbb086b4f45519f898a4b2f64
|
/tests/testthat/test-io.R
|
4f8b69d496b6b28189b5cd2a6aef5261aa365d44
|
[] |
no_license
|
AndyBunn/dplR
|
249427c0be6c68c80c267ad4392a03a331bb056d
|
8bb524ae6a024affea2c9ec982b0972fc5175280
|
refs/heads/master
| 2023-07-06T04:02:45.073034
| 2023-04-12T21:41:31
| 2023-04-12T21:41:31
| 195,873,071
| 31
| 12
| null | 2023-06-23T14:33:40
| 2019-07-08T19:21:39
|
R
|
UTF-8
|
R
| false
| false
| 9,076
|
r
|
test-io.R
|
context("input / output functions")
## Regression tests for read.tucson(), the reader for Tucson/decadal
## ring-width files.  Each case writes a small fixture to a tempfile
## (removed via on.exit) and checks parsing, precision detection
## (stop marker 999 => precision 0.01, -9999 => precision 0.001),
## unusual separators, and error handling for malformed input.
test.read.tucson <- function() {
## Value substituted for the non-standard "." missing-data marker (tf7).
MISSINGVAL <- 0
## Invalid file
tf <- tempfile()
fh <- file(tf, "wt")
on.exit(unlink(tf))
writeLines("TEST1A 1734 1230 456 789 12 34 56 7 6",
fh)
close(fh)
test_that("read.tucson catches lines that are too long", {
expect_error(read.tucson(tf), "failed to read")
})
## Precision 0.01
tf2 <- tempfile()
fh2 <- file(tf2, "wt")
on.exit(unlink(tf2), add=TRUE)
writeLines("TEST2A 1734 1230 456 789 12 34 999", fh2)
close(fh2)
test_that("read.tucson can handle data with precision 0.01", {
res.tf2 <- read.tucson(tf2)
expect_true(is.data.frame(res.tf2))
expect_named(res.tf2, "TEST2A")
expect_equal(row.names(res.tf2), as.character(1734:1738))
expect_equal(res.tf2[[1]], c(12.3, 4.56, 7.89, 0.12, 0.34))
})
## Precision 0.001
tf3 <- tempfile()
fh3 <- file(tf3, "wt")
on.exit(unlink(tf3), add=TRUE)
writeLines("TEST3A 1734 1230 456 789 12 34 -9999", fh3)
close(fh3)
test_that("read.tucson can handle data with precision 0.001", {
res.tf3 <- read.tucson(tf3)
expect_true(is.data.frame(res.tf3))
expect_named(res.tf3, "TEST3A")
expect_equal(row.names(res.tf3), as.character(1734:1738))
expect_equal(res.tf3[[1]], c(1.23, 0.456, 0.789, 0.012, 0.034))
})
## Unusual line separator
tf4 <- tempfile()
fh4 <- file(tf4, "wt")
on.exit(unlink(tf4), add=TRUE)
writeLines(c("TEST4A 1734 1230 456 789 12 34 5",
"TEST4A 1740 678 999"), fh4, sep="\r\r\n")
close(fh4)
test_that("read.tucson works with unusual line separators", {
res.tf4 <- read.tucson(tf4)
expect_true(is.data.frame(res.tf4))
expect_named(res.tf4, "TEST4A")
expect_equal(row.names(res.tf4), as.character(1734:1740))
expect_equal(res.tf4[[1]], c(12.3, 4.56, 7.89, 0.12, 0.34, 0.05, 6.78))
})
## Tab-delimited file
tf5 <- tempfile()
fh5 <- file(tf5, "wt")
on.exit(unlink(tf5), add=TRUE)
writeLines("TEST5A\t1734\t1230\t456\t789\t12\t34\t999", fh5)
close(fh5)
test_that("read.tucson works with tab delimited data", {
res.tf5 <- read.tucson(tf5)
expect_true(is.data.frame(res.tf5))
expect_named(res.tf5, "TEST5A")
expect_equal(row.names(res.tf5), as.character(1734:1738))
expect_equal(res.tf5[[1]], c(12.3, 4.56, 7.89, 0.12, 0.34))
})
## Stop marker is 13th column (non-standard)
tf6 <- tempfile()
fh6 <- file(tf6, "wt")
on.exit(unlink(tf6), add=TRUE)
writeLines(c("TEST6A 1734 123 123 123 123 123 123",
"TEST6A 1740 123 123 123 123 123 123 123 123 123 123 -9999"), fh6)
close(fh6)
test_that("read.tucson accepts stop marker in extra column", {
res.tf6 <- read.tucson(tf6)
expect_true(is.data.frame(res.tf6))
expect_named(res.tf6, "TEST6A")
expect_equal(row.names(res.tf6), as.character(1734:1749))
expect_equal(res.tf6[[1]], rep.int(0.123, 16))
})
## Non-standard missing data marker
tf7 <- tempfile()
fh7 <- file(tf7, "wt")
on.exit(unlink(tf7), add=TRUE)
writeLines("TEST7A 1734 1230 456 . 12 34 999", fh7)
close(fh7)
test_that("read.tucson accepts dot as missing data marker", {
res.tf7 <- read.tucson(tf7)
expect_true(is.data.frame(res.tf7))
expect_named(res.tf7, "TEST7A")
expect_equal(row.names(res.tf7), as.character(1734:1738))
expect_equal(res.tf7[[1]], c(12.3, 4.56, MISSINGVAL, 0.12, 0.34))
})
## Overlapping data is an error
tf8 <- tempfile()
fh8 <- file(tf8, "wt")
on.exit(unlink(tf8), add=TRUE)
writeLines(c("TEST8A 1734 1230 456 789 12 34 999",
"TEST8A 1730 1230 456 789 12 34 999"), fh8)
close(fh8)
test_that("read.tucson stops on overlapping data", {
expect_error(read.tucson(tf8), "failed to read")
})
## Non-standard file with missing decade
tf9 <- tempfile()
fh9 <- file(tf9, "wt")
on.exit(unlink(tf9), add=TRUE)
writeLines(c("TEST9A 1734 123 123 123 123 123 123",
"TEST9A 1750 123 123 123 123 123 123 123 123 123 -9999"), fh9)
close(fh9)
test_that("read.tucson marks missing decades", {
## The skipped 174x decade must be filled with the missing value.
res.tf9 <- read.tucson(tf9)
expect_true(is.data.frame(res.tf9))
expect_named(res.tf9, "TEST9A")
expect_equal(row.names(res.tf9), as.character(1734:1758))
expect_equal(res.tf9[[1]],
c(rep.int(0.123, 6), rep.int(MISSINGVAL, 10),
rep.int(0.123, 9)))
})
## Two series
tf10 <- tempfile()
fh10 <- file(tf10, "wt")
on.exit(unlink(tf10), add=TRUE)
writeLines(c("TST10A 1734 1230 1230 1230 1230 1230 -9999",
"TST10B 1732 123 123 123 123 999"), fh10)
close(fh10)
test_that("read.tucson supports mixed precisions", {
## Series A uses the 0.001 marker, series B the 0.01 marker; years
## outside a series' span are padded with NA.
res.tf10 <- read.tucson(tf10)
expect_true(is.data.frame(res.tf10))
expect_named(res.tf10, c("TST10A", "TST10B"))
expect_equal(row.names(res.tf10), as.character(1732:1738))
expect_equal(res.tf10[[1]], c(rep.int(NA_real_, 2), rep.int(1.23, 5)))
expect_equal(res.tf10[[2]], c(rep.int(1.23, 4), rep.int(NA_real_, 3)))
})
## Need 5 characters for year, effect of parameter 'long'
tf11 <- tempfile()
fh11 <- file(tf11, "wt")
on.exit(unlink(tf11), add=TRUE)
writeLines("TST11A -1734 1230 456 789 999", fh11)
close(fh11)
test_that("read.tucson argument 'long' works", {
## Without long=TRUE the minus sign is absorbed into the series name.
res.tf11a <- read.tucson(tf11)
expect_true(is.data.frame(res.tf11a))
expect_named(res.tf11a, "TST11A -")
expect_equal(row.names(res.tf11a), as.character(1734:1736))
expect_equal(res.tf11a[[1]], c(12.3, 4.56, 7.89))
res.tf11b <- read.tucson(tf11, long=TRUE)
expect_true(is.data.frame(res.tf11b))
expect_named(res.tf11b, "TST11A")
expect_equal(row.names(res.tf11b), as.character(-1734:-1732))
expect_equal(res.tf11b[[1]], c(12.3, 4.56, 7.89))
})
## Mixed case ("Tst12A" does not have a stop marker)
tf12 <- tempfile()
fh12 <- file(tf12, "wt")
on.exit(unlink(tf12), add=TRUE)
writeLines(c("Tst12A 1734 1230 456 789 12 34 5",
"TST12A 1740 678 999"), fh12)
close(fh12)
test_that("read.tucson corrects mixed case typos", {
res.tf12 <- read.tucson(tf12)
expect_true(is.data.frame(res.tf12))
expect_named(res.tf12, "TST12A")
expect_equal(row.names(res.tf12), as.character(1734:1740))
expect_equal(res.tf12[[1]],
c(12.3, 4.56, 7.89, 0.12, 0.34, 0.05, 6.78))
})
## File has no data (invalid file)
tf13 <- tempfile()
fh13 <- file(tf13, "wt")
on.exit(unlink(tf13), add=TRUE)
writeLines("TST13A 1734", fh13)
close(fh13)
test_that("read.tucson gives empty result when appropriate", {
expect_equal(0, nrow(read.tucson(tf13, header = FALSE)))
})
## Zeros at series edges and the 'edge.zeros' argument
tf14 <- tempfile()
fh14 <- file(tf14, "wt")
on.exit(unlink(tf14), add=TRUE)
writeLines(c("TST14A 1906 0 0 100 200",
"TST14A 1910 300 200 100 200 300 999",
"TST14B 1905 300 200 100 200 300",
"TST14B 1910 200 100 0 0 999",
"TST14C 1906 0 200 100 200",
"TST14C 1910 300 200 100 0 999"), fh14)
close(fh14)
test_that("read.tucson (by default) preserves edge zeros", {
res.tf14 <- read.tucson(tf14)
expect_true(is.data.frame(res.tf14))
expect_named(res.tf14, c("TST14A", "TST14B", "TST14C"))
expect_equal(row.names(res.tf14), as.character(1905:1914))
expect_equal(res.tf14[[1]],
c(NA_real_, 0, 0, 1, 2, 3, 2, 1, 2, 3))
expect_equal(res.tf14[[2]],
c(3, 2, 1, 2, 3, 2, 1, 0, 0, NA_real_))
expect_equal(res.tf14[[3]],
c(NA_real_, 0, 2, 1, 2, 3, 2, 1, 0, NA_real_))
## With edge.zeros=FALSE, zeros at the edges of a series become NA.
res.tf14B <- read.tucson(tf14, edge.zeros=FALSE)
expect_true(is.data.frame(res.tf14B))
expect_named(res.tf14B, c("TST14A", "TST14B", "TST14C"))
expect_equal(row.names(res.tf14B), as.character(1905:1914))
NA2 <- rep.int(NA_real_, 2)
NA3 <- rep.int(NA_real_, 3)
expect_equal(res.tf14B[[1]],
c(NA3, 1, 2, 3, 2, 1, 2, 3))
expect_equal(res.tf14B[[2]],
c(3, 2, 1, 2, 3, 2, 1, NA3))
expect_equal(res.tf14B[[3]],
c(NA2, 2, 1, 2, 3, 2, 1, NA2))
})
}
test.read.tucson()
### We should write tests for other I/O functions, also
|
f9b2b1dae4a4ebc98f354514541d5452667b9060
|
dc1f17859c4d14d2d18e34a377a474b7d955c09f
|
/PEPATACr/man/getPrealignments.Rd
|
46ddf3dab4c5216da7b957b3c21d1e6db2b44f23
|
[
"BSD-2-Clause"
] |
permissive
|
databio/pepatac
|
55f4b7947333c3543f892e19e60803d04003eba5
|
9ee0b6c1251b1addae8265c12f16cbfeae76d489
|
refs/heads/master
| 2023-08-08T22:02:23.327668
| 2023-07-31T21:32:28
| 2023-07-31T21:32:28
| 58,678,230
| 46
| 11
|
BSD-2-Clause
| 2023-07-31T21:32:30
| 2016-05-12T21:29:13
|
R
|
UTF-8
|
R
| false
| true
| 424
|
rd
|
getPrealignments.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/PEPATACr.R
\name{getPrealignments}
\alias{getPrealignments}
\title{Return a list of prealignments from a stats_summary.tsv file if they exist}
\usage{
getPrealignments(stats_file)
}
\arguments{
\item{stats_file}{A looper derived stats_summary.tsv file}
}
\description{
Return a list of prealignments from a stats_summary.tsv file if they exist
}
|
fa2cda12bc0313b7db92ca6926851934b64bbfbd
|
5042c3a97c9a9fa4d0a5d6794960eec8146afa47
|
/lotteryEstimator/man/ParameterizedEstimator.Rd
|
bef95b7e64e9fa224dbf3b70db6b0a3cd9e943d3
|
[] |
no_license
|
Sea2Data/CatchLotteryEstimation
|
eef044b949aa382a0ca70a23c9c32d2ca2a1a4d5
|
e364b505969e5bd861684bdb2fc55f6fe96d2b8f
|
refs/heads/master
| 2020-12-22T06:29:23.504177
| 2020-10-25T09:12:56
| 2020-10-25T09:13:52
| 236,696,753
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,008
|
rd
|
ParameterizedEstimator.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/estimation.R
\name{ParameterizedEstimator}
\alias{ParameterizedEstimator}
\title{Parameterized estimator}
\description{
Function contract for parameterized estimators
Parameterized estimators are functions that map a data frame to an estimate, with all other parameters fixed.
}
\details{
Parameterized estimators take a single argument 'sample' which is a \code{\link[data.table]{data.table}} with sample data
Parameterized estimators return a named numeric vector with parameter estimates
}
\examples{
# A parameterized estimator can be obtained from
# \code{\link[lotteryEstimator]{HierarchicalEstimator}}
# by fixing parameters.
# For example a parameterized Horvitz-Thompson estimator:
numAtAgeSample <- function(sample){countCategorical(sample$age, 2:20)}
numAtAgeHaul <- function(sample){hierarchicalHorvitzThompsonTotals(sample, "SSUid",
numAtAgeSample, "SSUinclusionProb")}
}
|
092da274e9e9cc927395b3122434427da352821c
|
700c80d6185f5e0c13742146e5ef4a4a2f51b02e
|
/GrandMaster.R
|
7935fe92d83866b1b0586d55ee63d937ae74dcef
|
[] |
no_license
|
DanOvando/SNAP
|
e0951a127381a60efb7a00dca1f28661322ae86f
|
9cbe6845c00f7d1827d8d6054929d7dca6cbbceb
|
refs/heads/master
| 2021-01-24T03:58:05.246973
| 2014-07-21T23:51:18
| 2014-07-21T23:51:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,613
|
r
|
GrandMaster.R
|
#==specify options (e.g. what management tactic is going to be searched over)
#==make storage facility for key output (e.g. total cost vs. total profit; depletion, spr)
#==create directories
#==write csv files
#==scenarios: changing MPA size, changing size limit, changing season length, changing effort input through fishers, etc.
library(animation)
library(caTools)
source("Master.R")
source("lenwei.R")
source("VisualizeMovement.R")
source("movArray.R")
source("InitialPop.R")
source("Recruitment.R")
source("samplingFunc.R")
#rm(list=ls())
Graphs <- FALSE
GraphsFish <- FALSE
PrintLifeHistory <- FALSE

## Inputs shared by every management scenario ----
Life    <- read.csv("LifeHistory.csv")                  # life history characteristics
SimCTL  <- read.csv("GrandSimCtl.csv", header = FALSE)  # simulation controls
Fleets  <- read.csv("Fleets.csv", header = FALSE)       # fleet characteristics
Samp    <- read.csv("SamplingParams.csv")               # sampling controls for management
habitat <- read.csv("habitatNULL.csv", header = FALSE)  # habitat quality (recruitment suitability)

## Run Master() for one management scenario.
## season_file:     fishing seasons by fleet
## notakezone_file: marine protected areas (0=open access, 1=MPA, 2=TURF?)
## Returns the Master() result list for that scenario.
run_scenario <- function(season_file, notakezone_file) {
  season     <- read.csv(season_file, header = FALSE)
  NoTakeZone <- read.csv(notakezone_file, header = FALSE)
  Master(Life, SimCTL, Fleets, season, Samp, NoTakeZone, habitat,
         Graphs = FALSE, GraphsFish = FALSE, PrintLifeHistory = FALSE)
}

## Sum a fisher-by-time matrix over fishers (column totals, NAs ignored).
total_over_fishers <- function(m) apply(m, 2, sum, na.rm = TRUE)

#==open access===========================
OpenAccess  <- run_scenario("seasonNULL.csv", "notakezoneNULL.csv")
OAtotCatch  <- total_over_fishers(OpenAccess$CatchByFisher)
OAtotCost   <- total_over_fishers(OpenAccess$CostByFisher)
OAtotProfit <- total_over_fishers(OpenAccess$ProfitByFisher)
#==MPA====
halfMPA      <- run_scenario("seasonNULL.csv", "notakezone.csv")
MPAtotCatch  <- total_over_fishers(halfMPA$CatchByFisher)
MPAtotCost   <- total_over_fishers(halfMPA$CostByFisher)
MPAtotProfit <- total_over_fishers(halfMPA$ProfitByFisher)
#==Seasons
Seas       <- run_scenario("season.csv", "notakezoneNULL.csv")
StotCatch  <- total_over_fishers(Seas$CatchByFisher)
StotCost   <- total_over_fishers(Seas$CostByFisher)
StotProfit <- total_over_fishers(Seas$ProfitByFisher)
#==plot it all: catch, cost, profit, cost of management, spawning biomass
## Burn-in and simulation length are looked up by name in the control file.
burnIn     <- SimCTL[grep('burn', SimCTL[, 2]), 1]
simTimePlt <- SimCTL[grep('simTime', SimCTL[, 2]), 1]
par(mfrow = c(5, 1), mar = c(.1, 4, .1, .1))
## Colors: black = open access, red (2) = MPA, green (3) = seasons.
plot(OAtotCatch, type = "b", xaxt = 'n', las = 2, ylim = c(0, max(OAtotCatch, MPAtotCatch, StotCatch, na.rm = TRUE)))
lines(MPAtotCatch, type = "b", col = 2)
lines(StotCatch, type = "b", col = 3)
plot(OAtotCost, lty = 2, type = "b", xaxt = 'n', las = 2, ylim = c(0, max(OAtotCost, MPAtotCost, StotCost, na.rm = TRUE)))
lines(MPAtotCost, type = "b", col = 2)
lines(StotCost, type = "b", col = 3)
plot(OAtotProfit, lty = 2, type = "b", xaxt = 'n', las = 2, ylim = c(0, max(OAtotProfit, MPAtotProfit, StotProfit, na.rm = TRUE)))
lines(MPAtotProfit, type = "b", col = 2)
lines(StotProfit, type = "b", col = 3)
plot(OpenAccess$CostOfManagement, type = "b", xaxt = 'n', las = 2, ylim = c(0, max(OpenAccess$CostOfManagement, halfMPA$CostOfManagement, Seas$CostOfManagement, na.rm = TRUE)))
lines(halfMPA$CostOfManagement, type = "b", col = 2)
lines(Seas$CostOfManagement, type = "b", col = 3)
plot(OpenAccess$SpawningBiomass[burnIn:simTimePlt], type = "b", xaxt = 'n', las = 2, ylab = "SpawningBio", ylim = c(0, max(OpenAccess$SpawningBiomass[burnIn:simTimePlt], na.rm = TRUE)))
lines(halfMPA$SpawningBiomass[burnIn:simTimePlt], type = "b", col = 2)
lines(Seas$SpawningBiomass[burnIn:simTimePlt], type = "b", col = 3)
#saveHTML(ani.replay(), img.name = "record_plot_oldf",outdir = getwd(),interval=0.05)
|
0be7f86e7cbe4212f342292c65c48555a70ea1c9
|
e3dcd1c39d94e87ca058d1ad9941a660a2dcdac8
|
/ScriptFinal.R
|
6decfae4d7910a63022181cc67f5855a5ded054f
|
[] |
no_license
|
CarolinaRamosR/Extraccion-Datos
|
eae497fe5c5c545dd26ce7eff974de13e3a9dc53
|
f2f5b63fb13ba0692bd72063b841b60def314cd4
|
refs/heads/master
| 2021-06-25T09:55:37.027122
| 2017-09-11T01:10:04
| 2017-09-11T01:10:04
| 103,074,347
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 14,383
|
r
|
ScriptFinal.R
|
## Scrape job adverts from socioempleo.gob.ec with RSelenium + XML and
## later export them to Excel.  This section starts the browser, opens
## the search page, and reads the page count and total record count.
rm(list=ls())
library(RSelenium)
library(XML)
library(xlsx)
# start the Chrome browser
rD <- rsDriver()
remDr <- rD[["client"]]
remDr$navigate(paste0("http://www.socioempleo.gob.ec/socioEmpleo-war/paginas/procesos/busquedaOferta.jsf?tipo=PRV"))
Sys.sleep(runif(1, 4, 5))# wait up to ~5 seconds doing nothing until the page has loaded completely
doc1 <- htmlParse(remDr$getPageSource()[[1]]) # grab and parse the full page source of the active page
pag <- doc1['//*[@id="anunciosIndex"]/table/tbody/tr/td[6]']# xpath copied from Chrome: cell holding the page count
npag <- try(toString(xmlValue(pag[[1]][[1]])))# result is a node; convert to text to read the value
npag <- sub(".*:", "", npag)
npag <- as.numeric(npag)
#npag <- npag - 1
num <- try(doc1['//*[@id="anunciosIndex"]/table/tbody/tr/td[4]'])# inspect number of records; xpath copied from Chrome
nnum <- try(toString(xmlValue(num[[1]][[1]])))# the needed text is almost always in node [[1]][[1]]
nnum <- sub("Total:", "", nnum)
nnum <- sub(" registros,", "", nnum)
nnum <- as.numeric(nnum)
## NOTE(review): the next line is a no-op assignment; left as-is.
nnum <- nnum
base <-matrix(nrow=nnum,ncol=17)
## Create the per-field result matrices (<name>2) and their row
## counters (aa, bb, ..., ss), each seeded at 1, via assign().
## NOTE(review): 19 field names are declared but `base` has only 17
## columns -- "capacitacion" and "empresa" never land in `base`;
## confirm whether that is intended (see colnames note further down).
variables <- c("cargo", "tipo", "relacion", "sector", "ciudad", "parroquia", "fechai", "fechaf", "instruccion", "remuneracion", "experiencia",
"area", "conocimiento", "actividades", "jornada", "capacitacion",
"adicional", "vacantes", "empresa")
for (i in 1:19){
assign(paste0(letters[i],letters[i]), 1)
assign(paste0(variables[i], "2"), matrix(, nrow = nnum, ncol = 1))
}
## k indexes the next free row of `base`; y is the results-page counter.
k <- 1
y <- 1
## Main scrape loop: visit each results page, open every job-offer link
## on it, and copy the advert's detail fields into row k of `base`.
## Errors on a page are swallowed by tryCatch so the crawl continues.
for (y in 1:(npag)){
#for (y in 1:2){
tryCatch({
Sys.sleep(runif(1, 2, 4))# random pause between pages so the site can keep up
nclick <- y
if (nclick!=1){
## Past the first page: open the paginator dropdown, then click the
## link for page y (ids are zero-based, hence y-1).
click<-y-1
remDr$findElement(using = "xpath", paste0("//*[@id='formBuscaOferta:pagina']/div[3]/span"))$clickElement()
Sys.sleep(runif(1, 0.1, 0.3))
remDr$findElement(using = "xpath",paste0("//*[@id='formBuscaOferta:pagina_",click,"']"))$clickElement()
}
Sys.sleep(runif(1,2,3))
doc1 <- htmlParse(remDr$getPageSource()[[1]])
## Both j_idt43 and j_idt53 variants are listed because the element id
## is not the same for every offer (when an offer is featured, the
## "empresa" block uses the other id), so a single xpath does not work.
## List of xpaths for the job-offer links (up to 5 results per page).
clase <- c ("//*[@id='formBuscaOferta:listResult:0:j_idt43']/div/a",
"//*[@id='formBuscaOferta:listResult:0:j_idt53']/div/a",
"//*[@id='formBuscaOferta:listResult:1:j_idt43']/div/a",
"//*[@id='formBuscaOferta:listResult:1:j_idt53']/div/a",
"//*[@id='formBuscaOferta:listResult:2:j_idt43']/div/a",
"//*[@id='formBuscaOferta:listResult:2:j_idt53']/div/a",
"//*[@id='formBuscaOferta:listResult:3:j_idt43']/div/a",
"//*[@id='formBuscaOferta:listResult:3:j_idt53']/div/a",
"//*[@id='formBuscaOferta:listResult:4:j_idt43']/div/a",
"//*[@id='formBuscaOferta:listResult:4:j_idt53']/div/a"
)
## List of xpaths for the employer name (the fieldset legend).
clase2 <- c ("//*[@id='formBuscaOferta:listResult:0:j_idt43']/legend",
"//*[@id='formBuscaOferta:listResult:0:j_idt53']/legend",
"//*[@id='formBuscaOferta:listResult:1:j_idt43']/legend",
"//*[@id='formBuscaOferta:listResult:1:j_idt53']/legend",
"//*[@id='formBuscaOferta:listResult:2:j_idt43']/legend",
"//*[@id='formBuscaOferta:listResult:2:j_idt53']/legend",
"//*[@id='formBuscaOferta:listResult:3:j_idt43']/legend",
"//*[@id='formBuscaOferta:listResult:3:j_idt53']/legend",
"//*[@id='formBuscaOferta:listResult:4:j_idt43']/legend",
"//*[@id='formBuscaOferta:listResult:4:j_idt53']/legend"
)
## Collect employer names shown on the results page.
for (i in clase2){
empresa <- try(doc1[i])
if (!is.null(empresa)){
empresa2[ss,1] <- try(toString(xmlValue(empresa[[1]][[1]])))
ss <- ss+1
}else {}
}
## Open each offer and scrape its detail fields.  Every field follows
## the same pattern: locate the node by xpath, stringify its value,
## store it in the field's own matrix and in row k of `base`, then
## bump that field's counter (aa, bb, ...).
for (j in clase){
doc1 <- htmlParse(remDr$getPageSource()[[1]])
Sys.sleep(runif(1, 0.3, 0.5))
link <- try(doc1[j])
if (!is.null(link)){
remDr$findElement(using = "xpath", paste0(j))$clickElement()
Sys.sleep(runif (1,0.1,0.2))
page_source <- remDr$getPageSource()
doc <- htmlParse(remDr$getPageSource()[[1]])
Sys.sleep(runif (1,0.1,0.2))
cargo <- try(doc['//*[@id="formBuscaOferta:olGrid"]/tbody/tr[1]/td[2]'])
cargo2[aa,1] <- try(toString(xmlValue(cargo[[1]][[1]])))
base[k,1] <- cargo2[aa,1]
aa <- aa+1
Sys.sleep(runif (1,0.1,0.2))
tipo <- try(doc['//*[@id="formBuscaOferta:j_idt36_label"]'])
tipo2[bb,1] <- try(toString(xmlValue(tipo[[1]][[1]])))
base[k,2] <- tipo2[bb,1]
bb <- bb+1
Sys.sleep(runif (1,0.1,0.2))
relacion <- try(doc['//*[@id="formBuscaOferta:j_idt39_label"]'])
relacion2[cc,1] <- try(toString(xmlValue(relacion[[1]][[1]])))
base[k,3] <- relacion2[cc,1]
cc <- cc+1
Sys.sleep(runif (1,0.1,0.2))
sector <- try(doc['//*[@id="formBuscaOferta:j_idt51_label"]'])
sector2[dd,1] <- try(toString(xmlValue(sector[[1]][[1]])))
base[k,4] <- sector2[dd,1]
dd <- dd+1
Sys.sleep(runif (1,0.1,0.2))
ciudad <- try(doc['//*[@id="formBuscaOferta:j_idt47"]'])
ciudad2[ee,1] <- try(toString(xmlValue(ciudad[[1]][[1]])))
base[k,5] <- ciudad2[ee,1]
ee <- ee+1
Sys.sleep(runif (1,0.1,0.2))
parroquia <- try(doc['//*[@id="formBuscaOferta:j_idt49"]'])
parroquia2[ff,1] <- try(toString(xmlValue(parroquia[[1]][[1]])))
base[k,6] <- parroquia2[ff,1]
ff <- ff+1
Sys.sleep(runif (1,0.1,0.2))
fechai <- try(doc['//*[@id="formBuscaOferta:olGrid"]/tbody/tr[5]/td[2]'])
fechai2[gg,1] <- try(toString(xmlValue(fechai[[1]][[1]])))
base[k,7] <- fechai2[gg,1]
gg <- gg+1
Sys.sleep(runif (1,0.1,0.2))
fechaf <- try(doc['//*[@id="formBuscaOferta:olGrid"]/tbody/tr[5]/td[4]'])
fechaf2[hh,1] <- try(toString(xmlValue(fechaf[[1]][[1]])))
base[k,8] <- fechaf2[hh,1]
hh <- hh+1
Sys.sleep(runif (1,0.1,0.2))
instruccion <- try(doc['//*[@id="formBuscaOferta:j_idt75_label"]'])
instruccion2[ii,1] <- try(toString(xmlValue(instruccion[[1]][[1]])))
base[k,9] <- instruccion2[ii,1]
ii <- ii+1
Sys.sleep(runif (1,0.1,0.2))
remuneracion <- try(doc['//*[@id="formBuscaOferta:j_idt78_label"]'])
remuneracion2[jj,1] <- try(toString(xmlValue(remuneracion[[1]][[1]])))
base[k,10] <- remuneracion2[jj,1]
jj <- jj+1
Sys.sleep(runif (1,0.1,0.2))
experiencia <- try(doc['//*[@id="formBuscaOferta:j_idt84_label"]'])
experiencia2[kk,1] <- try(toString(xmlValue(experiencia[[1]][[1]])))
base[k,11] <- experiencia2[kk,1]
kk <- kk+1
Sys.sleep(runif (1,0.1,0.2))
area <- try(doc['//*[@id="formBuscaOferta:j_idt81_label"]'])
area2[ll,1] <- try(toString(xmlValue(area[[1]][[1]])))
base[k,12] <- area2[ll,1]
ll <- ll+1
Sys.sleep(runif (1,0.1,0.2))
conocimiento <- try(doc['//*[@id="formBuscaOferta:j_idt72"]/tbody/tr[3]/td[2]'])
conocimiento2[mm,1] <- try(toString(xmlValue(conocimiento[[1]][[1]])))
base[k,13] <- conocimiento2[mm,1]
mm <- mm+1
Sys.sleep(runif (1,0.1,0.2))
actividades <- try(doc['//*[@id="formBuscaOferta:j_idt72"]/tbody/tr[3]/td[4]'])
actividades2[nn,1] <- try(toString(xmlValue(actividades[[1]][[1]])))
base[k,14] <- actividades2[nn,1]
nn <- nn+1
Sys.sleep(runif (1,0.1,0.2))
jornada <- try(doc['//*[@id="formBuscaOferta:j_idt94_label"]'])
jornada2[pp,1] <- try(toString(xmlValue(jornada[[1]][[1]])))
base[k,15] <- jornada2[pp,1]
pp <- pp+1
Sys.sleep(runif (1,0.1,0.2))
adicional <- try(doc['//*[@id="formBuscaOferta:j_idt72"]/tbody/tr[5]/td[2]'])
adicional2[qq,1] <- try(toString(xmlValue(adicional[[1]][[1]])))
base[k,16] <- adicional2[qq,1]
qq <- qq+1
Sys.sleep(runif (1,0.1,0.2))
vacantes <- try(doc['//*[@id="formBuscaOferta:j_idt72"]/tbody/tr[5]/td[4]'])
vacantes2[rr,1] <- try(toString(xmlValue(vacantes[[1]][[1]])))
base[k,17] <- vacantes2[rr,1]
rr <- rr+1
Sys.sleep(runif (1,0.1,0.2))
Sys.sleep(runif(1, 1, 2))
## Return to the results page for the next offer.
remDr$goBack()
Sys.sleep(runif(1, 1, 2))
k <- k +1
}else{}
}
},error = function(e){})
}
## Persist the raw scrape, then convert to a data frame and clean it up.
save(base, file = "base.rdata")
base <- data.frame(base)
## NOTE(review): `base` columns 16-17 actually hold "adicional" and
## "vacantes" (see the scrape loop), but because "capacitacion" sits at
## position 16 of `variables` and is never scraped, they get named
## CAPACITACION and ADICIONAL here -- likely a labeling bug; confirm.
colnames(base) <- toupper(variables[c(1:dim(base)[2])])
## Repair mojibake (what looks like double-encoded UTF-8 artifacts) in
## the free-text columns.  Only columns 1 and 11:16 are cleaned --
## presumably the rest hold dates/codes without accented text; confirm.
for (i in c(1,11:16)){
base[[i]] <- gsub("ñ", "ñ", base[[i]])
base[[i]] <- gsub("óN", "ÓN", base[[i]])
base[[i]] <- gsub("ÓN", "ÓN", base[[i]])
base[[i]] <- gsub("PÓ", "PÓ", base[[i]])
base[[i]] <- gsub("ÑO", "ÑO", base[[i]])
base[[i]] <- gsub("ÑA", "ÑA", base[[i]])
base[[i]] <- gsub("ÑA", "ÑA", base[[i]])
base[[i]] <- gsub("ÑE", "ÑE", base[[i]])
base[[i]] <- gsub("ÑI", "ÑI", base[[i]])
base[[i]] <- gsub("ÑÃÂRIA", "ÑÍA", base[[i]])
base[[i]] <- gsub("/Té", " Té", base[[i]])
base[[i]] <- gsub("É", "É", base[[i]])
base[[i]] <- gsub("PEÚ", "PÉU", base[[i]])
base[[i]] <- gsub("mÃÂ?a", "mía", base[[i]])
base[[i]] <- gsub("iòn", "ión", base[[i]])
base[[i]] <- gsub("IÒN", "IÓN", base[[i]])
base[[i]] <- gsub("Ó", "Ó", base[[i]])
base[[i]] <- gsub("nó", "nó", base[[i]])
base[[i]] <- gsub("ón", "ón", base[[i]])
base[[i]] <- gsub("GÚ", "GÚ", base[[i]])
base[[i]] <- gsub("SÓ", "SÓ", base[[i]])
base[[i]] <- gsub("GÃÂRIA", "GÍA", base[[i]])
base[[i]] <- gsub("AsesorÃÂ?a", "Asesoría", base[[i]])
base[[i]] <- gsub("AsesorÃÂa", "Asesoría", base[[i]])
base[[i]] <- gsub("TÃÂRITULO", "TÍTULO", base[[i]])
base[[i]] <- gsub("CÓ", "CÓ", base[[i]])
base[[i]] <- gsub("ÃÂA", "ÍA", base[[i]])
base[[i]] <- gsub("ÃÂa", "ía", base[[i]])
base[[i]] <- gsub("ÃÂ�a", "ía", base[[i]])
base[[i]] <- gsub("rÃÂ�a", "ía", base[[i]])
base[[i]] <- gsub("rÃÂa", "ría", base[[i]])
base[[i]] <- gsub("RÃÂ", "RÍ", base[[i]])
base[[i]] <- gsub("ÓD", "ÍD", base[[i]])
base[[i]] <- gsub("ÃÂRIC", "ÍC", base[[i]])
base[[i]] <- gsub("LÃÂRI", "LÍ", base[[i]])
base[[i]] <- gsub("TÃÂRI", "TÍ", base[[i]])
base[[i]] <- gsub("ÃÂHOP", "Á", base[[i]])
base[[i]] <- gsub("má", "má", base[[i]])
base[[i]] <- gsub(" ÃÂ", " á", base[[i]])
base[[i]] <- gsub(" À", " Á", base[[i]])
base[[i]] <- gsub("ÃÂRIST", "IST", base[[i]])
base[[i]] <- gsub("•", "-", base[[i]])
base[[i]] <- gsub("Ü", "Ü", base[[i]])
base[[i]] <- gsub("Bá", "Bá", base[[i]])
base[[i]] <- gsub("ÉS", "ÉS", base[[i]])
base[[i]] <- gsub("RÉ", "RÉ", base[[i]])
base[[i]] <- gsub('"°', '-', base[[i]])
base[[i]] <- gsub("PÚ", "PÚ", base[[i]])
base[[i]] <- gsub("ÃÂs", "Ás", base[[i]])
base[[i]] <- gsub("ÃÂS", "ÁS", base[[i]])
base[[i]] <- gsub("IÁ“N", "IÓN", base[[i]])
base[[i]] <- gsub("Áa", "ía", base[[i]])
base[[i]] <- gsub("ÃÂ?", "í", base[[i]])
}
## Export the scraped table to a styled .xlsx report.
## datos: data frame to write; file: output path.
## Layout: a title block (date, author, report heading) in rows 4/5/7,
## then the data table starting at row 9 on a sheet named "SOCIOEMPLEO".
descarga_ex <- function(datos, file){
  workbook <- createWorkbook(type="xlsx")
  report_sheet <- createSheet(workbook, sheetName = "SOCIOEMPLEO")
  ## Cell styles: large bold title, italic subtitle, bold row names,
  ## and centered, thick-bordered, light-blue-filled column headers.
  title_style <- CellStyle(workbook) + Font(workbook, heightInPoints=16, isBold=TRUE)
  subtitle_style <- CellStyle(workbook) +
    Font(workbook, heightInPoints=12, isItalic=TRUE, isBold=FALSE)
  rowname_style <- CellStyle(workbook) + Font(workbook, isBold=TRUE)
  colname_style <- CellStyle(workbook) +
    Font(workbook, isBold=TRUE) +
    Alignment(vertical="VERTICAL_CENTER", wrapText=TRUE, horizontal="ALIGN_CENTER") +
    Border(color="black", position=c("TOP", "BOTTOM"),
           pen=c("BORDER_THICK", "BORDER_THICK")) +
    Fill(foregroundColor = "lightblue", pattern = "SOLID_FOREGROUND")
  ## Helper: write one styled title cell into column 1 of the given row.
  put_title <- function(sheet, rowIndex, title, titleStyle){
    title_row <- createRow(sheet, rowIndex=rowIndex)
    title_cell <- createCell(title_row, colIndex=1)
    setCellValue(title_cell[[1,1]], title)
    setCellStyle(title_cell[[1,1]], titleStyle)
  }
  ## Date and author subtitles.
  put_title(report_sheet, rowIndex=4,
            title=paste("Fecha:", format(Sys.Date(), format="%Y/%m/%d")),
            titleStyle = subtitle_style)
  put_title(report_sheet, rowIndex=5,
            title="Elaborado por: ",
            titleStyle = subtitle_style)
  ## Main report title.
  put_title(report_sheet, rowIndex=7,
            paste("SOCIOEMPLEO CORTE -", Sys.Date()),
            titleStyle = title_style)
  ## The data table itself.
  addDataFrame(datos,
               report_sheet, startRow=9, startColumn=1,
               colnamesStyle = colname_style,
               rownamesStyle = rowname_style,
               row.names = FALSE)
  ## Uniform column width, then write the workbook to disk.
  setColumnWidth(report_sheet, colIndex=c(1:ncol(datos)), colWidth=20)
  saveWorkbook(workbook, file)
}
descarga_ex(base, file=paste0("SOCIOEMPLEO-", Sys.Date(), ".xlsx"))
|
81f1ac9973671d0b0d2eba5a40f8a51b4841dc63
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/XML/examples/xmlSubset.Rd.R
|
71aa6b336d25300d6004839d92145fc4da4c06f4
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 801
|
r
|
xmlSubset.Rd.R
|
## Extracted example script for the XML package's "[.XMLNode" methods:
## subsetting the children of parsed XML nodes with [ and [[.
library(XML)
### Name: [.XMLNode
### Title: Convenience accessors for the children of XMLNode objects.
### Aliases: [.XMLNode [[.XMLNode [[.XMLInternalElementNode
### [[.XMLDocumentContent
### Keywords: IO file
### ** Examples
f = system.file("exampleData", "gnumeric.xml", package = "XML")
top = xmlRoot(xmlTreeParse(f))
# Get the first RowInfo element.
top[["Sheets"]][[1]][["Rows"]][["RowInfo"]]
# Get a list containing only the first row element
top[["Sheets"]][[1]][["Rows"]]["RowInfo"]
top[["Sheets"]][[1]][["Rows"]][1]
# Get all of the RowInfo elements by position
top[["Sheets"]][[1]][["Rows"]][1:xmlSize(top[["Sheets"]][[1]][["Rows"]])]
# But more succinctly and accurately, get all of the RowInfo elements
top[["Sheets"]][[1]][["Rows"]]["RowInfo", all = TRUE]
|
18a1fdd97feba9b459703e8bf5138ef88e77040c
|
99cc7be202be967615bbe55606d13731e42e4632
|
/The art of data science_apllied.R
|
fb44eaeecc83465bcf644b0387e36a43c0e54b49
|
[] |
no_license
|
oscarjo89/Coursera_DataScienceSpecialization
|
4e05e765e581e1c37ebdfe57c58362b424611dd5
|
4370c572365abcf1b7857d28e8be259c02c3de6b
|
refs/heads/master
| 2021-01-09T06:11:10.829943
| 2017-02-26T14:31:49
| 2017-02-26T14:31:49
| 80,918,819
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 13,093
|
r
|
The art of data science_apllied.R
|
# The art of Data Science - A guide for anyone who works with data
# By Roger D. Peng and Elizabeth Matsui
# In this script I will briefly present the main takeaways from the book and apply them
# to a couple of hypothetical data analysis projects based on some of the data sets that come
# with R in order to apply the knowledge from the book and to get practice with using R.
# - Oscar, mon 9th jan 2017
## OBS, check out Google analytics and Bayesian statistics for further study
# Contents (of book)
# 1. Data analysis as art
# 2. Epicycles of analysis
# 3. Stating and refining the question
# 4. Exploratory data analysis
# 5. Using models to explore your data
# 6. Inference: A primer
# 7. Formal modeling
# 8. Inference vs Prediction
# 9. Interpreting your results
# 10. Communication
# EPICIRCLES OF ANALYSIS
# (1) Develop expectations
# (2) Collect data
# (3) Match expectations with data
# [1] Stating the question
# [2] Exploratory data analysis
# [3] Model building
# [4] Interpret
# [5] Communication
################################################################################################
# 1 Set expectations 2 Collect information 3 Revise expectations
################################################################################################
# 1. Question Question of interest Litterature search/ Sharpen question
# to audience experts
# 2. EDA Data are appropriate Make exploratory Refine question or
# for question plots of data collect more data
# 3. Formal model Primary model Fit secondary models, Revise formal model to
# answers question sensitivity analysis include more predictors
# 4. Interpretation Interpretation of Interpret totality of Revise EDA and/or provide
# analysis provides a analysis with focus on specific & interpretable
# specific & meaningful effect sizes and answer
# answer to question uncertainty
# 5. Communication Process and results of Seek feedback Revise analysis or
# analysis are understood approach to presentation
# complete & meaningful
# to audience
################################################################################################
###### 1. Question
# 1.1 Question of interest to audience
# 1.2 Litterature search/experts
# 1.3 Sharpen question
##### 2. EDA
# 2.1 Data are appropriate for question
# 2.2 Make exploratory plots of data
# 2.3 Refine question or collect more data
##### 3. Formal model
# 3.1 Primary model answers question
# 3.2 Fit secondary models, sensitivity analysis
# 3.3 Revise formal model to include more predictors
##### 4. Interpretation
# 4.1 Interpretation of analysis provides a specific & meaningful answer to question
# 4.2 Interpret totality of analysis with focus on effect sizes and uncertainty
# 4.3 Revise EDA and/or provide specific & interpretable answer
##### 5. Communication
# 5.1 Process and results of analysis are understood complete & meaningful to audience
# 5.2 Seek feedback
# 5.3 Revise analysis or approach to presentation
# STATING AND REFINING QUESTION
# - Types of questions
# 1. Descriptive
# 2. Exploratory
# 3. Inferential
# 4. Predictive
# 5. Causal
# 6. Mechanistic
# - Characteristics of a GOOD question
# 1. Of interest
# 2. Not already answered
# 3. Plausable framework
# 4. Answerable
# 5. Specific
# EXPLORATORY DATA ANALYSIS
# (1) Determine if there are any problems with your dataset
# (2) Determine whether the question you are asking can be answered by the data you have
# (3) To develope a sketch of the answere to your question
# Checklist
# 1. Formulate your question
# 2. Read in your data
# 3. Check the packaging
# 4. Look at the top and bottom
# 5. Check your "n"s
# 6. Validate with at least one external data source
# 7. Make a plot
# 8. Try the easy solution first
# 9. Follow up
# - right data?
# - need other data?
# - right question?
# USING MODELS TO EXPLORE YOUR DATA
# Distribution
# Linear relationships
# INFERENCE
# Population
# Sampling
# Model for population
# FORMAL MODELING@
# Primary model
# Secondary models
# 1. outcome
# 2. key predictor
# 3. potential cofounders
# INFERENCE VS PREDICTION
# INTERPRETATION
# 1. Directionaity
# 2. Magnitude
# 3. Uncertainty
# COMMUNICATION
# 1. Audience
# 2. Content
# 3. Style
# 4. Attitude
# The R Datasets package ---------------------------------------------------------------
# Overview of datasets that come with R
# Use list and description to find datasets to preform data analysis on
library(help = "datasets")
# Or go to Packages > datasets
?data
# Potentially interesting datasets:
data("CO2")
CO2
data("ChickWeight")
ChickWeight
data("EuStockMarkets")
EuStockMarkets
data("HairEyeColor")
HairEyeColor
# Project 1 - Chicken weight and diet ---------------------------------------------------------------
rm(list = ls()) # remove all objects in workspace
?ChickWeight
data("ChickWeight")
ChickWeight
# The ChickWeight data frame has 578 rows and 4 columns from an experiment on the
# effect of diet on early growth of chicks.
# I'll use this dataset as the basis for a hypotheical data analysis project
###### 1. Question
# 1.1 Question of interest to audience
# 1.2 Litterature search/experts
# 1.3 Sharpen question
# 1.1 How can we maximize profits? - asks hypothetical chicken farmers.
# 1.2 Lets assume I do some reaserch on chicken farming and discover that increasing chick weight
# is a promising way of increasing profits. And I find the ChickWeight dataset.
# I also find prices for different diets (I'll have to construct hypothetical prices for this project).
chickenDietPrices <- data.frame(diet = c(1,2,3,4), price = c(4, 5, 6, 4.5))
chickenDietPrices
# 1.3 I sharpen the question to - "What diet leads to the fastest weight increase in chickens per dollar?"
# The question is inferential since the experiment is done on a sample of chickens.
# Also it might be causal, because it is an experiment - which studies effect of diet on chicken weight.
# The question is of interest, it is not already answered, it has a plausible framework, is answerable given the data and specific.
##### 2. EDA
# 2.1 Data are appropriate for question
# 2.1 I go to the ChickWeight dataset and chickenDietPrices, and expect I will find the answer here.
?ChickWeight
ChickWeight
chickenDietPrices
summary(ChickWeight$Diet)
# Create one dataset containing all relevant info
ChickWeight1 <- data.frame(ChickWeight, DietPrice = c(rep(4, 220), rep(5, 120), rep(6, 120), rep(4.5, 118)))
ChickWeight1
nrow(ChickWeight1)
ncol(ChickWeight1)
str(ChickWeight1)
summary(ChickWeight1)
head(ChickWeight1)
tail(ChickWeight1)
summary(ChickWeight1$Chick)
# There seems to be 50 chicks. Each measured 12 times, first up to 21st day of their life. Not all chicks have 12 obs, I assume some died earlier.
# In result from summary() function, irst line chick is identifier, second line is number of observations.
?ChickWeight
# Info on Chicken Weight dataset
#Format
#An object of class c("nfnGroupedData", "nfGroupedData", "groupedData", "data.frame") containing the following columns:
#weight
#a numeric vector giving the body weight of the chick (gm).
#Time
#a numeric vector giving the number of days since birth when the measurement was made.
#Chick
#an ordered factor with levels 18 < ... < 48 giving a unique identifier for the chick. The ordering of the levels groups chicks on the same diet together and orders them according to their final weight (lightest to heaviest) within diet.
#Diet
#a factor with levels 1, ..., 4 indicating which experimental diet the chick received.
#Details
#The body weights of the chicks were measured at birth and every second day thereafter until day 20. They were also measured on day 21. There were four groups on chicks on different protein diets.
# Lets assume I have checked with an external source (I have no internet here in Cuba!) and found that weight of chickens are around what it says in the dataset, in grams the first 21 days of their life.
# Link to wikipedia page on chickens.
# 2.2 Make exploratory plots of data
# Make a plot:
?plot
plot(ChickWeight1$Time, ChickWeight1$weight, main = "Chick weight over time", xlab = "Days", ylab = "Grams")
# As expected weight of chicks increase with days. The variance increases with time.
# I would like to see the same plot but differentiating between diet types, preferably with trendline for each diet type. An
# I use filter function in the dplyr package to create subsets of dataset for each diet type. Then plot them each individually.
ChickWeight1_diet1 <- filter(ChickWeight1, Diet == 1)
ChickWeight1_diet2 <- filter(ChickWeight1, Diet == 2)
ChickWeight1_diet3 <- filter(ChickWeight1, Diet == 3)
ChickWeight1_diet4 <- filter(ChickWeight1, Diet == 4)
plot(ChickWeight1_diet1$Time, ChickWeight1_diet1$weight, main = "Chick weight over time", sub = "Diet 1", xlab = "Days", ylab = "Grams")
plot(ChickWeight1_diet2$Time, ChickWeight1_diet2$weight, main = "Chick weight over time", sub = "Diet 2", xlab = "Days", ylab = "Grams")
plot(ChickWeight1_diet3$Time, ChickWeight1_diet3$weight, main = "Chick weight over time", sub = "Diet 3", xlab = "Days", ylab = "Grams")
plot(ChickWeight1_diet4$Time, ChickWeight1_diet4$weight, main = "Chick weight over time", sub = "Diet 4", xlab = "Days", ylab = "Grams")
# Based on just looking at the plots, diet 3 seems most promising.
# Figured out I can use abline and lm to get trendline of all observations, and for each diet! :)
plot(ChickWeight1$Time, ChickWeight1$weight, main = "Chick weight over time", xlab = "Days", ylab = "Grams")
?abline
abline(lm(ChickWeight1$weight ~ ChickWeight1$Time), col = "red")
abline(lm(ChickWeight1_diet1$weight ~ ChickWeight1_diet1$Time), col = "blue")
abline(lm(ChickWeight1_diet2$weight ~ ChickWeight1_diet2$Time), col = "green")
abline(lm(ChickWeight1_diet3$weight ~ ChickWeight1_diet3$Time), col = "orange")
abline(lm(ChickWeight1_diet4$weight ~ ChickWeight1_diet4$Time), col = "yellow")
# The trendlines/least squared fitted lines reveal that that diet 3 is the diet that gives the highest weight increase for chickens in this experiment.
?boxplot
boxplot(ChickWeight1$weight ~ ChickWeight1$Diet)
# Boxplot also shows that diet 3 is the highest mea weight. Note, it also has the most varience.
# I have made some plots and tried the easy solution first. It seems to indicate that diet 3 is the way to go.
# I would like to do morw research on the experiment and the chickens used, and investigate weather or not these can be interprited as representative for the chickens that the farmers I am working for are interested in.
# 2.3 Refine question or collect more data
# EXPLORATORY DATA ANALYSIS
# (1) Determine if there are any problems with your dataset
# (2) Determine whether the question you are asking can be answered by the data you have
# (3) To develope a sketch of the answere to your question
# Checklist
# 1. Formulate your question
# 2. Read in your data
# 3. Check the packaging
# 4. Look at the top and bottom
# 5. Check your "n"s
# 6. Validate with at least one external data source
# 7. Make a plot
# 8. Try the easy solution first
# 9. Follow up
# - right data?
# - need other data?
# - right question?
##### 3. Formal model
# 3.1 Primary model answers question
# 3.2 Fit secondary models, sensitivity analysis
# 3.3 Revise formal model to include more predictors
# First make a simple regression model with weight is dependent and time as independent variable, since it will probably explain most of the weight increase
?lm
model1 = lm(ChickWeight1$weight ~ ChickWeight1$Time)
model1 = lm(weight ~ Time, data = ChickWeight1)
summary(model1)
# Create a model with diet 2-4 as dummy variables.
model2 = lm(weight ~ Time + Diet, data = ChickWeight1)
summary(model2)
weightIncreasePerDollarDiet2 = 16.1661/5
weightIncreasePerDollarDiet3 = 36.4994/6
weightIncreasePerDollarDiet4 = 30.2335/4.5
weightIncreasePerDollarDiet2
weightIncreasePerDollarDiet3
weightIncreasePerDollarDiet4
# Because the primary question was which diet creates the larges increase in chick weight per dollar.
# Diet 3 gave the highest weight increase, but it was also the most expensive.
# When we factor in price, diet 3 is the reccomended diet for our farmers.
##### 4. Interpretation
# 4.1 Interpretation of analysis provides a specific & meaningful answer to question
# 4.2 Interpret totality of analysis with focus on effect sizes and uncertainty
# 4.3 Revise EDA and/or provide specific & interpretable answer
##### 5. Communication
# 5.1 Process and results of analysis are understood complete & meaningful to audience
# 5.2 Seek feedback
# 5.3 Revise analysis or approach to presentation
|
48a490a4d9efb4c4850097716a58d3e7d5589425
|
ee5b8d5819445f2488a048fd6bb79eaf1dc7e1e8
|
/man/trfm_data.Rd
|
85c7bd4b9b62b125b813a1fdd7e8ceccf3272a71
|
[
"MIT"
] |
permissive
|
bioticinteractions/predict2
|
598d84389ae2f81289ea2b3d01a25c2082935ec3
|
3f5d00e783f07d70a3be1e64f1174ae48afbc4c6
|
refs/heads/master
| 2021-08-29T12:09:20.193795
| 2017-12-13T23:17:42
| 2017-12-13T23:17:42
| 106,970,818
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 498
|
rd
|
trfm_data.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data_trfm.r
\docType{data}
\name{trfm_data}
\alias{trfm_data}
\title{Toy data that makes no sense}
\format{An object of class \code{data.frame} with 49 rows and 7 columns.}
\usage{
data(trfm_data)
}
\description{
Data for making transformations arbitrarily created from existing data (contact Dan for additional details)
}
\examples{
library(predict2)
data(trfm_data)
subset(trfm_data, keep == 'y')
}
\keyword{datasets}
|
b8b4b1deb913e11c895cf0510b923fc5229b859c
|
0c61299c0bfab751bfb5b5eac3f58ee2eae2e4b0
|
/Early_attempts/Master.R
|
baefe4bb0a3940809ebaf7748bd9cf7b77581aa0
|
[] |
no_license
|
jwerba14/Species-Traits
|
aa2b383ce0494bc6081dff0be879fc68ed24e9c2
|
242673c2ec6166d4537e8994d00a09477fea3f79
|
refs/heads/master
| 2022-10-13T10:57:54.711688
| 2020-06-12T01:57:21
| 2020-06-12T01:57:21
| 105,941,598
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 195
|
r
|
Master.R
|
load("Mich_Death_functions.R")
load("dat_simulation.R")
load("clean_sim.R")
load("cleandat.R")
load("Graphing_MM_Algae.R")
load("algaemodeljags.R")
load("Graph_Simulation_DataParameters.R")
|
e318bc544042f38e635ca2eb932b00eaa52b1862
|
67d0524fbfb11cc19f4748cbe1c76af08ff7dd9f
|
/man/sctest.default.Rd
|
728c7e412d5baaf88484b9bbf1f5024507d8b9b8
|
[] |
no_license
|
cran/strucchange
|
c0deea7a6e907c6c81769fe57ee09ad358b583d4
|
97bb4ea4cf76248aeb8abfc36c87b82dc2cee3db
|
refs/heads/master
| 2022-06-26T04:41:50.013743
| 2022-06-15T05:50:02
| 2022-06-15T05:50:02
| 17,700,164
| 7
| 6
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,618
|
rd
|
sctest.default.Rd
|
\name{sctest.default}
\alias{sctest.default}
\title{Structural Change Tests in Parametric Models}
\description{
Performs model-based tests for structural change (or parameter instability)
in parametric models.
}
\usage{
\method{sctest}{default}(x, order.by = NULL, functional = maxBB,
vcov = NULL, scores = estfun, decorrelate = TRUE, sandwich = TRUE,
parm = NULL, plot = FALSE, from = 0.1, to = NULL, nobs = NULL,
nrep = 50000, width = 0.15, xlab = NULL, \dots)
}
\arguments{
\item{x}{a model object. The model class can in principle be arbitrary
but needs to provide suitable methods for extracting the \code{scores}
and associated variance-covariance matrix \code{vcov}.}
\item{order.by}{either a vector \code{z} or a formula with a single explanatory
variable like \code{~ z}. The observations in the model
are ordered by the size of \code{z}. If set to \code{NULL} (the
default) the observations are assumed to be ordered (e.g., a
time series).}
\item{functional}{either a character specification of the functional
to be used or an \code{\link{efpFunctional}} object. For a list
of functionals see the details.}
\item{vcov}{a function to extract the covariance matrix
for the coefficients of the fitted model:
\code{vcov(x, order.by = order.by, data = data)}.
Alternatively, the character string \code{"info"}, for details see
below.}
\item{scores}{a function which extracts the scores or estimating
function from the fitted object: \code{scores(x)}, by default
this is \code{\link[sandwich]{estfun}}.}
\item{decorrelate}{logical. Should the process be decorrelated?}
\item{sandwich}{logical. Is the function \code{vcov} the full sandwich
estimator or only the meat?}
\item{parm}{integer or character specifying the component of the estimating
functions which should be used (by default all components are used).}
\item{plot}{logical. Should the result of the test also be visualized?}
\item{from, to}{numeric. In case the \code{functional} is \code{"supLM"}
(or equivalently \code{"maxLM"}), \code{from} and \code{to} can be
passed to the \code{\link{supLM}} functional.}
\item{nobs, nrep}{numeric. In case the \code{functional} is \code{"maxLMo"},
\code{nobs} and \code{nrep} are passed to the \code{\link{catL2BB}} functional.}
\item{width}{numeric. In case the \code{functional} is \code{"MOSUM"},
the bandwidth \code{width} is passed to the \code{\link{maxMOSUM}}
functional.}
\item{xlab, \dots}{graphical parameters passed to the plot method (in case
\code{plot = TRUE}).}
}
\details{
\code{sctest.default} is a convenience interface to \code{\link{gefp}} for
structural change tests (or parameter instability tests) in general
parametric models. It proceeds in the following steps:
\enumerate{
\item The generalized empirical fluctuation process (or score-based CUSUM process)
is computed via \code{scus <- gefp(x, fit = NULL, \dots)} where \code{\dots}
comprises the arguments \code{order.by}, \code{vcov}, \code{scores}, \code{decorrelate},
\code{sandwich}, \code{parm} that are simply passed on to \code{\link{gefp}}.
\item The empirical fluctuation process is visualized (if \code{plot = TRUE}) via
\code{plot(scus, functional = functional, \dots)}.
\item The empirical fluctuation is assessed by the corresponding significance test
via \code{sctest(scus, functional = functional)}.
}
The main motivation for prociding the convenience interface is that these three
steps can be easily carried out in one go along with a two convenience options:
\enumerate{
\item By default, the covariance is computed by an outer-product of gradients
estimator just as in \code{gefp}. This is always available based on the \code{scores}.
Additionally, by setting \code{vcov = "info"}, the corresponding information
matrix can be used. Then the average information is assumed to be provided by
the \code{vcov} method for the model class. (Note that this is only sensible
for models estimated by maximum likelihood.)
\item Instead of providing the \code{functional} by an \code{\link{efpFunctional}}
object, the test labels employed by Merkle and Zeileis (2013) and Merkle, Fan,
and Zeileis (2013) can be used for convenience. Namely, for continuous numeric
orderings, the following functionals are available:
\code{functional = "DM"} or \code{"dmax"} provides the double-maximum test (\code{\link{maxBB}}).
\code{"CvM"} is the Cramer-von Mises functional \code{\link{meanL2BB}}.
\code{"supLM"} or equivalently \code{"maxLM"} is Andrews' supLM test
(\code{\link{supLM}}). \code{"MOSUM"} or \code{"maxMOSUM"} is the MOSUM
functional (\code{\link{maxMOSUM}}), and \code{"range"} is the range
functional \code{\link{rangeBB}}. Furthermore, several functionals suitable
for (ordered) categorical \code{order.by} variables are provided:
\code{"LMuo"} is the unordered LM test (\code{\link{catL2BB}}),
\code{"WDMo"} is the weighted double-maximum test for ordered variables
(\code{\link{ordwmax}}), and \code{"maxLMo"} is the maxLM test for
ordered variables (\code{\link{ordL2BB}}).
}
The theoretical model class is introduced in Zeileis and Hornik (2007) with a
unifying view in Zeileis (2005), especially from an econometric perspective.
Zeileis (2006) introduces the underling computational tools \code{gefp} and
\code{efpFunctional}.
Merkle and Zeileis (2013) discuss the methods in the context of measurement
invariance which is particularly relevant to psychometric models for cross section
data. Merkle, Fan, and Zeileis (2014) extend the results to ordered categorical
variables.
Zeileis, Shah, and Patnaik (2013) provide a unifying discussion in the context
of time series methods, specifically in financial econometrics.
}
\value{
An object of class \code{"htest"} containing:
\item{statistic}{the test statistic,}
\item{p.value}{the corresponding p value,}
\item{method}{a character string with the method used,}
\item{data.name}{a character string with the data name.}
}
\references{
Merkle E.C., Zeileis A. (2013), Tests of Measurement Invariance without Subgroups:
A Generalization of Classical Methods. \emph{Psychometrika}, \bold{78}(1), 59--82.
doi:10.1007/S11336-012-9302-4
Merkle E.C., Fan J., Zeileis A. (2014), Testing for Measurement Invariance with
Respect to an Ordinal Variable. \emph{Psychometrika}, \bold{79}(4), 569--584.
doi:10.1007/S11336-013-9376-7.
Zeileis A. (2005), A Unified Approach to Structural Change Tests Based on
ML Scores, F Statistics, and OLS Residuals. \emph{Econometric Reviews}, \bold{24},
445--466. doi:10.1080/07474930500406053.
Zeileis A. (2006), Implementing a Class of Structural Change Tests: An
Econometric Computing Approach. \emph{Computational Statistics & Data Analysis},
\bold{50}, 2987--3008. doi:10.1016/j.csda.2005.07.001.
Zeileis A., Hornik K. (2007), Generalized M-Fluctuation Tests for Parameter
Instability, \emph{Statistica Neerlandica}, \bold{61}, 488--508.
doi:10.1111/j.1467-9574.2007.00371.x.
Zeileis A., Shah A., Patnaik I. (2010), Testing, Monitoring, and Dating Structural
Changes in Exchange Rate Regimes, \emph{Computational Statistics and Data Analysis},
\bold{54}(6), 1696--1706. doi:10.1016/j.csda.2009.12.005.
}
\seealso{\code{\link{gefp}}, \code{\link{efpFunctional}}}
\examples{
## Zeileis and Hornik (2007), Section 5.3, Figure 6
data("Grossarl")
m <- glm(cbind(illegitimate, legitimate) ~ 1, family = binomial, data = Grossarl,
subset = time(fraction) <= 1800)
sctest(m, order.by = 1700:1800, functional = "CvM")
}
\keyword{htest}
|
fdc767ffbf3f3143e02e2c846e0a588a2f6923bb
|
992e8047ecf70d2daf0e6ce5996b23237f83c05f
|
/PDApp.r
|
fe17af3dc77f88c8920215a0ced8cc88670a0721
|
[] |
no_license
|
DShapero1/Player-Development---WVU-Baseball
|
bc4954d77da0fed4c7263ec095ea07c6a4f705b2
|
be3e4de60c426440740d67bdc8e4a1d74b9de96b
|
refs/heads/main
| 2023-08-26T06:51:50.973315
| 2021-10-22T02:17:38
| 2021-10-22T02:17:38
| 409,386,704
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 23,017
|
r
|
PDApp.r
|
#
# This is a Shiny web application. You can run the application by clicking
# the 'Run App' button above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
library(shinydashboard)
library(tidyverse)
library(DT)
library(bslib)
library(janitor)
library(readr)
data <- read.csv("Fall2020.csv")
data$Date <- as.Date(data$Date, "%m/%d/%y")
deg2rad <- function(deg) {(deg * pi) / (180)}
data <- data %>%
mutate(Type = case_when(
TaggedPitchType %in% c("Fastball", "Sinker") ~ "FB",
TaggedPitchType == "ChangeUp" ~ "CH",
TaggedPitchType == "Slider" ~ "SL",
TaggedPitchType == "Curveball" ~ "CB"),
Count = paste(Balls, Strikes, sep = "-"))
squat_jump <- read_csv("squat_jump_master.csv")
squat_jump <- squat_jump %>%
filter(Tags == "Profiling")
squat_jump <- clean_names(squat_jump)
power_output <- squat_jump %>%
select(name, type, peak_propulsive_power, peak_propulsive_force)
mean(power_output$peak_propulsive_power) # 4782.293 is the mean of the dataset
mean(power_output$peak_propulsive_force) # 2105.867 is the mean of the dataset
sd(power_output$peak_propulsive_power) # 837.1419 is the sd of the dataset
sd(power_output$peak_propulsive_force) # 305.2852 is the sd of the dataset
power_output <- power_output %>%
mutate(power_z_score = (peak_propulsive_power - 4805.314) / 930.0187,
force_z_score = (peak_propulsive_force - 2034.903) / 331.229) %>%
filter(name != "Weston Mazey")
power_output_max <- power_output %>%
group_by(name) %>%
slice(which.max(peak_propulsive_power))
sj_power <- squat_jump %>%
filter(name != "Weston Mazey") %>%
group_by(name) %>%
slice(which.max(peak_propulsive_power)) %>%
select(name, type, peak_propulsive_power) %>%
mutate(peak_propulsive_power = round(peak_propulsive_power)) %>%
arrange(desc(peak_propulsive_power))
sj_force <- squat_jump %>%
filter(name != "Weston Mazey") %>%
group_by(name) %>%
slice(which.max(peak_propulsive_force)) %>%
select(name, type, peak_propulsive_force) %>%
arrange(desc(peak_propulsive_force))
# Read in sim games and color code velocity
sim_games <- read_csv("fall_sim_games.csv")
sim_games %>%
separate(Pitcher, c("last", "first")) -> sim_games2
sim_games2$name <- paste(sim_games2$first, sim_games2$last, sep = " ")
merge_df <- sim_games2 %>%
group_by(name) %>%
summarize('Velo' = mean(RelSpeed, na.rm = T))
power_viz <- power_output_max %>%
left_join(merge_df, by = "name")
# Define UI for application that draws a histogram
ui <- navbarPage(title = "WVU Player Development",
tabPanel(title = "Home",
imageOutput("wvu_img"),
imageOutput("home_img", height = "320px"),
br(),
hr(),
h4(strong("App Description")),
p(style="text-align: justify; font-size = 25px",
"This application is to centralize player development information for our program",
em("demonstrating various use of shiny features."),
"Everyone will recieve an account to access the app in the future",
tags$blockquote("WVU PD App is still under continuous development.
Please look forward to future updates!")),
hr()),
tabPanel(title = "Pitching",
fluidPage(
sidebarLayout(
sidebarPanel(
fluidRow(
column(6, selectInput(inputId = "Pitcher", label = "Select Pitcher", choices = sort(unique(data$Pitcher)))),
),
fluidRow(
column(5, selectInput(inputId = "Date", label = "Select Game", choices = ""))
)
# fluidRow(
# column(4, selectInput(inputId = "Count", label = "Select Count", choices = sort(unique(data$Count))))
# )
),
mainPanel(
wellPanel(style = "background: white; border-color:black; border-width:2px",
fluidRow(
column(2, img(src = "wv_pic.png", height = 100, width = 100), align = "center"),
column(4, h2(strong(textOutput("selected_pitcher"))), hr(style="border-color: black;"), style = "padding-right:0px;"),
column(6, h2("Pitching Development"), hr(style="border-color: black;"), h2(textOutput("selected_game")), align = "center", style = "padding-left:0px;"))),
tabsetPanel(
type = "tabs",
tabPanel("Post-Game Report",
wellPanel(style = "background: white; border-color:black; border-width:3px",
fluidRow(
column(width = 10.5, h3(strong("Pitcher Summary Table")), dataTableOutput("pitcher_summary_table"), align = "center")
), br(), br(),
fluidRow(column(4, plotOutput("pitch_loc"), align = "center"),
column(4, plotOutput("pitch_sm"), align = "center"),
column(4, plotOutput("pitch_hh"), align = "center")
), br(), br(), br(), br())),
tabPanel("Game Charts",
wellPanel(style = "background: white; border-color:black; border-width:2px",
fluidRow(
column(6, plotOutput("pitch_velo"), align = "center"),
column(6, plotOutput("move_plot"), align = "center")),
fluidRow(
column(6, plotOutput("release_point"), align = "center")
), br(), br())))
)
)
)),
# tabPanel(title = "Hitting",
# fluidPage(
# sidebarLayout(
# sidebarPanel(
# fluidRow(
# column(6, selectInput(inputId = "Batter", label = "Select Batter", choices = sort(unique(data$Batter))))
# ),
# fluidRow(
# column(5, selectInput(inputId = "Date", label = "Select Game", choices = ""))
# )),
# mainPanel(
# wellPanel(style = "background: white; border-color:black; border-width:2px",
# fluidRow(
# column(2, img(src = "wv_pic.png", height = 100, width = 100), align = "center),
# column(6, h2("Hitting Development"), hr(style="border-color: black;"), h2(textOutput("selected_game")), align = "center", style = "padding-left:0px;"))),
# ))))))),
tabPanel(title = "Sport Sciences",
tabsetPanel(
type = "tabs",
tabPanel("Squat Jump",
fluidRow(plotOutput("force_plot"), align = "center"),
fluidRow(
column(5, h3(strong("Power Leaderboards")), dataTableOutput("power_table"), align = "center"),
column(7, h3(strong("Force Leaderboards")), dataTableOutput("force_table"), align = "center"))
))),
inverse = T
)
# Define server logic required to draw a histogram
# Shiny server for the pitching-development dashboard.
# Relies on objects created at file scope (outside this block): `data`
# (pitch-by-pitch Trackman-style table — assumed to already carry a `Type`
# column, TODO confirm), plus `sj_power`, `sj_force` and `power_viz`
# (squat-jump summaries).
server <- function(input, output, session) {

  # Landing-page logo.
  output$wvu_img <- renderImage({
    list(src = "www/wvu_pic.png",
         width = "100%",
         height = 330)
  }, deleteFile = FALSE)

  # Keep the game selector restricted to dates the chosen pitcher appeared in.
  observeEvent(input$Pitcher,
               updateSelectInput(session, inputId = "Date", label = "Select Game",
                                 choices = sort(unique(data$Date[data$Pitcher == input$Pitcher]))))

  # Header labels bound to the current selections.
  output$selected_pitcher <- renderText({ paste(input$Pitcher) })
  output$selected_game <- renderText({ paste(input$Date) })
  # NOTE(review): the Batter input is commented out in the UI, so this output
  # is currently inert; kept for when the hitting tab is re-enabled.
  output$selected_batter <- renderText({ paste(input$Batter) })

  # Per-pitch-type summary table for the selected pitcher and game.
  output$pitcher_summary_table <- renderDataTable({
    # `summary_tbl` avoids shadowing base::table (original local was `table`);
    # the original also wrapped it in a no-op reactive(), removed here.
    summary_tbl <- data %>%
      # Attack-zone region from plate location (Heart / Shadow / Chase / Waste).
      mutate(region = ifelse((PlateLocSide >= -0.558 & PlateLocSide <= 0.558 & PlateLocHeight >= 1.83 & PlateLocHeight <= 3.167), "Heart",
                             ifelse((PlateLocSide >= -1.11 & PlateLocSide <= 1.11 & PlateLocHeight >= 1.167 & PlateLocHeight <= 3.833), "Shadow",
                                    ifelse((PlateLocSide >= -1.67 & PlateLocSide <= 1.67 & PlateLocHeight >= 0.1667 & PlateLocHeight <= 4.5), "Chase", "Waste")))) %>%
      mutate('Zone' = ifelse(PlateLocSide >= -0.83 & PlateLocSide <= 0.83 & PlateLocHeight >= 1.5 & PlateLocHeight <= 3.5, 1, 0),
             'ChaseP' = ifelse(region == "Chase", 1, 0),  # pitches in the chase zone
             'ChaseS' = ifelse(PitchCall == "StrikeSwinging" & region == "Chase", 1, 0),  # chased swings-and-misses
             Type = case_when(
               TaggedPitchType %in% c("Fastball", "Sinker") ~ "FB",
               TaggedPitchType == "ChangeUp" ~ "CH",
               TaggedPitchType == "Slider" ~ "SL",
               TaggedPitchType == "Curveball" ~ "CB"
             )) %>%
      filter(Pitcher == input$Pitcher, Date == input$Date, !TaggedPitchType %in% c("Undefined", "Other")) %>%
      group_by(Pitch = TaggedPitchType) %>%
      summarise('Pitches Thrown' = length(TaggedPitchType),
                # 25th-75th percentile velocity band, e.g. "88-91".
                'Velo.' = paste(round(quantile(RelSpeed, na.rm = TRUE, c(0.25))), round(quantile(RelSpeed, na.rm = TRUE, c(0.75))), sep = "-"),
                'Max Velo.' = round(max(RelSpeed, na.rm = TRUE)),
                'Avg Spin Rate.' = round(mean(SpinRate, na.rm = TRUE)),
                'HB' = round(mean(HorzBreak, na.rm = TRUE), digits = 1),
                'VB' = round(mean(InducedVertBreak, na.rm = TRUE), digits = 1),
                'VAA' = round(mean(VertApprAngle, na.rm = TRUE), digits = 1),
                'Chase%' = 100 * round(sum(ChaseS) / sum(ChaseP), digits = 2),
                # Whiffs over swings; parenthesization clarified from the
                # original sum(x / sum(y)), which is numerically identical.
                'Whiff%' = 100 * round(sum(PitchCall %in% c("StrikeSwinging")) / sum(PitchCall %in% c("StrikeSwinging", "InPlay", "FoulBall")), 3),
                'Strike%' = 100 * round(sum(PitchCall %in% c("StrikeSwinging", "StrikeCalled", "FoulBall", "InPlay")) / n(), 3)) %>%
      replace_na(list(`Whiff%` = 0, `Chase%` = 0)) %>%
      arrange(desc(Pitch == "Fastball"))  # show fastballs first
    datatable(summary_tbl,
              options = list(dom = 't', columnDefs = list(list(targets = 0, visible = TRUE))),
              rownames = FALSE) %>%
      formatStyle(c(1, 2), `border-left` = "solid 1px") %>%
      formatStyle(c(2, 5, 7), `border-right` = "solid 1px")
  })

  # Velocity by pitch number over the course of the game.
  output$pitch_velo <- renderPlot({
    pitch_filter <- reactive({
      data %>%
        filter(Pitcher == input$Pitcher, Date == input$Date, TaggedPitchType != "NA", Type != "NA") %>%
        mutate(pitch_count = row_number())
    })
    ggplot(pitch_filter(), aes(x = pitch_count, y = RelSpeed, color = Type)) +
      geom_line(size = 1) +
      theme_bw() +
      labs(x = "Pitch Count", y = "Velocity (MPH.)", color = "Pitch Type", title = "Pitch Velocity") +
      theme(legend.position = "bottom",
            legend.title = element_blank(),
            plot.title = element_text(size = 16, face = "bold", hjust = 0.5))
  }, width = 350, height = 350)

  # Strike-zone / home-plate overlay shared by the three location charts
  # below (the original repeated these 17 layers in each plot).
  strike_zone_layers <- function() {
    list(
      geom_rect(xmin = -0.83, xmax = 0.83, ymin = 1.5, ymax = 3.5,
                color = "black", fill = "transparent"),                       # rule-book zone
      geom_rect(xmin = -1.10833333, xmax = 1.10833333,
                ymin = 1.16666667, ymax = 3.83333333,
                color = "black", linetype = "dashed", fill = "transparent"),  # shadow zone
      # 3x3 grid inside the zone
      geom_segment(aes(x = 0.275, y = 1.5, xend = 0.275, yend = 3.5), color = "black"),
      geom_segment(aes(x = -0.275, y = 1.5, xend = -0.275, yend = 3.5), color = "black"),
      geom_segment(aes(x = -0.83, y = 2.83, xend = 0.83, yend = 2.83), color = "black"),
      geom_segment(aes(x = -0.83, y = 2.16, xend = 0.83, yend = 2.16), color = "black"),
      # home-plate outline
      geom_segment(aes(x = -0.708, y = 0.15, xend = 0.708, yend = 0.15), size = 0.5, color = "black"),
      geom_segment(aes(x = -0.708, y = 0.3, xend = -0.708, yend = 0.15), size = 0.5, color = "black"),
      geom_segment(aes(x = -0.708, y = 0.3, xend = 0, yend = 0.5), size = 0.5, color = "black"),
      geom_segment(aes(x = 0, y = 0.5, xend = 0.708, yend = 0.3), size = 0.5, color = "black"),
      geom_segment(aes(x = 0.708, y = 0.3, xend = 0.708, yend = 0.15), size = 0.5, color = "black")
    )
  }

  # All pitch locations (catcher's view; PlateLocSide is mirrored).
  output$pitch_loc <- renderPlot({
    pitch_loc_filter <- reactive({
      data %>%
        filter(Pitcher == input$Pitcher, Date == input$Date, Type != "NA")
    })
    ggplot(pitch_loc_filter(), aes(x = PlateLocSide * -1, y = PlateLocHeight, color = TaggedPitchType)) +
      geom_point(na.rm = TRUE, size = 3) +
      strike_zone_layers() +
      coord_equal() +
      scale_x_continuous(limits = c(-2, 2)) +
      scale_y_continuous(limits = c(0, 5)) +
      theme_bw() +
      labs(title = "Pitch Location", color = "Type", y = "", x = "") +
      theme(legend.position = "bottom",
            plot.title = element_text(size = 16, face = "bold", hjust = 0.5))
  }, width = 250, height = 350)

  # Locations of swinging strikes only.
  output$pitch_sm <- renderPlot({
    sm_filter <- reactive({
      data %>%
        filter(Pitcher == input$Pitcher, Date == input$Date, Type != "NA", PitchCall == "StrikeSwinging")
    })
    ggplot(sm_filter(), aes(x = PlateLocSide * -1, y = PlateLocHeight, color = TaggedPitchType)) +
      geom_point(na.rm = TRUE, size = 3) +
      strike_zone_layers() +
      coord_equal() +
      scale_x_continuous(limits = c(-2, 2)) +
      scale_y_continuous(limits = c(0, 5)) +
      theme_bw() +
      labs(title = "Swing and Miss", y = "", x = "", color = "Type") +
      theme(legend.position = "bottom",
            plot.title = element_text(size = 16, face = "bold", hjust = 0.5))
  }, width = 250, height = 350)

  # Locations of hard-hit balls (exit velocity >= 95 mph).
  output$pitch_hh <- renderPlot({
    hh_filter <- reactive({
      data %>%
        filter(Pitcher == input$Pitcher, Date == input$Date, Type != "NA", ExitSpeed >= 95)
    })
    ggplot(hh_filter(), aes(x = PlateLocSide * -1, y = PlateLocHeight, color = TaggedPitchType)) +
      geom_point(na.rm = TRUE, size = 3) +
      strike_zone_layers() +
      coord_equal() +
      scale_x_continuous(limits = c(-2, 2)) +
      scale_y_continuous(limits = c(0, 5)) +
      theme_bw() +
      labs(title = "Hard Hit", y = "", x = "", color = "Type") +
      theme(legend.position = "bottom",
            plot.title = element_text(size = 16, face = "bold", hjust = 0.5))
  }, width = 250, height = 350)

  # Horizontal vs induced vertical break, by pitch type.
  output$move_plot <- renderPlot({
    move_filter <- reactive({
      data %>%
        filter(Pitcher == input$Pitcher, Date == input$Date, TaggedPitchType != "NA", Type != "NA") %>%
        mutate(pitch_count = row_number())
    })
    ggplot(move_filter(), aes(x = HorzBreak, y = InducedVertBreak, color = Type)) +
      geom_point() +
      theme_bw() +
      scale_x_continuous(limits = c(-30, 30), breaks = seq(-30, 30, by = 10)) +
      scale_y_continuous(limits = c(-30, 30), breaks = seq(-30, 30, by = 10)) +
      geom_hline(yintercept = 0) +
      geom_vline(xintercept = 0) +
      labs(x = "", y = "", title = "Pitch Movement") +
      theme(legend.position = "bottom",
            legend.title = element_blank(),
            plot.title = element_text(size = 16, face = "bold", hjust = 0.5))
  }, width = 350, height = 350)

  # Release point (mirrored to the catcher's view), with the strike zone
  # drawn only for scale.
  output$release_point <- renderPlot({
    release_filter <- reactive({
      data %>%
        filter(Pitcher == input$Pitcher, Date == input$Date, Type != "NA")
    })
    ggplot(release_filter(), aes(x = RelSide * -1, y = RelHeight, color = Type)) +
      geom_point() +
      scale_x_continuous(limits = c(-3, 3), breaks = seq(-4, 4, by = 1)) +
      scale_y_continuous(limits = c(0, 8), breaks = seq(0, 8, by = 2)) +
      geom_vline(xintercept = 0) +
      geom_rect(xmin = -0.83, xmax = 0.83, ymin = 1.5, ymax = 3.5,
                color = "black", fill = "transparent") +
      theme_bw() +
      labs(x = "", y = "", color = "Pitch Type", title = "Release Point") +
      theme(legend.position = "bottom",
            legend.title = element_blank(),
            plot.title = element_text(size = 16, face = "bold", hjust = 0.5))
  }, width = 350, height = 350)

  # Squat-jump leaderboards. BUG FIX: the DT option is spelled `pageLength`;
  # the original `pagelength` was silently ignored.
  output$power_table <- renderDataTable(sj_power, options = list(pageLength = 5))
  output$force_table <- renderDataTable(sj_force, options = list(pageLength = 5))

  # Force vs power z-score quadrant chart; labels the top and bottom deciles.
  output$force_plot <- renderPlot(
    ggplot(power_viz, aes(power_z_score, force_z_score, color = Velo)) +
      geom_point() +
      scale_x_continuous("Z-Score Squat Jump Peak Power", limits = c(-4, 4), breaks = seq(-4, 4, by = 1)) +
      scale_y_continuous("Z-Score Peak Force", limits = c(-4, 4), breaks = seq(-4, 4, by = 1)) +
      geom_hline(yintercept = 0) +
      geom_vline(xintercept = 0) +
      annotate(x = -3, y = 3, geom = "text", label = "High Force Low Power", color = "red") +
      annotate(geom = "text", label = "High Force High Power", color = "red", x = 2, y = 3) +
      annotate(geom = "text", label = "Low Force Low Power", x = -3, y = -1, color = "red") +
      annotate(geom = "text", label = "Low Force High Power", color = "red", x = 3, y = -2) +
      geom_text(aes(label = ifelse(power_z_score > quantile(power_z_score, 0.90),
                                   as.character(name), '')), hjust = 0, vjust = 0) +
      geom_text(aes(label = ifelse(power_z_score < quantile(power_z_score, 0.10),
                                   as.character(name), '')), hjust = 0, vjust = 0) +
      labs(title = "Net Peak Force vs Squat Jump Peak Power") +
      theme_bw()
  )
}
# Run the application: bind the UI definition above to the server logic.
shinyApp(ui = ui, server = server)
|
8f250764d33cae4904b378687acc5aac2588ac02
|
060bf24ff4534c7a63e4a229b0eaeef1ffaeb88b
|
/Server/uploadpackage.R
|
1b17f6f34e1b47d5ffe2d080a559228f0f819cfa
|
[
"MIT"
] |
permissive
|
andersonjaraujo/risk_assessment
|
71f68b3d7ad55178fb933d6079110660198c8997
|
32f2865f97166db2a36734564eb3ebd62bf88453
|
refs/heads/master
| 2023-05-12T01:41:51.599990
| 2021-05-28T19:32:54
| 2021-05-28T19:32:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,990
|
r
|
uploadpackage.R
|
#####################################################################################################################
# uploadpackage.R - upload pacakge Source file for server Module.
# Author: K Aravind Reddy
# Date: July 13th, 2020
# License: MIT License
#####################################################################################################################
# Implement the intro logic. Sidebar steps are listed in global.r
# this dataset is also static... perhaps it should be sourced from global.r?
# Walkthrough steps shown before any upload has happened.
upload_pkg_initial_steps <- reactive(
  data.frame(
    # Note that we access chooseCSVtext with '.' instead of '#', because we track its class and not its id.
    element = c("#help", ".chooseCSVtext", ".sample_dataset_link"),
    intro = c(
      "Click here anytime you need help.",
      "Upload a CSV file with the package(s) you would like to assess.",
      "You can use this sample dataset to explore the app."
    ),
    position = c("right", rep("top", 2))
  )
)

# Extra steps that only make sense after an upload has completed; otherwise
# a zero-row frame so introjs gets nothing to append.
upload_pkg_steps <- reactive(
  if(values$upload_complete == "upload_complete"){
    data.frame(
      element = c("#upload_summary_text", "#upload_summary"),
      intro = c(
        "Text description of packages uploaded. Counts by type: 'Total', 'New', 'Undiscovered', 'Duplicate'.",
        "Confirm uploaded packages list, filter by type"
      ),
      position = c("bottom", "top")
    )
  } else {
    data.frame(element = character(0) , intro = character(0), position = character(0))
  }
)

# Start introjs when help button is pressed. The tour is the union of the
# initial steps, the post-upload steps and the sidebar steps (from global.r).
observeEvent(input$help,
  introjs(session,
          options = list(
            steps =
              upload_pkg_initial_steps() %>%
              union(upload_pkg_steps()) %>%
              union(sidebar_steps),
            "nextLabel" = "Next",
            "prevLabel" = "Previous",
            "skipLabel" = "Close"
          )
  )
)

# Sample csv file content, displayed in the "view sample" modal further down.
data <- reactive({
  data.table(read_csv(file.path("Data", "upload_format.csv")))
})
# Load the columns from DB into reactive values.
# Re-filter the displayed upload summary whenever the type filter changes or
# a new file is uploaded; only active once an upload has completed.
observeEvent(list(input$total_new_undis_dup,input$uploaded_file), {
  req(values$upload_complete == "upload_complete")
  # After upload complete, update db dash screen with new package(s)
  values$db_pkg_overview <- update_db_dash()
  if (input$total_new_undis_dup == "All") {
    values$Total_New_Undis_Dup <- values$Total
  } else if (input$total_new_undis_dup == "New") {
    values$Total_New_Undis_Dup <- values$New
  } else if (input$total_new_undis_dup == "Undiscovered") {
    values$Total_New_Undis_Dup <- values$Undis
  } else if (input$total_new_undis_dup == "Duplicates") {
    values$Total_New_Undis_Dup <- values$Dup
  }
}, ignoreInit = TRUE) # End of the observe.

# 2. Observe to disable the input widgets while the packages uploading into DB.
# Main upload pipeline: validate the file, classify packages as duplicate/new,
# fetch metadata for new ones, then refresh the package selector and log a summary.
observeEvent(input$uploaded_file, {
  # req(input$uploaded_file)
  values$uploaded_file_status <- file_upload_error_handling(input$uploaded_file)
  # On a malformed upload: hide the summary widgets and reset the file input.
  if (values$uploaded_file_status != "no_error") {
    shinyjs::hide("upload_summary_text")
    shinyjs::hide("upload_summary_select")
    shinyjs::hide("total_new_undis_dup_table")
    reset("uploaded_file")
    return()
  } else{
    shinyjs::show("upload_summary_text")
    shinyjs::show("upload_summary_select")
    shinyjs::show("total_new_undis_dup_table")
  }
  file_to_read <- input$uploaded_file
  pkgs_file <-
    read.csv(file_to_read$datapath,
             sep = ",",
             stringsAsFactors = FALSE)
  # Normalize headers and trim whitespace around package names/versions.
  names(pkgs_file) <- tolower(names(pkgs_file))
  pkgs_file$package <- trimws(pkgs_file$package)
  pkgs_file$version <- trimws(pkgs_file$version)
  values$Total <- pkgs_file
  # Split uploaded packages into duplicates (already in DB) and new ones.
  pkgs_db1 <- db_fun("SELECT name FROM package")
  values$Dup <- filter(values$Total, values$Total$package %in% pkgs_db1$name)
  values$New <- filter(values$Total, !(values$Total$package %in% pkgs_db1$name))
  # Fetch info and metrics for each new package, with a progress bar.
  # NOTE(review): 1:nrow(...) is only safe because of the nrow != 0 guard above.
  withProgress(message = "Uploading Packages to DB:", value = 0, {
    if (nrow(values$New) != 0) {
      for (i in 1:nrow(values$New)) {
        incProgress(1 / (nrow(values$New) + 1), detail = values$New[i, 1])
        new_package<-values$New$package[i]
        get_packages_info_from_web(new_package)
        metric_mm_tm_Info_upload_to_DB(new_package)
        metric_cum_Info_upload_to_DB(new_package)
      }
    }
  })
  # Packages still absent after the fetch could not be discovered online.
  pkgs_db2 <- db_fun("SELECT name FROM package")
  values$Undis <-
    filter(values$New,!(values$New$package %in% pkgs_db2$name))
  values$packsDB <- db_fun("SELECT name FROM package")
  updateSelectizeInput(
    session,
    "select_pack",
    choices = c("Select", values$packsDB$name),
    selected = "Select"
  )
  showNotification(id = "show_notification_id", "Upload completed", type = "message")
  values$upload_complete <- "upload_complete"
  # Show the download reports buttons after all the packages have been loaded
  # and the information extracted.
  loggit("INFO", paste("Summary of the uploaded file:",input$uploaded_file$name,
                       "Total Packages:", nrow(values$Total),
                       "New Packages:", nrow(values$New),
                       "Undiscovered Packages:", nrow(values$Undis),
                       "Duplicate Packages:", nrow(values$Dup)), echo = FALSE)
}, ignoreInit = TRUE) # End of the Observe.
# 1. Render Output to download the sample format dataset.
output$upload_format_download <- downloadHandler(
  filename = function() {
    paste("Upload_file_structure", ".csv", sep = "")
  },
  content = function(file) {
    write.csv(read_csv(file.path("Data", "upload_format.csv")), file, row.names = F)
  }
)

# 2. Render Output to show the summary of the uploaded csv into application.
# HTML summary of counts (total / new / undiscovered / duplicate).
output$upload_summary_text <- renderText({
  if (values$upload_complete == "upload_complete") {
    paste(
      "<br><br><hr>",
      "<h3><b>Summary of uploaded package(s) </b></h3>",
      "<h4>Total Packages: ", nrow(values$Total), "</h4>",
      "<h4>New Packages:", nrow(values$New), "</h4>",
      "<h4>Undiscovered Packages:", nrow(values$Undis), "</h4>",
      "<h4>Duplicate Packages:", nrow(values$Dup), "</h4>",
      "<h4><b>Note: The assessment will be performed on the latest version of each package, irrespective of the uploaded version."
    )
  }
}) # End of the render Output.

# 3. Render Output to show the select input to select the choices to display the table.
# Only rendered once an upload has completed; also removes the initial upload UI.
output$upload_summary_select <- renderUI({
  if (values$upload_complete == "upload_complete") {
    removeUI(selector = "#Upload")
    selectInput(
      "total_new_undis_dup",
      "",
      choices = c("All", "New", "Undiscovered", "Duplicates")
    )
  }
}) # End of the render Output.

# 4. Render Output to show the data table of uploaded csv.
# Table contents come from values$Total_New_Undis_Dup, set by the filter observer.
output$total_new_undis_dup_table <- DT::renderDataTable({
  if (values$upload_complete == "upload_complete") {
    datatable(
      values$Total_New_Undis_Dup,
      escape = FALSE,
      class = "cell-border",
      selection = 'none',
      extensions = 'Buttons',
      options = list(
        searching = FALSE,
        sScrollX = "100%",
        lengthChange = FALSE,
        aLengthMenu = list(c(5, 10, 20, 100,-1), list('5', '10', '20', '100', 'All')),
        iDisplayLength = 5
      )
    )
  }
}) # End of the render Output
# End of the Render Output's'.
# View sample dataset: show the expected CSV layout in a modal with a
# download button for the template file.
observeEvent(input$upload_format, {
  dataTableOutput("sampletable")
  showModal(modalDialog(
    output$sampletable <- DT::renderDataTable(
      datatable(
        data(),
        escape = FALSE,
        class = "cell-border",
        editable = FALSE,
        filter = "none",
        selection = 'none',
        extensions = 'Buttons',
        options = list(
          sScrollX = "100%",
          aLengthMenu = list(c(5, 10, 20, 100, -1), list('5', '10', '20', '100', 'All')),
          iDisplayLength = 5,
          dom = 't'
        )
      )
    ),
    downloadButton("upload_format_download", "Download", class = "btn-secondary")
  ))
}) # End of the observe event for sample button.
# End of the upload package Source file for server Module.
|
a6666db178eba40afb7115d29f00dc9a494de633
|
ed17b93b41ebc74b11ef01963dd3374968aa51ec
|
/man/Get.Def.Par.QLearningPersExpPath.Legacy.ThesisOpt.RNN.Rd
|
a868a3a55fa526d0a2dad37636c9e785d366dc05
|
[] |
no_license
|
MartinKies/RLR
|
64fe378ebad8e5a83628efe7bde7f8a628375cb6
|
3b8b1bfd4b4766b1e612de85a96c8d24e95d33e6
|
refs/heads/master
| 2022-03-24T09:32:00.219749
| 2019-12-17T16:23:26
| 2019-12-17T16:23:26
| 108,415,469
| 1
| 1
| null | 2018-03-19T13:48:34
| 2017-10-26T13:30:58
|
R
|
UTF-8
|
R
| false
| true
| 432
|
rd
|
Get.Def.Par.QLearningPersExpPath.Legacy.ThesisOpt.RNN.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/DefaultFunctions.R
\name{Get.Def.Par.QLearningPersExpPath.Legacy.ThesisOpt.RNN}
\alias{Get.Def.Par.QLearningPersExpPath.Legacy.ThesisOpt.RNN}
\title{Default Parameters QLearningPersExpPath Recurrent NN v.0.1.6}
\usage{
Get.Def.Par.QLearningPersExpPath.Legacy.ThesisOpt.RNN()
}
\description{
Default Parameters QLearningPersExpPath Recurrent NN v.0.1.6
}
|
efa9b69c736a39ed8698eba88c4022a883520798
|
7defd18b9d3a85fd52a2353acf536ddca5113146
|
/day-06-r/main.r
|
f5192e3692f3617fc48e21555d11b5f7ad07d844
|
[] |
no_license
|
DenSinH/AOC2020
|
257f1e083016adeee30ed782d59d99c075e8b0b1
|
35a7a31efe0dd3b1611b0d73a7fec17d07bccb1f
|
refs/heads/master
| 2023-02-04T19:41:49.519901
| 2020-12-19T12:48:21
| 2020-12-19T12:48:21
| 303,654,786
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,095
|
r
|
main.r
|
# Advent of Code 2020, day 6: groups of answer lines separated by blanks.
# Part 1 counts questions ANYONE in a group answered; part 2 those EVERYONE did.
con = file("input.txt", "r")
any_yes <- list()   # union of answers seen in the current group
any_count <- 0      # running part-1 total
all_yes <- NULL     # intersection for the current group (NULL marks a fresh group)
all_count <- 0      # running part-2 total
# Split a string into a character vector of its individual characters.
chars <- function(string) {
  strsplit(string, "")[[1]]
}
# Drop duplicate elements, keeping first occurrences.
# (Works for both atomic vectors and lists; preserves names, which is why
# this is not simply unique().)
get_unique <- function(lst) {
  keep <- !duplicated(lst)
  lst[keep]
}
# Stream the file one line at a time; a blank line (or EOF) closes a group.
# NOTE(review): at EOF `answers` is character(0), so `nchar(answers) == 0`
# evaluates to logical(0); relying on `||` with a zero-length LHS is fragile
# (errors on recent R). Testing length(answers) first would be safer — TODO.
repeat {
  answers = readLines(con, n = 1, warn=FALSE)
  if (nchar(answers) == 0 || length(answers) == 0) {
    # new group / EOF
    # add old group values to the count
    any_count <- any_count + length(get_unique(any_yes))
    any_yes <- list()
    all_count <- all_count + length(all_yes)
    all_yes <- NULL
    if (length(answers) == 0) {
      # end of file reached
      break
    }
  }
  else {
    # union
    any_yes <- append(any_yes, chars(answers))
    if (is.null(all_yes)) {
      # new group: don't intersect yet or we'll get the empty list
      all_yes <- chars(answers)
      # remove potential duplicates (not sure if possible)
      all_yes <- get_unique(all_yes)
    }
    else {
      # intersection
      all_yes <- intersect(all_yes, chars(answers))
    }
  }
}
cat('PART 1', any_count, '\n')
cat('PART 2', all_count, '\n')
close(con)
|
7b0d2fcc60931e7074f88248a229162827e84a3d
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/trueskill/examples/trueskill-package.Rd.R
|
79f80597a534e37ef741ed873f8354d7f2a69204
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,282
|
r
|
trueskill-package.Rd.R
|
# Extracted example script for the trueskill package (from its Rd examples).
library(trueskill)
### Name: trueskill-package
### Title: Implementation of the TrueSkill algorithm
### Aliases: trueskill-package trueskill
### Keywords: package trueskill
### ** Examples
# Example 1.
# set default values for BETA, EPSILON and GAMMA where BETA is sigma / 2
# EPSILON is DrawProbability(0.1)
# GAMMA is sigma / 100
parameters <- Parameters$new()
# Four players with the TrueSkill default prior (mu = 25, sigma = 25/3);
# ties share a rank (Bob and Chris both rank 2).
Alice <- Player(rank = 1, skill = Gaussian(mu = 25, sigma = 25 / 3), name = "1")
Bob <- Player(rank = 2, skill = Gaussian(mu = 25, sigma = 25 / 3), name = "2")
Chris <- Player(rank = 2, skill = Gaussian(mu = 25, sigma = 25 / 3), name = "3")
Darren <- Player(rank = 4, skill = Gaussian(mu = 25, sigma = 25 / 3), name = "4")
players <- list(Alice, Bob, Chris, Darren)
# Update all four skills from the ranked outcome.
players <- AdjustPlayers(players, parameters)
PrintList(players)
# NOTE(review): printing Alice$skill here assumes AdjustPlayers mutates the
# player objects in place rather than only returning copies — confirm.
print(Alice$skill)
# Relying on positional arguments looks much cleaner:
Alice <- Player(1, Gaussian(25, 8.3), "Alice")
Bob <- Player(2, Gaussian(25, 8.3), "Bob")
Chris <- Player(2, Gaussian(25, 8.3), "Chris")
Darren <- Player(4, Gaussian(25, 8.3), "Darren")
# Example 2 - see https://gist.github.com/bhoung/5596282
# the example applies trueskill to tennis tournament data
# (runtime is approx 50 secs)
33ff7374afb3f00af61ef47589d1c0d305cb6b32
|
c34d464b69e2d5946c9dd97337b16a0192e42093
|
/8_visualizacion_datos/enriquezHerradorRafael.R
|
d6aad548b74d789144c9f7c0d67566b71d39c5e5
|
[] |
no_license
|
renqherr/utad-peds
|
9a8b633946ec562be5a48c5e8784d9dcd65b7b6e
|
7e020c37fa7f59edc7ed8410c2ba2374726af201
|
refs/heads/master
| 2021-01-10T05:06:04.178828
| 2016-03-21T21:06:32
| 2016-03-21T21:06:32
| 54,330,048
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,628
|
r
|
enriquezHerradorRafael.R
|
#############################################################################
## Rafael Enriquez-Herrador
## U-TAD - PEDS 2ED - 2015
## 8 - Data Visualization
## Assignment
#############################################################################
library(ggplot2)
library(data.table)

# Read the San Francisco crime training set; coordinates are parsed with
# "no.loss" so no precision is discarded before the numeric conversion.
dt.SFCSS <- read.table(file = "datasets/SFCC/train.csv",
                       header = TRUE,
                       sep = ",",
                       stringsAsFactors = FALSE,
                       numerals = c("no.loss"))
options(digits = 16)
dt.SFCSS$X <- as.numeric(dt.SFCSS$X)
dt.SFCSS$Y <- as.numeric(dt.SFCSS$Y)
dt.SFCSS$Dates <- as.Date(dt.SFCSS$Dates)
dt.SFCSS$Category <- as.factor(dt.SFCSS$Category)
dt.SFCSS$DayOfWeek <- as.factor(dt.SFCSS$DayOfWeek)
dt.SFCSS$PdDistrict <- as.factor(dt.SFCSS$PdDistrict)
dt.SFCSS$Resolution <- as.factor(dt.SFCSS$Resolution)
dt.SFCSS <- as.data.table(dt.SFCSS)
dt.SFCSS$Years <- year(dt.SFCSS$Dates)  # data.table::year()
levels(dt.SFCSS$Category)

# Incidents per category, faceted by year.
# (show_guide is deprecated in ggplot2; show.legend is its replacement.)
ggplot(dt.SFCSS, aes(x = Category, fill = Years)) +
  geom_bar(binwidth = 0.75, show.legend = FALSE) +
  coord_flip() + facet_wrap(~Years)

# BUG FIX: the original used `Category == c(...)`, which recycles the
# 5-element vector against the column and keeps only rows whose category
# happens to line up positionally. `%in%` keeps every row in any of the
# five theft/violence categories, which is what was intended.
thefts <- subset(dt.SFCSS, dt.SFCSS$Category %in% c("LARCENY/THEFT",
                                                    "ASSAULT",
                                                    "BURGLARY",
                                                    "ROBBERY",
                                                    "VEHICLE THEFT"))
thefts$Category <- as.factor(as.character(thefts$Category))  # drop unused levels

# Frequency of selected crimes per police district, one line per year.
ggplot(thefts, aes(x = PdDistrict, colour = Years)) +
  geom_freqpoly(aes(group = Years), show.legend = FALSE) +
  theme(axis.text.x = element_text(angle = 45, hjust = 1))

# Incidents that ended in an arrest.
arrests <- subset(thefts, Resolution %in% c("ARREST, BOOKED", "ARREST, CITED"))
arrests$Resolution <- as.factor(as.character(arrests$Resolution))

#install.packages("ggmap")
library(ggmap)
## Fetch the base map.
# NOTE(review): maptype = "watercolor" belongs to the Stamen provider, not
# "osm" — confirm which source is actually intended.
sfmap <- get_map(location = "san francisco",
                 zoom = 12,
                 maptype = "watercolor",
                 source = "osm",
                 color = "bw")
## Initialize the map.
SFMap <- ggmap(sfmap, extent = "device", legend = "topleft")
## Arrest locations 2010 ~ 2015 (binned 2-D counts, coloured by category).
SFMap <- SFMap +
  stat_bin2d(
    data = arrests,
    aes(x = X, y = Y, colour = Category, fill = Category),
    size = .5,
    bins = 100,
    alpha = .4) +
  theme_bw()
## Theft concentration by year 2010 ~ 2015, faceted by category and year.
SFMap <- SFMap +
  stat_density2d(
    data = thefts,
    aes(x = X, y = Y, fill = Category, alpha = ..level..),
    size = 1,
    bins = 10,
    geom = "polygon",
    na.rm = TRUE) +
  facet_grid(Category~Years) +
  theme_bw()
## Draw the map.
SFMap
|
7fbf7b29c6395ef32af6f1e48e184b6ee8a15be5
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/jvnVaR/R/jNoComb.R
|
58c7d042631be3bfac612664901c1d453217b987
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 256
|
r
|
jNoComb.R
|
# Binomial point probability.
#
# Despite the historical name ("number of combinations"), this returns the
# full binomial probability mass choose(n, k) * alpha^k * (1 - alpha)^(n - k),
# i.e. the probability of exactly k successes in n independent Bernoulli
# trials with success probability alpha. The alpha * (1 - alpha) factors are
# folded into the combination loop so choose(n, k) never overflows on its own.
#
# Fix: the loop now uses seq_len(k) instead of 1:k, so k = 0 is handled
# correctly (1:0 iterates over c(1, 0) and divided by zero).
jNoComb <-
function(n, k, alpha){
  C <- 1
  # Accumulates choose(n, k) * alpha^k * (1 - alpha)^k over k iterations...
  for (i in seq_len(k)) {
    C <- C * (n - i + 1) / i * alpha * (1 - alpha)
  }
  # ...then (1 - alpha)^(n - 2k) completes the (1 - alpha)^(n - k) term.
  C <- C * (1 - alpha)^(n - k - k)
  return(C)
}
|
b63e9ea3752531ef8cd0be3b30741affbc001a13
|
c6c0083fb9b334b9cfaee4378d144ea64910200f
|
/Prepocessing/Train-kmer.R
|
f10f3e42767246c0d7dcdf5f2150eb8d8280427e
|
[] |
no_license
|
vicely07/KmerResearch
|
4c026c9f330420785a9cff1ac2b4cbac2c548988
|
255fa0bc64ae5918bc1fe5cd7b47965da07ca1b6
|
refs/heads/master
| 2020-04-02T05:01:41.791563
| 2019-02-15T18:46:06
| 2019-02-15T18:46:06
| 154,048,808
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,346
|
r
|
Train-kmer.R
|
# Build a 4-mer frequency training table from two FASTA files:
# TAD boundary sequences (Class = 1) and inside-TAD sequences (Class = 0).
#
# Biostrings is a Bioconductor package — install once, then just load it
# (the original sourced biocLite and reinstalled on every run; its own
# comment said to run these only the first time).
# source("https://bioconductor.org/biocLite.R")
# biocLite("Biostrings")
library("Biostrings")

# --- Boundary sequences (positive class) ---
dna1 = readDNAStringSet("C:/Users/lykha/Documents/bioinformatic research/dataset/dm3.kc167.tads.boundary.test.fa")
# Per-sequence 4-mer counts (sliding window, step 1); the heavy lifting
# is done in the Biostrings library.
kmercounts = oligonucleotideFrequency(dna1, width = 4, step = 1, with.labels = TRUE)
rownames(kmercounts) <- labels(dna1)
kmercounts = data.frame(kmercounts)  # convert matrix to data frame
dim(kmercounts)
# Class label 1 = boundary. nrow() replaces the hard-coded 1000 so the
# script also works when the FASTA holds a different number of sequences.
kmercounts = cbind(kmercounts, rep(1, nrow(kmercounts)))
names(kmercounts)[length(kmercounts)] <- "Class"  # rename the last column
head(kmercounts)
write.table(kmercounts, file = "C:/Users/lykha/Documents/bioinformatic research/4mer/boundary.test.txt", row.names = TRUE, col.names = TRUE, sep = ',')

# --- Inside-TAD sequences (negative class) ---
dna1 = readDNAStringSet("C:/Users/lykha/Documents/bioinformatic research/dataset/dm3.kc167.tads.inside.test.fa")
kmercounts2 = oligonucleotideFrequency(dna1, width = 4, step = 1, with.labels = TRUE)
rownames(kmercounts2) <- labels(dna1)
kmercounts2 = data.frame(kmercounts2)
# Class label 0 = inside.
kmercounts2 = cbind(kmercounts2, rep(0, nrow(kmercounts2)))
names(kmercounts2)[length(kmercounts2)] <- "Class"
head(kmercounts2)
# BUG FIX: the original wrote `kmercounts` (the boundary table) to
# inside.test.txt; it must be `kmercounts2`.
write.table(kmercounts2, file = "C:/Users/lykha/Documents/bioinformatic research/4mer/inside.test.txt", row.names = TRUE, col.names = TRUE, sep = ',')

# Combined training table: boundary rows followed by inside rows.
dim(kmercounts)
dim(kmercounts2)
train.table = rbind(kmercounts, kmercounts2)
write.table(train.table, file = "C:/Users/lykha/Documents/bioinformatic research/4mer/4mertable.test.txt", row.names = TRUE, col.names = TRUE, sep = ',')
head(train.table)
|
536bd883bb1f2cb1ce2a05519b80ffbfb7e8fbc0
|
eef54759eaf73ad38a790944ad44b3ac1a0a9b1e
|
/3_downstream_analysis/antibiotics/antibiotics.R
|
e89dcba7375211cc49a38a193e932323285c1f2b
|
[] |
no_license
|
Seny-l/MOFA_microbiome
|
99b7aebc2ceee8f51fbbbe2816e92287dec082c7
|
5d2ce7179a4ec6475a5e769a044f4211aae19c3a
|
refs/heads/master
| 2023-03-13T20:31:56.502493
| 2021-02-25T14:32:18
| 2021-02-25T14:32:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,142
|
r
|
antibiotics.R
|
library(ggpubr)
################
## Load model ##
################
# Presumably defines `io` (paths) and the trained `mofa` model object used
# below — confirm against load_model.R.
source("/Users/ricard/mofa/MOFA_microbiome/3_downstream_analysis/load_model.R")
#####################
## Define settings ##
#####################
io$outdir <- paste0(io$basedir,"/results/antibiotics")
# Sample covariates (antibiotic exposures) to correlate with MOFA factors.
antibiotics <- c("Penicillins", "Cephalosporins", "Carbapenems",
                 "Clavulanic_acid", "Macrolides", "Aminoglycosides",
                 "Quinolones", "Co_trimoxazole", "Metronidazole", "Vancomycin")
############################################################
## Plot correlation between antibiotics and factor values ##
############################################################
# Correlation coefficient heatmap (blue-white-red palette), written to PDF.
pdf(sprintf("%s/revision3_Factors_vs_antibiotics_pearson_reverse_colour.pdf",io$outdir), width=7, height=6, useDingbats = F)
correlate_factors_with_covariates(mofa,
                                  covariates = antibiotics,
                                  plot = "r",
                                  col = colorRampPalette(c("blue","white","red"))(200)
)
dev.off()
# Log p-value heatmap, without row/column clustering.
pdf(sprintf("%s/Factors_vs_antibiotics_logpval.pdf",io$outdir), width=7, height=6)
correlate_factors_with_covariates(mofa,
                                  covariates = antibiotics,
                                  plot = "log_pval",
                                  cluster_rows = F, cluster_cols = F
)
dev.off()
|
504433a04ef2090c4e9f332aecaa7fe450c0ca56
|
f54f98a77c9c91ce82a012f89b85eabc2e84a9f1
|
/R/BM_reconciliation/with_SQL_refresh/app.R
|
202bba176c9114b5a5e0b2f6bd06b62c5fd26b87
|
[] |
no_license
|
rdprad/R-projects
|
4a058f15d89316fc84ff918a93da118145c36e1a
|
53510b572ee4f6d611f32513badf67350a0a03a5
|
refs/heads/master
| 2020-03-24T10:11:52.926935
| 2018-09-15T00:12:16
| 2018-09-15T00:12:16
| 142,649,826
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,719
|
r
|
app.R
|
library(ggplot2)
library(dplyr)
library(shiny)
library(shinycssloaders)
library(knitr)
library(shinythemes)
library(here)
library(ggiraph)
library(shinyWidgets)
library(plotly)
# Block-model table produced by refreshSQL.R (see the refresh button below).
BM_df <- readRDS(here::here("objects", "BM_df.rds"))

# UI: sidebar holds the SQL refresh button (with progress bar) and the
# phase / elevation / grade-item selectors; the main panel shows the two
# block models side by side, their difference, and grade-tonnage curves.
ui <- fluidPage( theme = shinytheme("lumen"),
  titlePanel("Block Model Spatial Reconciliation"),
  sidebarLayout(
    sidebarPanel(
      actionButton("refreshSQL", "Press to refresh SQL con, and wait until bar below shows 100%"),
      progressBar(id = "pb2", value = 0, total = 100, title = "", display_pct = TRUE),
      selectInput(inputId = "phaseselect", label = strong("Phase"),
                  choices = levels(factor(unique(BM_df$PhaseID))),
                  selected = "Phase 4A"),
      # step = 15: bench heights appear to be 15 m apart — confirm.
      sliderInput("elevationselect", label = h3("Elevation"), min(BM_df$elevation),
                  max(BM_df$elevation), value = 960, step=15),
      radioButtons("gradeitemselect", label = h3("Grade Item Selection"),
                   choices = list("Copper" = "CU", "Gold" = "AU"),
                   selected = NULL),
      tableOutput("table"),
      width=4
    ),
    mainPanel(
      fluidPage(
        fluidRow(
          column(width = 12, class = "well",
                 h4("Side by Side Block Model Comparison"),
                 fluidRow(
                   column(width = 6, h5("Resource Model"),
                          ggiraphOutput("Resource_plot", height = 450) %>% withSpinner(type = 8)),
                   column(width = 6, h5("Ore Control Model"),
                          ggiraphOutput("OC_plot", height = 450) %>% withSpinner(type = 8)),
                   column(width = 6, h5("BM diff. (OC - RSC)"),
                          ggiraphOutput("diff_plot", height = 450) %>% withSpinner(type = 8))
                 )),
          column(width = 12, class = "well",
                 fluidRow(
                   column(width = 6, h5("Grade Tonnage Curve"),
                          plotlyOutput("GTcurve_plot", height = 450) %>% withSpinner(type = 8)),
                   column(width = 6, h5("Grade Tonnage Curve"),
                          plotlyOutput("GTcurve_metal", height = 450) %>% withSpinner(type = 8))
                 )))
      )
    )
  )
)
server <- function(input, output, session) {
observeEvent(input$refreshSQL,{
source(here::here("refreshSQL.R"))
for (i in 1:100) {
updateProgressBar(
session = session,
id = "pb2",
value = i, total = 100,
title = paste("Process", trunc(i/10))
)
Sys.sleep(0.1)
}
BM_df <- readRDS(here::here("objects", "BM_df.rds"))
})
# Subset data
selected_blocks <- reactive({
req(input$phaseselect, input$elevationselect, input$gradeitemselect)
BM_df %>%
filter(PhaseID == input$phaseselect,
elevation == input$elevationselect) %>%
mutate(grade_sel = input$gradeitemselect,
diff = ifelse(grade_sel == "CU",BCU-CU,
ifelse(grade_sel=="AU",BAU-AU,
ifelse(grade_sel=="AG",BAG-AG,
ifelse(grade_sel=="AS",BAS-AS,NA)))),
RSCgrade = ifelse(grade_sel=="CU",CU,
ifelse(grade_sel=="AU",AU,
ifelse(grade_sel=="AG",AG,
ifelse(grade_sel=="AS",AS,NA)))),
OCgrade = ifelse(grade_sel=="CU",BCU,
ifelse(grade_sel=="AU",BAU,
ifelse(grade_sel=="AG",BAG,
ifelse(grade_sel=="AS",BAS,NA)))),
RSCbin = ifelse(RSCgrade>=1.0,1.0,
ifelse(RSCgrade>=0.9,0.9,
ifelse(RSCgrade>=0.8,0.8,
ifelse(RSCgrade>=0.7,0.7,
ifelse(RSCgrade>=0.6,0.6,
ifelse(RSCgrade>=0.5,0.5,
ifelse(RSCgrade>=0.4,0.4,
ifelse(RSCgrade>=0.3,0.3,
ifelse(RSCgrade>=0.2,0.2,
ifelse(RSCgrade>=0.1,0.1,0)))))))))),
OCbin = ifelse(OCgrade>=1.0,1.0,
ifelse(OCgrade>=0.9,0.9,
ifelse(OCgrade>=0.8,0.8,
ifelse(OCgrade>=0.7,0.7,
ifelse(OCgrade>=0.6,0.6,
ifelse(OCgrade>=0.5,0.5,
ifelse(OCgrade>=0.4,0.4,
ifelse(OCgrade>=0.3,0.3,
ifelse(OCgrade>=0.2,0.2,
ifelse(OCgrade>=0.1,0.1,0)))))))))),
RSClegendbin = ifelse(RSCgrade>=1.0,"1.0 - max",
ifelse(RSCgrade>=0.8,"0.8 - 1.0",
ifelse(RSCgrade>=0.6,"0.6 - 0.8",
ifelse(RSCgrade>=0.4,"0.4 - 0.6",
ifelse(RSCgrade>=0.3,"0.3 - 0.4",
ifelse(RSCgrade>=0.2,"0.2 - 0.3","0.0 - 0.2")))))),
OClegendbin = ifelse(OCgrade>=1.0,"1.0 - max",
ifelse(OCgrade>=0.8,"0.8 - 1.0",
ifelse(OCgrade>=0.6,"0.6 - 0.8",
ifelse(OCgrade>=0.4,"0.4 - 0.6",
ifelse(OCgrade>=0.3,"0.3 - 0.4",
ifelse(OCgrade>=0.2,"0.2 - 0.3","0.0 - 0.2")))))),
ttip1 = paste0("grade = ",round(RSCgrade,2)),
ttip2 = paste0("grade = ",round(OCgrade,2)),
ttip3 = paste0("grade diff = ",round(diff,2))
)
})
#plot
output$Resource_plot <- renderggiraph({
a <-ggplot(selected_blocks(),aes(x=xcentre, y=ycentre))+
geom_tile_interactive(aes(fill= RSClegendbin, tooltip = ttip1))+
scale_fill_manual(values = c("grey", "green", "yellow", "orange", "red", "darkred","purple"))+
coord_equal(ratio=1)+
theme_bw()+
labs(fill = "LEGEND")
ggiraph(code = {print(a)}, tooltip_opacity = 0.5 , selection_type = "single")
})
output$OC_plot <- renderggiraph({
b <- ggplot(selected_blocks(),aes(x=xcentre, y=ycentre))+
geom_tile_interactive(aes(fill= OClegendbin, tooltip = ttip2))+
scale_fill_manual(values = c("grey", "green", "yellow", "orange", "red", "darkred","purple"))+
coord_equal(ratio=1)+
theme_bw()+
labs(fill = "LEGEND")
ggiraph(code = {print(b)}, tooltip_opacity = 0.5 , selection_type = "single")
})
output$diff_plot <- renderggiraph({
c <- ggplot(selected_blocks(),aes(x=xcentre, y=ycentre))+
geom_tile_interactive(aes(fill= diff, tooltip = ttip3))+
scale_fill_gradientn(colours = c("darkred", "red", "grey", "green", "darkgreen"),
values = scales::rescale(c(-1.5, -0.5, -0.2, 0, 0.2, 0.5, 1.5)),
guide = "colorbar", limits=c(-1.5,1.5))+
coord_equal(ratio=1)+
theme_bw()+
labs(fill = "DIFFERENCE")
ggiraph(code = {print(c)}, tooltip_opacity = 0.5 , selection_type = "single")
})
  # Grade-tonnage table: for each 0.1 cut-off bin, cumulative tonnes,
  # cumulative average grade and contained metal, for both the resource
  # model (RSC_*) and the ore-control model (OC_*), merged by bin.
  # Block mass = 10*10*15*BSG, i.e. a fixed 10 x 10 x 15 block volume
  # times bulk specific gravity -- presumably metres; confirm.
  # NOTE(review): only CU and AU are handled here although selected_blocks()
  # also supports AG/AS. Metal conversion: CU -> /100000 (grade apparently
  # in %), AU -> /31.10348/1000 (g/t to koz) -- verify units with the owner.
  GT_table <- reactive({
    req(input$phaseselect, input$elevationselect, input$gradeitemselect)
    merge(
      # resource-model side, cumulated from the highest bin downwards
      selected_blocks() %>%
        group_by(elevation,RSCbin) %>%
        mutate(massRSC = 10*10*15*BSG,
               gradeitemRSC = ifelse(input$gradeitemselect=="CU",CU,AU)) %>%
        summarise(RSC_tonnes = sum(massRSC),
                  RSC_metal = sum(gradeitemRSC*massRSC)) %>%
        arrange(desc(RSCbin)) %>%
        mutate(RSC_cumtonnes = cumsum(RSC_tonnes),
               RSC_ave_grade = cumsum(RSC_metal)/RSC_cumtonnes,
               RSC_select = ifelse(input$gradeitemselect=="CU","CU","AU"),
               RSC_metal = ifelse(RSC_select=="CU", round(RSC_cumtonnes*RSC_ave_grade/100000,1),
                                  round(RSC_cumtonnes*RSC_ave_grade/31.10348/1000,1))) %>%
        arrange(RSCbin) %>%
        rename(grade_bin=RSCbin),
      # ore-control side, same aggregation on the B* grades
      selected_blocks() %>%
        group_by(elevation,OCbin) %>%
        mutate(massOC = 10*10*15*BSG,
               gradeitemOC = ifelse(input$gradeitemselect=="CU",BCU,BAU)) %>%
        summarise(OC_tonnes = sum(massOC),
                  OC_metal = sum(gradeitemOC*massOC)) %>%
        arrange(desc(OCbin)) %>%
        mutate(OC_cumtonnes = cumsum(OC_tonnes),
               OC_ave_grade = cumsum(OC_metal)/OC_cumtonnes,
               OC_select = ifelse(input$gradeitemselect=="CU","CU","AU"),
               OC_metal = ifelse(OC_select=="CU", round(OC_cumtonnes*OC_ave_grade/100000,1),
                                 round(OC_cumtonnes*OC_ave_grade/31.10348/1000,1))) %>%
        arrange(OCbin) %>%
        rename(grade_bin=OCbin),
      by=c("elevation","grade_bin"))
  })
#---
# output$GTcurve_plot <- renderPlot({
# ggplot(data= GT_table(), aes(x=grade_bin))+
# geom_line(aes(y=OC_cumtonnes, col="OC tonnes (solid)"), linetype = "solid")+
# geom_line(aes(y=RSC_cumtonnes, col="Resource tonnes (dash)"), linetype = "dashed")+
# geom_line(aes(y=OC_ave_grade*max(GT_table()$RSC_cumtonnes), col="OC grades (solid)"), linetype = "solid")+
# geom_line(aes(y=RSC_ave_grade*max(GT_table()$RSC_cumtonnes), col="Resource grades (dash)"), linetype = "dashed")+
# scale_color_manual(values=c("red","blue","red","blue"))+
# scale_x_continuous(breaks = seq(min(GT_table()$grade_bin), max(GT_table()$grade_bin), by=0.1))+
# scale_y_continuous(labels = scales::comma, breaks = seq(0,max(GT_table()$RSC_cumtonnes),by=500000),
# sec.axis = sec_axis(~./(max(GT_table()$RSC_cumtonnes)), name = "Average Grade",
# breaks = seq(0,round(max(GT_table()$RSC_ave_grade),1),by=0.1)))+
# labs(x="cut-off head grade",
# y="Tonnage",
# col="LEGENDS")+
# theme_bw()+
# theme(legend.position = "bottom")
# })
#---
#---
output$GTcurve_plot <- renderPlotly({
plot_ly() %>%
add_lines(x = GT_table()$grade_bin, y = GT_table()$OC_cumtonnes, name = "OC tonnes", line = list(color = 'rgb(22, 96, 167)')) %>%
add_lines(x = GT_table()$grade_bin, y = GT_table()$RSC_cumtonnes, name = "Resource tonnes", line = list(color = 'rgb(22, 96, 167)', dash = 'dot')) %>%
add_lines(x = GT_table()$grade_bin, y = GT_table()$OC_ave_grade, name = "OC grades", yaxis = "y2", line = list(color = 'rgb(205, 12, 24)')) %>%
add_lines(x = GT_table()$grade_bin, y = GT_table()$RSC_ave_grade, name = "Resource grades", yaxis = "y2", line = list(color = 'rgb(205, 12, 24)', dash = 'dot')) %>%
layout(legend = list(orientation = 'h')) %>%
config(displayModeBar = F) %>%
layout(xaxis=list(fixedrange=TRUE,
autotick = FALSE,
ticks = "outside",
tick0 = 0,
dtick = 0.1,
showspikes = TRUE)) %>%
layout(yaxis=list(fixedrange=TRUE,
showspikes = TRUE,
tickformat = ",.0f")) %>%
layout(yaxis2=list(fixedrange=TRUE,
autotick = FALSE,
tick0 = 0,
dtick = 0.1,
tickformat = ".2f",
overlaying = "y",
side = "right",
showspikes = TRUE))
})
#---
#---
output$GTcurve_metal <- renderPlotly({
plot_ly() %>%
add_lines(x = GT_table()$grade_bin, y = GT_table()$OC_metal, name = "OC metal", line = list(color = 'rgb(22, 96, 167)')) %>%
add_lines(x = GT_table()$grade_bin, y = GT_table()$RSC_metal, name = "Resource metal", line = list(color = 'rgb(205, 12, 24)', dash = 'dot')) %>%
layout(legend = list(orientation = 'h')) %>%
config(displayModeBar = F) %>%
layout(xaxis=list(fixedrange=TRUE,
autotick = FALSE,
ticks = "outside",
tick0 = 0,
dtick = 0.1,
showspikes = TRUE)) %>%
layout(yaxis=list(fixedrange=TRUE,
showspikes = TRUE,
tickformat = ",.2f"))
})
#---
  # HTML summary table of the grade-tonnage data.
  # NOTE(review): output$table is assigned a bare function rather than a
  # render*() call -- this is the kable/Shiny pattern, but confirm the UI
  # side uses the matching output function (e.g. tableOutput).
  output$table <- function(){
    GT_table() %>%
      mutate(RSC_tonnes = round(RSC_cumtonnes,0),
             OC_tonnes = round(OC_cumtonnes,0)) %>%
      select(grade_bin, RSC_tonnes, RSC_ave_grade, OC_tonnes, OC_ave_grade, RSC_metal, OC_metal) %>%
      kable("html",format.args = list(decimal.mark = ".", big.mark = ","), digits = 3)
  }
}
shinyApp(ui, server)
|
114ba59ed5d5fbd3f3cbb61ef4e656e96945e423
|
8210e7cd4bc1ec62bdfcbb0ab6388b660e9d167a
|
/Pre-Lab-3.R
|
88c13f32a0981cdc8bdee1206cd2857e0bae92fe
|
[] |
no_license
|
dr-noo/UTAustinX-UT.7.10x-1T2016
|
0b1ecef87a2f7bbb9e935d2f3b6695ca5b556484
|
d24c87be3106907343a71c7c93b8be6254a5a87b
|
refs/heads/master
| 2016-08-11T10:16:05.392258
| 2016-03-01T09:24:36
| 2016-03-01T09:24:36
| 51,247,050
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,867
|
r
|
Pre-Lab-3.R
|
library(SDSFoundations)
bull <- BullRiders

# How many of the first 10 riders in the dataset have been pro for 10 years or more?
bull_first_10 <- bull[1:10, ]
bull_first_10_pro_10_more <- bull_first_10[bull_first_10$YearsPro >= 10, ]
bull_first_10_pro_10_more

# Subset for riders that participated in at least one event in 2013
new_bull <- bull[bull$Events13 > 0, ]

# Visualize and describe the first variable of interest
hist(new_bull$Rides13)
fivenum(new_bull$Rides13)
mean(new_bull$Rides13)
sd(new_bull$Rides13)

# Visualize and describe the second variable of interest
hist(new_bull$Top10_13)
fivenum(new_bull$Top10_13)
mean(new_bull$Top10_13)
sd(new_bull$Top10_13)

# Create a scatterplot
plot(new_bull$Rides13, new_bull$Top10_13)
# Add line of best fit (response ~ predictor matches the plot's y ~ x)
abline(lm(new_bull$Top10_13 ~ new_bull$Rides13))

# Calculate the correlation coefficient
cor(new_bull$Rides13, new_bull$Top10_13)

# Create a correlation matrix
vars <- c("Top10_13", "Rides13")
cor(new_bull[, vars])

plot(bull$Events12, bull$BuckOuts12)
# Fix: with BuckOuts12 on the y axis the fitted line must be lm(y ~ x),
# i.e. lm(BuckOuts12 ~ Events12). The original also called
# abline(lm(Events12 ~ BuckOuts12)) first, which draws a line with the
# axes swapped; that incorrect call has been removed.
abline(lm(bull$BuckOuts12 ~ bull$Events12))
# -------------
# NOTE(review): everything below is a verbatim re-run of the analysis above;
# the duplication appears unintentional -- consider removing one copy.
library(SDSFoundations)
bull <- BullRiders
#Subset for riders that participated in at least one event in 2013
new_bull <- bull[bull$Events13 > 0 ,]
# Visualize and describe the first variable of interest
hist(new_bull$Rides13)
fivenum(new_bull$Rides13)
mean(new_bull$Rides13)
sd(new_bull$Rides13)
# Visualize and describe the second variable of interest
hist(new_bull$Top10_13)
fivenum(new_bull$Top10_13)
mean(new_bull$Top10_13)
sd(new_bull$Top10_13)
# Create a scatterplot
plot(new_bull$Rides13,new_bull$Top10_13)
# Add line of best fit
abline(lm(new_bull$Top10_13~new_bull$Rides13))
# Calculate the correlation coefficient
cor(new_bull$Rides13,new_bull$Top10_13)
# Create a correlation matrix
vars <- c("Top10_13", "Rides13")
cor(new_bull[,vars])
|
d8bac311b375a5446c91fbe0ff46270606c170e2
|
84e410eb828ca190804d256ba93b446e51739f30
|
/R/check_password.R
|
138ae5a041c877f73b26abea1d9853c9aac01d0a
|
[] |
no_license
|
jumpingrivers/zxcvbnR
|
f5afca825231a8aefb3c8675f7a1d4788755b309
|
59fa21b5ef3155af93b12a7ebeb8848905c4d760
|
refs/heads/master
| 2021-01-18T15:10:22.873263
| 2017-08-15T13:28:38
| 2017-08-15T13:28:38
| 100,377,726
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,383
|
r
|
check_password.R
|
#' Check password
#'
#' zxcvbn is a password strength estimator inspired by password crackers.
#' Through pattern matching and conservative estimation, it recognizes and weighs 30k common passwords,
#' common names and surnames according to US census data, popular English words from
#' Wikipedia and US television and movies, and other common patterns like dates,
#' repeats (aaa), sequences (abcd), keyboard patterns (qwertyuiop), and l33t speak.
#'
#' The main result is returned as an invisible list.
#'
#' This package provides an R interface to the underlying Javascript code. See
#' the [GitHub](https://github.com/dropbox/zxcvbn) homepage for further details about the JS library.
#' @details
#' The function returns a list with the following components
#' * guesses: estimated guesses needed to crack password
#' * guesses_log10: order of magnitude of guesses
#' * crack_times_seconds : dictionary of back-of-the-envelope crack time estimations, in seconds, based on a few scenarios:
#' - online_throttling_100_per_hour: online attack on a service that ratelimits password auth attempts
#' - online_no_throttling_10_per_second: online attack on a service that doesn't ratelimit, or
#' where an attacker has outsmarted ratelimiting
#' - offline_slow_hashing_1e4_per_second: offline attack.
#' Assumes multiple attackers, proper user-unique salting, and a slow hash function
#' w/ moderate work factor, such as bcrypt, scrypt, PBKDF2
#' - offline_fast_hashing_1e10_per_second: offline attack with user-unique salting but a fast hash function like SHA-1, SHA-256 or MD5.
#' A wide range of reasonable numbers anywhere from one billion - one trillion
#' guesses per second, depending on number of cores and machines. ballparking at 10B/sec.
#'
#' * crack_times_display: same keys as crack_times_seconds, with friendlier display string values:
#' "less than a second", "3 hours", "centuries", etc.
#' * score: Integer from 0-4 (useful for implementing a strength bar)
#' - 0 too guessable: risky password. (guesses < 10^3)
#' - 1 very guessable: protection from throttled online attacks. (guesses < 10^6)
#' - 2 somewhat guessable: protection from unthrottled online attacks. (guesses < 10^8)
#' - 3 safely unguessable: moderate protection from offline slow-hash scenario. (guesses < 10^10)
#' - 4 very unguessable: strong protection from offline slow-hash scenario. (guesses >= 10^10)
#' * feedback: verbal feedback to help choose better passwords. set when score <= 2.
#' - feedback.warning: explains what's wrong, eg. 'this is a top-10 common password'.
#' Sometimes an empty string
#' - feedback.suggestions a possibly-empty list of suggestions to help choose a less
#' guessable password. eg. 'Add another word or two'
#' * sequence: the list of patterns that zxcvbn based the guess calculation on.
#' * calc_time: how long it took zxcvbn to calculate an answer, in milliseconds.
#'
#' @param password Character string to assess
#' @seealso https://github.com/dropbox/zxcvbn and
#' this [blog post](https://blogs.dropbox.com/tech/2012/04/zxcvbn-realistic-password-strength-estimation/)
#' @export
#' @examples
#' res = check_password("ABC")
#' res$feedback
#'
check_password <- function(password) {
  # Style fix: use <- for assignment (was =), per tidyverse style.
  # `ct` is a V8 context holding the bundled zxcvbn JS -- created elsewhere
  # in the package, presumably at load time; confirm in .onLoad.
  res <- ct$call("zxcvbn", password)
  # One-line human-readable summary of the 0-4 strength score.
  message(nice_message(res$score))
  # Return the full zxcvbn result invisibly so callers can capture it.
  invisible(res)
}
|
a4e6a9227b778de22a61219d0486318ff3b6f3f8
|
27bfd7b5e4bd0db3208473603f9103d190d74b7c
|
/man/convertRDI.Rd
|
46f0804be466de8283bddbf363450f47d948902b
|
[] |
no_license
|
cran/rdi
|
4d5cf7f6ed71ed0e3254a973301d35ca2fc721cc
|
42bf39573e7b127eb5fae8777d634ee6327bbe6d
|
refs/heads/master
| 2020-03-16T01:51:26.203579
| 2018-05-07T10:14:21
| 2018-05-07T10:14:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,070
|
rd
|
convertRDI.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rdi_functions.R
\name{convertRDI}
\alias{convertRDI}
\title{Convert RDI measures}
\usage{
convertRDI(d, models = NULL, calcSD = FALSE)
}
\arguments{
\item{d}{Distance matrix (as produced by \link{calcRDI}), or a vector of
distances.}
\item{models}{Set of RDI models, as produced by \link{rdiModel}. If \code{NULL},
RDI models will be calculated based on the attributes in the distance
matrix.}
\item{calcSD}{logical; if \code{TRUE}, standard deviations for each estimate will be returned.}
}
\value{
A list containing either one or two features:
\tabular{rl}{
\emph{pred} \tab The converted predictions; same length as \code{d}. \cr
\emph{sd} \tab If \code{calcSD=TRUE}, a set of standard deviation estimates for each
prediction.
}
}
\description{
Method to convert RDI values to fold/percent change
}
\details{
The convertRDI function works by first generating a model for the RDI values at a given
repertoire size and feature count using the \link{rdiModel} function (see that
method's help file for more details). The RDI models predict the average
log-fold/percent change across a range of RDI values, and allows us to convert RDI to
a more stable and interpretable metric.
In addition to the average log-fold or percent change value, \link{rdiModel}
also generates models for the standard deviation at each RDI value. This is useful for
understanding the confidence intervals around the fold change estimate.
}
\examples{
#create genes
genes = sample(letters, 10000, replace=TRUE)
#create sequence annotations
seqAnnot = data.frame(donor = sample(1:4, 10000, replace=TRUE))
#calculate RDI
d = rdi(genes, seqAnnot)
##convert RDI to actual 'lfc' estimates and compare
dtrue = convertRDI(d)$pred
plot(d, dtrue)
##look at SD ranges around lfc estimates
dtrue = convertRDI(d, calcSD=TRUE)
##plot using ggplot2
library(ggplot2)
x = as.numeric(d)
y = as.numeric(dtrue$pred)
sd = as.numeric(dtrue$sd)
qplot(x,y)+geom_errorbar(aes(x=x, ymin=y-sd, ymax=y+sd))
}
|
19be386b5c4cf439d612209473c1056889fcd6ee
|
641a7c4b9773bf11428b23e25d6ad52d67721527
|
/inst/seedlings.r
|
cab7246eb4bc27ae47359628b3c26dfd223fb9d4
|
[
"MIT"
] |
permissive
|
mtalluto/tree_seedling_metamodel
|
cffadc1ae534c7a29cf28f7fdac7927785e0880d
|
05f6710b55e4059737ba9703e2882d4aab2101b9
|
refs/heads/master
| 2021-01-23T11:32:28.007604
| 2019-04-05T12:24:40
| 2019-04-05T12:24:40
| 93,144,591
| 0
| 1
|
MIT
| 2019-04-05T09:37:31
| 2017-06-02T08:27:56
|
R
|
UTF-8
|
R
| false
| false
| 2,688
|
r
|
seedlings.r
|
## Main script for running the model integration examples
library("treeSeedlingMetamodelData")
library("treeSeedlingMetamodel")
library(LaplacesDemon)
data(seedlings, envir = environment())
sp <- seedlings$species[1]

# do the following for each species
naiveDat <- naive_ld_dat(seedlings$sdm_adults[[sp]])
# note that this is an EXAMPLE - it is impossible to do this automatically, because fitting LD models
# is done by trial and error
# NOTE(review): the adaptive calls below pass `specs=` (lower case) while the
# LaplacesDemon formal is `Specs` -- confirm these are matched as intended.
naiveMod <- LaplacesDemon::LaplacesDemon(naive_lp, naiveDat, LaplacesDemon::GIV(naive_lp, naiveDat, PGF=TRUE),
        Algorithm = "AM", specs=list(Adaptive = 50, Periodicity = 50), Iterations = 100)
# for some guidance, use consort()
Consort(naiveMod)
# set initial values and re-fit with a non-adaptive algorithm
Initial.Values <- as.initial.values(naiveMod)
naiveMod <- LaplacesDemon::LaplacesDemon(naive_lp, naiveDat, Initial.Values, Covar=naiveMod$Covar, Iterations=100000,
        Status=5000, Algorithm="RWM")

## survival model
survDat <- survival_ld_dat(seedlings$survival[[sp]])
survMod <- LaplacesDemon::LaplacesDemon(survival_lp, survDat, LaplacesDemon::GIV(survival_lp, survDat, PGF=TRUE),
        Algorithm = "AM", specs=list(Adaptive = 50, Periodicity = 50), Iterations = 100)
# set initial values and re-fit with a non-adaptive algorithm
Initial.Values <- as.initial.values(survMod)
survMod <- LaplacesDemon::LaplacesDemon(survival_lp, survDat, Initial.Values, Covar=survMod$Covar, Iterations=100000,
        Status=5000, Algorithm="RWM")

## recruitment model
popDat <- recruitment_ld_dat(seedlings$population[[sp]])
popMod <- LaplacesDemon::LaplacesDemon(recruitment_lp, popDat, LaplacesDemon::GIV(recruitment_lp, popDat, PGF=TRUE),
        Algorithm = "AM", specs=list(Adaptive = 50, Periodicity = 50), Iterations = 100, Status=1000)
# set initial values and re-fit with a non-adaptive algorithm
Initial.Values <- as.initial.values(popMod)
popMod <- LaplacesDemon::LaplacesDemon(recruitment_lp, popDat, Initial.Values, Covar=popMod$Covar, Iterations=100000,
        Status=5000, Algorithm="RWM")

# integrated model
intDat <- integrated_ld_dat(seedlings$sdm_adults[[sp]], seedlings$survival[[sp]], seedlings$population[[sp]])
intMod <- LaplacesDemon::LaplacesDemon(integrated_lp, intDat, LaplacesDemon::GIV(integrated_lp, intDat, PGF=TRUE),
        Algorithm = "AM", specs=list(Adaptive = 50, Periodicity = 50), Iterations = 100)
# set initial values and re-fit with a non-adaptive algorithm
Initial.Values <- as.initial.values(intMod)
# Fix: the original wrote B=Null ("object 'Null' not found" at runtime);
# the intended value is NULL.
intMod <- LaplacesDemon::LaplacesDemon(integrated_lp, intDat, Initial.Values, Covar=intMod$Covar, Iterations=100000,
        Status=5000, Algorithm="AFSS", Specs=list(A=Inf, B=NULL, m=100, n=0, w=1))
|
7ccd16663e97a6e8518fbefb6e5c17cfa9b879eb
|
e93d573e0a6d8fdf22bab955684c66cbda6cd1dd
|
/R/fsim_mixed_warped_curves.R
|
4851e5a05ed750c892f87c58120271326db83639
|
[] |
no_license
|
eric-f/mixedWarpedCurves2
|
ecfe08ee25e8fad62fa3923cd08b3a0c5f8e92ef
|
f8c496beb96f2f8520859adc23d35609972254be
|
refs/heads/master
| 2020-03-14T10:02:00.327450
| 2018-05-16T19:17:33
| 2018-05-16T19:17:33
| 131,557,746
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,486
|
r
|
fsim_mixed_warped_curves.R
|
#' Model-based curve registration and clustering with unknown base shape
#'
#' This function fits the model \deqn{Y_i(t) = a_{i,sh} + a_{i,sc} f \circ h_i(y) + error}
#' by maximum likelihood via a stochastic approximation EM algorithm. In the model,
#' $f$ is a B-spline representing a common shape whereas \eqn{h_i:[0, 1] \to [0, 1]}
#' is a monotone B-spline representing a random time transformation, referred to as a
#' warping function or registration function. The vector of (warping) coefficients of $h_i$
#' follows a Dirichlet distributions. The function also supports a finite mixture of
#' Dirichlet distributions for the warping coefficient for simultaneous curve clustering and
#' registration.
#' @param y vector of observed curves
#' @param obs_time vector of the observation times
#' @param curve_id vector of curve IDs
#' @param init_clust vector of inital clutering label with length equals to the number of curves
#' @param n_clust integer, number of clusters (set to 1 if not clustering)
#' @param saem_control a list of values to control the MCMC and stochastic approximation. See control_saem().
#' @param trace if TRUE, tracing information of the estimated parameters are printed
#' @return \describe{
#' \item{pars}{List of estimated or fixed model parameters
#' \describe{
#' \item{alpha}{Estimated B-spline basis coefficient for the common base shape}
#' \item{mu_a}{Fixed mean vector for the Gaussian amplitude effects}
#' \item{sigma2}{Estimated error variance}
#' \item{sigma2_a}{Estimated variance-covariance matrix of the Gaussian amplitude effect}
#' \item{p_clusters}{Estimated mixing proportion}
#' \item{kappa_id}{Fixed Dirichlet mean vector of the first component where the mean warping function is the identity function}
#' \item{tau1}{Overall concentration of the Dirichlet distribution for the first component}
#' \item{kappa_clusters}{Estimated Dirichlet concentration parameters}
#' }
#' }
#' \item{curves}{List of curves with stochastic approximation to sufficient statistics, each curves has the following components
#' \describe{
#' \item{curve_id}{Curve ID. (Caution: this might be different from the inputted curve id, if the original id's is not a sequence from 1 to n.}
#' \item{x}{Inputted observation time}
#' \item{y}{Inputted observed curves}
#' \item{y}{Inputted or random initial cluster label}
#' \item{warped_x}{Estimated warped time}
#' \item{fitted_y}{Fitted curve}
#' \item{sapprox_residual_sum_of_squares}{Stochastic approximation to residual sum of squares}
#' \item{sapprox_a}{Stochastic approximation to the conditional expectation of amplitude effects given data}
#' \item{sapprox_w}{Stochastic approximation to the conditional expectation of warping coefficients given data}
#' \item{sapprox_log_dw}{Stochastic approximation to sufficient statistics for SAEM}
#' \item{sapprox_cluster_membership}{Stochastic approximation to predictive probabilities of cluster membership}
#' }
#' }
#' \item{aux}{List of auxiliary information and intermediate variables for MCMC-SAEM}
#' \item{pars_track}{Sequence of estimated parameters for convergence diagnostics}
#' \item{se_info}{Not currently implemented}
#' \item{y_scaling_factor}{Maximum absolute value of the observed curve}
#' }
#' @seealso See https://github.com/eric-f/mixedWarpedCurves2 for examples
#' @references Fu, E. and Heckman, N. (2017). Model-based curve registration via stochastic approximation EM algorithm. https://arxiv.org/abs/1712.07265
#' @useDynLib mixedWarpedCurves2
#' @importFrom splines splineDesign
#' @export
fsim_mixed_warped_curves <- function(y,
obs_time,
curve_id,
init_clust=NULL,
n_clust=1,
saem_control = control_saem(),
trace=FALSE){
## --------------------------------------------------------------------------
## Scale reponses and pack data into a data.frame
## --------------------------------------------------------------------------
if(min(obs_time)!=0 | max(obs_time)!=1){
stop("observation time needs to be within 0 and 1")
}
y_scaling_factor <- max(abs(y))
data <- data.frame(y = y / y_scaling_factor,
x = obs_time,
id = curve_id)
## --------------------------------------------------------------------------
## Initialize model parameters ----------------------------------------------
## --------------------------------------------------------------------------
pars <- NULL
# mu (fixed)
pars$mu <- c(0, 1)
# kappa (<-> identity)
tmp_y <- tmp_x <- seq(0, 1, length=1000)
h_knots <- sort(c(rep(range(saem_control$h_knots), saem_control$h_order-1), saem_control$h_knots))
bhx <- splineDesign(h_knots, tmp_x, saem_control$h_order, rep(0, 1000))
warping_ols <- lm(tmp_y ~ bhx - 1, data=data)
pars$kappa <- diff(unname(warping_ols$coefficients))
pars$kappa <- pars$kappa / sum(pars$kappa)
# f and sigma2
f_knots <- sort(c(rep(range(saem_control$f_knots), saem_control$f_order-1), saem_control$f_knots))
bfx <- splineDesign(f_knots, data$x, saem_control$f_order, rep(0, length(data$x)))
shape_ols <- lm(y ~ bfx - 1, data=data)
pars$alpha <- unname(shape_ols$coefficients)
pars$sigma2 <- var(shape_ols$residuals)
# big_sigma to be initialized in C++...
# tau_1 and (tau_2, kappa_2), ..., (tau_M, kappa_M)
# to be initialized in C++...
pars$num_clusters <- n_clust
## --------------------------------------------------------------------------
## Initialize common auxiliary objects --------------------------------------
## --------------------------------------------------------------------------
saem_control$n_total = length(curve_id)
saem_control$n_curve = length(unique(curve_id))
## --------------------------------------------------------------------------
## Convert data to list of curves -------------------------------------------
## --------------------------------------------------------------------------
data_lst <- split(data, data$id)
n_curve <- length(data_lst)
if(is.null(init_clust)){
print("Randomizing initial cluster labels...")
init_clust <- sample(n_clust, n_curve, replace=TRUE)
}
else{
if(length(init_clust) != n_curve |
any(is.na(init_clust)) |
min(init_clust, na.rm = T) < 1 |
max(init_clust, na.rm = T) > n_clust){
print("Invalid initial cluster labels. Will use random clustering configuration as starting value...")
init_clust <- sample(n_clust, n_curve, replace=TRUE)
}
}
for(i in seq(along=data_lst)){
data_lst[[i]]$init_clust = init_clust[i]
}
## --------------------------------------------------------------------------
## --------------------------------------------------------------------------
## Run SAEM in C++ ----------------------------------------------------------
## --------------------------------------------------------------------------
## --------------------------------------------------------------------------
out <- saem_fit_mixed_warped_curves(data_lst, pars, saem_control,
y_scaling_factor, trace)
out$y_scaling_factor = y_scaling_factor
return(out)
}
|
bbcc0e8c085570bbbbaf3fade03ed1d25335aa75
|
5431b3ea0fbd6d673396398491631a71ab757e30
|
/plot1.R
|
7e7f9c8a634e0c71a7356a412c14d2025458173e
|
[] |
no_license
|
BentalSh/ExData_Plotting1
|
f73936761fca89bf54a6f658de8975d00353cf7e
|
641fe9e11bd28730eb427e452dd2fed865fe4171
|
refs/heads/master
| 2021-01-21T23:53:56.999473
| 2015-05-10T17:35:44
| 2015-05-10T17:35:44
| 35,142,625
| 0
| 0
| null | 2015-05-06T06:16:59
| 2015-05-06T06:16:59
| null |
UTF-8
|
R
| false
| false
| 236
|
r
|
plot1.R
|
# Plot 1: histogram of global active power, written to plot1.png (480x480).
source("loadAndPlot.R")  # provides dataToPlot
global_active <- dataToPlot$Global_active_power
png(filename = "plot1.png", width = 480, height = 480)
hist(global_active,
     breaks = 12,
     col = "red",
     main = "Global Active Power",
     xlab = "Global Active Power (kilowatts)")
dev.off()
|
41fd7187924e0ea34a6c3fc5eccfbe036cdc5706
|
df5b20d6f0958f4e2d817cc2d17c1d7397235cf9
|
/R/ScannerSubscription.R
|
df728160e14820e459da41fd34cc9eab17fd0918
|
[] |
no_license
|
joshuaulrich/IBrokers
|
95e29522f1f9cd6bd2eb9a615b00c1b29aaa582a
|
ac8f12cff2f884044061fb458d4902372be881c4
|
refs/heads/master
| 2023-07-06T13:40:11.976460
| 2023-06-30T15:09:12
| 2023-06-30T15:09:12
| 32,220,781
| 65
| 61
| null | 2023-04-20T15:18:07
| 2015-03-14T16:23:55
|
R
|
UTF-8
|
R
| false
| false
| 2,179
|
r
|
ScannerSubscription.R
|
# Construct a market-scanner subscription request for the TWS API.
#
# All filter fields default to "" (unset); numberOfRows defaults to -1
# (no row limit). Returns a named list of class "twsScannerSubscription"
# with the fields in wire order.
twsScannerSubscription <- function(numberOfRows=-1,
                                   instrument="",
                                   locationCode="",
                                   scanCode="",
                                   abovePrice="",
                                   belowPrice="",
                                   aboveVolume="",
                                   averageOptionVolumeAbove="",
                                   marketCapAbove="",
                                   marketCapBelow="",
                                   moodyRatingAbove="",
                                   moodyRatingBelow="",
                                   spRatingAbove="",
                                   spRatingBelow="",
                                   maturityDateAbove="",
                                   maturityDateBelow="",
                                   couponRateAbove="",
                                   couponRateBelow="",
                                   excludeConvertible="",
                                   scannerSettingPairs="",
                                   stockTypeFilter="")
{
  # A scan without a scan code is almost certainly a mistake, but (as in
  # the original) we only warn so programmatic construction still works.
  if (missing(scanCode))
    warning("'scanCode' needs to be specified")

  sub <- list(numberOfRows=numberOfRows,
              instrument=instrument,
              locationCode=locationCode,
              scanCode=scanCode,
              abovePrice=abovePrice,
              belowPrice=belowPrice,
              aboveVolume=aboveVolume,
              averageOptionVolumeAbove=averageOptionVolumeAbove,
              marketCapAbove=marketCapAbove,
              marketCapBelow=marketCapBelow,
              moodyRatingAbove=moodyRatingAbove,
              moodyRatingBelow=moodyRatingBelow,
              spRatingAbove=spRatingAbove,
              spRatingBelow=spRatingBelow,
              maturityDateAbove=maturityDateAbove,
              maturityDateBelow=maturityDateBelow,
              couponRateAbove=couponRateAbove,
              couponRateBelow=couponRateBelow,
              excludeConvertible=excludeConvertible,
              scannerSettingPairs=scannerSettingPairs,
              stockTypeFilter=stockTypeFilter)
  class(sub) <- "twsScannerSubscription"
  sub
}
# Print method for twsScannerSubscription: display the object's structure.
# Fix: return the argument invisibly (the original returned str()'s NULL);
# print methods conventionally return invisible(x) so the object keeps
# flowing in pipes and auto-printing contexts.
print.twsScannerSubscription <- function(x, ...) {
  str(x)
  invisible(x)
}
|
b6334f662e9197a0e2e060dbf44782761e4ac34d
|
e2c7181ed4e32ad6375160811fc1e13a6c5c1752
|
/man/chg_traj.Rd
|
159447e4d401d1db93dd1eb1e95eadd589f907d2
|
[] |
no_license
|
mauricioromero86/teamlucc
|
c6bbef6beff5bb19ba068db500e6c6e483086507
|
b41fdd3135dd58c45a0a76c8c568768104267eaa
|
refs/heads/master
| 2020-12-24T11:33:13.644455
| 2015-09-10T14:14:44
| 2015-09-10T14:14:44
| 40,678,059
| 0
| 2
| null | 2015-08-13T19:33:49
| 2015-08-13T19:33:48
|
R
|
UTF-8
|
R
| false
| false
| 2,534
|
rd
|
chg_traj.Rd
|
% Generated by roxygen2 (4.0.2): do not edit by hand
\name{chg_traj}
\alias{chg_traj}
\title{Calculate change-trajectory image}
\usage{
chg_traj(chg_mag, chg_dir, chg_threshold, filename, overwrite = FALSE, ...)
}
\arguments{
\item{chg_mag}{change magnitude \code{RasterLayer} from \code{CVAPS}}
\item{chg_dir}{change direction \code{RasterLayer} from \code{CVAPS}}
\item{chg_threshold}{the threshold to use as a minimum when determining change
areas (can use \code{DFPS} to determine this value).}
\item{filename}{filename to save the output \code{RasterLayer} to disk
(optional)}
\item{overwrite}{whether to overwrite existing files (otherwise an error
will be raised)}
\item{...}{additional parameters to pass to rasterEngine}
}
\value{
a {RasterLayer} of change trajectories, with change trajectories
coded as in the \code{lut} output by \code{traj_lut}
}
\description{
This function will calculate trajectories of land cover change using the
Change Vector Analysis in Posterior Probability Space (CVAPS) approach of
comparing posterior probabilities of class membership with an automatically
determined threshold. Areas of no change are coded as -1. A lookup table for
the codes output by \code{chg_traj} can be calculated with \code{traj_lut}.
}
\details{
This function will run in parallel if a parallel backend is registered with
\code{\link{foreach}}.
}
\examples{
\dontrun{
t0_train_data <- get_pixels(L5TSR_1986, L5TSR_1986_2001_training, "class_1986",training=.6)
t0_model <- train_classifier(t0_train_data)
t0_preds <- classify(L5TSR_1986, t0_model)
t1_train_data <- get_pixels(L5TSR_2001, L5TSR_1986_2001_training, "class_2001", training=.6)
t1_model <- train_classifier(t1_train_data)
t1_preds <- classify(L5TSR_2001, t1_model)
t0_t1_chgmag <- chg_mag(t0_preds$probs, t1_preds$probs)
t0_t1_chgdir <- chg_dir(t0_preds$probs, t1_preds$probs)
lut <- traj_lut(t0_preds$codes$code, t0_preds$codes$class)
t0_t1_chgtraj <- chg_traj(lut, t0_t1_chgmag, t0_t1_chgdir, .5)
# Change areas are coded following the above lookup-table (lut):
plot(t0_t1_chgtraj)
# No change areas are -1:
plot(t0_t1_chgtraj == -1)
}
}
\references{
Chen, J., P. Gong, C. He, R. Pu, and P. Shi. 2003.
Land-use/land-cover change detection using improved change-vector analysis.
Photogrammetric Engineering and Remote Sensing 69:369-380.
Chen, J., X. Chen, X. Cui, and J. Chen. 2011. Change vector analysis in
posterior probability space: a new method for land cover change detection.
IEEE Geoscience and Remote Sensing Letters 8:317-321.
}
|
8adffb66718feccc2e51f475cc038c0106f3436e
|
64132db473e79d516861cb158871c431c78e2f50
|
/man/getdata_to_r.Rd
|
4b4a5480a2c31faf69489a964995bc16d0d89e72
|
[] |
no_license
|
EdenYZhu/SPSStoR
|
bf688bfce1f4bd1e689893e9fda6cc51d29c8b05
|
aa79f2887a5839e151c23773df5b7e2cb37af105
|
refs/heads/master
| 2023-01-31T20:45:53.448747
| 2019-11-20T18:34:47
| 2019-11-20T18:34:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 607
|
rd
|
getdata_to_r.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/getToR.r
\name{getdata_to_r}
\alias{getdata_to_r}
\title{Get Data to R}
\usage{
getdata_to_r(x, nosave = FALSE)
}
\arguments{
\item{x}{SPSS syntax - read in by SPSStoR function}
\item{nosave}{A value of FALSE processes the save commands (default),
a value of TRUE continues processing within R, overriding
default x object. Extreme care with this feature as
get commands will be ignored. (currently not supported).}
}
\description{
Converts SPSS Get Data command to R syntax. Available for delimited or
excel data files.
}
|
19df7bec7f67c68052dfd31b068f729edd13e3f9
|
184180d341d2928ab7c5a626d94f2a9863726c65
|
/valgrind_test_dir/to_mu_sd_weib_C-test.R
|
c0ec448c2d3df09f2861b7370833969ce331ed03
|
[] |
no_license
|
akhikolla/RcppDeepStateTest
|
f102ddf03a22b0fc05e02239d53405c8977cbc2b
|
97e73fe4f8cb0f8e5415f52a2474c8bc322bbbe5
|
refs/heads/master
| 2023-03-03T12:19:31.725234
| 2021-02-12T21:50:12
| 2021-02-12T21:50:12
| 254,214,504
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 215
|
r
|
to_mu_sd_weib_C-test.R
|
# Deparsed RcppDeepState capture wrapper around mixR's compiled routine.
# Before forwarding to the C++ entry point, it appends the (k, lambda)
# argument pair to the shared "data.env" environment (a global log used to
# replay fuzz-test inputs later). The list is grown by assigning at
# length + 1, then the original .Call is made and its result returned.
function (k, lambda) 
{
    e <- get("data.env", .GlobalEnv)
    e[["to_mu_sd_weib_C"]][[length(e[["to_mu_sd_weib_C"]]) + 
        1]] <- list(k = k, lambda = lambda)
    .Call("_mixR_to_mu_sd_weib_C", k, lambda)
}
|
7b52d56fca8b1cdfa7b02c32346ceb38b7cc1244
|
05b698ebe661e7fde47992172f4d72130bbc738e
|
/man/upclassifymodel.Rd
|
d761a2ff191797edfebc37d1816c77abf0eaac86
|
[] |
no_license
|
cran/upclass
|
b28d5caba03390f4d01caeb39e2408824a95c413
|
a85c1abbea766d7b536d2ac40157c6e80310756b
|
refs/heads/master
| 2018-12-29T06:57:42.908233
| 2013-11-26T00:00:00
| 2013-11-26T00:00:00
| 17,700,692
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,944
|
rd
|
upclassifymodel.Rd
|
\name{upclassifymodel}
\alias{upclassifymodel}
\title{
Updated Classification Method using Labeled and Unlabeled Data
}
\description{
This function implements the EM algorithm by iterating over the E-step and M-step. The initial values are obtained from the labeled data then both steps are further iterated over the complete data, labeled and unlabeled data combined.
}
\usage{
upclassifymodel(Xtrain, cltrain, Xtest, cltest = NULL,
modelName = "EEE", tol = 10^-5, iterlim = 1000,
Aitken = TRUE, ...)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{Xtrain}{
A numeric matrix of observations where rows correspond to observations and columns correspond to variables. The group membership of each observation is known - labeled data.
}
\item{cltrain}{
A numeric vector with distinct entries representing a classification of the corresponding observations in \code{Xtrain}.
}
\item{Xtest}{
A numeric matrix of observations where rows correspond to observations and columns correspond to variables. The group membership of each observation may not be known - unlabeled data.
}
\item{cltest}{
A numeric vector with distinct entries representing a classification of the corresponding observations in \code{Xtest}. By default, these are not supplied and the function sets out to obtain them.
}
\item{modelName}{
A character string indicating the model, with default "EEE".
The models available for selection are described in \code{\link{modelvec}}
}
\item{tol}{
A positive number, with default \code{10^{-5}}, which is a measure of how strictly convergence is defined.
}
\item{iterlim}{
A positive integer, with default 1000, which is the desired limit on the maximum number of iterations.
}
\item{Aitken}{
A logical value with default \code{TRUE} which tests for convergence using Aitken acceleration. If value is set to \code{FALSE}, convergence is tested by comparing \code{tol} to the change in log-likelihood between two consecutive iterations. For further information on Aitken acceleration, see \code{\link{Aitken}}
}
\item{\dots}{
Arguments passed to or from other methods.
}
}
\details{
This is an updated approach to typical classification methods. Initially, the M-step is performed on the labeled (training) data to obtain parameter estimates for the model. These are used in an E-step to obtain group memberships for the unlabeled (test) data. The training data labels and new probability estimates for test data labels are combined to form the complete data. From here, the M-step and E-step are iterated over the complete data, with continuous updating until convergence has been reached. This has been shown to result in lower misclassification rates, particularly in cases where only a small proportion of the total data is labeled.
}
\value{
The return value is a list with the following components:
\item{call}{The function call from \code{upclassifymodel}.}
\item{Ntrain}{The number of observations in the training data.}
\item{Ntest}{The number of observations in the test data.}
\item{d}{The dimension of the data.}
\item{G}{The number of groups in the data}
\item{iter}{The number of iterations required to reach convergence. If convergence was not obtained, this is equal to \code{iterlim}.}
\item{converged}{A logical value where \code{TRUE} indicates convergence was reached and \code{FALSE} means \code{iter} reached \code{iterlim} without obtaining convergence.}
\item{modelName}{A character string identifying the model (same as the input argument).}
\item{parameters pro}{A vector whose \emph{k}th component is the mixing proportion for the \emph{k}th component of the mixture model. If the model includes a Poisson term for noise, there should be one more mixing proportion than the number of Gaussian components.}
\item{mean}{The mean for each component. If there is more than one component, this is a matrix whose \emph{k}th column is the mean of the \emph{k}th component of the mixture model.}
\item{variance}{A list of variance parameters for the model. The components of this list depend on the model specification.}
\item{train/test z}{A matrix whose \code{[i,k]}th entry is the conditional probability of the \emph{i}th observation belonging to the \emph{k}th component of the mixture.}
\item{cl}{A numeric vector with distinct entries representing a classification of the corresponding observations in \code{Xtrain}/\code{Xtest}.}
\item{rate}{The number of misclassified observations.}
\item{Brierscore}{The Brier score measuring the accuracy of the probabilities (\code{z}s) obtained.}
\item{tab}{A table of actual and predicted group classifications.}
\item{ll}{The log-likelihood for the data in the mixture model.}
\item{bic}{The Bayesian Information Criterion for the model.}
}
\references{
C. Fraley and A.E. Raftery (2002). Model based clustering, discriminant analysis, and density estimation. \emph{Journal of the American Statistical Association} 97:611-631.
Fraley, C. and Raftery, A.E. (2006).
MCLUST Version for R: Normal Mixture Modeling and Model-Based Clustering,
Technical Report no. 504, Department of Statistics,
University of Washington.
Dean, N., Murphy, T.B. and Downey, G. (2006). Using unlabelled data to update classification rules with applications in food authenticity studies. \emph{Journal of the Royal Statistical Society: Series C} 55 (1), 1-14.
}
\author{
Niamh Russell
}
\seealso{
\code{\link{upclassify}}, \code{\link{Aitken}}, \code{\link{modelvec}}
}
\examples{
# This function is not designed to be used on its own,
# but to be called by \code{upclassify}
data(wine, package = "gclus")
X <- as.matrix(wine[, -1])
cl <- unclass(wine[, 1])
indtrain <- sort(sample(1:178, 120))
indtest <- setdiff(1:178, indtrain)
fitup <- upclassifymodel(X[indtrain,], cl[indtrain], X[indtest,], cl[indtest])
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{Classification}
|
37680db32ecc2deb0f16f248202ae102d1b76263
|
d5ea85feed4c01ce9db8c019d9142f30a0c68a0e
|
/man/basis_bs.Rd
|
b09037d1798741d8dc770f2b7a1a7c5c9aec971c
|
[] |
no_license
|
yixuan/fdaplus
|
4b59d15d4a0501a4b66f21d0f6adab407107cc98
|
51abb6d5d6a0060a8117060135a8167642eb4b56
|
refs/heads/master
| 2016-09-06T14:19:30.570338
| 2015-05-16T00:59:17
| 2015-05-16T00:59:17
| 24,311,873
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,157
|
rd
|
basis_bs.Rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/10_bspline.R
\name{basis_bs}
\alias{basis_bs}
\title{Creating B-spline Basis Functions}
\usage{
basis_bs(range = c(0, 1), nbasis = NULL, order = 4, breaks = NULL,
dropind = NULL)
}
\arguments{
\item{range}{A length-two numeric vector to define the interval on which
basis functions can be evaluated. Default is \code{c(0, 1)}.}
\item{nbasis}{The total number of basis functions, including the ones that need
to be dropped. See section \strong{Details}.}
\item{order}{The order of the B-spline functions, which is the degree of
splines plus one. Default is 4, standing for cubic splines.}
\item{breaks}{A vector of break points (inner knots) to define the B-spline basis.
This does not include the end points, which are defined by
the parameter \code{range}. See section \strong{Details}.}
\item{dropind}{Indices of basis functions that need to be dropped. Default is
\code{NULL}, meaning no basis will be dropped.}
}
\value{
A \code{\link[=bspline+-class]{bspline+}} object representing the
basis functions.
}
\description{
This function constructs a \code{\link[=bspline+-class]{bspline+}} object
that represents a series of B-spline basis functions.
}
\details{
\code{nbasis}, \code{order} and \code{breaks} are related to each other,
satisfying the condition \code{nbasis == order + length(breaks)}. Hence when
any one of \code{nbasis} and \code{breaks} is properly specified,
the other one can be computed accordingly. For details,
\itemize{
\item If both \code{nbasis} and \code{breaks} are \code{NULL} (the default),
\code{nbasis} will be set to \code{order} and there is no inner knot.
\item If \code{nbasis} is \code{NULL} and \code{breaks} is given, then
\code{nbasis} will be calculated using the equation above.
\item If \code{nbasis} is given and \code{breaks} unspecified, a series
of equally spaced inner knots will be calculated.
\item If both \code{nbasis} and \code{breaks} are provided, then the program
will check their validity.
}
}
\author{
Yixuan Qiu <\url{http://statr.me/}>
}
|
958b38430ea778f2cd262eb4452d7ddf59f5c365
|
626c15d119fdf36000a927b0ee603793f394ead1
|
/miscellaneous/plots.r
|
9c2aa6ac13e75a4895fe19a1e53d83d8a9a42394
|
[] |
no_license
|
sarnthil/thesis
|
ab0aea553fb142dff1c1f7a3a1a2c79f4df8e4b1
|
4549dd781e85b6834addc7a0f16a792e817a4034
|
refs/heads/master
| 2020-05-21T08:58:24.401359
| 2019-05-06T20:32:59
| 2019-05-06T20:32:59
| 84,606,114
| 6
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,515
|
r
|
plots.r
|
require(scales)
library(ggplot2)
library(RColorBrewer)
# FIX: `getPalette` was used below but never defined. brewer.pal() itself is
# capped at 9-12 colours, so build an interpolating colour ramp from Set1 to
# supply the 32-37 distinct colours these plots request.
getPalette <- colorRampPalette(brewer.pal(9, "Set1"))
# Top ingredients: bar chart of occurrence counts, ordered by frequency.
ings <- read.csv("top20_ingredients.tsv", header=F, sep="\t")
ings <- transform(ings, V1=reorder(V1, -V2))
ggplot(ings, aes(x=V1, y=V2, fill=V1)) + geom_bar(stat="identity") + theme(axis.text.x = element_blank()) + labs(x="ingredients", y="# occurrences in recipes", fill="top ingredients") + scale_fill_manual(values=getPalette(32))+ scale_y_continuous(labels = comma)
# Top verbs: same layout, ordered by frequency.
events <- read.csv("top20_events.tsv", header=F, sep="\t")
events <- transform(events, V2=reorder(V2, -V1))
ggplot(events, aes(y=V1, x=V2, fill=V2)) + geom_bar(stat="identity") + theme(axis.text.x = element_blank()) + labs(x="verbs", y="# occurrences in recipes", fill="top verbs") + scale_fill_manual(values=getPalette(32))+ scale_y_continuous(labels = comma)
# Ingredient fractions faceted by cuisine.
cc <- read.csv("top10_ings_per_cuisine.tsv", sep="\t")
ggplot(cc, aes(x=ingredient, fill=ingredient, color=ingredient, y=fraction, label=ingredient)) + geom_bar(stat="identity") + facet_wrap(~ cuisine, scales="free_x") + scale_fill_manual(values=getPalette(37)) + theme(axis.text.x = element_blank()) + scale_color_manual(values=c("black", "grey", "black","grey", "black","grey", "black","grey", "black","grey", "black","grey", "black","grey", "black","grey", "black","grey", "black","grey", "black","grey", "black","grey", "black","grey", "black","grey", "black","grey","black","grey","black","grey", "black","grey", "black", "grey")) + scale_y_continuous(labels=percent)
|
2d4dcf9667d5f00e7f85d8e046136af7c95c204a
|
897cc77f111e7110e22efe7f9b7ad6f3ef576693
|
/scripts/download_spec.R
|
4e66b5b7c109538bbed9ee94477eb0c650fc8fcb
|
[
"MIT"
] |
permissive
|
nuest/stevedore
|
8823a0d05caf6a1d54fe84015ede93ba27490455
|
845587fb6dbb13487808799fcc071d5a7836e87a
|
refs/heads/master
| 2022-01-19T13:09:08.599178
| 2019-01-02T08:32:56
| 2019-01-02T08:33:29
| 198,364,789
| 0
| 0
| null | 2019-07-23T06:17:42
| 2019-07-23T06:17:42
| null |
UTF-8
|
R
| false
| false
| 593
|
r
|
download_spec.R
|
# Load the package under development so its internal helpers
# (swagger_spec_versions, download_file, read_binary, vcapply) are in scope.
devtools::load_all()
# Download every supported Docker API swagger spec into `dest`; returns a
# character vector of the bzip2-compressed file paths (one per version).
docker_spec_fetch <- function(dest) {
  vcapply(swagger_spec_versions(), docker_spec_fetch1, dest)
}
# Fetch the swagger.yaml for a single API `version` from docs.docker.com into
# `dest`, compress it, and return the path of the resulting .bz2 file.
docker_spec_fetch1 <- function(version, dest) {
  url <- sprintf("https://docs.docker.com/engine/api/v%s/swagger.yaml", version)
  dest_file <- file.path(dest, sprintf("v%s.yaml", version))
  download_file(url, dest_file)
  bzip_file(dest_file)
}
# Compress `path` into a sibling file "<path>.bz2" at maximum compression
# (level 9) and return the compressed file's path. The source file is read
# via the package helper read_binary() and left untouched on disk; the
# connection is closed via on.exit() even if writeBin() fails.
bzip_file <- function(path) {
  compressed_path <- paste0(path, ".bz2")
  payload <- read_binary(path)
  out_con <- bzfile(compressed_path, "wb", compression = 9L)
  on.exit(close(out_con))
  writeBin(payload, out_con)
  compressed_path
}
# Fetch and compress all spec versions into the package sources.
docker_spec_fetch("inst/spec")
|
3c6d80d8313f682b0b8c831ee0f6eeb8177eebe7
|
01114541c33a31ff4b1134788ff0815fef397329
|
/plots/plots_for_paper/vegan_shannon_diversity_etc.r
|
8482cd2550ac20ed8308bd8ca02a7d42e3554cb1
|
[] |
no_license
|
RJ333/R_scripts
|
06b31ad1459bafc68e0c212aa55eb83e5f354be9
|
a882732aeb86b10a44f5fedf86401bf20d4618f6
|
refs/heads/master
| 2021-04-26T22:55:19.096526
| 2019-07-22T08:30:33
| 2019-07-22T08:30:33
| 123,895,394
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,719
|
r
|
vegan_shannon_diversity_etc.r
|
# Shannon indices
# OTUs in columns; samples as row names
# FIX: the functions used below (diversity, specnumber, rarefy, fisher.alpha,
# data(BCI)) come from the vegan package, which was never loaded.
library(vegan)
# FIX: the bare word "Examples" (pasted from ?diversity's help page) was a
# free-standing symbol and errored when the script was sourced; kept as a
# comment instead.
# Examples (from ?diversity):
data(BCI)
H <- diversity(BCI)
simp <- diversity(BCI, "simpson")
invsimp <- diversity(BCI, "inv")
r.2 <- rarefy(BCI, 2)
alpha <- fisher.alpha(BCI)
pairs(cbind(H, simp, invsimp, r.2, alpha), pch="+", col="blue")
## Species richness (S) and Pielou's evenness (J):
S <- specnumber(BCI) ## rowSums(BCI > 0) does the same...
J <- H/log(S)
# Transformed, normalized count tables.
# NOTE(review): `tnorm_water_dna` is assumed to exist in the workspace
# (created elsewhere) -- confirm before running non-interactively.
head(tnorm_water_dna)
h_water_dna<-diversity(tnorm_water_dna)
s_water_dna<-specnumber(tnorm_water_dna)
j_water_dna<-h_water_dna/log(s_water_dna)
write.csv(h_water_dna,file="h_water_dna.csv")
write.csv(s_water_dna,file="s_water_dna.csv")
write.csv(j_water_dna,file="j_water_dna.csv")
# Averaged samples (DNA): Shannon (h), richness (s), Pielou evenness (j).
vegan_dna_mean_V2<-read.csv(file.choose(),sep=";",row.names=1)
h_water_dna_mean_V2_paper<-diversity(vegan_dna_mean_V2)
s_water_dna_mean_V2_paper<-specnumber(vegan_dna_mean_V2)
j_water_dna_mean_V2_paper<-h_water_dna_mean_V2_paper/log(s_water_dna_mean_V2_paper)
write.csv(h_water_dna_mean_V2_paper,file="h_water_dna_mean_V2_paper.csv")
write.csv(s_water_dna_mean_V2_paper,file="s_water_dna_mean_V2_paper.csv")
write.csv(j_water_dna_mean_V2_paper,file="j_water_dna_mean_V2_paper.csv")
# Averaged samples (cDNA): same three indices.
vegan_cdna_mean_V2<-read.csv(file.choose(),sep=";",row.names=1)
h_water_cdna_mean_V2_paper<-diversity(vegan_cdna_mean_V2)
s_water_cdna_mean_V2_paper<-specnumber(vegan_cdna_mean_V2)
j_water_cdna_mean_V2_paper<-h_water_cdna_mean_V2_paper/log(s_water_cdna_mean_V2_paper)
write.csv(h_water_cdna_mean_V2_paper,file="h_water_cdna_mean_V2_paper.csv")
write.csv(s_water_cdna_mean_V2_paper,file="s_water_cdna_mean_V2_paper.csv")
write.csv(j_water_cdna_mean_V2_paper,file="j_water_cdna_mean_V2_paper.csv")
|
2b751d43a1f9906309c83a3ada055a43ff7af6d8
|
dec9ee1bf4686166cdca12eadd4a225ee8c7f043
|
/tests/testthat/test-tidy.calc_genoprob.R
|
11a29a12c9d635e399dd218851577733c80c4ada
|
[] |
no_license
|
tavareshugo/qtl2helper
|
0f72aaa86e3c84c34873d3b20bfa57d25e25bb4d
|
bc40ab2a8ced1b2b436ad2e445ddb7d7c2944e7e
|
refs/heads/master
| 2023-04-03T12:53:08.495166
| 2023-03-25T15:01:51
| 2023-03-25T15:01:51
| 207,848,319
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,246
|
r
|
test-tidy.calc_genoprob.R
|
library(qtl2)
# Read example data and run scan
# (DOex is downloaded from the rqtl/qtl2data repository -- network required).
DOex <- read_cross2("https://raw.githubusercontent.com/rqtl/qtl2data/master/DOex/DOex.zip")
DOex_probs <- calc_genoprob(DOex)
# number of markers * samples * genotypes
# = expected row count of the tidied (long-format) probability table.
n <- sum(unlist(lapply(DOex_probs, function(i) prod(dim(i)))))
# marker names
# (third dimension of each per-chromosome probability array).
markers <- unlist(lapply(DOex_probs, function(i) dimnames(i)[[3]]))
# Without a map: expect a tibble with one row per marker x id x genotype and
# exactly the four columns below.
test_that("coercion of calc_genoprob to tibble - no map",
          {
            probs_tbl <- tidy(DOex_probs)
            # expectations
            expect_true(tibble::is_tibble(probs_tbl))
            expect_true(all(probs_tbl$marker %in% markers))
            expect_equal(nrow(probs_tbl), n)
            expect_equal(ncol(probs_tbl), 4)
            expect_equal(colnames(probs_tbl), c("marker", "id", "genotype", "probability"))
          })
# With a genetic map: the same rows plus chrom/pos columns (six total).
test_that("coercion of calc_genoprob to tibble - with map",
          {
            probs_tbl <- tidy(DOex_probs, map = DOex$gmap)
            # expectations
            expect_true(tibble::is_tibble(probs_tbl))
            expect_equal(nrow(probs_tbl), n)
            expect_equal(ncol(probs_tbl), 6)
            expect_equal(colnames(probs_tbl), c("marker", "chrom", "pos", "id", "genotype", "probability"))
          })
|
91d904b72ffbc5445587079b5590bd1995a96e5b
|
b0926556620ce1a2105fcc10d821f263b721ea5e
|
/R/biol_figs.R
|
1ade62f70c17d1e08eadca80a4e989d855abc043
|
[] |
no_license
|
ices-eg/wk_WKGSS
|
68745ead97b4b30f171b0b40f77dab37926f346c
|
f5667d02e5a9f4e7dee5bd7b5a9696a7094f0bf6
|
refs/heads/master
| 2022-01-25T07:55:07.076356
| 2022-01-20T10:29:25
| 2022-01-20T10:29:25
| 238,208,395
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 133,005
|
r
|
biol_figs.R
|
#install.packages(c('tidyverse','purrr','broom', 'sf', 'readxl','devtools', 'mapplots'))
#devtools::install_github('hafro/geo')
library(tidyverse)
#library(fishmethods)#needed for "alternative growth moedels" and "age-length key" part of the code
library(sf)
# ICES area polygons: simplify geometry (10 km tolerance) for lighter
# plotting, reproject to WGS84 (EPSG:4326), and keep only the area code.
ia <- read_sf("R/gisdata/ICES_Areas_20160601_cut_dense_3857.gpkg") %>%
  st_simplify(dTolerance = 10000) %>%
  st_transform(4326) %>%
  #st_geometry() #gets rid of all columns except geometry
  select(Area_Full)
### Get data ###
# Norwegian biological samples: harmonise column names, turn the literal
# string 'NA' into real NAs, recode source labels, derive a spawning flag
# (maturity stage 6), and drop implausible weights (>= 2 kg).
Nor <-
  readxl::read_excel("data_received/Template_data_sharing_NOR.xls", sheet = "biological data") %>%
  rename(gender = gender...10, species = specie) %>%
  mutate(maturity_stage = as.numeric(ifelse(maturity_stage=='NA', NA, maturity_stage))) %>%
  dplyr::select(-c(gender...7, number, samplenumber)) %>%
  mutate(source = ifelse(source=='Reffleet_ocea', 'Reffleet_ocean',
                         ifelse(source=='fishery', 'Fishery', source)),
         spawning = ifelse(maturity_stage==6, 'yes', 'no')) %>%
  filter((weight_g < 2000 | is.na(weight_g)))
# Norwegian stations: coerce depth/lat/long to numeric, rename to the common
# scheme, and normalise ICES division codes ('27.2a' -> '27.2.a', etc.).
Nor_st <-
  readxl::read_excel("data_received/Template_data_sharing_NOR.xls", sheet = "station") %>%
  dplyr::select(-c(specie)) %>%
  mutate(depth = as.numeric(ifelse(depth=='NA', NA, depth)),
         lat = as.numeric(ifelse(lat=='NA', NA, lat)),
         long = as.numeric(ifelse(long=='NA', NA, long))) %>%
  rename(lon = long, depth_m = depth) %>%
  mutate(division = as.character(division)) %>%
  mutate(division = ifelse(division == '27.2a', '27.2.a',
                           ifelse(division == '27.2b', '27.2.b',
                                  ifelse(division=='27.4a', '27.4.a',
                                         ifelse(division == '27.3a', '27.3.a',
                                                ifelse(division == '27.4b', '27.4.b', division)))))) %>%
  filter(!is.na(division), division != 'NA')
# Icelandic biological samples: capitalise maturity labels, drop weights >= 2 kg.
Ice <-
  read_csv("data_received/BiolData_ARU.27.5a14.csv", col_types = cols(weight_g = 'd')) %>%
  mutate(maturity = ifelse(maturity == 'mature', 'Mature',
                           ifelse(maturity== 'immature', 'Immature', maturity))) %>%
  filter((weight_g < 2000 | is.na(weight_g)))
# Icelandic stations.
Ice_st <-
  read_csv("data_received/StationsData_ARU.27.5a14.csv") %>%
  rename(depth_m = depth)
# Faroese biological samples: capitalise maturity/gender codes, tag sampler.
Far <-
  read_csv("data_received/BiologicalData_ARU_27.5.b_Faroes.csv", col_types = cols(weight_g = 'd',
                                                                                 age = 'd',
                                                                                 gender = 'c',
                                                                                 maturity_stage = 'd',
                                                                                 maturity = 'c',
                                                                                 spawning = 'c'
                                                                                 )) %>%
  mutate(maturity = ifelse(maturity == 'mature', 'Mature',
                           ifelse(maturity== 'immature', 'Immature', maturity)),
         gender = ifelse(gender == 'm', 'M',
                         ifelse(gender== 'f', 'F', gender)),
         person = 'Lise H. Ofstad')
# Faroese stations: lower-case the column names and pool sub-divisions
# 27.5.b.1 / 27.5.b.2 / 27.6.a into division 27.5.b.
Far_st <-
  read_csv("data_received/StationsData_ARU_27.5.b_Faroes.csv") %>%
  rename(person = PERSON, source = SOURCE, country = COUNTRY, division = DIVISION, day = DAY, month = MONTH, year = YEAR, lat = LAT, lon = LON, depth_m = DEPTH_M, haul_id = HAUL_ID) %>%
  mutate(division = ifelse(division %in% c('27.5.b.1', '27.5.b.2', '27.6.a'), '27.5.b', division))
# Greenlandic length data: expand raised counts to one row per fish.
Gre_raw <-
  readxl::read_xlsx('../../data/Argentina silus_lengthraised_Pamela Woods.xlsx') %>%
  tidyr::uncount(weights = CountRaised)
# Greenlandic "biological" table (lengths only; no age/weight/maturity).
Gre <-
  Gre_raw %>%
  mutate(person = 'Julius Nielsen', source = 'GS', haul_id = KeyStationSubGear, age = NA, length_cm = Length, weight_g = NA, gender = NA, maturity_stage = NA, maturity = NA, spawning = NA) %>%
  select(person, source, haul_id, age, length_cm, weight_g, gender, maturity_stage, maturity, spawning)
# Greenlandic station table (division 27.14).
Gre_st <-
  Gre_raw %>%
  mutate(person = 'Julius Nielsen', source = 'GS', country = 'Greenland', division = '27.14', haul_id = KeyStationSubGear, day = NA, month = NA, year = Year) %>%
  select(person, source, country, division, haul_id, day, month, year)
# Combined biological data (Greenland excluded here): drop apparent Norwegian
# ageing outliers and one implausible Icelandic length/weight combination,
# and mark unsexed Faroese fish as 'U'.
all <-
  Ice %>%
  bind_rows(Nor) %>%
  bind_rows(Far) %>%
  filter(!(person=='Elvar Hallfredsson' & age==2 & length_cm>25), !(person=='Elvar Hallfredsson' & age==1 & length_cm>20)) %>%
  filter(!(person=='Pamela J. Woods' & length_cm < 20 & weight_g > 500)) %>%
  mutate(gender = ifelse(is.na(gender) & person=='Lise H. Ofstad', 'U', gender))
# Combined stations; `year` is recoded to period codes (e.g. 20012005 =
# 2001-2005, 19940000 = pre-1994) while the calendar year is kept in `yr`.
all_st <-
  Ice_st %>%
  bind_rows(Nor_st) %>%
  bind_rows(Far_st) %>%
  mutate(yr = year,
         year = ifelse(yr %in% c(1994:2000), 19942000,
                ifelse(yr %in% c(2001:2005), 20012005,
                ifelse(yr %in% c(2006:2010), 20062010,
                ifelse(yr %in% c(2011:2015), 20112015,
                ifelse(yr %in% c(2016:2019), 20162019,
                ifelse(yr < 1994, 19940000, yr)))))))
####--------------Von Bertalanffy growth curves -------------####
####--------------By division and overall -------------####
# Fit a von Bertalanffy growth curve per division x sex via nls on the log
# scale, using decimalised age (age + (month-1)/12); write the tidied
# parameter table (Linf, K, t0) to CSV. write_csv() returns its input, so
# the assignment keeps the parameter tibble.
vb_pars <-
  all %>%
  left_join(all_st) %>%
  filter(!is.na(age), !is.na(length_cm), !is.na(gender), length_cm > 0, !is.na(division)) %>%
  mutate(age = age + (month-1)/12) %>%
  unite(div_gen, division, gender, remove = F) %>%
  split(., .$div_gen) %>% #.[[1]]->x
  purrr::map(function(x){
    #print(paste0(unique(x$division), '_', unique(x$gender)))
    nls(log(length_cm)~log(Linf*(1-exp(-K*(age-t0)))), data=x, start=list(Linf=50, K=0.2, t0=-0.5)) %>%
      broom::tidy() %>%
      mutate(division = unique(x$division),
             gender = unique(x$gender))
  }) %>%
  bind_rows() %>%
  write_csv('R/biol_figs_output/vbpars_bydivision.csv')
# Same fits restricted to the 2016-2019 period code.
vb_pars_2018 <-
  all %>%
  left_join(all_st) %>%
  filter(year==20162019,!is.na(age), !is.na(length_cm), !is.na(gender), division!='NA', length_cm > 0) %>%
  mutate(age = age + (month-1)/12) %>%
  unite(div_gen, division, gender, remove = F) %>%
  split(., .$div_gen) %>% #.[[1]]->x
  purrr::map(function(x){
    print(paste0(unique(x$division), '_', unique(x$gender)))
    nls(log(length_cm)~log(Linf*(1-exp(-K*(age-t0)))), data=x, start=list(Linf=50, K=0.2, t0=-0.5)) %>%
      broom::tidy() %>%
      mutate(division = unique(x$division),
             gender = unique(x$gender))
  }) %>%
  bind_rows() %>%
  write_csv('R/biol_figs_output/vbpars_bydivision_2018.csv')
# Side-by-side table of all-years vs 2016-2019 estimates.
# NOTE(review): bind_cols() assumes the right_join() output preserves
# vb_pars' row order exactly -- verify before trusting the combined CSV.
vb_pars %>%
  bind_cols(vb_pars_2018 %>% right_join(vb_pars %>% select(term, division, gender))) %>%
  write_csv('R/biol_figs_output/vbpars_bydivision_both.csv')
####--------------VB by stock-------------####
# Fit VB curves per stock (identified by the first 3 letters of the data
# provider's name) x sex. Each list element carries both the tidied model
# ("mod") and the data with fitted lengths at ages 0-60 appended ("x").
vb_pars_bystock <-
  all %>%
  left_join(all_st) %>%
  filter(!is.na(age), !is.na(length_cm), !is.na(gender), length_cm > 0, !is.na(division)) %>%
  mutate(age = age + (month-1)/12,
         person_sh = person %>% substr(., 1, 3)) %>%
  unite(st_gen, person_sh, gender, remove = F) %>%
  split(., .$st_gen) %>% #.[[1]]->x
  purrr::map(function(x){
    #print(paste0(unique(x$division), '_', unique(x$gender)))
    prL<-seq(0,120,1)
    prA<-seq(0,60,1)
    mod <- nls(log(length_cm)~log(Linf*(1-exp(-K*(age-t0)))), data=x, start=list(Linf=50, K=0.2, t0=-0.5))
    # Back-transform log-scale predictions at integer ages 0-60.
    fit <-exp(predict(mod, data.frame(age=prA),type="response"))
    y <-
      list(
        mod = mod %>%
          broom::tidy() %>%
          mutate(person = unique(x$person),
                 gender = unique(x$gender)),
        # full_join() appends prediction rows (NA observations) to the data.
        x = full_join(x, data.frame(age = prA, fit)) %>% select(-c(st_gen, person_sh))
      )
    return(y)
  })
# Export the per-stock data + fitted values.
vb_pars_bystock %>%
  flatten() %>%
  keep(., names(.)=="x") %>%
  bind_rows() %>%
  write_csv('R/biol_figs_output/vbpars_bystock.csv')
# Boxplots of length-at-age by stock with fitted VB curves overlaid,
# faceted by sex; provider names are mapped to stock codes.
vb_plot_bystock <-
  vb_pars_bystock %>%
  flatten() %>%
  keep(., names(.)=="x") %>%
  bind_rows() %>%
  mutate(stock = ifelse(person=='Elvar Hallfredsson', 'aru.27.123a4',
                        ifelse(person=='Pamela J. Woods', 'aru.27.5a14',
                               ifelse(person=='Lise H. Ofstad', 'aru.27.5b6a', person))),
         gender = ifelse(gender=='F', 'Female',
                         ifelse(gender=='M', 'Male',
                                ifelse(gender=='U', 'Unidentified', gender)))) %>%
  filter(!is.na(stock)) %>%
  unite(age_stock, age, stock, remove = FALSE) %>%
  rename(`Length (cm)` = length_cm, Age = age, Stock = stock, Gender = gender) %>%
  filter(Age < 30) %>%
  ggplot() +
  geom_boxplot(aes(x = Age, y = `Length (cm)`, group = age_stock, color = Stock, fill = Stock), alpha = 0.1) +
  geom_line(aes(x = Age, y = fit, color = Stock),
            data = vb_pars_bystock %>%
              flatten() %>%
              keep(., names(.)=="x") %>%
              bind_rows() %>%
              mutate(stock = ifelse(person=='Elvar Hallfredsson', 'aru.27.123a4',
                                    ifelse(person=='Pamela J. Woods', 'aru.27.5a14',
                                           ifelse(person=='Lise H. Ofstad', 'aru.27.5b6a', person))),
                     gender = ifelse(gender=='F', 'Female',
                                     ifelse(gender=='M', 'Male',
                                            ifelse(gender=='U', 'Unidentified', gender)))) %>%
              filter(!is.na(stock), !is.na(fit)) %>%
              unite(age_stock, age, stock, remove = FALSE) %>%
              rename(Age = age, Stock = stock, Gender = gender) %>%
              filter(Age < 30) )+
  theme_bw() +
  facet_wrap(~Gender, ncol = 1)
# Same per-stock x sex VB fits restricted to the 2016-2019 period code.
vb_pars_bystock_2018 <-
  all %>%
  left_join(all_st) %>%
  filter(!is.na(age), !is.na(length_cm), !is.na(gender), length_cm > 0,
         year==20162019, !is.na(division)) %>%
  mutate(age = age + (month-1)/12,
         person_sh = person %>% substr(., 1, 3)) %>%
  unite(st_gen, person_sh, gender, remove = F) %>%
  split(., .$st_gen) %>% #.[[1]]->x
  purrr::map(function(x){
    #print(paste0(unique(x$division), '_', unique(x$gender)))
    prL<-seq(0,120,1)
    prA<-seq(0,60,1)
    mod <- nls(log(length_cm)~log(Linf*(1-exp(-K*(age-t0)))), data=x, start=list(Linf=50, K=0.2, t0=-0.5))
    fit <-exp(predict(mod, data.frame(age=prA),type="response"))
    y <-
      list(
        mod = mod %>%
          broom::tidy() %>%
          mutate(person = unique(x$person),
                 gender = unique(x$gender)),
        x = full_join(x, data.frame(age = prA, fit)) %>% select(-c(st_gen, person_sh))
      )
    return(y)
  })
# Export the 2016-2019 parameter table.
vb_pars_bystock_2018 %>%
  flatten() %>%
  keep(., names(.)=="mod") %>%
  bind_rows() %>%
  write_csv('R/biol_figs_output/vbpars_bystock_2018.csv')
# 2016-2019 boxplots with fitted curves overlaid.
# NOTE(review): the geom_line() layer draws fits from vb_pars_bystock (all
# years), not vb_pars_bystock_2018, and then filters year==20162019; the
# appended prediction rows plausibly carry NA year, so fitted lines may be
# dropped entirely. Presumably vb_pars_bystock_2018 was intended -- confirm.
vb_plot_bystock_2018 <-
  vb_pars_bystock_2018 %>%
  flatten() %>%
  keep(., names(.)=="x") %>%
  bind_rows() %>%
  mutate(stock = ifelse(person=='Elvar Hallfredsson', 'aru.27.123a4',
                        ifelse(person=='Pamela J. Woods', 'aru.27.5a14',
                               ifelse(person=='Lise H. Ofstad', 'aru.27.5b6a', person))),
         gender = ifelse(gender=='F', 'Female',
                         ifelse(gender=='M', 'Male',
                                ifelse(gender=='U', 'Unidentified', gender)))) %>%
  filter(!is.na(stock)) %>%
  unite(age_stock, age, stock, remove = FALSE) %>%
  rename(`Length (cm)` = length_cm, Age = age, Stock = stock, Gender = gender) %>%
  filter(Age < 30) %>%
  ggplot() +
  geom_boxplot(aes(x = Age, y = `Length (cm)`, group = age_stock, color = Stock, fill = Stock)) +
  geom_line(aes(x = Age, y = fit, color = Stock),
            data = vb_pars_bystock %>%
              flatten() %>%
              keep(., names(.)=="x") %>%
              bind_rows() %>%
              mutate(stock = ifelse(person=='Elvar Hallfredsson', 'aru.27.123a4',
                                    ifelse(person=='Pamela J. Woods', 'aru.27.5a14',
                                           ifelse(person=='Lise H. Ofstad', 'aru.27.5b6a', person))),
                     gender = ifelse(gender=='F', 'Female',
                                     ifelse(gender=='M', 'Male',
                                            ifelse(gender=='U', 'Unidentified', gender)))) %>%
              filter(!is.na(stock), !is.na(fit)) %>%
              unite(age_stock, age, stock, remove = FALSE) %>%
              rename(Age = age, Stock = stock, Gender = gender) %>%
              filter(Age < 30, year==20162019) )+
  theme_bw() +
  facet_wrap(~Gender, ncol = 1)
# VB fits per stock x sex x period. nls() is wrapped in try() because some
# sparse period/sex groups fail to converge; those groups return mod = NULL
# and the raw data only (no fitted values).
vb_pars_bystock_overtime <-
  all %>%
  left_join(all_st) %>%
  filter(!is.na(age), !is.na(length_cm), !is.na(gender), length_cm > 0, !is.na(division)) %>%
  mutate(age = age + (month-1)/12,
         person_sh = person %>% substr(., 1, 3)) %>%
  unite(st_gen, person_sh, gender, year, remove = F) %>%
  split(., .$st_gen) %>% #.[[19]]->x
  purrr::map(function(x){
    print(paste0(unique(x$division), '_', unique(x$gender)))
    prL<-seq(0,120,1)
    prA<-seq(0,60,1)
    mod <- NULL; fit <- NULL
    try(
      {mod <- nls(log(length_cm)~log(Linf*(1-exp(-K*(age-t0)))), data=x, start=list(Linf=50, K=0.2, t0=-0.5))
      fit <- exp(predict(mod, data.frame(age=prA),type="response"))},
      silent = TRUE)
    if(!is.null(mod)){
      y <-
        list(
          mod = mod %>%
            broom::tidy() %>%
            mutate(person = unique(x$person),
                   gender = unique(x$gender),
                   year = unique(x$year)),
          x = full_join(x, data.frame(age = prA,
                                      fit,
                                      person = unique(x$person),
                                      gender = unique(x$gender),
                                      year = unique(x$year))) %>% select(-c(st_gen, person_sh))
        )
    } else { y <- list (mod = NULL, x = x %>% select(-c(st_gen, person_sh)))}
    return(y)
  })
# aru.27.123a4: length-at-age boxplots by period (viridis colours) with the
# per-period fitted VB curves overlaid, faceted by sex.
vb_plot_overtime_aru.27.123a4 <-
  vb_pars_bystock_overtime %>%
  flatten() %>%
  keep(., names(.)=="x") %>%
  bind_rows() %>%
  mutate(stock = ifelse(person=='Elvar Hallfredsson', 'aru.27.123a4',
                        ifelse(person=='Pamela J. Woods', 'aru.27.5a14',
                               ifelse(person=='Lise H. Ofstad', 'aru.27.5b6a', person))),
         gender = ifelse(gender=='F', 'Female',
                         ifelse(gender=='M', 'Male',
                                ifelse(gender=='U', 'Unidentified', gender))),
         Year = as.factor(year)) %>%
  filter(!is.na(stock)) %>%
  unite(age_year, age, year, remove = FALSE) %>%
  rename(`Length (cm)` = length_cm, Age = age, Stock = stock, Gender = gender) %>%
  filter(Age < 30, Stock== 'aru.27.123a4') %>%
  ggplot() +
  geom_boxplot(aes(x = Age, y = `Length (cm)`, group = age_year, color = Year, fill = Year), alpha = 0.2) +
  geom_line(aes(x = Age, y = fit, color = Year),
            data = vb_pars_bystock_overtime %>%
              flatten() %>%
              keep(., names(.)=="x") %>%
              bind_rows() %>%
              mutate(stock = ifelse(person=='Elvar Hallfredsson', 'aru.27.123a4',
                                    ifelse(person=='Pamela J. Woods', 'aru.27.5a14',
                                           ifelse(person=='Lise H. Ofstad', 'aru.27.5b6a', person))),
                     gender = ifelse(gender=='F', 'Female',
                                     ifelse(gender=='M', 'Male',
                                            ifelse(gender=='U', 'Unidentified', gender))),
                     Year = as.factor(year)) %>%
              filter(!is.na(stock), !is.na(fit)) %>%
              unite(age_stock, age, stock, remove = FALSE) %>%
              rename(Age = age, Stock = stock, Gender = gender) %>%
              filter(Age < 30, Stock=='aru.27.123a4') )+
  theme_bw() +
  scale_fill_viridis_d() +
  scale_color_viridis_d() +
  facet_wrap(~Gender, ncol = 1)
# aru.27.5b6a: same period-coloured boxplot + fitted-curve layout as above.
vb_plot_overtime_aru.27.5b6a <-
  vb_pars_bystock_overtime %>%
  flatten() %>%
  keep(., names(.)=="x") %>%
  bind_rows() %>%
  mutate(stock = ifelse(person=='Elvar Hallfredsson', 'aru.27.123a4',
                        ifelse(person=='Pamela J. Woods', 'aru.27.5a14',
                               ifelse(person=='Lise H. Ofstad', 'aru.27.5b6a', person))),
         gender = ifelse(gender=='F', 'Female',
                         ifelse(gender=='M', 'Male',
                                ifelse(gender=='U', 'Unidentified', gender))),
         Year = as.factor(year)) %>%
  filter(!is.na(stock)) %>%
  unite(age_year, age, year, remove = FALSE) %>%
  rename(`Length (cm)` = length_cm, Age = age, Stock = stock, Gender = gender) %>%
  filter(Age < 30, Stock== 'aru.27.5b6a') %>%
  ggplot() +
  geom_boxplot(aes(x = Age, y = `Length (cm)`, group = age_year, color = Year, fill = Year), alpha = 0.2) +
  geom_line(aes(x = Age, y = fit, color = Year),
            data = vb_pars_bystock_overtime %>%
              flatten() %>%
              keep(., names(.)=="x") %>%
              bind_rows() %>%
              mutate(stock = ifelse(person=='Elvar Hallfredsson', 'aru.27.123a4',
                                    ifelse(person=='Pamela J. Woods', 'aru.27.5a14',
                                           ifelse(person=='Lise H. Ofstad', 'aru.27.5b6a', person))),
                     gender = ifelse(gender=='F', 'Female',
                                     ifelse(gender=='M', 'Male',
                                            ifelse(gender=='U', 'Unidentified', gender))),
                     Year = as.factor(year)) %>%
              filter(!is.na(stock), !is.na(fit)) %>%
              unite(age_stock, age, stock, remove = FALSE) %>%
              rename(Age = age, Stock = stock, Gender = gender) %>%
              filter(Age < 30, Stock=='aru.27.5b6a') )+
  theme_bw() +
  scale_fill_viridis_d() +
  scale_color_viridis_d() +
  facet_wrap(~Gender, ncol = 1)
# aru.27.5a14: same period-coloured boxplot + fitted-curve layout as the two
# plots above.
# FIX: unlike its sibling blocks, the geom_line() data's gender recode was
# missing the 'U' -> 'Unidentified' branch, so fitted lines for unsexed fish
# would facet under "U" instead of "Unidentified"; the branch is restored for
# consistency.
vb_plot_overtime_aru.27.5a14 <-
  vb_pars_bystock_overtime %>%
  flatten() %>%
  keep(., names(.)=="x") %>%
  bind_rows() %>%
  mutate(stock = ifelse(person=='Elvar Hallfredsson', 'aru.27.123a4',
                        ifelse(person=='Pamela J. Woods', 'aru.27.5a14',
                               ifelse(person=='Lise H. Ofstad', 'aru.27.5b6a', person))),
         gender = ifelse(gender=='F', 'Female',
                         ifelse(gender=='M', 'Male',
                                ifelse(gender=='U', 'Unidentified', gender))),
         Year = as.factor(year)) %>%
  filter(!is.na(stock)) %>%
  unite(age_year, age, year, remove = FALSE) %>%
  rename(`Length (cm)` = length_cm, Age = age, Stock = stock, Gender = gender) %>%
  filter(Age < 30, Stock== 'aru.27.5a14') %>%
  ggplot() +
  geom_boxplot(aes(x = Age, y = `Length (cm)`, group = age_year, color = Year, fill = Year), alpha = 0.2) +
  geom_line(aes(x = Age, y = fit, color = Year),
            data = vb_pars_bystock_overtime %>%
              flatten() %>%
              keep(., names(.)=="x") %>%
              bind_rows() %>%
              mutate(stock = ifelse(person=='Elvar Hallfredsson', 'aru.27.123a4',
                                    ifelse(person=='Pamela J. Woods', 'aru.27.5a14',
                                           ifelse(person=='Lise H. Ofstad', 'aru.27.5b6a', person))),
                     gender = ifelse(gender=='F', 'Female',
                                     ifelse(gender=='M', 'Male',
                                            ifelse(gender=='U', 'Unidentified', gender))),
                     Year = as.factor(year)) %>%
              filter(!is.na(stock), !is.na(fit)) %>%
              unite(age_stock, age, stock, remove = FALSE) %>%
              rename(Age = age, Stock = stock, Gender = gender) %>%
              filter(Age < 30, Stock=='aru.27.5a14') )+
  theme_bw() +
  scale_fill_viridis_d() +
  scale_color_viridis_d() +
  facet_wrap(~Gender, ncol = 1)
####--------------Exponential length-weigt curves -------------####
####--------------By division and overall -------------####
# Length-weight (W = a * L^b) regression parameters per division x gender,
# fitted on the log scale over all years; one row per coefficient, exported
# as a CSV table.
lw_pars <-
  all %>%
  left_join(all_st) %>%
  filter(!is.na(weight_g), !is.na(length_cm), !is.na(gender), length_cm > 0, !is.na(division)) %>%
  unite(div_gen, division, gender, remove = F) %>%
  split(., .$div_gen) %>% #.[[1]]->x
  purrr::map(function(x){
    #print(paste0(unique(x$division), '_', unique(x$gender)))
    # log-log fit; weight converted g -> kg.
    lm(log(weight_g/1e3)~log(length_cm),x) %>%
      broom::tidy() %>%
      mutate(term= ifelse(term=='(Intercept)', 'Intercept', term),
             term= ifelse(term=='log(length_cm)', 'Log Length (cm)', term)) %>%
      # Back-transform the log-scale intercept so 'Intercept' is the
      # multiplicative coefficient `a`.
      mutate(division = unique(x$division),
             gender = unique(x$gender),
             estimate = ifelse(term=='Intercept', exp(estimate), estimate))
  }) %>%
  bind_rows() %>%
  write_csv('R/biol_figs_output/lwpars_bydivision.csv')
# Length-weight regression parameters by division x gender for the recent
# period only; mirrors `lw_pars` above.
# FIX(review): the original filter was `year == 20162019`, a literal that can
# never match and silently produced an empty table; `2016:2019` (the two
# fused years) is assumed here -- TODO confirm the intended range.
lw_pars_2018 <-
  all %>%
  left_join(all_st) %>%
  filter(year %in% 2016:2019, !is.na(weight_g), !is.na(length_cm), !is.na(gender), length_cm > 0) %>%
  unite(div_gen, division, gender, remove = F) %>%
  split(., .$div_gen) %>% #.[[1]]->x
  purrr::map(function(x){
    #print(paste0(unique(x$division), '_', unique(x$gender)))
    # log-log fit; intercept back-transformed to the multiplicative `a`.
    lm(log(weight_g/1e3)~log(length_cm),x) %>%
      broom::tidy() %>%
      mutate(term= ifelse(term=='(Intercept)', 'Intercept', term),
             term= ifelse(term=='log(length_cm)', 'Log Length (cm)', term)) %>%
      mutate(division = unique(x$division),
             gender = unique(x$gender),
             estimate = ifelse(term=='Intercept', exp(estimate), estimate))
  }) %>%
  bind_rows() %>%
  write_csv('R/biol_figs_output/lwpars_bydivision_2018.csv')
# Side-by-side export of the all-years vs recent-period LW parameters.
# NOTE(review): bind_cols relies on identical row order between the two
# tables (the right_join re-aligns lw_pars_2018 onto lw_pars'
# term/division/gender rows) and auto-suffixes the duplicated column names --
# verify the resulting CSV headers are as intended.
lw_pars %>%
  bind_cols(lw_pars_2018 %>% right_join(lw_pars %>% select(term, division, gender))) %>%
  write_csv('R/biol_figs_output/lwpars_bydivision_both.csv')
####--------------By stock -------------####
# Length-weight fits per stock (identified via data provider) x gender.
# Each list element carries:
#   mod - tidied coefficients (intercept back-transformed to `a`),
#   x   - the input rows plus predicted weights (`fit`) over lengths 0-120 cm
#         appended via full_join for plotting.
lw_pars_bystock <-
  all %>%
  left_join(all_st) %>%
  filter(!is.na(weight_g), !is.na(length_cm), !is.na(gender), length_cm > 0, !is.na(division)) %>%
  # Fractional age by sampling month; short provider tag used as stock key.
  mutate(age = age + (month-1)/12,
         person_sh = person %>% substr(., 1, 3)) %>%
  unite(st_gen, person_sh, gender, remove = F) %>%
  split(., .$st_gen) %>% #.[[1]]->x
  purrr::map(function(x){
    #print(paste0(unique(x$division), '_', unique(x$gender)))
    # Prediction grids (prA is unused here; kept from the growth-curve code).
    prL<-seq(0,120,1)
    prA<-seq(0,60,1)
    mod <- lm(log(weight_g/1e3)~log(length_cm),x)
    # Back-transform predictions from the log scale.
    fit <-exp(predict(mod, data.frame(length_cm=prL),type="response"))
    y <-
      list(
        mod = mod %>%
          broom::tidy() %>%
          mutate(term= ifelse(term=='(Intercept)', 'Intercept', term),
                 term= ifelse(term=='log(length_cm)', 'Log Length (cm)', term)) %>%
          mutate(person = unique(x$person),
                 gender = unique(x$gender),
                 estimate = ifelse(term=='Intercept', exp(estimate), estimate)),
        x = full_join(x, data.frame(length_cm = prL,
                                    fit,
                                    person = unique(x$person),
                                    gender = unique(x$gender))) %>%
          select(-c(st_gen, person_sh))
      )
    return(y)
  })
# Stack the per-stock/gender LW coefficient tables ("mod" element of each
# fit) into one CSV.
lw_pars_bystock %>%
  purrr::map("mod") %>%
  bind_rows() %>%
  write_csv('R/biol_figs_output/lwpars_bystock.csv')
# Length-weight by stock: boxplots of observed weight per rounded length,
# overlaid with the fitted curves, facetted by gender.
lw_plot_bystock <-
  lw_pars_bystock %>%
  flatten() %>%
  keep(., names(.)=="x") %>%
  bind_rows() %>%
  # Map data-provider names onto ICES stock codes; lengths rounded to whole
  # cm so boxplots group per length bin.
  mutate(stock = ifelse(person=='Elvar Hallfredsson', 'aru.27.123a4',
                        ifelse(person=='Pamela J. Woods', 'aru.27.5a14',
                               ifelse(person=='Lise H. Ofstad', 'aru.27.5b6a', person))),
         gender = ifelse(gender=='F', 'Female',
                         ifelse(gender=='M', 'Male',
                                ifelse(gender=='U', 'Unidentified', gender))),
         `Weight (kg)` = weight_g/1000,
         length_cm = round(length_cm)) %>%
  filter(!is.na(stock)) %>%
  unite(len_stock, length_cm, stock, remove = FALSE) %>%
  rename(`Length (cm)` = length_cm, Stock = stock, Gender = gender) %>%
  filter(`Length (cm)` < 54, `Length (cm)` > 10) %>%
  ggplot() +
  geom_boxplot(aes(x = `Length (cm)`, y = `Weight (kg)`, group = len_stock, color = Stock, fill = Stock), alpha = 0.1) +
  # Fitted curves: same list, restricted to rows carrying predictions (`fit`).
  geom_line(aes(x = `Length (cm)`, y = fit, color = Stock),
            data = lw_pars_bystock %>%
              flatten() %>%
              keep(., names(.)=="x") %>%
              bind_rows() %>%
              mutate(stock = ifelse(person=='Elvar Hallfredsson', 'aru.27.123a4',
                                    ifelse(person=='Pamela J. Woods', 'aru.27.5a14',
                                           ifelse(person=='Lise H. Ofstad', 'aru.27.5b6a', person))),
                     gender = ifelse(gender=='F', 'Female',
                                     ifelse(gender=='M', 'Male',
                                            ifelse(gender=='U', 'Unidentified', gender))),
                     length_cm = round(length_cm)) %>%
              filter(!is.na(stock), !is.na(fit)) %>%
              unite(len_stock, length_cm, stock, remove = FALSE) %>%
              rename(`Length (cm)` = length_cm, Stock = stock, Gender = gender) %>%
              filter(`Length (cm)` < 54, `Length (cm)` > 10) )+
  theme_bw() +
  facet_wrap(~Gender, ncol = 1)
# Length-weight fits per stock x gender x YEAR. Like `lw_pars_bystock` but
# split annually; fits are wrapped in try() because some year/gender cells
# may be too sparse to fit, in which case mod stays NULL and the raw rows are
# returned unchanged.
lw_pars_bystock_overtime <-
  all %>%
  left_join(all_st) %>%
  filter(!is.na(weight_g), !is.na(length_cm), !is.na(gender), length_cm > 0, !is.na(division)) %>%
  mutate(age = age + (month-1)/12,
         person_sh = person %>% substr(., 1, 3)) %>%
  unite(st_gen, person_sh, gender, year, remove = F) %>%
  split(., .$st_gen) %>% #.[[19]]->x
  purrr::map(function(x){
    #print(paste0(unique(x$division), '_', unique(x$gender)))
    prL<-seq(0,120,1)
    prA<-seq(0,60,1)
    mod <- NULL; fit <- NULL
    # Sparse cells may fail to fit; swallow the error and fall through.
    try(
      {mod <- lm(log(weight_g/1e3)~log(length_cm),x)
      fit <-exp(predict(mod, data.frame(length_cm=prL),type="response"))
      },
      silent = TRUE)
    if(!is.null(mod)){
      y <-
        list(
          mod = mod %>%
            broom::tidy() %>%
            mutate(term= ifelse(term=='(Intercept)', 'Intercept', term),
                   term= ifelse(term=='log(length_cm)', 'Log Length (cm)', term)) %>%
            mutate(person = unique(x$person),
                   gender = unique(x$gender),
                   year = unique(x$year),
                   estimate = ifelse(term=='Intercept', exp(estimate), estimate)),
          # Append per-length predictions for plotting.
          x = full_join(x, data.frame(length_cm = prL,
                                      fit,
                                      person = unique(x$person),
                                      gender = unique(x$gender),
                                      year = unique(x$year))) %>%
            select(-c(st_gen, person_sh))
          )
      } else { y <- list (mod = NULL, x = x %>% select(-c(st_gen, person_sh)))}
    return(y)
  })
# Annual length-weight plot for aru.27.123a4: boxplots of observed weight per
# rounded length and year, overlaid with year-specific fitted curves,
# facetted by gender.
lw_plot_overtime_aru.27.123a4 <-
  lw_pars_bystock_overtime %>%
  flatten() %>%
  keep(., names(.)=="x") %>%
  bind_rows() %>%
  mutate(stock = ifelse(person=='Elvar Hallfredsson', 'aru.27.123a4',
                        ifelse(person=='Pamela J. Woods', 'aru.27.5a14',
                               ifelse(person=='Lise H. Ofstad', 'aru.27.5b6a', person))),
         gender = ifelse(gender=='F', 'Female',
                         ifelse(gender=='M', 'Male',
                                ifelse(gender=='U', 'Unidentified', gender))),
         Year = as.factor(year),
         `Weight (kg)` = weight_g/1000,
         length_cm = round(length_cm)) %>%
  # !is.na(fit) also drops year cells whose model failed to fit.
  filter(!is.na(stock), !is.na(fit)) %>%
  unite(len_year, length_cm, year, remove = FALSE) %>%
  rename(`Length (cm)` = length_cm, Stock = stock, Gender = gender) %>%
  filter(`Length (cm)` < 54, `Length (cm)` > 10, Stock== 'aru.27.123a4') %>%
  ggplot() +
  geom_boxplot(aes(x = `Length (cm)`, y = `Weight (kg)`, group = len_year, color = Year, fill = Year), alpha = 0.2) +
  geom_line(aes(x = `Length (cm)`, y = fit, color = Year),
            data = lw_pars_bystock_overtime%>%
              flatten() %>%
              keep(., names(.)=="x") %>%
              bind_rows() %>%
              mutate(stock = ifelse(person=='Elvar Hallfredsson', 'aru.27.123a4',
                                    ifelse(person=='Pamela J. Woods', 'aru.27.5a14',
                                           ifelse(person=='Lise H. Ofstad', 'aru.27.5b6a', person))),
                     gender = ifelse(gender=='F', 'Female',
                                     ifelse(gender=='M', 'Male',
                                            ifelse(gender=='U', 'Unidentified', gender))),
                     Year = as.factor(year),
                     length_cm = round(length_cm)) %>%
              filter(!is.na(stock), !is.na(fit)) %>%
              unite(len_stock, length_cm, stock, remove = FALSE) %>%
              rename(`Length (cm)` = length_cm, Stock = stock, Gender = gender) %>%
              filter(`Length (cm)` < 54, `Length (cm)` > 10, Stock=='aru.27.123a4') )+
  theme_bw() +
  scale_fill_viridis_d() +
  scale_color_viridis_d() +
  facet_wrap(~Gender, ncol = 1)
# Annual length-weight plot for aru.27.5a14; same construction as the
# aru.27.123a4 plot above, differing only in the stock filter.
lw_plot_overtime_aru.27.5a14 <-
  lw_pars_bystock_overtime %>%
  flatten() %>%
  keep(., names(.)=="x") %>%
  bind_rows() %>%
  mutate(stock = ifelse(person=='Elvar Hallfredsson', 'aru.27.123a4',
                        ifelse(person=='Pamela J. Woods', 'aru.27.5a14',
                               ifelse(person=='Lise H. Ofstad', 'aru.27.5b6a', person))),
         gender = ifelse(gender=='F', 'Female',
                         ifelse(gender=='M', 'Male',
                                ifelse(gender=='U', 'Unidentified', gender))),
         Year = as.factor(year),
         `Weight (kg)` = weight_g/1000,
         length_cm = round(length_cm)) %>%
  # !is.na(fit) also drops year cells whose model failed to fit.
  filter(!is.na(stock), !is.na(fit)) %>%
  unite(len_year, length_cm, year, remove = FALSE) %>%
  rename(`Length (cm)` = length_cm, Stock = stock, Gender = gender) %>%
  filter(`Length (cm)` < 54, `Length (cm)` > 10, Stock== 'aru.27.5a14') %>%
  ggplot() +
  geom_boxplot(aes(x = `Length (cm)`, y = `Weight (kg)`, group = len_year, color = Year, fill = Year), alpha = 0.2) +
  geom_line(aes(x = `Length (cm)`, y = fit, color = Year),
            data = lw_pars_bystock_overtime %>%
              flatten() %>%
              keep(., names(.)=="x") %>%
              bind_rows() %>%
              mutate(stock = ifelse(person=='Elvar Hallfredsson', 'aru.27.123a4',
                                    ifelse(person=='Pamela J. Woods', 'aru.27.5a14',
                                           ifelse(person=='Lise H. Ofstad', 'aru.27.5b6a', person))),
                     gender = ifelse(gender=='F', 'Female',
                                     ifelse(gender=='M', 'Male',
                                            ifelse(gender=='U', 'Unidentified', gender))),
                     Year = as.factor(year),
                     length_cm = round(length_cm)) %>%
              filter(!is.na(stock), !is.na(fit)) %>%
              unite(len_stock, length_cm, stock, remove = FALSE) %>%
              rename(`Length (cm)` = length_cm, Stock = stock, Gender = gender) %>%
              filter(`Length (cm)` < 54, `Length (cm)` > 10, Stock=='aru.27.5a14') )+
  theme_bw() +
  scale_fill_viridis_d() +
  scale_color_viridis_d() +
  facet_wrap(~Gender, ncol = 1)
# Annual length-weight plot for aru.27.5b6a; same construction as the two
# sibling plots above.
# FIX(review): the boxplot layer used `alpha = 1` while both sibling plots
# use `alpha = 0.2`; aligned for visual consistency -- confirm this was not
# an intentional exception for the Faroese stock.
lw_plot_overtime_aru.27.5b6a <-
  lw_pars_bystock_overtime %>%
  flatten() %>%
  keep(., names(.)=="x") %>%
  bind_rows() %>%
  mutate(stock = ifelse(person=='Elvar Hallfredsson', 'aru.27.123a4',
                        ifelse(person=='Pamela J. Woods', 'aru.27.5a14',
                               ifelse(person=='Lise H. Ofstad', 'aru.27.5b6a', person))),
         gender = ifelse(gender=='F', 'Female',
                         ifelse(gender=='M', 'Male',
                                ifelse(gender=='U', 'Unidentified', gender))),
         Year = as.factor(year),
         `Weight (kg)` = weight_g/1000,
         length_cm = round(length_cm)) %>%
  filter(!is.na(stock), !is.na(fit)) %>%
  unite(len_year, length_cm, year, remove = FALSE) %>%
  rename(`Length (cm)` = length_cm, Stock = stock, Gender = gender) %>%
  filter(`Length (cm)` < 54, `Length (cm)` > 10, Stock== 'aru.27.5b6a') %>%
  ggplot() +
  geom_boxplot(aes(x = `Length (cm)`, y = `Weight (kg)`, group = len_year, color = Year, fill = Year), alpha = 0.2) +
  geom_line(aes(x = `Length (cm)`, y = fit, color = Year),
            data = lw_pars_bystock_overtime %>%
              flatten() %>%
              keep(., names(.)=="x") %>%
              bind_rows() %>%
              mutate(stock = ifelse(person=='Elvar Hallfredsson', 'aru.27.123a4',
                                    ifelse(person=='Pamela J. Woods', 'aru.27.5a14',
                                           ifelse(person=='Lise H. Ofstad', 'aru.27.5b6a', person))),
                     gender = ifelse(gender=='F', 'Female',
                                     ifelse(gender=='M', 'Male',
                                            ifelse(gender=='U', 'Unidentified', gender))),
                     Year = as.factor(year),
                     length_cm = round(length_cm)) %>%
              filter(!is.na(stock), !is.na(fit)) %>%
              unite(len_stock, length_cm, stock, remove = FALSE) %>%
              rename(`Length (cm)` = length_cm, Stock = stock, Gender = gender) %>%
              filter(`Length (cm)` < 54, `Length (cm)` > 10, Stock=='aru.27.5b6a') )+
  theme_bw() +
  scale_fill_viridis_d() +
  scale_color_viridis_d() +
  facet_wrap(~Gender, ncol = 1)
####--------------Maturity ogives -------------####
####--------------By division and overall -------------####
# Logistic maturity ogive parameters per division x gender, plus the derived
# L50 (length at 50% maturity = -b0/b1). Certain Faroese hauls/years are
# blanked first because their maturity records are unreliable (presumably --
# TODO confirm with the data provider).
mat_pars <-
  all %>%
  left_join(all_st) %>%
  mutate(maturity_stage = ifelse(person=='Lise H. Ofstad' & (haul_id %in% c(18080027, 18080028) | year %in% c(1994:1998)), NA, maturity_stage),
         maturity = ifelse(person=='Lise H. Ofstad' & (haul_id %in% c(18080027, 18080028) | year %in% c(1994:1998)), NA, maturity),
         spawning = ifelse(person=='Lise H. Ofstad' & (haul_id %in% c(18080027, 18080028) | year %in% c(1994:1998)), NA, spawning)) %>%
  filter(!is.na(maturity), !is.na(length_cm), !is.na(gender), length_cm > 0, gender != 'U', !is.na(division)) %>%
  unite(div_gen, division, gender, remove = F) %>%
  split(., .$div_gen) %>% #.[[1]]->x
  purrr::map(function(x){
    #print(paste0(unique(x$division), '_', unique(x$gender)))
    # Binomial GLM of maturity (1 = Mature) on length.
    tmp <-
      glm(mat~length_cm, data=x %>% mutate(mat = ifelse(maturity=='Mature', 1, 0)), family=binomial(link=logit)) %>%
      broom::tidy() %>%
      mutate(division = unique(x$division),
             gender = unique(x$gender),
             term = ifelse(term=='(Intercept)', 'Intercept',
                           ifelse(term=='length_cm', 'Length (cm)', term))
             )
    # Append L50 = -intercept / slope as an extra row.
    bind_rows(tmp,tibble(term = 'L50',
                         estimate = - tmp$estimate[tmp$term=='Intercept']/tmp$estimate[tmp$term=='Length (cm)'],
                         division = unique(tmp$division),
                         gender = unique(tmp$gender)))
  }) %>%
  bind_rows() %>%
  write_csv('R/biol_figs_output/matpars_bydivision.csv')
# Maturity ogive parameters by division x gender for the recent period only;
# mirrors `mat_pars` above.
# FIX(review): the original filter was `year == 20162019`, a literal that can
# never match and silently produced an empty table; `2016:2019` (the two
# fused years) is assumed here -- TODO confirm the intended range.
mat_pars_2018 <-
  all %>%
  left_join(all_st) %>%
  mutate(maturity_stage = ifelse(person=='Lise H. Ofstad' & (haul_id %in% c(18080027, 18080028) | year %in% c(1994:1998)), NA, maturity_stage),
         maturity = ifelse(person=='Lise H. Ofstad' & (haul_id %in% c(18080027, 18080028) | year %in% c(1994:1998)), NA, maturity),
         spawning = ifelse(person=='Lise H. Ofstad' & (haul_id %in% c(18080027, 18080028) | year %in% c(1994:1998)), NA, spawning)) %>%
  filter(year %in% 2016:2019, !is.na(maturity), !is.na(length_cm), !is.na(gender), length_cm > 0, gender != 'U') %>%
  unite(div_gen, division, gender, remove = F) %>%
  split(., .$div_gen) %>% #.[[1]]->x
  purrr::map(function(x){
    #print(paste0(unique(x$division), '_', unique(x$gender)))
    # Binomial GLM of maturity (1 = Mature) on length.
    tmp <-
      glm(mat~length_cm, data=x %>% mutate(mat = ifelse(maturity=='Mature', 1, 0)), family=binomial(link=logit)) %>%
      broom::tidy() %>%
      mutate(division = unique(x$division),
             gender = unique(x$gender),
             term = ifelse(term=='(Intercept)', 'Intercept',
                           ifelse(term=='length_cm', 'Length (cm)', term))
             )
    # Append L50 = -intercept / slope as an extra row.
    bind_rows(tmp,tibble(term = 'L50',
                         estimate = - tmp$estimate[tmp$term=='Intercept']/tmp$estimate[tmp$term=='Length (cm)'],
                         division = unique(tmp$division),
                         gender = unique(tmp$gender)))
  }) %>%
  bind_rows() %>%
  write_csv('R/biol_figs_output/matpars_bydivision_2018.csv')
# Side-by-side export of the all-years vs recent-period maturity parameters.
# NOTE(review): bind_cols relies on row-order alignment via the right_join
# skeleton and auto-suffixes duplicated column names -- verify the CSV.
mat_pars %>%
  bind_cols(mat_pars_2018 %>% right_join(mat_pars %>% select(term, division, gender))) %>%
  write_csv('R/biol_figs_output/matpars_bydivision_both.csv')
####--------------By stock -------------####
# Maturity ogive fits per stock (via data provider) x gender. Each element
# carries `mod` (tidied coefficients plus an L50 row) and `x` (input rows
# plus predicted proportion mature over lengths 0-120 cm for plotting).
# NOTE(review): the 'Intercept' estimate is exponentiated and the
# 'log(length_cm)' rename is a no-op here -- both look copied from the
# length-weight code; L50 itself is computed from the raw coefficients and is
# unaffected. Confirm whether exp(intercept) is really wanted in the CSV.
mat_pars_bystock <-
  all %>%
  left_join(all_st) %>%
  mutate(maturity_stage = ifelse(person=='Lise H. Ofstad' & (haul_id %in% c(18080027, 18080028) | year %in% c(1994:1998)), NA, maturity_stage),
         maturity = ifelse(person=='Lise H. Ofstad' & (haul_id %in% c(18080027, 18080028) | year %in% c(1994:1998)), NA, maturity),
         spawning = ifelse(person=='Lise H. Ofstad' & (haul_id %in% c(18080027, 18080028) | year %in% c(1994:1998)), NA, spawning)) %>%
  filter(!is.na(maturity), !is.na(length_cm), !is.na(gender), length_cm > 0, gender != 'U', !is.na(division)) %>%
  mutate(age = age + (month-1)/12,
         person_sh = person %>% substr(., 1, 3)) %>%
  unite(st_gen, person_sh, gender, remove = F) %>%
  split(., .$st_gen) %>% #.[[1]]->x
  purrr::map(function(x){
    #print(paste0(unique(x$division), '_', unique(x$gender)))
    prL<-seq(0,120,1)
    prA<-seq(0,60,1)
    mod <- glm(mat~length_cm, data=x %>% mutate(mat = ifelse(maturity=='Mature', 1, 0)), family=binomial(link=logit))
    fit <- predict(mod, data.frame(length_cm=prL),type="response")
    y <-
      list(
        mod = mod %>%
          broom::tidy() %>%
          mutate(term= ifelse(term=='(Intercept)', 'Intercept', term),
                 term= ifelse(term=='log(length_cm)', 'Log Length (cm)', term)) %>%
          mutate(person = unique(x$person),
                 gender = unique(x$gender),
                 estimate = ifelse(term=='Intercept', exp(estimate), estimate)) %>%
          bind_rows(tibble(term = 'L50',
                           estimate = - coef(mod)[1]/coef(mod)[2],
                           person = unique(x$person),
                           gender = unique(x$gender))),
        x = full_join(x, data.frame(length_cm = prL,
                                    fit,
                                    person = unique(x$person),
                                    gender = unique(x$gender))) %>%
          select(-c(st_gen, person_sh))
      )
    return(y)
  })
# Stack the per-stock/gender maturity model coefficient tables ("mod" element
# of each fit) into one CSV.
mat_pars_bystock %>%
  purrr::map("mod") %>%
  bind_rows() %>%
  write_csv('R/biol_figs_output/matpars_bystock.csv')
# Maturity ogives by stock: observed proportion mature per rounded length
# (mature count / total count per length bin), overlaid with the fitted
# logistic curves, facetted by gender.
# NOTE(review): the denominator (n_tot) counts ALL rows per length bin,
# including the appended prediction rows and any NA-maturity rows -- confirm
# this slight dilution of the observed proportions is intended.
mat_plot_bystock <-
  mat_pars_bystock %>%
  flatten() %>%
  keep(., names(.)=="x") %>%
  bind_rows() %>%
  mutate(stock = ifelse(person=='Elvar Hallfredsson', 'aru.27.123a4',
                        ifelse(person=='Pamela J. Woods', 'aru.27.5a14',
                               ifelse(person=='Lise H. Ofstad', 'aru.27.5b6a', person))),
         gender = ifelse(gender=='F', 'Female',
                         ifelse(gender=='M', 'Male',
                                ifelse(gender=='U', 'Unidentified', gender))),
         length_cm = round(length_cm)) %>%
  filter(!is.na(stock)) %>%
  # Numerator: number of mature fish per stock/gender/length bin.
  group_by(stock, gender, length_cm, maturity) %>%
  count() %>%
  filter(maturity == 'Mature') %>%
  # Denominator: total observations per bin, joined on.
  left_join(mat_pars_bystock %>%
              flatten() %>%
              keep(., names(.)=="x") %>%
              bind_rows() %>%
              mutate(stock = ifelse(person=='Elvar Hallfredsson', 'aru.27.123a4',
                                    ifelse(person=='Pamela J. Woods', 'aru.27.5a14',
                                           ifelse(person=='Lise H. Ofstad', 'aru.27.5b6a', person))),
                     gender = ifelse(gender=='F', 'Female',
                                     ifelse(gender=='M', 'Male',
                                            ifelse(gender=='U', 'Unidentified', gender))),
                     length_cm = round(length_cm)) %>%
              group_by(stock, gender, length_cm) %>%
              count(name = 'n_tot')) %>%
  mutate(p = n/n_tot) %>%
  unite(len_stock, length_cm, stock, remove = FALSE) %>%
  rename(`Length (cm)` = length_cm, Stock = stock, Gender = gender, `Proportion mature` = p) %>%
  filter(`Length (cm)` < 54, `Length (cm)` > 10) %>%
  ggplot() +
  geom_point(aes(x = `Length (cm)`, y = `Proportion mature`, color = Stock, fill = Stock)) +
  geom_line(aes(x = `Length (cm)`, y = fit, color = Stock),
            data = mat_pars_bystock %>%
              flatten() %>%
              keep(., names(.)=="x") %>%
              bind_rows() %>%
              mutate(stock = ifelse(person=='Elvar Hallfredsson', 'aru.27.123a4',
                                    ifelse(person=='Pamela J. Woods', 'aru.27.5a14',
                                           ifelse(person=='Lise H. Ofstad', 'aru.27.5b6a', person))),
                     gender = ifelse(gender=='F', 'Female',
                                     ifelse(gender=='M', 'Male',
                                            ifelse(gender=='U', 'Unidentified', gender))),
                     length_cm = round(length_cm)) %>%
              filter(!is.na(stock), !is.na(fit)) %>%
              unite(len_stock, length_cm, stock, remove = FALSE) %>%
              rename(`Length (cm)` = length_cm, Stock = stock, Gender = gender) %>%
              filter(`Length (cm)` < 54, `Length (cm)` > 10) )+
  theme_bw() +
  facet_wrap(~Gender, ncol = 1)
# Maturity ogive fits per stock x gender x YEAR. Like `mat_pars_bystock` but
# split annually; fits are wrapped in try() because sparse year/gender cells
# may fail, in which case mod stays NULL and raw rows are returned.
# NOTE(review): unlike the by-stock version, this filter does not require
# !is.na(maturity) or !is.na(division) -- the GLM drops NA-maturity rows
# itself, but confirm the division difference is intentional.
mat_pars_bystock_overtime <-
  all %>%
  left_join(all_st) %>%
  mutate(maturity_stage = ifelse(person=='Lise H. Ofstad' & (haul_id %in% c(18080027, 18080028) | year %in% c(1994:1998)), NA, maturity_stage),
         maturity = ifelse(person=='Lise H. Ofstad' & (haul_id %in% c(18080027, 18080028) | year %in% c(1994:1998)), NA, maturity),
         spawning = ifelse(person=='Lise H. Ofstad' & (haul_id %in% c(18080027, 18080028) | year %in% c(1994:1998)), NA, spawning)) %>%
  filter( !is.na(length_cm), !is.na(gender), length_cm > 0, gender != 'U') %>%
  mutate(age = age + (month-1)/12,
         person_sh = person %>% substr(., 1, 3)) %>%
  unite(st_gen, person_sh, gender, year, remove = F) %>%
  split(., .$st_gen) %>% #.[[19]]->x
  purrr::map(function(x){
    #print(paste0(unique(x$division), '_', unique(x$gender)))
    prL<-seq(0,120,1)
    prA<-seq(0,60,1)
    mod <- NULL; fit <- NULL
    # Sparse cells may fail to fit; swallow the error and fall through.
    try(
      {mod <- glm(mat~length_cm, data=x %>% mutate(mat = ifelse(maturity=='Mature', 1, 0)), family=binomial(link=logit))
      fit <- predict(mod, data.frame(length_cm=prL),type="response")
      },
      silent = TRUE)
    if(!is.null(mod)){
      y <-
        list(
          mod = mod %>%
            broom::tidy() %>%
            mutate(term= ifelse(term=='(Intercept)', 'Intercept', term),
                   term= ifelse(term=='log(length_cm)', 'Log Length (cm)', term)) %>%
            mutate(person = unique(x$person),
                   gender = unique(x$gender),
                   estimate = ifelse(term=='Intercept', exp(estimate), estimate)) %>%
            # L50 = -intercept / slope, from the raw (un-exponentiated) coefs.
            bind_rows(tibble(term = 'L50',
                             estimate = - coef(mod)[1]/coef(mod)[2],
                             person = unique(x$person),
                             gender = unique(x$gender),
                             year = unique(x$year))),
          x = full_join(x, data.frame(length_cm = prL,
                                      fit,
                                      person = unique(x$person),
                                      gender = unique(x$gender),
                                      year = unique(x$year))) %>%
            select(-c(st_gen, person_sh))
          )
      } else { y <- list (mod = NULL, x = x %>% select(-c(st_gen, person_sh)))}
    return(y)
  })
# Annual maturity ogives for aru.27.123a4: observed proportion mature per
# rounded length and year, overlaid with year-specific fitted logistic
# curves, facetted by gender.
mat_plot_overtime_aru.27.123a4 <-
  mat_pars_bystock_overtime %>%
  flatten() %>%
  keep(., names(.)=="x") %>%
  bind_rows() %>%
  mutate(stock = ifelse(person=='Elvar Hallfredsson', 'aru.27.123a4',
                        ifelse(person=='Pamela J. Woods', 'aru.27.5a14',
                               ifelse(person=='Lise H. Ofstad', 'aru.27.5b6a', person))),
         gender = ifelse(gender=='F', 'Female',
                         ifelse(gender=='M', 'Male',
                                ifelse(gender=='U', 'Unidentified', gender))),
         Year = as.factor(year),
         length_cm = round(length_cm)) %>%
  filter(!is.na(stock)) %>%
  # Numerator: mature count per stock/gender/length/year bin.
  group_by(stock, gender, length_cm, maturity, Year) %>%
  count() %>%
  filter(maturity == 'Mature') %>%
  # Denominator: total observations per bin, joined on.
  left_join(mat_pars_bystock_overtime %>%
              flatten() %>%
              keep(., names(.)=="x") %>%
              bind_rows() %>%
              mutate(stock = ifelse(person=='Elvar Hallfredsson', 'aru.27.123a4',
                                    ifelse(person=='Pamela J. Woods', 'aru.27.5a14',
                                           ifelse(person=='Lise H. Ofstad', 'aru.27.5b6a', person))),
                     gender = ifelse(gender=='F', 'Female',
                                     ifelse(gender=='M', 'Male',
                                            ifelse(gender=='U', 'Unidentified', gender))),
                     Year = as.factor(year),
                     length_cm = round(length_cm)) %>%
              group_by(stock, gender, length_cm, Year) %>%
              count(name = 'n_tot')) %>%
  mutate(p = n/n_tot) %>%
  unite(len_year, length_cm, Year, remove = FALSE) %>%
  rename(`Length (cm)` = length_cm, Stock = stock, Gender = gender, `Proportion mature` = p) %>%
  filter(`Length (cm)` < 54, `Length (cm)` > 10, Stock== 'aru.27.123a4') %>%
  ggplot() +
  geom_point(aes(x = `Length (cm)`, y = `Proportion mature`, color = Year, fill = Year), alpha = 0.2) +
  geom_line(aes(x = `Length (cm)`, y = fit, color = Year),
            data = mat_pars_bystock_overtime%>%
              flatten() %>%
              keep(., names(.)=="x") %>%
              bind_rows() %>%
              mutate(stock = ifelse(person=='Elvar Hallfredsson', 'aru.27.123a4',
                                    ifelse(person=='Pamela J. Woods', 'aru.27.5a14',
                                           ifelse(person=='Lise H. Ofstad', 'aru.27.5b6a', person))),
                     gender = ifelse(gender=='F', 'Female',
                                     ifelse(gender=='M', 'Male',
                                            ifelse(gender=='U', 'Unidentified', gender))),
                     Year = as.factor(year),
                     length_cm = round(length_cm)) %>%
              filter(!is.na(stock), !is.na(fit)) %>%
              unite(len_stock, length_cm, stock, remove = FALSE) %>%
              rename(`Length (cm)` = length_cm, Stock = stock, Gender = gender) %>%
              filter(`Length (cm)` < 54, `Length (cm)` > 10, Stock=='aru.27.123a4') )+
  theme_bw() +
  scale_fill_viridis_d() +
  scale_color_viridis_d() +
  facet_wrap(~Gender, ncol = 1)
# Annual maturity ogives for aru.27.5b6a; same construction as the
# aru.27.123a4 plot above, differing only in the stock filter.
mat_plot_overtime_aru.27.5b6a <-
  mat_pars_bystock_overtime %>%
  flatten() %>%
  keep(., names(.)=="x") %>%
  bind_rows() %>%
  mutate(stock = ifelse(person=='Elvar Hallfredsson', 'aru.27.123a4',
                        ifelse(person=='Pamela J. Woods', 'aru.27.5a14',
                               ifelse(person=='Lise H. Ofstad', 'aru.27.5b6a', person))),
         gender = ifelse(gender=='F', 'Female',
                         ifelse(gender=='M', 'Male',
                                ifelse(gender=='U', 'Unidentified', gender))),
         Year = as.factor(year),
         length_cm = round(length_cm)) %>%
  filter(!is.na(stock)) %>%
  # Observed proportion mature = mature count / total count per bin.
  group_by(stock, gender, length_cm, maturity, Year) %>%
  count() %>%
  filter(maturity == 'Mature') %>%
  left_join(mat_pars_bystock_overtime %>%
              flatten() %>%
              keep(., names(.)=="x") %>%
              bind_rows() %>%
              mutate(stock = ifelse(person=='Elvar Hallfredsson', 'aru.27.123a4',
                                    ifelse(person=='Pamela J. Woods', 'aru.27.5a14',
                                           ifelse(person=='Lise H. Ofstad', 'aru.27.5b6a', person))),
                     gender = ifelse(gender=='F', 'Female',
                                     ifelse(gender=='M', 'Male',
                                            ifelse(gender=='U', 'Unidentified', gender))),
                     Year = as.factor(year),
                     length_cm = round(length_cm)) %>%
              group_by(stock, gender, length_cm, Year) %>%
              count(name = 'n_tot')) %>%
  mutate(p = n/n_tot) %>%
  unite(len_year, length_cm, Year, remove = FALSE) %>%
  rename(`Length (cm)` = length_cm, Stock = stock, Gender = gender, `Proportion mature` = p) %>%
  filter(`Length (cm)` < 54, `Length (cm)` > 10, Stock== 'aru.27.5b6a') %>%
  ggplot() +
  geom_point(aes(x = `Length (cm)`, y = `Proportion mature`, color = Year, fill = Year), alpha = 0.2) +
  geom_line(aes(x = `Length (cm)`, y = fit, color = Year),
            data = mat_pars_bystock_overtime%>%
              flatten() %>%
              keep(., names(.)=="x") %>%
              bind_rows() %>%
              mutate(stock = ifelse(person=='Elvar Hallfredsson', 'aru.27.123a4',
                                    ifelse(person=='Pamela J. Woods', 'aru.27.5a14',
                                           ifelse(person=='Lise H. Ofstad', 'aru.27.5b6a', person))),
                     gender = ifelse(gender=='F', 'Female',
                                     ifelse(gender=='M', 'Male',
                                            ifelse(gender=='U', 'Unidentified', gender))),
                     Year = as.factor(year),
                     length_cm = round(length_cm)) %>%
              filter(!is.na(stock), !is.na(fit)) %>%
              unite(len_stock, length_cm, stock, remove = FALSE) %>%
              rename(`Length (cm)` = length_cm, Stock = stock, Gender = gender) %>%
              filter(`Length (cm)` < 54, `Length (cm)` > 10, Stock=='aru.27.5b6a') )+
  theme_bw() +
  scale_fill_viridis_d() +
  scale_color_viridis_d() +
  facet_wrap(~Gender, ncol = 1)
# Annual maturity ogives for aru.27.5a14; same construction as the sibling
# plots above, differing only in the stock filter.
mat_plot_overtime_aru.27.5a14 <-
  mat_pars_bystock_overtime %>%
  flatten() %>%
  keep(., names(.)=="x") %>%
  bind_rows() %>%
  mutate(stock = ifelse(person=='Elvar Hallfredsson', 'aru.27.123a4',
                        ifelse(person=='Pamela J. Woods', 'aru.27.5a14',
                               ifelse(person=='Lise H. Ofstad', 'aru.27.5b6a', person))),
         gender = ifelse(gender=='F', 'Female',
                         ifelse(gender=='M', 'Male',
                                ifelse(gender=='U', 'Unidentified', gender))),
         Year = as.factor(year),
         length_cm = round(length_cm)) %>%
  filter(!is.na(stock)) %>%
  # Observed proportion mature = mature count / total count per bin.
  group_by(stock, gender, length_cm, maturity, Year) %>%
  count() %>%
  filter(maturity == 'Mature') %>%
  left_join(mat_pars_bystock_overtime %>%
              flatten() %>%
              keep(., names(.)=="x") %>%
              bind_rows() %>%
              mutate(stock = ifelse(person=='Elvar Hallfredsson', 'aru.27.123a4',
                                    ifelse(person=='Pamela J. Woods', 'aru.27.5a14',
                                           ifelse(person=='Lise H. Ofstad', 'aru.27.5b6a', person))),
                     gender = ifelse(gender=='F', 'Female',
                                     ifelse(gender=='M', 'Male',
                                            ifelse(gender=='U', 'Unidentified', gender))),
                     Year = as.factor(year),
                     length_cm = round(length_cm)) %>%
              group_by(stock, gender, length_cm, Year) %>%
              count(name = 'n_tot')) %>%
  mutate(p = n/n_tot) %>%
  unite(len_year, length_cm, Year, remove = FALSE) %>%
  rename(`Length (cm)` = length_cm, Stock = stock, Gender = gender, `Proportion mature` = p) %>%
  filter(`Length (cm)` < 54, `Length (cm)` > 10, Stock== 'aru.27.5a14') %>%
  ggplot() +
  geom_point(aes(x = `Length (cm)`, y = `Proportion mature`, color = Year, fill = Year), alpha = 0.2) +
  geom_line(aes(x = `Length (cm)`, y = fit, color = Year),
            data = mat_pars_bystock_overtime%>%
              flatten() %>%
              keep(., names(.)=="x") %>%
              bind_rows() %>%
              mutate(stock = ifelse(person=='Elvar Hallfredsson', 'aru.27.123a4',
                                    ifelse(person=='Pamela J. Woods', 'aru.27.5a14',
                                           ifelse(person=='Lise H. Ofstad', 'aru.27.5b6a', person))),
                     gender = ifelse(gender=='F', 'Female',
                                     ifelse(gender=='M', 'Male',
                                            ifelse(gender=='U', 'Unidentified', gender))),
                     Year = as.factor(year),
                     length_cm = round(length_cm)) %>%
              filter(!is.na(stock), !is.na(fit)) %>%
              unite(len_stock, length_cm, stock, remove = FALSE) %>%
              rename(`Length (cm)` = length_cm, Stock = stock, Gender = gender) %>%
              filter(`Length (cm)` < 54, `Length (cm)` > 10, Stock=='aru.27.5a14') )+
  theme_bw() +
  scale_fill_viridis_d() +
  scale_color_viridis_d() +
  facet_wrap(~Gender, ncol = 1)
####--------------Mean length at age tables -------------####
# Mean (and SD of) length at age by country/division/source/year. Ages below
# 11 are offset by quarter so samples from different quarters line up.
# FIX(review): the original filter was `year == 20162019`, a literal that can
# never match (it silently produced an empty table); `2016:2019` matches the
# downstream `facet_wrap(~Year)` plot -- TODO confirm the intended range.
ml_age <-
  all %>%
  left_join(all_st) %>%
  mutate(quarter = ifelse(month %in% c(1,2,3), 1,
                          ifelse(month %in% c(4,5,6), 2,
                                 ifelse(month %in% c(7,8,9), 3,
                                        ifelse(month %in% c(10,11,12), 4, month)))),
         age = ifelse(age < 11, age + (quarter-1)/4, age)) %>%
  filter(age < 21, year %in% 2016:2019) %>%
  group_by(country, division, source, age, year) %>%
  filter(!is.na(age), !is.na(length_cm), length_cm > 0, !is.na(division)) %>%
  summarise(ml = round(mean(length_cm, na.rm = T), 1), sdl = round(sd(length_cm, na.rm = T), 1)) %>%
  arrange(country, division, source, year, age)
# Wide table of mean length at age: one column per country/division/source.
# FIX(review): `year` is now retained so `spread()` has unique age-by-year
# row identifiers when more than one year is present (the original dropped
# it, which errors on duplicate rows). `spread()` is superseded by
# `pivot_wider()` but kept here for minimal change.
ml_age %>%
  select(country, division, source, year, age, ml) %>%
  unite('CDS', country, division, source) %>%
  spread(value = ml, key = CDS) %>%
  write_csv('R/biol_figs_output/meanlength_at_age_bydivision.csv')
# Mean length-at-age curves, one line per country/division/source
# combination, facetted by year.
ml_age_plot <-
  ml_age %>%
  ungroup() %>%
  # Harmonise the survey/commercial labels before building the facet key.
  mutate(source = dplyr::recode(source, Fishery = 'commercial')) %>%
  unite(`Country/Division/Source`, country, division, source, sep = '/') %>%
  rename(Age = age, `Mean Length (cm)` = ml, Year = year) %>%
  group_by(`Country/Division/Source`) %>%
  ggplot() +
  geom_line(aes(x = Age, y = `Mean Length (cm)`, colour = `Country/Division/Source`)) +
  theme_bw() +
  facet_wrap(~Year)
####--------------Size and depth relationships -------------####
#size x depth
# Scatter of sampling depth against fish length per stock, coloured by year,
# for 2000 onwards.
size_depth_plot <-
  all %>%
  left_join(all_st) %>%
  filter(yr > 1999) %>%
  mutate(stock = dplyr::case_when(person == 'Elvar Hallfredsson' ~ 'aru.27.123a4',
                                  person == 'Pamela J. Woods' ~ 'aru.27.5a14',
                                  person == 'Lise H. Ofstad' ~ 'aru.27.5b6a',
                                  TRUE ~ person),
         Depth = -depth_m,   # negate so deeper samples plot lower
         Year = as.factor(yr)) %>%
  rename(Length = length_cm, Stock = stock) %>%
  ggplot() +
  geom_point(aes(x = Length, y = Depth, colour = Year)) +
  facet_wrap(~Stock) +
  theme_bw()
# Linear model of length on depth, division, and their interaction
# (`depth_m * division` expands to the same main effects + interaction as
# the explicit sum); coefficients exported as CSV.
size_depth <-
  all %>%
  left_join(all_st) %>%
  mutate(rect = mapplots::ices.rect2(lon, lat)) %>%
  filter(!is.na(length_cm), length_cm > 0, !is.na(division)) %>%
  lm(length_cm ~ depth_m * division, data = .) %>%
  broom::tidy() %>%
  write_csv('R/biol_figs_output/size_depth_lm.csv')
# Linear model of length on depth, latitude, longitude and all two-way
# interactions; `(depth_m + lat + lon)^2` expands to exactly the original
# main-effects-plus-pairwise-interaction formula.
size_depth_latlon <-
  all %>%
  left_join(all_st) %>%
  mutate(rect = mapplots::ices.rect2(lon, lat)) %>%
  filter(!is.na(length_cm), length_cm > 0, !is.na(division)) %>%
  lm(length_cm ~ (depth_m + lat + lon)^2, data = .) %>%
  broom::tidy() %>%
  write_csv('R/biol_figs_output/size_depth_latlon_lm.csv')
# Recent-period version of the length~depth x division model.
# FIX(review): the original filter `year == 20162019` can never match, which
# left zero rows and made `lm()` error out; `2016:2019` (the two fused
# years) is assumed -- TODO confirm the intended range.
size_depth_2018 <-
  all %>%
  left_join(all_st) %>%
  mutate(rect = mapplots::ices.rect2(lon, lat)) %>%
  filter(year %in% 2016:2019, !is.na(length_cm), length_cm > 0) %>%
  lm(length_cm ~ depth_m + division + depth_m*division, data=.) %>%
  broom::tidy() %>%
  write_csv('R/biol_figs_output/size_depth_lm_2018.csv')
# Recent-period version of the length~depth x lat x lon model.
# FIX(review): the original filter `year == 20162019` can never match, which
# left zero rows and made `lm()` error out; `2016:2019` (the two fused
# years) is assumed -- TODO confirm the intended range.
size_depth_latlon_2018 <-
  all %>%
  left_join(all_st) %>%
  mutate(rect = mapplots::ices.rect2(lon, lat)) %>%
  filter(year %in% 2016:2019, !is.na(length_cm), length_cm > 0) %>%
  lm(length_cm ~ depth_m + lat + lon + depth_m*lat + depth_m*lon + lat*lon, data=.) %>%
  broom::tidy() %>%
  write_csv('R/biol_figs_output/size_depth_latlon_lm_2018.csv')
####--------------Maps showing expected and residuals from overall relationships -------------####
# Working data for the overall growth model: complete age/length records in a
# known division, tagged with the ICES statistical rectangle, with fractional
# age by sampling month.
tmp_vb <-
  all %>%
  left_join(all_st) %>%
  filter(!is.na(age) & !is.na(length_cm) & length_cm > 0 & !is.na(division)) %>%
  mutate(rect = mapplots::ices.rect2(lon, lat)) %>%
  mutate(age = age + (month - 1) / 12)
# Overall von Bertalanffy growth curve (all stocks pooled), fitted on the log
# scale; start values are rough initial guesses for the optimiser.
overall_vb <-
  nls(log(length_cm)~log(Linf*(1-exp(-K*(age-t0)))), data=tmp_vb, start=list(Linf=50, K=0.2, t0=-0.5))
# Export the pooled growth-curve coefficient table.
write_csv(broom::tidy(overall_vb), 'R/biol_figs_output/overall_vb.csv')
yr_min <- 2005; yr_max <- 2018
#needs to be replaced by length distribution plots
# Map of mean expected length at age per ICES rectangle and year, from the
# pooled growth model. fitted(overall_vb) aligns row-for-row with tmp_vb
# because the model was fit on tmp_vb with no rows dropped.
growth_expected_plot <-
  tmp_vb %>%
  mutate(expected_length = exp(fitted(overall_vb)), residuals = length_cm - expected_length) %>%
  group_by(rect, yr) %>%
  summarise(`Expected Length (cm)` = mean(expected_length, na.rm = T),
            `Residual Length (cm)` = mean(residuals, na.rm = T)) %>%
  filter(yr > yr_min-1, yr < yr_max+1) %>%
  # Attach rectangle centre coordinates (magrittr still pipes `.` in as the
  # first bind_cols argument).
  bind_cols(mapplots::ices.rect(.$rect)) %>%
  ggplot() +
  #coord_quickmap(xlim = c(-38, 18),ylim = c(55, 74))+
  geom_tile(aes(lon, lat, fill=`Expected Length (cm)`),interpolate = FALSE) +
  #geom_tile(aes(fill=`Expected Length (cm)`)) +
  # geom_polygon(data=gisland::iceland,aes(long,lat,group=group),
  #              fill='white',col='black') +
  # Land masses for context.
  geom_polygon(data = map_data('world','Greenland'), aes(long, lat, group=group),
               fill = 'gray',col='black',lwd=0.1) +
  geom_polygon(data = map_data('world','Norway'), aes(long, lat, group=group),
               fill = 'gray',col='black',lwd=0.1) +
  geom_polygon(data = geo::bisland, aes(lon, lat), fill = 'gray',col='black',lwd=0.1) +
  geom_polygon(data = geo::faeroes, aes(lon, lat), fill = 'gray',col='black',lwd=0.1) +
  #mapplots::draw.rect() %>%
  theme_bw()+
  theme(strip.background = element_blank(),
        strip.text.x = element_blank()) +
  scale_fill_viridis_c(direction = -1)+
  xlab('Longitude (W)') +
  ylab('Latitude (N)') +
  facet_wrap(~yr, ncol = 3) +
  theme(legend.position = c(0.9, 0.1))+
  # Year labels drawn manually since the strip text is blanked above.
  geom_text(aes(x = x, y = y, label = label), data = tibble(yr = yr_min:yr_max, x= 0, y = 77, label = yr)) +
  geom_sf(data = ia, colour = 'black', fill = NA, lwd = 0.05) +
  coord_sf(xlim = c(-34, 18),ylim = c(57, 80))
# Map of mean length-at-age RESIDUALS (observed - expected) per ICES
# rectangle and year; companion to growth_expected_plot above.
growth_residuals_plot <-
  tmp_vb %>%
  mutate(expected_length = exp(fitted(overall_vb)), residuals = length_cm - expected_length) %>%
  group_by(rect, yr) %>%
  summarise(`Expected Length (cm)` = mean(expected_length, na.rm = T),
            `Residual Length (cm)` = mean(residuals, na.rm = T)) %>%
  filter(yr > yr_min-1, yr < yr_max+1) %>%
  bind_cols(mapplots::ices.rect(.$rect)) %>%
  #separate(sq,c("lon","lat"), sep=':',convert = TRUE) %>%
  ggplot() +
  #coord_quickmap(xlim = c(-38, 18),ylim = c(55, 74))+
  geom_tile(aes(lon, lat, fill=`Residual Length (cm)`),interpolate = FALSE) +
  #geom_tile(aes(fill=`Expected Length (cm)`)) +
  # geom_polygon(data=gisland::iceland,aes(long,lat,group=group),
  #              fill='white',col='black') +
  # Land masses for context.
  geom_polygon(data = map_data('world','Greenland'), aes(long, lat, group=group),
               fill = 'gray',col='black',lwd=0.1) +
  geom_polygon(data = map_data('world','Norway'), aes(long, lat, group=group),
               fill = 'gray',col='black',lwd=0.1) +
  geom_polygon(data = geo::bisland, aes(lon, lat), fill = 'gray',col='black',lwd=0.1) +
  geom_polygon(data = geo::faeroes, aes(lon, lat), fill = 'gray',col='black',lwd=0.1) +
  #mapplots::draw.rect() %>%
  theme_bw()+
  theme(strip.background = element_blank(),
        strip.text.x = element_blank()) +
  scale_fill_viridis_c(direction = -1)+
  xlab('Longitude (W)') +
  ylab('Latitude (N)')+
  facet_wrap(~yr, ncol = 3) +
  theme(legend.position = c(0.9, 0.1)) +
  # Year labels drawn manually since the strip text is blanked above.
  geom_text(aes(x = x, y = y, label = label), data = tibble(yr = yr_min:yr_max, x= 0, y = 77, label = yr)) +
  geom_sf(data = ia, colour = 'black', fill = NA, lwd = 0.05) +
  coord_sf(xlim = c(-34, 18),ylim = c(57, 80))
# Length-weight sample set: biological records joined to station data, kept
# only where weight, length and division are usable; adds the ICES rectangle
# and a decimal (month-adjusted) age.
tmp_lw <-
  left_join(all, all_st) %>%
  filter(!is.na(weight_g)) %>%
  filter(!is.na(length_cm), length_cm > 0) %>%
  filter(!is.na(division)) %>%
  mutate(rect = mapplots::ices.rect2(lon, lat)) %>%
  mutate(age = age + (month - 1) / 12)
# Overall length-weight relationship: log(weight in kg) regressed on
# log(length in cm) across all samples.
overall_lw <- lm(log(weight_g/1e3) ~ log(length_cm), data = tmp_lw)
# Export the fitted coefficients for the report.
write_csv(broom::tidy(overall_lw), 'R/biol_figs_output/overall_lw.csv')
#needs to be replaced by length distribution plots
# Map of mean expected weight (from the overall length-weight model) by ICES
# rectangle, faceted by year.
weight_expected_plot <-
  tmp_lw %>%
  # BUG FIX: residuals previously subtracted kg from grams
  # (weight_g - expected_weight); use weight_g/1e3 as in weight_residuals_plot,
  # since overall_lw is fitted on log(weight_g/1e3).
  mutate(expected_weight = exp(fitted(overall_lw)), residuals = weight_g/1e3 - expected_weight) %>%
  group_by(rect, yr) %>%
  summarise(`Expected Weight (kg)` = mean(expected_weight, na.rm = T),
            `Residual Weight (kg)` = mean(residuals, na.rm = T)) %>%
  filter(yr > yr_min-1, yr < yr_max+1) %>%
  # attach rectangle centre coordinates (lon/lat) for tiling
  bind_cols(mapplots::ices.rect(.$rect)) %>%
  #separate(sq,c("lon","lat"), sep=':',convert = TRUE) %>%
  ggplot() +
  #coord_quickmap(xlim = c(-38, 18),ylim = c(55, 74))+
  geom_tile(aes(lon, lat, fill=`Expected Weight (kg)`),interpolate = FALSE) +
  #geom_tile(aes(fill=`Expected Length (cm)`)) +
  # geom_polygon(data=gisland::iceland,aes(long,lat,group=group),
  # fill='white',col='black') +
  geom_polygon(data = map_data('world','Greenland'), aes(long, lat, group=group),
               fill = 'gray',col='black',lwd=0.1) +
  geom_polygon(data = map_data('world','Norway'), aes(long, lat, group=group),
               fill = 'gray',col='black',lwd=0.1) +
  geom_polygon(data = geo::bisland, aes(lon, lat), fill = 'gray',col='black',lwd=0.1) +
  geom_polygon(data = geo::faeroes, aes(lon, lat), fill = 'gray',col='black',lwd=0.1) +
  #mapplots::draw.rect() %>%
  theme_bw()+
  theme(strip.background = element_blank(),
        strip.text.x = element_blank()) +
  scale_fill_viridis_c(direction = -1)+
  xlab('Longitude (W)') +
  ylab('Latitude (N)') +
  facet_wrap(~yr, ncol = 3) +
  theme(legend.position = c(0.9, 0.1))+
  # year labels drawn inside each panel (facet strips are blanked above)
  geom_text(aes(x = x, y = y, label = label), data = tibble(yr = yr_min:yr_max, x= 0, y = 77, label = yr)) +
  geom_sf(data = ia, colour = 'black', fill = NA, lwd = 0.05) +
  coord_sf(xlim = c(-34, 18),ylim = c(57, 80))
# Map of mean weight residuals (observed kg minus length-weight-model expected
# kg) by ICES rectangle, faceted by year.
weight_residuals_plot <-
  tmp_lw %>%
  mutate(expected_weight = exp(fitted(overall_lw)), residuals = weight_g/1e3 - expected_weight) %>%
  # drop extreme positive outliers (residual > 1 kg)
  filter(residuals < 1) %>%
  group_by(rect, yr) %>%
  summarise(`Expected Weight (kg)` = mean(expected_weight, na.rm = T),
            `Residual Weight (kg)` = mean(residuals, na.rm = T)) %>%
  filter(yr > yr_min-1, yr < yr_max+1) %>%
  # attach rectangle centre coordinates (lon/lat) for tiling
  bind_cols(mapplots::ices.rect(.$rect)) %>%
  #separate(sq,c("lon","lat"), sep=':',convert = TRUE) %>%
  ggplot() +
  #coord_quickmap(xlim = c(-38, 18),ylim = c(55, 74))+
  geom_tile(aes(lon, lat, fill=`Residual Weight (kg)`),interpolate = FALSE) +
  #geom_tile(aes(fill=`Expected Length (cm)`)) +
  # geom_polygon(data=gisland::iceland,aes(long,lat,group=group),
  # fill='white',col='black') +
  geom_polygon(data = map_data('world','Greenland'), aes(long, lat, group=group),
               fill = 'gray',col='black',lwd=0.1) +
  geom_polygon(data = map_data('world','Norway'), aes(long, lat, group=group),
               fill = 'gray',col='black',lwd=0.1) +
  geom_polygon(data = geo::bisland, aes(lon, lat), fill = 'gray',col='black',lwd=0.1) +
  geom_polygon(data = geo::faeroes, aes(lon, lat), fill = 'gray',col='black',lwd=0.1) +
  #mapplots::draw.rect() %>%
  theme_bw()+
  theme(strip.background = element_blank(),
        strip.text.x = element_blank()) +
  scale_fill_viridis_c(direction = -1)+
  xlab('Longitude (W)') +
  ylab('Latitude (N)')+
  facet_wrap(~yr, ncol = 3) +
  theme(legend.position = c(0.9, 0.1)) +
  # year labels drawn inside each panel (facet strips are blanked above)
  geom_text(aes(x = x, y = y, label = label), data = tibble(yr = yr_min:yr_max, x= 0, y = 77, label = yr)) +
  geom_sf(data = ia, colour = 'black', fill = NA, lwd = 0.05) +
  coord_sf(xlim = c(-34, 18),ylim = c(57, 80))
####--------------Expected and residual for ages 8 and 9 only -------------####
#length at ages 8 & 9 (most frequent age)
# Quick sample-size check per age (auto-printed, intentionally unassigned).
left_join(all, all_st) %>%
  group_by(age) %>%
  count()
# Length-at-age sample set restricted to ages 8 and 9 (the most frequent
# ages), with valid lengths and a known division; adds the ICES rectangle and
# a decimal (month-adjusted) age.
tmp_l8 <-
  all %>%
  left_join(all_st) %>%
  # BUG FIX: '!is.na(division)' was accidentally placed inside c(8, 9, ...),
  # so the age filter compared against c(8, 9, 0/1/NA) and the division
  # condition was never applied as a filter. Split it out, as in the sibling
  # tmp_* blocks.
  filter(!is.na(length_cm), length_cm > 0, age %in% c(8, 9), !is.na(division)) %>%
  mutate(rect = mapplots::ices.rect2(lon, lat),
         age = age + (month-1)/12)
# Linear length-at-age relationship for ages 8-9.
# (Comment previously said "weight at length variation" — this block models
# length at age.)
overall_l8 <-
  lm(length_cm~age,tmp_l8)
# BUG FIX: base graphics are not composed with '+' (that is ggplot2 syntax).
# Draw the scatter first, then add the fitted line to the open device.
plot(length_cm~age,tmp_l8)
abline(coef(overall_l8))
# Export the fitted coefficients for the report.
overall_l8 %>%
  broom::tidy()%>%
  write_csv('R/biol_figs_output/overall_l8.csv')
#needs to be replaced by length distribution plots
# Map of mean expected length at ages 8-9 (from the linear overall_l8 model)
# by ICES rectangle, faceted by year.
l8_expected_plot <-
  tmp_l8 %>%
  mutate(expected_length = fitted(overall_l8), residuals = length_cm - expected_length) %>%
  group_by(rect, yr) %>%
  summarise(`Expected Length (cm)` = mean(expected_length, na.rm = T),
            `Residual Length (cm)` = mean(residuals, na.rm = T)) %>%
  filter(yr > yr_min-1, yr < yr_max+1) %>%
  # attach rectangle centre coordinates (lon/lat) for tiling
  bind_cols(mapplots::ices.rect(.$rect)) %>%
  #separate(sq,c("lon","lat"), sep=':',convert = TRUE) %>%
  ggplot() +
  #coord_quickmap(xlim = c(-38, 18),ylim = c(55, 74))+
  geom_tile(aes(lon, lat, fill=`Expected Length (cm)`),interpolate = FALSE) +
  #geom_tile(aes(fill=`Expected Length (cm)`)) +
  # geom_polygon(data=gisland::iceland,aes(long,lat,group=group),
  # fill='white',col='black') +
  geom_polygon(data = map_data('world','Greenland'), aes(long, lat, group=group),
               fill = 'gray',col='black',lwd=0.1) +
  geom_polygon(data = map_data('world','Norway'), aes(long, lat, group=group),
               fill = 'gray',col='black',lwd=0.1) +
  geom_polygon(data = geo::bisland, aes(lon, lat), fill = 'gray',col='black',lwd=0.1) +
  geom_polygon(data = geo::faeroes, aes(lon, lat), fill = 'gray',col='black',lwd=0.1) +
  #mapplots::draw.rect() %>%
  theme_bw()+
  theme(strip.background = element_blank(),
        strip.text.x = element_blank()) +
  scale_fill_viridis_c(direction = -1)+
  xlab('Longitude (W)') +
  ylab('Latitude (N)') +
  facet_wrap(~yr, ncol = 3) +
  theme(legend.position = c(0.9, 0.1))+
  # year labels drawn inside each panel (facet strips are blanked above)
  geom_text(aes(x = x, y = y, label = label), data = tibble(yr = yr_min:yr_max, x= 0, y = 77, label = yr)) +
  geom_sf(data = ia, colour = 'black', fill = NA, lwd = 0.05) +
  coord_sf(xlim = c(-34, 18),ylim = c(57, 80))
# Map of mean length residuals at ages 8-9 (observed minus fitted from
# overall_l8) by ICES rectangle, faceted by year.
l8_residual_plot <-
  tmp_l8 %>%
  mutate(expected_length = fitted(overall_l8), residuals = length_cm - expected_length) %>%
  group_by(rect, yr) %>%
  summarise(`Expected Length (cm)` = mean(expected_length, na.rm = T),
            `Residual Length (cm)` = mean(residuals, na.rm = T)) %>%
  filter(yr > yr_min-1, yr < yr_max+1) %>%
  # attach rectangle centre coordinates (lon/lat) for tiling
  bind_cols(mapplots::ices.rect(.$rect)) %>%
  #separate(sq,c("lon","lat"), sep=':',convert = TRUE) %>%
  ggplot() +
  #coord_quickmap(xlim = c(-38, 18),ylim = c(55, 74))+
  geom_tile(aes(lon, lat, fill=`Residual Length (cm)`),interpolate = FALSE) +
  #geom_tile(aes(fill=`Expected Length (cm)`)) +
  # geom_polygon(data=gisland::iceland,aes(long,lat,group=group),
  # fill='white',col='black') +
  geom_polygon(data = map_data('world','Greenland'), aes(long, lat, group=group),
               fill = 'gray',col='black',lwd=0.1) +
  geom_polygon(data = map_data('world','Norway'), aes(long, lat, group=group),
               fill = 'gray',col='black',lwd=0.1) +
  geom_polygon(data = geo::bisland, aes(lon, lat), fill = 'gray',col='black',lwd=0.1) +
  geom_polygon(data = geo::faeroes, aes(lon, lat), fill = 'gray',col='black',lwd=0.1) +
  #mapplots::draw.rect() %>%
  theme_bw()+
  theme(strip.background = element_blank(),
        strip.text.x = element_blank()) +
  scale_fill_viridis_c(direction = -1)+
  xlab('Longitude (W)') +
  ylab('Latitude (N)') +
  facet_wrap(~yr, ncol = 3) +
  theme(legend.position = c(0.9, 0.1))+
  # year labels drawn inside each panel (facet strips are blanked above)
  geom_text(aes(x = x, y = y, label = label), data = tibble(yr = yr_min:yr_max, x= 0, y = 77, label = yr)) +
  geom_sf(data = ia, colour = 'black', fill = NA, lwd = 0.05) +
  coord_sf(xlim = c(-34, 18),ylim = c(57, 80))
####-------------- Max, 95%, median ages and lengths, also by depth -------------####
#max age
# Aged samples with a known division; adds the ICES rectangle and a decimal
# (month-adjusted) age.
tmp_maxage <-
  left_join(all, all_st) %>%
  filter(!is.na(age)) %>%
  filter(!is.na(division)) %>%
  mutate(rect = mapplots::ices.rect2(lon, lat)) %>%
  mutate(age = age + (month - 1) / 12)
# Map of maximum observed age by ICES rectangle, faceted by year.
maxage_plot <-
  tmp_maxage %>%
  group_by(rect, yr) %>%
  summarise(`Max. age` = max(age, na.rm = T),
            `95% age` = quantile(age, 0.95, na.rm = T)) %>%
  filter(yr > yr_min-1, yr < yr_max+1) %>%
  # attach rectangle centre coordinates (lon/lat) for tiling
  bind_cols(mapplots::ices.rect(.$rect)) %>%
  #separate(sq,c("lon","lat"), sep=':',convert = TRUE) %>%
  ggplot() +
  #coord_quickmap(xlim = c(-38, 18),ylim = c(55, 74))+
  geom_tile(aes(lon, lat, fill=`Max. age`),interpolate = FALSE) +
  geom_polygon(data = map_data('world','Greenland'), aes(long, lat, group=group),
               fill = 'gray',col='black',lwd=0.1) +
  geom_polygon(data = map_data('world','Norway'), aes(long, lat, group=group),
               fill = 'gray',col='black',lwd=0.1) +
  geom_polygon(data = geo::bisland, aes(lon, lat), fill = 'gray',col='black',lwd=0.1) +
  geom_polygon(data = geo::faeroes, aes(lon, lat), fill = 'gray',col='black',lwd=0.1) +
  #mapplots::draw.rect() %>%
  theme_bw()+
  theme(strip.background = element_blank(),
        strip.text.x = element_blank()) +
  scale_fill_viridis_c(direction = -1)+
  xlab('Longitude (W)') +
  ylab('Latitude (N)')+
  facet_wrap(~yr, ncol = 3) +
  theme(legend.position = c(0.9, 0.1)) +
  # year labels drawn inside each panel (facet strips are blanked above)
  geom_text(aes(x = x, y = y, label = label), data = tibble(yr = yr_min:yr_max, x= 0, y = 77, label = yr)) +
  geom_sf(data = ia, colour = 'black', fill = NA, lwd = 0.05) +
  coord_sf(xlim = c(-34, 18),ylim = c(57, 80))
# Map of the 95th percentile of observed age by ICES rectangle, faceted by
# year (same summary as maxage_plot; only the fill variable differs).
age95_plot <-
  tmp_maxage %>%
  group_by(rect, yr) %>%
  summarise(`Max. age` = max(age, na.rm = T),
            `95% age` = quantile(age, 0.95, na.rm = T)) %>%
  filter(yr > yr_min-1, yr < yr_max+1) %>%
  # attach rectangle centre coordinates (lon/lat) for tiling
  bind_cols(mapplots::ices.rect(.$rect)) %>%
  #separate(sq,c("lon","lat"), sep=':',convert = TRUE) %>%
  ggplot() +
  #coord_quickmap(xlim = c(-38, 18),ylim = c(55, 74))+
  geom_tile(aes(lon, lat, fill=`95% age`),interpolate = FALSE) +
  geom_polygon(data = map_data('world','Greenland'), aes(long, lat, group=group),
               fill = 'gray',col='black',lwd=0.1) +
  geom_polygon(data = map_data('world','Norway'), aes(long, lat, group=group),
               fill = 'gray',col='black',lwd=0.1) +
  geom_polygon(data = geo::bisland, aes(lon, lat), fill = 'gray',col='black',lwd=0.1) +
  geom_polygon(data = geo::faeroes, aes(lon, lat), fill = 'gray',col='black',lwd=0.1) +
  #mapplots::draw.rect() %>%
  theme_bw()+
  theme(strip.background = element_blank(),
        strip.text.x = element_blank()) +
  scale_fill_viridis_c(direction = -1)+
  xlab('Longitude (W)') +
  ylab('Latitude (N)')+
  facet_wrap(~yr, ncol = 3) +
  theme(legend.position = c(0.9, 0.1)) +
  # year labels drawn inside each panel (facet strips are blanked above)
  geom_text(aes(x = x, y = y, label = label), data = tibble(yr = yr_min:yr_max, x= 0, y = 77, label = yr)) +
  geom_sf(data = ia, colour = 'black', fill = NA, lwd = 0.05) +
  coord_sf(xlim = c(-34, 18),ylim = c(57, 80))
#95% length - need to get from length distributions
#max age
# Measured samples with a known division; adds the ICES rectangle and a
# decimal (month-adjusted) age.
tmp_maxl <-
  left_join(all, all_st) %>%
  filter(!is.na(length_cm), length_cm > 0) %>%
  filter(!is.na(division)) %>%
  mutate(rect = mapplots::ices.rect2(lon, lat)) %>%
  mutate(age = age + (month - 1) / 12)
# Map of maximum observed length by ICES rectangle, faceted by year.
maxl_plot <-
  tmp_maxl %>%
  group_by(rect, yr) %>%
  summarise(`Max. length (cm)` = max(length_cm, na.rm = T),
            `95% length (cm)` = quantile(length_cm, 0.95, na.rm = T)) %>%
  filter(yr > yr_min-1, yr < yr_max+1) %>%
  # attach rectangle centre coordinates (lon/lat) for tiling
  bind_cols(mapplots::ices.rect(.$rect)) %>%
  #separate(sq,c("lon","lat"), sep=':',convert = TRUE) %>%
  ggplot() +
  #coord_quickmap(xlim = c(-38, 18),ylim = c(55, 74))+
  geom_tile(aes(lon, lat, fill=`Max. length (cm)`),interpolate = FALSE) +
  geom_polygon(data = map_data('world','Greenland'), aes(long, lat, group=group),
               fill = 'gray',col='black',lwd=0.1) +
  geom_polygon(data = map_data('world','Norway'), aes(long, lat, group=group),
               fill = 'gray',col='black',lwd=0.1) +
  geom_polygon(data = geo::bisland, aes(lon, lat), fill = 'gray',col='black',lwd=0.1) +
  geom_polygon(data = geo::faeroes, aes(lon, lat), fill = 'gray',col='black',lwd=0.1) +
  #mapplots::draw.rect() %>%
  theme_bw()+
  theme(strip.background = element_blank(),
        strip.text.x = element_blank()) +
  scale_fill_viridis_c(direction = -1)+
  xlab('Longitude (W)') +
  ylab('Latitude (N)')+
  facet_wrap(~yr, ncol = 3) +
  theme(legend.position = c(0.9, 0.1)) +
  # year labels drawn inside each panel (facet strips are blanked above)
  geom_text(aes(x = x, y = y, label = label), data = tibble(yr = yr_min:yr_max, x= 0, y = 77, label = yr)) +
  geom_sf(data = ia, colour = 'black', fill = NA, lwd = 0.05) +
  coord_sf(xlim = c(-34, 18),ylim = c(57, 80))
# Map of the 95th percentile of observed length by ICES rectangle, faceted by
# year (same summary as maxl_plot; only the fill variable differs).
l95_plot <-
  tmp_maxl %>%
  group_by(rect, yr) %>%
  summarise(`Max. length (cm)` = max(length_cm, na.rm = T),
            `95% length (cm)` = quantile(length_cm, 0.95, na.rm = T)) %>%
  filter(yr > yr_min-1, yr < yr_max+1) %>%
  # attach rectangle centre coordinates (lon/lat) for tiling
  bind_cols(mapplots::ices.rect(.$rect)) %>%
  #separate(sq,c("lon","lat"), sep=':',convert = TRUE) %>%
  ggplot() +
  #coord_quickmap(xlim = c(-38, 18),ylim = c(55, 74))+
  geom_tile(aes(lon, lat, fill=`95% length (cm)`),interpolate = FALSE) +
  geom_polygon(data = map_data('world','Greenland'), aes(long, lat, group=group),
               fill = 'gray',col='black',lwd=0.1) +
  geom_polygon(data = map_data('world','Norway'), aes(long, lat, group=group),
               fill = 'gray',col='black',lwd=0.1) +
  geom_polygon(data = geo::bisland, aes(lon, lat), fill = 'gray',col='black',lwd=0.1) +
  geom_polygon(data = geo::faeroes, aes(lon, lat), fill = 'gray',col='black',lwd=0.1) +
  #mapplots::draw.rect() %>%
  theme_bw()+
  theme(strip.background = element_blank(),
        strip.text.x = element_blank()) +
  scale_fill_viridis_c(direction = -1)+
  xlab('Longitude (W)') +
  ylab('Latitude (N)')+
  facet_wrap(~yr, ncol = 3) +
  theme(legend.position = c(0.9, 0.1)) +
  # year labels drawn inside each panel (facet strips are blanked above)
  geom_text(aes(x = x, y = y, label = label), data = tibble(yr = yr_min:yr_max, x= 0, y = 77, label = yr)) +
  geom_sf(data = ia, colour = 'black', fill = NA, lwd = 0.05) +
  coord_sf(xlim = c(-34, 18),ylim = c(57, 80))
# Median lengths
# Map of median length in shallow water (depth <= 300 m) by ICES rectangle,
# faceted by year.
l50_plot_0 <-
  tmp_maxl %>%
  filter(depth_m <= 300) %>%
  group_by(rect, yr) %>%
  summarise(`Mean length (cm)` = mean(length_cm, na.rm = T),
            `50% length (cm)` = quantile(length_cm, 0.50, na.rm = T)) %>%
  filter(yr > yr_min-1, yr < yr_max+1) %>%
  # attach rectangle centre coordinates (lon/lat) for tiling
  bind_cols(mapplots::ices.rect(.$rect)) %>%
  #separate(sq,c("lon","lat"), sep=':',convert = TRUE) %>%
  ggplot() +
  #coord_quickmap(xlim = c(-38, 18),ylim = c(55, 74))+
  geom_tile(aes(lon, lat, fill=`50% length (cm)`),interpolate = FALSE) +
  geom_polygon(data = map_data('world','Greenland'), aes(long, lat, group=group),
               fill = 'gray',col='black',lwd=0.1) +
  geom_polygon(data = map_data('world','Norway'), aes(long, lat, group=group),
               fill = 'gray',col='black',lwd=0.1) +
  geom_polygon(data = geo::bisland, aes(lon, lat), fill = 'gray',col='black',lwd=0.1) +
  geom_polygon(data = geo::faeroes, aes(lon, lat), fill = 'gray',col='black',lwd=0.1) +
  #mapplots::draw.rect() %>%
  theme_bw()+
  theme(strip.background = element_blank(),
        strip.text.x = element_blank()) +
  scale_fill_viridis_c(direction = -1)+
  xlab('Longitude (W)') +
  ylab('Latitude (N)')+
  facet_wrap(~yr, ncol = 3) +
  theme(legend.position = c(0.9, 0.1)) +
  # year labels drawn inside each panel (facet strips are blanked above)
  geom_text(aes(x = x, y = y, label = label), data = tibble(yr = yr_min:yr_max, x= 0, y = 77, label = yr)) +
  geom_sf(data = ia, colour = 'black', fill = NA, lwd = 0.05) +
  coord_sf(xlim = c(-34, 18),ylim = c(57, 80))
# Map of median length at intermediate depths (300-500 m) by ICES rectangle,
# faceted by year.
l50_plot_300 <-
  tmp_maxl %>%
  filter(depth_m > 300, depth_m <= 500) %>%
  group_by(rect, yr) %>%
  summarise(`Mean length (cm)` = mean(length_cm, na.rm = T),
            `50% length (cm)` = quantile(length_cm, 0.50, na.rm = T)) %>%
  filter(yr > yr_min-1, yr < yr_max+1) %>%
  # attach rectangle centre coordinates (lon/lat) for tiling
  bind_cols(mapplots::ices.rect(.$rect)) %>%
  #separate(sq,c("lon","lat"), sep=':',convert = TRUE) %>%
  ggplot() +
  #coord_quickmap(xlim = c(-38, 18),ylim = c(55, 74))+
  geom_tile(aes(lon, lat, fill=`50% length (cm)`),interpolate = FALSE) +
  geom_polygon(data = map_data('world','Greenland'), aes(long, lat, group=group),
               fill = 'gray',col='black',lwd=0.1) +
  geom_polygon(data = map_data('world','Norway'), aes(long, lat, group=group),
               fill = 'gray',col='black',lwd=0.1) +
  geom_polygon(data = geo::bisland, aes(lon, lat), fill = 'gray',col='black',lwd=0.1) +
  geom_polygon(data = geo::faeroes, aes(lon, lat), fill = 'gray',col='black',lwd=0.1) +
  #mapplots::draw.rect() %>%
  theme_bw()+
  theme(strip.background = element_blank(),
        strip.text.x = element_blank()) +
  scale_fill_viridis_c(direction = -1)+
  xlab('Longitude (W)') +
  ylab('Latitude (N)')+
  facet_wrap(~yr, ncol = 3) +
  theme(legend.position = c(0.9, 0.1)) +
  # year labels drawn inside each panel (facet strips are blanked above)
  geom_text(aes(x = x, y = y, label = label), data = tibble(yr = yr_min:yr_max, x= 0, y = 77, label = yr)) +
  geom_sf(data = ia, colour = 'black', fill = NA, lwd = 0.05) +
  coord_sf(xlim = c(-34, 18),ylim = c(57, 80))
# Map of median length in deep water (depth > 500 m) by ICES rectangle,
# faceted by year.
l50_plot_500 <-
  tmp_maxl %>%
  filter(depth_m > 500) %>%
  group_by(rect, yr) %>%
  summarise(`Mean length (cm)` = mean(length_cm, na.rm = T),
            `50% length (cm)` = quantile(length_cm, 0.50, na.rm = T)) %>%
  filter(yr > yr_min-1, yr < yr_max+1) %>%
  # attach rectangle centre coordinates (lon/lat) for tiling
  bind_cols(mapplots::ices.rect(.$rect)) %>%
  #separate(sq,c("lon","lat"), sep=':',convert = TRUE) %>%
  ggplot() +
  #coord_quickmap(xlim = c(-38, 18),ylim = c(55, 74))+
  geom_tile(aes(lon, lat, fill=`50% length (cm)`),interpolate = FALSE) +
  geom_polygon(data = map_data('world','Greenland'), aes(long, lat, group=group),
               fill = 'gray',col='black',lwd=0.1) +
  geom_polygon(data = map_data('world','Norway'), aes(long, lat, group=group),
               fill = 'gray',col='black',lwd=0.1) +
  geom_polygon(data = geo::bisland, aes(lon, lat), fill = 'gray',col='black',lwd=0.1) +
  geom_polygon(data = geo::faeroes, aes(lon, lat), fill = 'gray',col='black',lwd=0.1) +
  #mapplots::draw.rect() %>%
  theme_bw()+
  theme(strip.background = element_blank(),
        strip.text.x = element_blank()) +
  scale_fill_viridis_c(direction = -1)+
  xlab('Longitude (W)') +
  ylab('Latitude (N)')+
  facet_wrap(~yr, ncol = 3) +
  theme(legend.position = c(0.9, 0.1)) +
  # year labels drawn inside each panel (facet strips are blanked above)
  geom_text(aes(x = x, y = y, label = label), data = tibble(yr = yr_min:yr_max, x= 0, y = 77, label = yr)) +
  geom_sf(data = ia, colour = 'black', fill = NA, lwd = 0.05) +
  coord_sf(xlim = c(-34, 18),ylim = c(57, 80))
####-------------- Maturity and spawning maps -------------####
#L50 maturity
# Maturity sample set. Maturity fields from Lise H. Ofstad's hauls
# 18080027/18080028 and the years 1994-1998 are blanked before filtering;
# the exclusion condition is computed once instead of three times.
tmp_l50 <-
  all %>%
  left_join(all_st) %>%
  mutate(drop_mat = person == 'Lise H. Ofstad' &
           (haul_id %in% c(18080027, 18080028) | year %in% c(1994:1998)),
         maturity_stage = ifelse(drop_mat, NA, maturity_stage),
         maturity = ifelse(drop_mat, NA, maturity),
         spawning = ifelse(drop_mat, NA, spawning)) %>%
  select(-drop_mat) %>%
  filter(!is.na(length_cm), length_cm > 0, !is.na(maturity), !is.na(division)) %>%
  mutate(rect = mapplots::ices.rect2(lon, lat),
         age = age + (month-1)/12)
# Map of L50 (largest length with <= 50 % mature) by ICES rectangle, faceted
# by year.
l50_plot <-
  tmp_l50 %>%
  # BUG FIX: this count previously grouped by 'year' while the denominator
  # count below groups by 'yr', so the left_join matched only on
  # rect + length_cm and fanned out across years. Use 'yr' consistently,
  # as everywhere else in this script.
  group_by(rect, yr, length_cm, maturity) %>%
  count() %>%
  ungroup %>%
  filter(maturity=='Mature') %>%
  rename(n_mat = n) %>%
  # denominator: all samples per rectangle/year/length
  left_join(tmp_l50 %>%
              group_by(rect, yr, length_cm) %>%
              count()) %>%
  mutate(p = n_mat/n) %>%
  # L50 = largest length at which at most half of the fish are mature
  filter(p <= 0.5, length_cm > 10) %>%
  group_by(rect, yr) %>%
  summarise(L50 = max(length_cm, na.rm = T)) %>%
  filter(yr > yr_min-1, yr < yr_max+1) %>%
  # attach rectangle centre coordinates (lon/lat) for tiling
  bind_cols(mapplots::ices.rect(.$rect)) %>%
  #separate(sq,c("lon","lat"), sep=':',convert = TRUE) %>%
  ggplot() +
  #coord_quickmap(xlim = c(-38, 18),ylim = c(55, 74))+
  geom_tile(aes(lon, lat, fill=L50),interpolate = FALSE) +
  geom_polygon(data = map_data('world','Greenland'), aes(long, lat, group=group),
               fill = 'gray',col='black',lwd=0.1) +
  geom_polygon(data = map_data('world','Norway'), aes(long, lat, group=group),
               fill = 'gray',col='black',lwd=0.1) +
  geom_polygon(data = geo::bisland, aes(lon, lat), fill = 'gray',col='black',lwd=0.1) +
  geom_polygon(data = geo::faeroes, aes(lon, lat), fill = 'gray',col='black',lwd=0.1) +
  #mapplots::draw.rect() %>%
  theme_bw()+
  theme(strip.background = element_blank(),
        strip.text.x = element_blank()) +
  scale_fill_viridis_c(direction = -1)+
  xlab('Longitude (W)') +
  ylab('Latitude (N)')+
  facet_wrap(~yr, ncol = 3) +
  theme(legend.position = c(0.9, 0.1)) +
  # year labels drawn inside each panel (facet strips are blanked above)
  geom_text(aes(x = x, y = y, label = label), data = tibble(yr = yr_min:yr_max, x= 0, y = 77, label = yr)) +
  geom_sf(data = ia, colour = 'black', fill = NA, lwd = 0.05) +
  coord_sf(xlim = c(-34, 18),ylim = c(57, 80))
#spawning
# Spawning sample set: same Faroese maturity-field exclusions as tmp_l50,
# restricted to valid lengths, spawning flags and divisions; adds the ICES
# rectangle, decimal age and calendar quarter.
tmp_sp <-
  all %>%
  left_join(all_st) %>%
  # blank unreliable maturity fields (Lise H. Ofstad hauls 18080027/18080028
  # and years 1994-1998)
  mutate(maturity_stage = ifelse(person=='Lise H. Ofstad' & (haul_id %in% c(18080027, 18080028) | year %in% c(1994:1998)), NA, maturity_stage),
         maturity = ifelse(person=='Lise H. Ofstad' & (haul_id %in% c(18080027, 18080028) | year %in% c(1994:1998)), NA, maturity),
         spawning = ifelse(person=='Lise H. Ofstad' & (haul_id %in% c(18080027, 18080028) | year %in% c(1994:1998)), NA, spawning)) %>%
  filter(!is.na(length_cm), length_cm > 0, !is.na(spawning), !is.na(division)) %>%
  mutate(rect = mapplots::ices.rect2(lon, lat),
         age = age + (month-1)/12,
         # calendar quarter; any month outside 1-12 passes through unchanged
         quarter = ifelse(month %in% c(1,2,3), 1,
                          ifelse(month %in% c(4,5,6), 2,
                                 ifelse(month %in% c(7,8,9), 3,
                                        ifelse(month %in% c(10,11,12), 4, month)))))
# Map of the proportion of spawning fish by ICES rectangle, faceted by
# calendar quarter (all years pooled).
spawning_plot <-
  tmp_sp %>%
  group_by(rect, quarter, spawning) %>%
  count() %>%
  ungroup %>%
  filter(spawning=='yes') %>%
  rename(n_sp = n) %>%
  # denominator: all samples per rectangle/quarter
  left_join(tmp_sp %>%
              group_by(rect, quarter) %>%
              count()) %>%
  mutate(`Proportion spawners` = n_sp/n) %>%
  #filter(year > yr_min-1, year < yr_max+1) %>%
  # attach rectangle centre coordinates (lon/lat) for tiling
  bind_cols(mapplots::ices.rect(.$rect)) %>%
  #separate(sq,c("lon","lat"), sep=':',convert = TRUE) %>%
  ggplot() +
  #coord_quickmap(xlim = c(-38, 18),ylim = c(55, 74))+
  geom_tile(aes(lon, lat, fill=`Proportion spawners`),interpolate = FALSE) +
  geom_polygon(data = map_data('world','Greenland'), aes(long, lat, group=group),
               fill = 'gray',col='black',lwd=0.1) +
  geom_polygon(data = map_data('world','Norway'), aes(long, lat, group=group),
               fill = 'gray',col='black',lwd=0.1) +
  geom_polygon(data = geo::bisland, aes(lon, lat), fill = 'gray',col='black',lwd=0.1) +
  geom_polygon(data = geo::faeroes, aes(lon, lat), fill = 'gray',col='black',lwd=0.1) +
  #mapplots::draw.rect() %>%
  theme_bw()+
  theme(strip.background = element_blank(),
        strip.text.x = element_blank()) +
  scale_fill_viridis_c(direction = -1)+
  xlab('Longitude (W)') +
  ylab('Latitude (N)')+
  facet_wrap(~quarter) +
  #theme(legend.position = c(0.9, 0.1)) +
  # quarter labels drawn inside each panel (facet strips are blanked above)
  geom_text(aes(x = x, y = y, label = label), data = tibble(quarter = 1:4, x= 0, y = 77, label = quarter)) +
  geom_sf(data = ia, colour = 'black', fill = NA, lwd = 0.05) +
  coord_sf(xlim = c(-34, 18),ylim = c(57, 80))
# Table of country/division/source/month combinations where more than 20 % of
# the samples were spawning, written to sp_bydivision.csv.
tmp_sp %>%
  group_by(country, division, source, month, spawning) %>%
  count() %>%
  ungroup %>%
  filter(spawning=='yes') %>%
  rename(n_sp = n) %>%
  # denominator: all samples in the same country/division/source/month
  left_join(tmp_sp %>%
              group_by(country, division, source, month) %>%
              count()) %>%
  mutate(p = n_sp/n) %>%
  filter(p > 0.2) %>%
  select(country, division, source, month, p) %>%
  arrange(country, desc(p)) %>%
  write_csv('R/biol_figs_output/sp_bydivision.csv')
#samples
# Sample counts by division, sampler and source; sampler names are mapped to
# their country of origin and the table is written to sample_origin.csv.
tmp_samples <-
  all %>%
  left_join(all_st) %>%
  filter(!is.na(length_cm), length_cm > 0,!is.na(division)) %>%
  mutate(rect = mapplots::ices.rect2(lon, lat),
         quarter = ifelse(month %in% c(1,2,3), 1,
                          ifelse(month %in% c(4,5,6), 2,
                                 ifelse(month %in% c(7,8,9), 3,
                                        ifelse(month %in% c(10,11,12), 4, month))))) %>%
  group_by(division, person, source) %>%
  count() %>%
  # BUG FIX: the ifelse() result was not assigned to a column name, so the
  # CSV contained a column literally named 'ifelse(...)'. Name it 'country'
  # (kept after 'person' is dropped below).
  mutate(country = ifelse(person=='Elvar Hallfredsson', 'Norway',
                          ifelse(person == 'Pamela J. Woods', 'Iceland',
                                 ifelse(person=='Lise H. Ofstad', 'Faroe Islands', person)))) %>%
  ungroup %>%
  select(-c(person)) %>%
  write_csv('R/biol_figs_output/sample_origin.csv')
####-------------- Figure output -------------####
# Standard figure dimensions in pixels: png_dims[1] = 1000, png_dims[2] = 675.
png_dims <- c(1000, 675)
# Helper: write one plot object to R/biol_figs_output/<name>.png at the given
# pixel size. Replaces 28 repeated png()/print()/dev.off() triplets.
save_png <- function(plot, name, height, width) {
  png(paste0('R/biol_figs_output/', name, '.png'), height = height, width = width)
  print(plot)
  dev.off()
}
# Spatial expected/residual and summary maps.
save_png(growth_expected_plot,  'growth_expected_plot',  png_dims[1], png_dims[1])
save_png(growth_residuals_plot, 'growth_residuals_plot', png_dims[1], png_dims[1])
save_png(weight_expected_plot,  'weight_expected_plot',  png_dims[1], png_dims[2])
save_png(weight_residuals_plot, 'weight_residuals_plot', png_dims[1], png_dims[2])
save_png(l8_expected_plot, 'l8_expected_plot', png_dims[1], png_dims[2])
save_png(l8_residual_plot, 'l8_residual_plot', png_dims[1], png_dims[2])
save_png(age95_plot, 'age95_plot', png_dims[1], png_dims[2])
save_png(l95_plot,   'l95_plot',   png_dims[1], png_dims[2])
save_png(l50_plot,   'l50_plot',   png_dims[1], png_dims[2])
save_png(l50_plot_0,   'l50_plot_0',   png_dims[1], png_dims[2])
save_png(l50_plot_300, 'l50_plot_300', png_dims[1], png_dims[2])
save_png(l50_plot_500, 'l50_plot_500', png_dims[1], png_dims[2])
save_png(spawning_plot, 'spawning_plot', 500, 500)
# Stock-level biology plots (wide layout at 75 % scale).
save_png(vb_plot_bystock,      'vb_plot_bystock',      png_dims[2]*0.75, png_dims[1]*0.75)
save_png(vb_plot_bystock_2018, 'vb_plot_bystock_2018', png_dims[2]*0.75, png_dims[1]*0.75)
save_png(vb_plot_overtime_aru.27.123a4, 'vb_plot_overtime_aru.27.123a4', png_dims[2]*0.75, png_dims[1]*0.75)
save_png(vb_plot_overtime_aru.27.5a14,  'vb_plot_overtime_aru.27.5a14',  png_dims[2]*0.75, png_dims[1]*0.75)
save_png(vb_plot_overtime_aru.27.5b6a,  'vb_plot_overtime_aru.27.5b6a',  png_dims[2]*0.75, png_dims[1]*0.75)
save_png(lw_plot_bystock, 'lw_plot_bystock', png_dims[2]*0.75, png_dims[1]*0.75)
save_png(lw_plot_overtime_aru.27.123a4, 'lw_plot_overtime_aru.27.123a4', png_dims[2]*0.75, png_dims[1]*0.75)
save_png(lw_plot_overtime_aru.27.5a14,  'lw_plot_overtime_aru.27.5a14',  png_dims[2]*0.75, png_dims[1]*0.75)
save_png(lw_plot_overtime_aru.27.5b6a,  'lw_plot_overtime_aru.27.5b6a',  png_dims[2]*0.75, png_dims[1]*0.75)
save_png(mat_plot_bystock, 'mat_plot_bystock', png_dims[2]*0.75, png_dims[1]*0.75)
save_png(mat_plot_overtime_aru.27.123a4, 'mat_plot_overtime_aru.27.123a4', png_dims[2]*0.75, png_dims[1]*0.75)
save_png(mat_plot_overtime_aru.27.5a14,  'mat_plot_overtime_aru.27.5a14',  png_dims[2]*0.75, png_dims[1]*0.75)
save_png(mat_plot_overtime_aru.27.5b6a,  'mat_plot_overtime_aru.27.5b6a',  png_dims[2]*0.75, png_dims[1]*0.75)
save_png(size_depth_plot, 'size_depth_plot', png_dims[2]*0.75, png_dims[1]*0.75)
save_png(ml_age_plot,     'ml_age_plot',     png_dims[2]*0.75, png_dims[1]*0.75)
#ICELAND ONLY
# NOTE(review): mar is an MFRI-internal package; connect_mar() presumably
# opens the institute database connection — this section only runs in-house.
library(mar)
mar <- connect_mar()
# Icelandic survey index data (raw_index_calc_19): spring (synaflokkur 30)
# and autumn (35) surveys, pulled into memory for the maps below.
spat_ind <-
  tbl(mar,paste0("raw_index_calc_",19)) %>%
  rename(year=ar) %>%
  #filter((synaflokkur == 30 & tognumer %in% c(1:39, NA))|(synaflokkur == 35 & )) %>%
  # 2011 autumn survey excluded
  filter(!(year==2011&synaflokkur==35)) %>%
  mutate(synaflokkur = ifelse(synaflokkur == 30, 'Spring survey','Autumn survey'),
         GRIDCELL = as.character(GRIDCELL)) %>%
  #filter(year>1992,year<tyr) %>%
  collect(n=Inf)
# Spring-survey map of log mean abundance (all lengths) by ICES rectangle,
# faceted by year.
all_30_map <-
  spat_ind %>%
  mutate(rect = mapplots::ices.rect2(lon, lat)) %>%
  #filter(lengd < 25) %>%
  filter(N > 0) %>%
  group_by(rect, year, synaflokkur) %>%
  summarise(logN = log(mean(N, na.rm = T))) %>%
  rename(Survey = synaflokkur) %>%
  filter(Survey=='Spring survey') %>%
  # attach rectangle centre coordinates (lon/lat) for tiling
  bind_cols(mapplots::ices.rect(.$rect)) %>%
  ggplot() +
  coord_quickmap(xlim = c(-28, -10),ylim = c(62, 68))+
  geom_tile(aes(lon, lat, fill=logN)) +
  geom_polygon(data = geo::bisland, aes(lon, lat), fill = 'gray',col='black',lwd=0.1) +
  theme_bw()+
  scale_fill_viridis_c(direction = -1)+
  xlab('Longitude (W)') +
  ylab('Latitude (N)')+
  facet_wrap(~year, ncol = 5)
# Spring-survey map of log mean abundance of small fish (< 25 cm) by ICES
# rectangle, faceted by year.
l25_30_map <-
  spat_ind %>%
  mutate(rect = mapplots::ices.rect2(lon, lat)) %>%
  filter(lengd < 25) %>%
  filter(N > 0) %>%
  group_by(rect, year, synaflokkur) %>%
  summarise(logN = log(mean(N, na.rm = T))) %>%
  rename(Survey = synaflokkur) %>%
  filter(Survey=='Spring survey') %>%
  # attach rectangle centre coordinates (lon/lat) for tiling
  bind_cols(mapplots::ices.rect(.$rect)) %>%
  ggplot() +
  coord_quickmap(xlim = c(-28, -10),ylim = c(62, 68))+
  geom_tile(aes(lon, lat, fill=logN)) +
  geom_polygon(data = geo::bisland, aes(lon, lat), fill = 'gray',col='black',lwd=0.1) +
  theme_bw()+
  scale_fill_viridis_c(direction = -1)+
  xlab('Longitude (W)') +
  ylab('Latitude (N)')+
  facet_wrap(~year, ncol = 5)
# Spring-survey map of log mean abundance of large fish (> 40 cm) by ICES
# rectangle, faceted by year.
l40_30_map <-
  spat_ind %>%
  mutate(rect = mapplots::ices.rect2(lon, lat)) %>%
  filter(lengd > 40) %>%
  filter(N > 0) %>%
  group_by(rect, year, synaflokkur) %>%
  summarise(logN = log(mean(N, na.rm = T))) %>%
  rename(Survey = synaflokkur) %>%
  filter(Survey=='Spring survey') %>%
  # attach rectangle centre coordinates (lon/lat) for tiling
  bind_cols(mapplots::ices.rect(.$rect)) %>%
  ggplot() +
  coord_quickmap(xlim = c(-28, -10),ylim = c(62, 68))+
  geom_tile(aes(lon, lat, fill=logN)) +
  geom_polygon(data = geo::bisland, aes(lon, lat), fill = 'gray',col='black',lwd=0.1) +
  theme_bw()+
  scale_fill_viridis_c(direction = -1)+
  xlab('Longitude (W)') +
  ylab('Latitude (N)')+
  facet_wrap(~year, ncol = 5)
# Write the spring-survey maps to file.
png(paste0('R/biol_figs_output/all_30_map.png'), height = png_dims[2], width = png_dims[1])
print(all_30_map)
dev.off()
png(paste0('R/biol_figs_output/l25_30_map.png'), height = png_dims[2], width = png_dims[1])
print(l25_30_map)
dev.off()
png(paste0('R/biol_figs_output/l40_30_map.png'), height = png_dims[2], width = png_dims[1])
print(l40_30_map)
dev.off()
# Autumn-survey map of log mean abundance (all lengths) by ICES rectangle,
# faceted by year.
all_35_map <-
  spat_ind %>%
  mutate(rect = mapplots::ices.rect2(lon, lat)) %>%
  #filter(lengd < 25) %>%
  filter(N > 0) %>%
  group_by(rect, year, synaflokkur) %>%
  summarise(logN = log(mean(N, na.rm = T))) %>%
  rename(Survey = synaflokkur) %>%
  filter(Survey=='Autumn survey') %>%
  # attach rectangle centre coordinates (lon/lat) for tiling
  bind_cols(mapplots::ices.rect(.$rect)) %>%
  ggplot() +
  coord_quickmap(xlim = c(-35, -7),ylim = c(62, 68))+
  geom_tile(aes(lon, lat, fill=logN)) +
  geom_polygon(data = geo::bisland, aes(lon, lat), fill = 'gray',col='black',lwd=0.1) +
  theme_bw()+
  scale_fill_viridis_c(direction = -1)+
  xlab('Longitude (W)') +
  ylab('Latitude (N)')+
  facet_wrap(~year, ncol = 5)
l25_35_map <-
spat_ind %>%
mutate(rect = mapplots::ices.rect2(lon, lat)) %>%
filter(lengd < 25) %>%
filter(N > 0, year > 1999, year!=2011) %>%
group_by(rect, year, synaflokkur) %>%
summarise(logN = log(mean(N, na.rm = T))) %>%
rename(Survey = synaflokkur) %>%
filter(Survey=='Autumn survey') %>%
bind_cols(mapplots::ices.rect(.$rect)) %>%
ggplot() +
coord_quickmap(xlim = c(-35, -7),ylim = c(62, 68))+
geom_tile(aes(lon, lat, fill=logN)) +
geom_polygon(data = geo::bisland, aes(lon, lat), fill = 'gray',col='black',lwd=0.1) +
theme_bw()+
scale_fill_viridis_c(direction = -1)+
xlab('Longitude (W)') +
ylab('Latitude (N)')+
facet_wrap(~year, ncol = 4)
l40_35_map <-
spat_ind %>%
mutate(rect = mapplots::ices.rect2(lon, lat)) %>%
filter(lengd > 40) %>%
filter(N > 0, year > 1999, year!=2011) %>%
group_by(rect, year, synaflokkur) %>%
summarise(logN = log(mean(N, na.rm = T))) %>%
rename(Survey = synaflokkur) %>%
filter(Survey=='Autumn survey') %>%
bind_cols(mapplots::ices.rect(.$rect)) %>%
ggplot() +
coord_quickmap(xlim = c(-35, -7),ylim = c(62, 68))+
geom_tile(aes(lon, lat, fill=logN)) +
geom_polygon(data = geo::bisland, aes(lon, lat), fill = 'gray',col='black',lwd=0.1) +
theme_bw()+
scale_fill_viridis_c(direction = -1)+
xlab('Longitude (W)') +
ylab('Latitude (N)')+
facet_wrap(~year, ncol = 4)
png(paste0('R/biol_figs_output/all_35_map.png'), height = png_dims[2], width = png_dims[1])
print(all_35_map)
dev.off()
png(paste0('R/biol_figs_output/l25_35_map.png'), height = png_dims[2], width = png_dims[1])
print(l25_35_map)
dev.off()
png(paste0('R/biol_figs_output/l40_35_map.png'), height = png_dims[2], width = png_dims[1])
print(l40_35_map)
dev.off()
# --- Von Bertalanffy growth fits per sampler x gender x year --------------
# Fits log(length) ~ log(Linf * (1 - exp(-K * (age - t0)))) by nls for each
# combination, and returns, per combination, a list with:
#   mod: tidied model coefficients (or NULL when nls fails to converge)
#   x:   the input rows joined with fitted lengths at integer ages 0-60
# Failed fits are skipped silently (try(..., silent = TRUE)).
vb_pars_bystock_byyear_overtime <-
  all %>%
  left_join(all_st) %>%
  filter(!is.na(age), !is.na(length_cm), !is.na(gender), length_cm > 0, !is.na(division)) %>%
  mutate(age = age + (month-1)/12,              # fractional age from sampling month
         person_sh = person %>% substr(., 1, 3)) %>%
  unite(st_gen, person_sh, gender, yr, remove = F) %>%
  split(., .$st_gen) %>% #.[[19]]->x
  purrr::map(function(x){
    print(paste0(unique(x$division), '_', unique(x$gender)))  # progress marker
    prL<-seq(0,120,1)   # NOTE(review): prL appears unused below — confirm and remove
    prA<-seq(0,60,1)    # ages at which fitted lengths are predicted
    mod <- NULL; fit <- NULL
    # Fit on the log scale; on convergence failure mod stays NULL.
    try(
      {mod <- nls(log(length_cm)~log(Linf*(1-exp(-K*(age-t0)))), data=x, start=list(Linf=50, K=0.2, t0=-0.5))
      fit <- exp(predict(mod, data.frame(age=prA),type="response"))},
      silent = TRUE)
    if(!is.null(mod)){
      y <-
        list(
          mod = mod %>%
            broom::tidy() %>%
            mutate(person = unique(x$person),
                   gender = unique(x$gender),
                   yr = unique(x$yr)),
          # full_join keeps every observed row and adds one fitted-length
          # row per predicted age.
          x = full_join(x, data.frame(age = prA,
                                      fit,
                                      person = unique(x$person),
                                      gender = unique(x$gender),
                                      yr = unique(x$yr))) %>% select(-c(st_gen, person_sh))
        )
    } else { y <- list (mod = NULL, x = x %>% select(-c(st_gen, person_sh)))}
    return(y)
  })
# Length-at-age boxplots by year for stock aru.27.5a14, overlaid with the
# year-specific fitted von Bertalanffy curves, faceted by gender.
# The boxplot layer and the line layer each rebuild the same
# person -> stock and F/M/U -> Female/Male/Unidentified recodings from
# vb_pars_bystock_byyear_overtime.
vb_plot_overtime_byyear_aru.27.5a14 <-
vb_pars_bystock_byyear_overtime %>%
flatten() %>%
keep(., names(.)=="x") %>%
bind_rows() %>%
mutate(stock = ifelse(person=='Elvar Hallfredsson', 'aru.27.123a4',
ifelse(person=='Pamela J. Woods', 'aru.27.5a14',
ifelse(person=='Lise H. Ofstad', 'aru.27.5b6a', person))),
gender = ifelse(gender=='F', 'Female',
ifelse(gender=='M', 'Male',
ifelse(gender=='U', 'Unidentified', gender))),
Year = as.factor(yr)) %>%
filter(!is.na(stock), yr > 2004) %>%
unite(age_year, age, yr, remove = FALSE) %>%
rename(`Length (cm)` = length_cm, Age = age, Stock = stock, Gender = gender) %>%
filter(Age < 30, Stock== 'aru.27.5a14') %>%
ggplot() +
geom_boxplot(aes(x = Age, y = `Length (cm)`, group = age_year, color = Year, fill = Year), alpha = 0.2) +
geom_line(aes(x = Age, y = fit, color = Year),
data = vb_pars_bystock_byyear_overtime %>%
flatten() %>%
keep(., names(.)=="x") %>%
bind_rows() %>%
mutate(stock = ifelse(person=='Elvar Hallfredsson', 'aru.27.123a4',
ifelse(person=='Pamela J. Woods', 'aru.27.5a14',
ifelse(person=='Lise H. Ofstad', 'aru.27.5b6a', person))),
# BUG FIX: also recode 'U' -> 'Unidentified' in the line-layer data, as
# done for the boxplot layer (and in the aru.27.5b6a plot below).
# Without it, fitted curves for unidentified-sex fish fall into a
# separate "U" facet instead of the "Unidentified" facet.
gender = ifelse(gender=='F', 'Female',
ifelse(gender=='M', 'Male',
ifelse(gender=='U', 'Unidentified', gender))),
Year = as.factor(yr)) %>%
filter(!is.na(stock), !is.na(fit), yr > 2004) %>%
unite(age_stock, age, stock, remove = FALSE) %>%
rename(Age = age, Stock = stock, Gender = gender) %>%
filter(Age < 30, Stock=='aru.27.5a14') )+
theme_bw() +
scale_fill_viridis_d() +
scale_color_viridis_d() +
facet_wrap(~Gender, ncol = 1)
# Length-at-age boxplots by year for stock aru.27.123a4, overlaid with the
# year-specific fitted von Bertalanffy curves, faceted by gender.
vb_plot_overtime_byyear_aru.27.123a4 <-
vb_pars_bystock_byyear_overtime %>%
flatten() %>%
keep(., names(.)=="x") %>%
bind_rows() %>%
mutate(stock = ifelse(person=='Elvar Hallfredsson', 'aru.27.123a4',
ifelse(person=='Pamela J. Woods', 'aru.27.5a14',
ifelse(person=='Lise H. Ofstad', 'aru.27.5b6a', person))),
gender = ifelse(gender=='F', 'Female',
ifelse(gender=='M', 'Male',
ifelse(gender=='U', 'Unidentified', gender))),
Year = as.factor(yr)) %>%
filter(!is.na(stock), yr > 2004) %>%
unite(age_year, age, yr, remove = FALSE) %>%
rename(`Length (cm)` = length_cm, Age = age, Stock = stock, Gender = gender) %>%
filter(Age < 30, Stock== 'aru.27.123a4') %>%
ggplot() +
geom_boxplot(aes(x = Age, y = `Length (cm)`, group = age_year, color = Year, fill = Year), alpha = 0.2) +
geom_line(aes(x = Age, y = fit, color = Year),
data = vb_pars_bystock_byyear_overtime %>%
flatten() %>%
keep(., names(.)=="x") %>%
bind_rows() %>%
mutate(stock = ifelse(person=='Elvar Hallfredsson', 'aru.27.123a4',
ifelse(person=='Pamela J. Woods', 'aru.27.5a14',
ifelse(person=='Lise H. Ofstad', 'aru.27.5b6a', person))),
# BUG FIX: also recode 'U' -> 'Unidentified' in the line-layer data, as
# done for the boxplot layer (and in the aru.27.5b6a plot below).
# Without it, fitted curves for unidentified-sex fish fall into a
# separate "U" facet instead of the "Unidentified" facet.
gender = ifelse(gender=='F', 'Female',
ifelse(gender=='M', 'Male',
ifelse(gender=='U', 'Unidentified', gender))),
Year = as.factor(yr)) %>%
filter(!is.na(stock), !is.na(fit), yr > 2004) %>%
unite(age_stock, age, stock, remove = FALSE) %>%
rename(Age = age, Stock = stock, Gender = gender) %>%
filter(Age < 30, Stock=='aru.27.123a4') )+
theme_bw() +
scale_fill_viridis_d() +
scale_color_viridis_d() +
facet_wrap(~Gender, ncol = 1)
# Length-at-age boxplots by year for stock aru.27.5b6a, overlaid with the
# year-specific fitted von Bertalanffy curves, faceted by gender.
# Here the F/M/U recoding is applied identically in both layers, so
# boxplots and fitted lines share facets.
vb_plot_overtime_byyear_aru.27.5b6a <-
vb_pars_bystock_byyear_overtime %>%
flatten() %>%
keep(., names(.)=="x") %>%
bind_rows() %>%
mutate(stock = ifelse(person=='Elvar Hallfredsson', 'aru.27.123a4',
ifelse(person=='Pamela J. Woods', 'aru.27.5a14',
ifelse(person=='Lise H. Ofstad', 'aru.27.5b6a', person))),
gender = ifelse(gender=='F', 'Female',
ifelse(gender=='M', 'Male',
ifelse(gender=='U', 'Unidentified', gender))),
Year = as.factor(yr)) %>%
filter(!is.na(stock), yr > 2004) %>%
unite(age_year, age, yr, remove = FALSE) %>%
rename(`Length (cm)` = length_cm, Age = age, Stock = stock, Gender = gender) %>%
filter(Age < 30, Stock== 'aru.27.5b6a') %>%
ggplot() +
geom_boxplot(aes(x = Age, y = `Length (cm)`, group = age_year, color = Year, fill = Year), alpha = 0.2) +
geom_line(aes(x = Age, y = fit, color = Year),
data = vb_pars_bystock_byyear_overtime %>%
flatten() %>%
keep(., names(.)=="x") %>%
bind_rows() %>%
mutate(stock = ifelse(person=='Elvar Hallfredsson', 'aru.27.123a4',
ifelse(person=='Pamela J. Woods', 'aru.27.5a14',
ifelse(person=='Lise H. Ofstad', 'aru.27.5b6a', person))),
gender = ifelse(gender=='F', 'Female',
ifelse(gender=='M', 'Male',
ifelse(gender=='U', 'Unidentified', gender))),
Year = as.factor(yr)) %>%
filter(!is.na(stock), !is.na(fit), yr > 2004) %>%
unite(age_stock, age, stock, remove = FALSE) %>%
rename(Age = age, Stock = stock, Gender = gender) %>%
filter(Age < 30, Stock=='aru.27.5b6a') )+
theme_bw() +
scale_fill_viridis_d() +
scale_color_viridis_d() +
facet_wrap(~Gender, ncol = 1)
# Export the three per-stock VB growth plots at 3/4 of the standard
# figure dimensions.
png(paste0('R/biol_figs_output/vb_plot_overtime_byyear_aru.27.123a4.png'), height = png_dims[2]*0.75, width = png_dims[1]*0.75)
print(vb_plot_overtime_byyear_aru.27.123a4)
dev.off()
png(paste0('R/biol_figs_output/vb_plot_overtime_byyear_aru.27.5a14.png'), height = png_dims[2]*0.75, width = png_dims[1]*0.75)
print(vb_plot_overtime_byyear_aru.27.5a14)
dev.off()
png(paste0('R/biol_figs_output/vb_plot_overtime_byyear_aru.27.5b6a.png'), height = png_dims[2]*0.75, width = png_dims[1]*0.75)
print(vb_plot_overtime_byyear_aru.27.5b6a)
dev.off()
#OTHER ZOOMED MAPS
# Re-export the l50 maturity maps (l50_plot_0/_300/_500, built earlier in
# the script) zoomed to four regions, with year labels placed per panel
# via geom_text.

# Faroes region.
png(paste0('R/biol_figs_output/l50_plot_0_faroes.png'), height = png_dims[1]*0.7, width = png_dims[1]*0.7)
print(l50_plot_0 +
coord_sf(xlim = c(-12, -2),ylim = c(60, 63.5))+
geom_text(aes(x = x, y = y, label = label), data = tibble(yr = yr_min:yr_max, x= -8, y = 63.25, label = yr))
)
dev.off()
png(paste0('R/biol_figs_output/l50_plot_300_faroes.png'), height = png_dims[1]*0.7, width = png_dims[1]*0.7)
print(l50_plot_300 +
coord_sf(xlim = c(-12, -2),ylim = c(60, 63.5))+
geom_text(aes(x = x, y = y, label = label), data = tibble(yr = yr_min:yr_max, x= -8, y = 63.25, label = yr))
)
dev.off()
png(paste0('R/biol_figs_output/l50_plot_500_faroes.png'), height = png_dims[1]*0.7, width = png_dims[1]*0.7)
print(l50_plot_500+
coord_sf(xlim = c(-12, -2),ylim = c(60, 63.5))+
geom_text(aes(x = x, y = y, label = label), data = tibble(yr = yr_min:yr_max, x= -8, y = 63.25, label = yr))
)
dev.off()

# Iceland region.
png(paste0('R/biol_figs_output/l50_plot_0_iceland.png'), height = png_dims[1]*0.7, width = png_dims[1]*0.7)
print(l50_plot_0 +
coord_sf(xlim = c(-31, -12),ylim = c(62, 68))+
geom_text(aes(x = x, y = y, label = label), data = tibble(yr = yr_min:yr_max, x= -28, y = 67, label = yr))
)
dev.off()
png(paste0('R/biol_figs_output/l50_plot_300_iceland.png'), height = png_dims[1]*0.7, width = png_dims[1]*0.7)
print(l50_plot_300 +
coord_sf(xlim = c(-31, -12),ylim = c(62, 68)) +
geom_text(aes(x = x, y = y, label = label), data = tibble(yr = yr_min:yr_max, x= -28, y = 67, label = yr))
)
dev.off()
png(paste0('R/biol_figs_output/l50_plot_500_iceland.png'), height = png_dims[1]*0.7, width = png_dims[1]*0.7)
print(l50_plot_500+
coord_sf(xlim = c(-31, -12),ylim = c(62, 68)) +
geom_text(aes(x = x, y = y, label = label), data = tibble(yr = yr_min:yr_max, x= -28, y = 67, label = yr))
)
dev.off()

# Southern Norway region.
png(paste0('R/biol_figs_output/l50_plot_0_norway_s.png'), height = png_dims[1]*0.7, width = png_dims[1]*0.7)
print(l50_plot_0 +
coord_sf(xlim = c(-2, 18),ylim = c(57, 66)) +
geom_text(aes(x = x, y = y, label = label), data = tibble(yr = yr_min:yr_max, x=2, y = 65, label = yr))
)
dev.off()
png(paste0('R/biol_figs_output/l50_plot_300_norway_s.png'), height = png_dims[1]*0.7, width = png_dims[1]*0.7)
print(l50_plot_300 +
coord_sf(xlim = c(-2, 18),ylim = c(57, 66)) +
geom_text(aes(x = x, y = y, label = label), data = tibble(yr = yr_min:yr_max, x= 2, y = 65, label = yr))
)
dev.off()
png(paste0('R/biol_figs_output/l50_plot_500_norway_s.png'), height = png_dims[1]*0.7, width = png_dims[1]*0.7)
print(l50_plot_500+
coord_sf(xlim = c(-2, 18),ylim = c(57, 66)) +
geom_text(aes(x = x, y = y, label = label), data = tibble(yr = yr_min:yr_max, x= 8, y = 65, label = yr))
)
dev.off()

# Northern Norway region (rotated x labels; legend moved inside panel).
png(paste0('R/biol_figs_output/l50_plot_0_norway_n.png'), height = png_dims[1]*0.7, width = png_dims[1]*0.7)
print(l50_plot_0 +
coord_sf(xlim = c(5, 31),ylim = c(66, 79)) +
geom_text(aes(x = x, y = y, label = label), data = tibble(yr = yr_min:yr_max, x= 8, y = 78, label = yr)) +
theme(axis.text.x = element_text(angle = 90))+
theme(legend.position = c(0.9, 0.07))
)
dev.off()
png(paste0('R/biol_figs_output/l50_plot_300_norway_n.png'), height = png_dims[1]*0.7, width = png_dims[1]*0.7)
print(l50_plot_300 +
coord_sf(xlim = c(5, 31),ylim = c(66, 79)) +
geom_text(aes(x = x, y = y, label = label), data = tibble(yr = yr_min:yr_max, x= 8, y = 78, label = yr)) +
theme(axis.text.x = element_text(angle = 90))+
theme(legend.position = c(0.9, 0.07))
)
dev.off()
png(paste0('R/biol_figs_output/l50_plot_500_norway_n.png'), height = png_dims[1]*0.75, width = png_dims[1]*0.75)
print(l50_plot_500+
coord_sf(xlim = c(5, 31),ylim = c(66, 79)) +
geom_text(aes(x = x, y = y, label = label), data = tibble(yr = yr_min:yr_max, x= 8, y = 78, label = yr)) +
theme(axis.text.x = element_text(angle = 90))+
theme(legend.position = c(0.9, 0.07))
)
dev.off()
#Greenland indices
# Survey indices for Greenland: counts by year and 5-cm size class
# (in millions), and raw length-frequency distributions by year.
# NOTE(review): the final 'si.45-50' class catches all fish >= 45 cm, so
# the upper bound in the label may understate the true lengths — confirm.
GrSI <-
Gre %>%
left_join(Gre_st) %>%
mutate(si = ifelse(length_cm < 25, 'si.10-25',
ifelse(length_cm >=25 & length_cm < 30, 'si.25-30',
ifelse(length_cm >= 30 & length_cm < 35, 'si.30-35',
ifelse(length_cm >= 35 & length_cm < 40, 'si.35-40',
ifelse(length_cm >= 40 & length_cm < 45, 'si.40-45',
ifelse(length_cm >= 45, 'si.45-50', length_cm))))))) %>%
group_by(year, si) %>%
count() %>%
mutate(`Numbers 000000s` = n/1000000) %>%   # scale counts to millions
rename(Year = year) %>%
ggplot() +
geom_point(aes(x = Year, y = `Numbers 000000s`)) +
theme_bw() +
facet_wrap(~si, scales = 'free_y')

# Length distribution per year.
Grlength <-
Gre %>%
left_join(Gre_st) %>%
group_by(year, length_cm) %>%
count() %>%
rename(`Length (cm)` = length_cm, Year = year, Count = n) %>%
ggplot() +
geom_col(aes(x = `Length (cm)`, y = Count)) +
theme_bw() +
facet_wrap(~Year, scales = 'free_y')

# Export the Greenland figures at half height.
png(paste0('R/biol_figs_output/GrSI.png'), height = png_dims[1]*0.5, width = png_dims[1]*0.75)
print(GrSI)
dev.off()
png(paste0('R/biol_figs_output/GrLength.png'), height = png_dims[1]*0.5, width = png_dims[1]*0.75)
print(Grlength)
dev.off()
|
d36104c35ecfe070d029ad3f1ebaee8061b1e48f
|
08e3d1c102f2449a67dcab76208e7358518dd4ba
|
/R/graphab_project_desc.R
|
8cd257f004975e14ffb54f2f9c5bbe2ebd1ddd22
|
[] |
no_license
|
cran/graph4lg
|
f410231b2bedea19c8bf8d66edeb273e490394db
|
5de8af31ae9011793e7aaa6bc77a9e4c8e199002
|
refs/heads/master
| 2023-02-06T17:16:40.474741
| 2023-01-30T13:00:05
| 2023-01-30T13:00:05
| 198,413,055
| 3
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,215
|
r
|
graphab_project_desc.R
|
#' Describe the objects of a Graphab project
#'
#' @description The function describes the objects of a Graphab project
#'
#' @inheritParams get_graphab_linkset
#' @param mode A character string indicating the objects of the project that
#' are described. It must be either:\itemize{
#' \item{\code{mode='patches'}(default): The habitat patches are described
#' with synthetic descriptors (code, number, mean capacity, median capacity,
#' capacity harmonic mean, capacity Gini coefficient) and a histogram of
#' capacity distribution.}
#' \item{\code{mode='linkset'}: The links of a link set are described
#' with synthetic descriptors (codes, costs, number, mean cost distance,
#' median cost distance, cost distance harmonic mean, cost distance Gini
#' coefficient) and a histogram of cost distance distribution.}
#' \item{\code{mode='both'}: Both the patches and links of a linkset are
#' described}
#' }
#' @param fig Logical (default = FALSE) indicating whether to plot a figure of
#' the resulting spatial graph. The figure is plotted using function
#' \code{\link{plot_graph_lg}}. The plotting can be long if the graph has many
#' nodes and links.
#' @param return_val Logical (default = TRUE) indicating whether the project
#' features are returned as a list (TRUE) or only displayed in the
#' R console (FALSE).
#' @import ggplot2
#' @export
#' @author P. Savary
#' @examples
#' \dontrun{
#' graphab_project_desc(proj_name = "grphb_ex",
#'                      mode = "patches",
#'                      fig = FALSE)
#' }

graphab_project_desc <- function(proj_name,
                                 mode = "patches",
                                 linkset = NULL,
                                 proj_path = NULL,
                                 fig = FALSE,
                                 return_val = TRUE){

  #########################################
  # Check for project directory path: default to the working directory.
  if(!is.null(proj_path)){
    if(!dir.exists(proj_path)){
      stop(paste0(proj_path, " is not an existing directory or the path is ",
                  "incorrectly specified."))
    } else {
      proj_path <- normalizePath(proj_path)
    }
  } else {
    proj_path <- normalizePath(getwd())
  }

  #########################################
  # Check for proj_name class and that the project's .xml file exists.
  if(!inherits(proj_name, "character")){
    stop("'proj_name' must be a character string")
  } else if (!(paste0(proj_name, ".xml") %in%
               list.files(path = paste0(proj_path, "/", proj_name)))){
    stop("The project you refer to does not exist.
         Please use graphab_project() before.")
  }

  proj_end_path <- paste0(proj_path, "/", proj_name, "/", proj_name, ".xml")

  ##########################################
  # Check for mode
  if(!inherits(mode, "character")){
    stop("'mode' must be a character string")
  } else if(!(mode %in% c("patches", "linkset", "both"))){
    stop("'mode' must be equal to either 'patches', 'linkset' or 'both'.")
  }

  ##########################################
  # Check for return_val
  if(!inherits(return_val, "logical")){
    stop("'return_val' must be a logical")
  }

  #####
  # Describe patches
  if(mode %in% c("patches", "both")){

    # Get all codes
    all_codes <- get_graphab_raster_codes(proj_name = proj_name,
                                          mode = 'all',
                                          proj_path = proj_path)
    # Get habitat code
    hab_code <- get_graphab_raster_codes(proj_name = proj_name,
                                         mode = 'habitat',
                                         proj_path = proj_path)
    # Get patches information
    patches <- get_graphab_metric(proj_name = proj_name,
                                  proj_path = proj_path)

    # Build the console-summary strings for the patches.
    all_codes_p <- paste0("Raster source layer codes: ",
                          paste(all_codes, collapse = ", "))
    hab_codes_p <- paste0("Habitat patch codes: ",
                          paste(hab_code, collapse = ", "))
    nb_p <- paste0("Number of patches: ", nrow(patches))
    cap <- "Patch capacities:"
    total_cap <- paste0("   Total: ", sum(patches$Capacity))
    min_cap <- paste0("   Minimum: ", min(patches$Capacity))
    max_cap <- paste0("   Maximum: ", max(patches$Capacity))
    mean_cap <- paste0("   Mean: ", mean(patches$Capacity))
    median_cap <- paste0("   Median: ", stats::median(patches$Capacity))
    sd_cap <- paste0("   Standard deviation: ", stats::sd(patches$Capacity))
    hm_cap <- paste0("   Harmonic mean: ", harm_mean(patches$Capacity))
    gini_cap <- paste0("   Gini index: ", gini_coeff(patches$Capacity))

    cat(c(all_codes_p, "\n",
          hab_codes_p, "\n",
          nb_p, "\n",
          cap, "\n",
          total_cap, "\n",
          min_cap, "\n",
          max_cap, "\n",
          mean_cap, "\n",
          median_cap, "\n",
          sd_cap, "\n",
          hm_cap, "\n",
          gini_cap, "\n"),
        "\n", sep = "")

    if(return_val){
      res_p <- list(all_codes, hab_code,
                    nrow(patches), sum(patches$Capacity),
                    min(patches$Capacity), max(patches$Capacity),
                    mean(patches$Capacity), stats::median(patches$Capacity),
                    stats::sd(patches$Capacity), harm_mean(patches$Capacity),
                    gini_coeff(patches$Capacity))
      names(res_p) <- c("Raster source layer codes",
                        "Habitat patch codes",
                        "Number of patches",
                        "Total patch capacities",
                        "Min. patch capacity",
                        "Max. patch capacity",
                        "Mean patch capacity",
                        "Median patch capacity",
                        "Std. deviation patch capacity",
                        "Harmonic mean patch capacity",
                        "Gini coeff. patch capacity")
    }

    if(fig){
      # Histogram of patch capacities with 80 bins across the range;
      # skipped (with a message) when all capacities are identical.
      range <- max(patches$Capacity) - min(patches$Capacity)
      if(range > 0){
        b_w <- range/80
        fig_patch <- ggplot(data = patches,
                            aes(x = .data$Capacity)) +
          geom_histogram(binwidth = b_w,
                         fill = "#396D35",
                         color = "#776F62", size = .2) +
          labs(x = "Patch capacities",
               y = "Frequencies")
        print(fig_patch)
      } else {
        message(paste0("The range of patch capacities is equal to 0. ",
                       "Plotting an histogram does not make sense."))
        fig_patch <- NULL
      }
    } else {
      fig_patch <- NULL
    }
  }

  if(mode %in% c("linkset", "both")){

    # Get linkset
    linkset_desc <- get_graphab_linkset_cost(proj_name = proj_name,
                                             linkset = linkset,
                                             proj_path = proj_path)
    # Get linkset values
    all_links <- get_graphab_linkset(proj_name = proj_name,
                                     linkset = linkset,
                                     proj_path = proj_path)

    # Build the console-summary strings for the linkset.
    all_codes_l <- paste0("Raster source layer codes considered for the costs: ",
                          paste(linkset_desc$code, collapse = ", "))
    all_cost_l <- paste0("Corresponding costs: ",
                         paste(linkset_desc$cost, collapse = ", "))
    nb_l <- paste0("Number of links: ", nrow(all_links))
    cd <- "Cost distances:"
    min_cd <- paste0("   Minimum: ", min(all_links$Dist))
    max_cd <- paste0("   Maximum: ", max(all_links$Dist))
    mean_cd <- paste0("   Mean: ", mean(all_links$Dist))
    median_cd <- paste0("   Median: ", stats::median(all_links$Dist))
    sd_cd <- paste0("   Standard deviation: ", stats::sd(all_links$Dist))
    hm_cd <- paste0("   Harmonic mean: ", harm_mean(all_links$Dist))
    gini_cd <- paste0("   Gini index: ", gini_coeff(all_links$Dist))

    # FIX: print the number of links before the "Cost distances:" header,
    # consistent with the patches section (it was previously printed under
    # the header, where only the indented cost-distance statistics belong).
    cat(c(all_codes_l, "\n",
          all_cost_l, "\n",
          nb_l, "\n",
          cd, "\n",
          min_cd, "\n",
          max_cd, "\n",
          mean_cd, "\n",
          median_cd, "\n",
          sd_cd, "\n",
          hm_cd, "\n",
          gini_cd, "\n"),
        "\n", sep = "")

    if(return_val){
      res_l <- list(linkset_desc$code, linkset_desc$cost,
                    nrow(all_links),
                    min(all_links$Dist), max(all_links$Dist),
                    mean(all_links$Dist), stats::median(all_links$Dist),
                    stats::sd(all_links$Dist), harm_mean(all_links$Dist),
                    gini_coeff(all_links$Dist))
      names(res_l) <- c("Raster source layer codes considered for the costs",
                        "Corresponding costs",
                        "Number of links",
                        "Min. cost distance",
                        "Max. cost distance",
                        "Mean cost distance",
                        "Median cost distance",
                        "Std. deviation cost distance",
                        "Harmonic mean cost distance",
                        "Gini coeff. cost distance")
    }

    if(fig){
      # Histogram of cost distances with 80 bins across the range;
      # skipped (with a message) when all distances are identical.
      range <- max(all_links$Dist) - min(all_links$Dist)
      if(range > 0){
        b_w <- range/80
        fig_cd <- ggplot(data = all_links,
                         aes(x = .data$Dist)) +
          geom_histogram(binwidth = b_w,
                         fill = "#396D35",
                         color = "#776F62", size = .2) +
          labs(x = "Cost distances",
               y = "Frequencies")
        print(fig_cd)
      } else {
        message(paste0("The range of cost distances is equal to 0. ",
                       "Plotting an histogram does not make sense."))
        fig_cd <- NULL
      }
    } else {
      fig_cd <- NULL
    }
  }

  # Return the summaries as a (possibly nested) named list when requested.
  if(return_val){
    if(mode == "both"){
      res <- list(res_p, res_l)
    } else if(mode == "patches"){
      res <- res_p
    } else if(mode == "linkset"){
      res <- res_l
    }
    return(res)
  }

}
|
3360e27744c3b6b38fefd4e2cb65a4129a06cc58
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/selfingTree/examples/getTargets.Rd.R
|
f1805d3427e66b73b33e60ba5cad1901f69bda59
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 198
|
r
|
getTargets.Rd.R
|
library(selfingTree)
### Name: getTargets
### Title: Obtain all possible genotypes that match a certain target
###   configuration
### Aliases: getTargets
### ** Examples

# Enumerate genotypes matching the three-marker target "AHB"
# (presumably A/B = parental homozygotes, H = heterozygote — see
# ?getTargets to confirm the encoding).
getTargets("AHB")
|
09e2fd1d73126b5522ca0c76d59c3852644050c1
|
e858606ccacb9a78bfb48ca90b56d9469cff7a09
|
/RImageBook/man/crop.Rd
|
307a343734f7faccd1446a03154075acc6c00dee
|
[] |
no_license
|
tkatsuki/rimagebook
|
51f41166e98d442f7b9e2226b65046586f95dfc8
|
d26a1502faf39804bf8cb06d1699de24e6d53d58
|
refs/heads/master
| 2021-01-19T17:59:07.539596
| 2015-06-29T21:12:57
| 2015-06-29T21:12:57
| 38,264,836
| 1
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 907
|
rd
|
crop.Rd
|
\name{cropImage}
\alias{cropImage}
\alias{cropROI}
\concept{image cropping}
\title{Cropping image}
\description{
These functions interactively crop a part of an image displayed in the plot area
using the mouse pointer.
\code{cropImage} crops a rectangular area specified by a set of opposite vertices.
\code{cropROI} crops a polygonal area specified by any desired number of vertices.
}
\usage{
cropImage(img)
cropROI(img)
}
\arguments{
\item{img}{An image of \code{imagedata} class.}
}
\value{
Returns a cropped image in \code{imagedata} class.
}
\examples{
violet <- readTiff(system.file("samples/violet.tif", package="biOps"))
violetroi <- cropROI(violet)
violetcr <- cropImage(violet)
plot(violetroi)
plot(violetcr)
}
\author{
Yuichiro Hourai, \email{yhourai@gmail.com}
Takeo Katsuki, \email{takeo.katsuki@gmail.com}
}
|
40d966053687e818ce3e32ca3c4b400a7b48acbe
|
ee99cdad0ef6bc53d1e876e1ec87137973f9169c
|
/reproducibility/Fig 3/Fig 3 - VIM2_Solubility.R
|
e79eff9d1b4bb1684d995645abf9df09aa84722f
|
[
"Apache-2.0"
] |
permissive
|
lucidbio/crypticvariation
|
f0530953957c00b290c4e1e9dce8cab7c86dc424
|
e4a4198bfbec55edc230830a31ee33eb9fa21f7d
|
refs/heads/master
| 2022-11-17T06:10:24.092320
| 2020-03-02T11:30:59
| 2020-03-02T11:30:59
| 186,026,848
| 1
| 0
|
Apache-2.0
| 2022-11-08T03:16:26
| 2019-05-10T17:19:58
|
JavaScript
|
UTF-8
|
R
| false
| false
| 2,006
|
r
|
Fig 3 - VIM2_Solubility.R
|
# Code to reliably reproduce Fig 3
library(plotly)

# NOTE(review): hard-coded absolute path makes the script machine-specific;
# consider a relative path or here::here().
directory <- 'D:/eLife Ambassador/Readability Initiative/01 - Cryptic Genetic Variation/Supplementary Data from paper/Plots/Fig 3'
setwd(directory)
filename <- 'Fig 3 - VIM2.csv'
basename <- unlist(strsplit(filename, '.', fixed=TRUE))[1]  # file name without extension
data <- read.delim(filename, header=TRUE, sep=',')

# Plot a line plot for each ortholog's kcat/KM and Solubility, as a function of the rounds of evolution.
# Dual y-axes: fold change in fitness (left) and kcat/KM on a log scale (right).
font <- list(family = "Arial", size = 16)
x <- list(title="Evolution Rounds", titlefont = font, ticks="outside", tickvals = 1:11, ticktext = as.character(data$Variant))
y1 <- list(title="Fold change in fitness (Variant/WT)", titlefont=font, ticks="outside")
# color_palette = as.character(unique(data$Color))
y2 <- list(title="kcat/KM (M-1 s-1)", type = "log", range = c(-1, 3), titlefont=font, ticks="outside", overlaying = "y", side="right")

p <- plot_ly(data=data) %>%
  add_trace(x= ~Order,
            y = ~Fitness,
            type='scatter',
            mode = 'lines+markers',
            marker = list(size=7),
            hoverinfo = 'text',
            hovertext = ~paste('<b>Ortholog: </b>', Ortholog,
                               '<b><br> Round: </b>', Variant,
                               '<b><br> Fold Change Fitness: </b>', Fitness,
                               '<b><br> kcat/kM: </b>', kcat)) %>%
  add_trace(x= ~Order,
            y = ~Solubility,
            type='scatter',
            mode = 'lines+markers',
            marker = list(size=7),
            hoverinfo = 'text',
            hovertext = ~paste('<b>Ortholog: </b>', Ortholog,
                               '<b><br> Round: </b>', Variant,
                               '<b><br> Fold Change Fitness: </b>', Fitness,
                               '<b><br> kcat/kM: </b>', kcat),
            yaxis = "y2") %>%
  # BUG FIX: the original called layout(p, xaxis=x, ...). Inside the pipe
  # the plot object is already layout()'s first argument, and `p` does not
  # exist yet while the right-hand side is being evaluated, so a fresh
  # session errors with "object 'p' not found".
  layout(xaxis=x, yaxis=y1, yaxis2=y2, showlegend = FALSE)
p

# Export the plot specification as JSON next to the CSV.
plot_json <- plotly_json(p, jsonedit = FALSE)
write(plot_json, paste0(basename, '.json'))
|
fecfdd067b596251f1c49f8c194a0dbfcb88e046
|
04a7e4899d9aac6d1dbb0c37a4c45e5edb4f1612
|
/man/extract_rmd_table_label.Rd
|
b7c629839bf1f2d18ea556da0850a33723be3021
|
[
"MIT"
] |
permissive
|
pbs-assess/csasdown
|
796ac3b6d30396a10ba482dfd67ec157d7deadba
|
85cc4dda03d6513c11350f7f607cce1cacb6bf6a
|
refs/heads/main
| 2023-08-16T17:22:18.050497
| 2023-08-16T00:35:31
| 2023-08-16T00:35:31
| 136,674,837
| 47
| 18
|
NOASSERTION
| 2023-06-20T01:45:07
| 2018-06-08T23:31:16
|
R
|
UTF-8
|
R
| false
| true
| 1,011
|
rd
|
extract_rmd_table_label.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/extract_rmd_table_label.R
\name{extract_rmd_table_label}
\alias{extract_rmd_table_label}
\title{Search chunk to find an Rmarkdown table label}
\usage{
extract_rmd_table_label(chunk)
}
\arguments{
\item{chunk}{A vector of character strings representing lines for RMD code}
}
\value{
A list of length two. The elements are:
\enumerate{
\item A vector representing the lines of the label chunk or \code{NULL} if none found
\item A vector representing the remainder of the chunk after the label or
\code{NULL} if the label reached to the end of the chunk
}
}
\description{
Search chunk to find an Rmarkdown table label. There are two possible
starting lines to the table caption
}
\details{
The two caption possibilities are of the forms:
\enumerate{
\item Table: Caption text
Optional text..
More optional text..
...
\item Table: -Any amount of whitespace-
Caption text..
Optional text..
More optional text..
...
}
}
\keyword{internal}
|
3c2a883cc6340560455ad12ea68d78fdf6953b89
|
7f107bd4be987667feb046548868ed88d06cdb88
|
/contributed/Nonclinical/R/toxSummary/app.R
|
c69eacd557fd83b240577ac36548372321c7d8a7
|
[
"MIT"
] |
permissive
|
phuse-org/phuse-scripts
|
d24d748de6cbdfd98af8be8d912749bb04811570
|
11b416b7b379f6094caccbc1f9069713033500c4
|
refs/heads/master
| 2023-08-08T17:41:11.238199
| 2023-08-01T15:21:20
| 2023-08-01T15:21:20
| 32,612,654
| 107
| 98
|
MIT
| 2023-08-01T15:02:06
| 2015-03-20T23:42:41
|
SAS
|
UTF-8
|
R
| false
| false
| 83,606
|
r
|
app.R
|
# libraries
library(shiny)
library(ggplot2)
library(stringr)
library(htmltools)
library(shinydashboard)
library(shinycssloaders)
library(tidyverse)
library(RColorBrewer)
library(DT)
library(plotly)
library(officer)
library(flextable)
library(ggiraph)
library(patchwork)
#library(ggstance)
#library(ggrepel)
library(shinyjs)
# Bugs ####
# notes from 07/13/2020
# nonclinical
#### group findings, rearranging like study option and put right side of Study # done
##### fix finding plot so that dose text readable when there are lot more findings --
##### (text size 4, when more than 6 findings, else textsize 6)
##### add autocompletion for adding findings # done
# make a list for possible findings and provide that list as choices in findings # yousuf
# warning message for save study while required field empty
# save automatically for study
##### double save button not working properly for savestudy # fixed
# male/female (sex) severity filtered in plot
#clinical
# fix the issue that two start dose appeared
# dosing units
#table
# check filter option for numeric column (only slider option available)
# table 2 does not show table sometimes (only shows NOAEL and no absent severity)
# export any appication (whole dataset in rds)
# Notes from 6/29: ################################
# Data Selection:
#### - Change Enter Tox Program to Enter Application Number # done
# - Automatically open new application after entering it rather than having user select from list
# Clinical Data:
# - Set default to check Start Dose and MRHD
# - Fix that need to enter both a Start Dose and MRHD
#### pop up delete button to confirm delete # added by yousuf
#### - Add solid-lines above Start Dose/MRHD/Custom Dose ## Done
# - Wait for feedback on everything above Start Dose Information: in Clinical Data
# Nonclinical Data:
#### - Move study name below Species and Duration ## Done
#### - Add a save button at bottom of Nonclincial Data
#### - Add dashed-lines above Dose 2/3/etc., and above Findings 2/3/etc. ## Done # dashed line above 1/2/3
#### - Move NOAEL checkbox below Cmax and AUC # done
#### - Add solid-lines above number of Dose levels and above number of findings # done
# - Add asterisk next to Dose 1/2/3/etc. ???
#### - Fix typo in "Partially Revesible" # done
# Main Panel:
# - Generate informative error message if safety margin calculation method of Cmax or
# AUC is selected but no Cmax or AUC clinical (or nonclinical) data has been provided.
# - Wait for feedback on table names
# General Notes:
#### - Fix numericInputs to not take negative values for dose and Cmax and AUC # done, what should be the minimum number? 0?
# - Figure out how to handle data entry in the context of updates to the application'
# - Explore User-based updates
###################################################
# Project Improvement Ideas:
# - Add legend to figure that lists dose compared and PK/HED option
# - Allow user to create display names of findings with legend at bottom
# - Add option to display margin on top of figure
# - Make an optional figure legend (with checkbox)
# - Color "errorbar" to indicate severity (white for no toxicity at dose)
# Color by the lowest dose on the ladder and switch color half-way between dose edges if space allows
# on the UI bar side, change checkboxes to selectInputs to indicate dose severity
# - For table export, generate the three tables from the smart template in Word format
# - Add footnotes tied to findings (numbered) as well as a general footnote
# - Start with Smart Template as default table layout
# - Allow table to be flexibly modified
# - Brackets for findings
# - Text wrap finding names so that they don't overlap and use bullets to denote findings
# - Stagger doses (down -> up) so they don't overlap when close
# - use error bar to combine findings across doses
## added by Yousuf
# apply roundSigigs funciton to plotData_p$SM
# remove findings string from hovertext in findings figure
### need to add or change in 3rd table of template
# correct the HED calculation
# add starting Dose and MHRD
# add
#
# Negation of %in%: TRUE for elements of `x` NOT found in `table`.
`%ni%` <- function(x, table) !(x %in% table)
# Template for a brand-new development program: the structure every saved
# application .rds follows. Written to 'blankData.rds' so the app can offer
# a clean "New Program" entry in the program dropdown.
Data <- list(
  CmaxUnit = 'ng/mL',
  AUCUnit = 'ng*h/mL',
  'Clinical Information' = list(
    HumanWeight = 60,   # default human body weight (kg) used in HED math
    MgKg = FALSE,       # FALSE = fixed mg dosing; TRUE = mg/kg dosing
    'Start Dose' = list(
      StartDose = NULL, StartDoseMgKg = NULL,
      StartDoseCmax = NULL, StartDoseAUC = NULL
    ),
    'MRHD' = list(
      MRHD = NULL, MRHDMgKg = NULL,
      MRHDCmax = NULL, MRHDAUC = NULL
    ),
    'Custom Dose' = list(
      CustomDose = NULL, CustomDoseMgKg = NULL,
      CustomDoseCmax = NULL, CustomDoseAUC = NULL
    )
  ),
  'Nonclinical Information' = list(
    'New Study' = list(
      Species = NULL,
      Duration = '',
      Notes = NULL,
      check_note = FALSE,
      nDoses = 1,
      Doses = list(
        Dose1 = list(Dose = '', NOAEL = FALSE, Cmax = '', AUC = '')
      ),
      nFindings = 1,
      Findings = list(
        Finding1 = list(
          Finding = '',
          Reversibility = '[Rev]',
          Severity = list(Dose1 = 'Absent')
        )
      )
    )
  )
)
saveRDS(Data, 'blankData.rds')
####
# Attach the jQuery UI assets shipped with Shiny to a UI element `x` so the
# drag-and-drop selectize plugins used elsewhere in this app work in the
# browser. Returns `x` with the extra HTML dependency appended.
addUIDep <- function(x) {
  jquery_ui <- htmlDependency(
    "jqueryui", "1.10.4", c(href = "shared/jqueryui/1.10.4"),
    script = "jquery-ui.min.js",
    stylesheet = "jquery-ui.min.css"
  )
  attachDependencies(x, c(htmlDependencies(x), list(jquery_ui)))
}
######
# App-level reactive state.
# NOTE(review): these reactiveValues are created at the top level of the
# file, outside server(), so they are shared by every connected session of
# this R process — confirm that cross-session sharing is intentional.
values <- reactiveValues()
values$Application <- NULL     # path of the currently selected program .rds
values$SM <- NULL
values$selectData <- NULL      # remembered program-dropdown selection
values$tmpData <- NULL         # working copy of the study being edited
values$changeStudyFlag <- F    # guards the auto-save observers while a study loads
values$Findings <- ''          # accumulated finding names offered for autocomplete
# Species Conversion ----
# Body-surface-area conversion factors: an animal dose in mg/kg divided by
# the species factor gives the human-equivalent dose (HED) in mg/kg.
speciesConversion <- c(
  "Rat" = 6.2, "Dog" = 1.8, "Monkey" = 3.1, "Rabbit" = 3.1,
  "Mouse" = 12.3, "Mini-pig" = 1.1, "Guinea pig" = 4.6, "Hamster" = 7.4
)
##
# Clinical dose categories a user can enter (checkbox group + dropdowns).
clinDosingOptions <- c('Start Dose', 'MRHD', 'Custom Dose')
## significant figure
# Count the significant figures of a scalar number.
#
# Works on the decimal text representation: the sign, the decimal point,
# and leading/trailing zeros are discarded before counting characters.
# `scipen` is raised temporarily (and restored via on.exit) so that
# as.character() never emits scientific notation, which would break the
# character counting.
#
# Fix: the original did not strip a leading minus sign, so e.g.
# sigfigs(-5) returned 2 instead of 1.
sigfigs <- function(x){
  orig_scipen <- getOption("scipen")
  options(scipen = 999)
  on.exit(options(scipen = orig_scipen))
  x <- as.character(x)
  x <- sub("-", "", x, fixed = TRUE)   # the sign is not a significant figure
  x <- sub(".", "", x, fixed = TRUE)   # drop the decimal point
  x <- gsub("(^0+|0+$)", "", x)        # leading/trailing zeros are not significant
  nchar(x)
}
# Round `x` for display: if plain integer rounding already leaves at most
# `N` significant figures, round to `N` significant figures instead (keeps
# small values like 0.034 readable); otherwise keep the integer rounding.
# NA input passes through unchanged.
roundSigfigs <- function(x, N = 2) {
  if (is.na(x)) {
    return(x)
  }
  rounded <- round(x, digits = 0)
  if (sigfigs(rounded) > N) {
    return(rounded)
  }
  signif(x, digits = N)
}
# Per-session workspace setup ----
# create Applications folder if it does not exist
dir.create('Applications', showWarnings = FALSE)
# Pick a random per-session folder number, retrying until it does not
# collide with an existing folder. (The original code only retried four
# hard-coded times, so a collision was still possible.)
directory_number <- ceiling(runif(1, min = 1, max = 10000))
while (paste0('folder_', directory_number) %in% list.files('Applications')) {
  directory_number <- ceiling(runif(1, min = 1, max = 10000))
}
# Purge session folders whose demo data has not been modified for 3+ days.
files_rds <- list.files('Applications', pattern = "Application_Demo.rds",
                        recursive = TRUE, full.names = TRUE)
current <- Sys.time()
# Iterate the paths directly: the original `seq(files_rds)` errors when
# exactly one file matches, because seq() treats a length-1 vector as `from`.
for (file_dir in files_rds) {
  last_mod_time <- file.mtime(file_dir)
  difference_days <- ceiling(difftime(current, last_mod_time, units = 'days'))
  if (difference_days >= 3) {
    unlink(dirname(file_dir), recursive = TRUE)
  }
}
# Sum every clinical dose entry recorded for a program.
#
# @param Data_rds Full program list as stored in the application .rds file;
#   only the 'Clinical Information' element is read (fixed-mg and mg/kg
#   variants of Start Dose, MRHD, and Custom Dose).
# @return A single numeric: the sum of all non-missing dose entries, or 0
#   when none have been entered. Callers use `> 0` as "any clinical dose
#   has been provided".
clin_data <- function(Data_rds) {
  Data <- Data_rds[["Clinical Information"]]
  dose_list <- list(
    start_dose = Data[["Start Dose"]][["StartDose"]],
    mrhd = Data[["MRHD"]][["MRHD"]],
    custom = Data[["Custom Dose"]][["CustomDose"]],
    start_dose_kg = Data[["Start Dose"]][["StartDoseMgKg"]],
    mrhd_kg = Data[["MRHD"]][["MRHDMgKg"]],
    custom_kg = Data[["Custom Dose"]][["CustomDoseMgKg"]]
  )
  # na.rm guards against NA doses saved from blank numericInputs, which
  # would otherwise make the caller's `if (clin_dose > 0)` error on NA.
  sum(unlist(dose_list), na.rm = TRUE)
}
# Server function started here (selectData) ----
server <- function(input,output,session) {
  # user folder ----
  # Path of this session's private workspace under Applications/, e.g.
  # "Applications/folder_1234". `directory_number` is the collision-checked
  # random number drawn at file load. The commented-out branches are the
  # older URL-query ("?user=") based identification kept for reference.
  user <- reactive({
    # <<<<<<< HEAD
    # url_search <- session$clientData$url_search
    # username <- unlist(strsplit(url_search,'user='))[2]
    # username <- str_to_lower(username)
    username <- paste0('folder_', directory_number)
    # =======
    # url_search <- session$clientData$url_search
    # username <- unlist(strsplit(url_search,'user='))[2]
    # username <- str_to_lower(username)
    # #username <- "md.ali@fda.hhs.gov"
    # >>>>>>> master
    username <- paste0("Applications/", username)
    return(username)
  })
# create folder and copy Aplication_Demo.rds file that folder
observeEvent(user(), {
dir_list <- list.dirs("Applications", full.names = F, recursive = F)
if (!basename(user()) %in% dir_list) {
dir.create(user())
file.copy("Application_Demo.rds", user())
}
})
###
output$selectData <- renderUI({
datasets <- c('blankData.rds',grep('.rds',list.files(user(),full.names = T),value=T))
names(datasets) <- basename(unlist(strsplit(datasets,'.rds')))
names(datasets)[which(datasets=='blankData.rds')] <- 'New Program'
if (is.null(values$selectData)) {
selectInput('selectData','Select Develpment Program:',datasets,selected='blankData.rds')
} else {
selectInput('selectData','Select Develpment Program:',datasets,selected=values$selectData)
}
})
### Study Name ----
output$studyName <- renderUI({
req(input$selectData)
if (input$selectData!='blankData.rds') {
HTML(paste(
p(HTML(paste0('<h4> <u>Selected Program:</u></h4>
<h4 style= "color:skyblue"> ',
(basename(unlist(strsplit(input$selectData,'.rds')))),'</h4>')
))
))
}
})
  # getData ------
  # Read the currently selected program's .rds file from disk. The bare
  # references to input$refreshPlot and input$selectStudy create reactive
  # dependencies so the file is re-read after saves/deletes elsewhere.
  getData <- reactive({
    input$refreshPlot
    req(input$selectData)
    input$selectStudy
    Data <- readRDS(input$selectData)
  })
observe({
req(input$selectData)
if (input$selectData == 'blankData.rds') {
values$Application <- paste0(user(), "/",input$newApplication,'.rds')
} else {
values$Application <- input$selectData
}
})
#
observeEvent(input$saveData,{
Data <- getData()
saveRDS(Data,values$Application)
datasets <- c('blankData.rds',grep('.rds',list.files(user(),full.names = T),value=T))
names(datasets) <- basename(unlist(strsplit(datasets,'.rds')))
names(datasets)[which(datasets=='blankData.rds')] <- 'New Program'
selectInput('selectData','Select Develpment Program:',datasets)
updateSelectInput(session,'selectData',choices=datasets,selected=values$Application)
})
# delete application ----
observeEvent(input$deleteData, {
showModal(modalDialog(
title="Delete Program?",
footer = tagList(modalButton("Cancel"),
actionButton("confirmDelete", "Delete")
)
))
})
# COnfirm delete application ----
observeEvent(input$confirmDelete, {
file.remove(values$Application)
datasets <- c('blankData.rds',grep('.rds',list.files(user(),full.names = T),value=T))
names(datasets) <- basename(unlist(strsplit(datasets,'.rds')))
names(datasets)[which(datasets=='blankData.rds')] <- 'New Program'
selectInput('selectData','Select Develpment Program:',datasets)
updateSelectInput(session,'selectData',choices=datasets,selected='blankData.rds')
removeModal()
})
# select study ----
output$selectStudy <- renderUI({
req(input$selectData)
input$selectData
isolate(Data <- getData())
studyList <- names(Data[['Nonclinical Information']])
selectInput('selectStudy','Select Study:',choices=studyList)
})
############## Auto-Save Dose ######################
# read data from disk into values$tmpData upon study selection
observeEvent(input$selectStudy,ignoreNULL=T,{
values$changeStudyFlag <- F
Data <- getData()
values$tmpData <- Data[['Nonclinical Information']][[input$selectStudy]]
if (input$selectStudy=='New Study') {
blankData <- readRDS('blankData.rds')
values$tmpData <- blankData[['Nonclinical Information']][[input$selectStudy]]
#values$tmpData <- Data[['Nonclinical Information']][['New Study']]
}
})
# Flip changeStudyFlag after "New Study" has loaded
observe({
if (is.null(input$dose1)) {
values$changeStudyFlag <- T
} else if (is.na(input$dose1)) {
values$changeStudyFlag <- T
}
})
# Flip changeStudyFlag after study has loaded and update tmpData to match UI
observe({
req(input$nDoses)
req(input$dose1)
req(input$nFindings)
req(input[[paste0('Severity',input$nFindings,'_',input$nDoses)]])
if (!is.na(input$dose1)) {
# <<<<<<< HEAD
# print(values$tmpData$Findings$Finding1$Severity[[paste0('Dose',values$tmpData$nDoses)]])
# print(input$nFindings)
# print(input$nDoses)
# print(input[[paste0('Severity',input$nFindings,'_',input$nDoses)]])
if ((values$tmpData$Doses$Dose1$Dose == input$dose1)&(values$tmpData$nDoses == input$nDoses)&
(values$tmpData$Findings$Finding1$Finding == input$Finding1)&(values$tmpData$nFindings == input$nFindings)&
(values$tmpData$Findings$Finding1$Severity[[paste0('Dose',values$tmpData$nDoses)]] == input[[paste0('Severity',input$nFindings,'_',input$nDoses)]])) {
# print(input$nDoses)
# print(values$changeStudyFlag)
values$changeStudyFlag <- T
# print(values$changeStudyFlag)
# =======
# if ((values$tmpData$Doses$Dose1$Dose == input$dose1)&(values$tmpData$nDoses == input$nDoses)&
# (values$tmpData$Findings$Finding1$Finding == input$Finding1)&(values$tmpData$nFindings == input$nFindings)&
# (values$tmpData$Findings$Finding1$Severity[[paste0('Dose',values$tmpData$nDoses)]] == input[[paste0('Severity',input$nFindings,'_',input$nDoses)]])) {
# values$changeStudyFlag <- T
# >>>>>>> master
}
}
if (values$changeStudyFlag==T) {
for (i in seq(input$nDoses)) {
if (!is.null(input[[paste0('dose',i)]])) {
newList <- list(
Dose = input[[paste0('dose',i)]],
NOAEL = input[[paste0('NOAEL',i)]],
Cmax = input[[paste0('Cmax',i)]],
AUC = input[[paste0('AUC',i)]]
)
values$tmpData[['Doses']][[paste0('Dose',i)]] <- newList
}
}
}
})
# Add findings to the list
observeEvent(input$selectData,ignoreNULL = T,{
Data <- getData()
for (Study in names(Data[['Nonclinical Information']])) {
if (Study != "New Study") {
studyData <- Data[['Nonclinical Information']][[Study]]
for ( i in seq(studyData$nFindings)) {
Finding <- studyData[['Findings']][[paste0('Finding', i)]][['Finding']]
if (Finding %ni% values$Findings) {
values$Findings <- c(values$Findings, Finding)
}
}
}
}
})
########### Auto-save findings ###############
observe({
req(input$nFindings)
req(input$Finding1)
if (values$changeStudyFlag==T) {
# print(paste('input$nDoses:',input$nDoses))
# print(input$nFindings)
# print(values$tmpData$Findings[[paste0('Finding',input$nFindings)]])
for (i in seq(input$nFindings)) {
if (!is.null(input[[paste0('Finding',i)]])) {
Finding_list= input[[paste0('Finding',i)]]
if (Finding_list %ni% values$Findings) {
values$Findings <- c(values$Findings, Finding_list)
}
# print(values$Findings)
newList <- list(
Finding= input[[paste0('Finding',i)]],
Reversibility = input[[paste0('Reversibility',i)]])
sev_list <- list()
# print(seq(input$nDoses))
for (j in seq(input$nDoses)) {
finding_seq <- input[[paste0('Severity', i, '_', j)]]
if (!is.null(finding_seq)) {
names(finding_seq) <- paste0("Dose", j)
}
sev_list <- c(sev_list, finding_seq)
}
newList <- c(newList, list(Severity= sev_list))
# print(newList)
#print(str(newList))
values$tmpData[['Findings']][[paste0('Finding',i)]] <- newList
}
}
}
})
# Clinical information -----
observeEvent(input$selectData,ignoreNULL = T,{
Data <- getData()
#update units for Cmax/AUC
updateTextInput(session, "cmax_unit", value=Data[["CmaxUnit"]])
updateTextInput(session, "auc_unit", value=Data[["AUCUnit"]])
# update clinical information
clinData <- Data[['Clinical Information']]
if (clinData$MgKg==F) {
updateNumericInput(session,'HumanWeight',value = clinData$HumanWeight)
} else { updateCheckboxInput(session, "MgKg", value = T)}
clinDosing <- NULL
for (dose in clinDosingOptions) {
clin_dose <- clinData[[dose]][[gsub(' ','',dose)]]
clin_dose_mgkg <- clinData[[dose]][[paste0(gsub(' ','',dose), 'MgKg')]]
if ((!is.null(clin_dose)) | (!is.null(clin_dose_mgkg))) {
clinDosing <- c(clinDosing,dose)
}
}
updateCheckboxGroupInput(session,'clinDosing',selected=clinDosing)
for (dose in clinDosing) {
doseName <- gsub(' ','',dose)
if (clinData$MgKg==F) {
updateNumericInput(session,doseName,value = clinData[[dose]][[doseName]])
} else {
updateNumericInput(session,paste0(doseName,'MgKg'),value = clinData[[dose]][[paste0(doseName,'MgKg')]])
}
updateNumericInput(session,paste0(doseName,'Cmax'),value = clinData[[dose]][[paste0(doseName,'Cmax')]])
updateNumericInput(session,paste0(doseName,'AUC'),value = clinData[[dose]][[paste0(doseName,'AUC')]])
}
})
# Nonclinical data update ------
observeEvent(input$selectStudy,ignoreNULL = T,{
Data <- getData()
studyData <- Data[['Nonclinical Information']][[input$selectStudy]]
updateSelectInput(session,'Species',selected=studyData$Species)
updateTextInput(session,'Duration',value=studyData$Duration)
updateNumericInput(session,'nDoses',value=studyData$nDoses)
updateNumericInput(session,'nFindings',value=studyData$nFindings)
updateCheckboxInput(session, "notes", value = studyData$check_note)
})
# first save study button ----
observeEvent(eventExpr = input$saveStudy, {
doseList <- as.list(seq(input$nDoses))
names(doseList) <- paste0('Dose',seq(input$nDoses))
for (i in seq(input$nDoses)) {
doseList[[i]] <- list(Dose=input[[paste0('dose',i)]],
NOAEL = input[[paste0('NOAEL',i)]],
Cmax = input[[paste0('Cmax',i)]],
AUC = input[[paste0('AUC',i)]]
)
}
findingList <- as.list(seq(input$nFindings))
names(findingList) <- paste0('Finding',seq(input$nFindings))
if (input$nFindings > 0) {
for (i in seq(input$nFindings)) {
severity <- list()
for (j in seq(input$nDoses)) {
severity[[paste0("Dose", j)]] <- input[[paste0("Severity", i, "_", j)]]
}
if ((is.null(input[[paste0('Finding',i)]])) | (input[[paste0('Finding',i)]]=='')) {
finding_null <- "No Finding"
} else {
finding_null <- input[[paste0('Finding',i)]]
}
findingList[[i]] <- list(Finding=finding_null,
Reversibility = input[[paste0('Reversibility',i)]],
# FindingDoses = input[[paste0('FindingDoses',i)]],
Severity = severity
)
}
} else {
findingList[[1]] <- NULL
}
Data <- getData()
studyName <- paste(input$Species,input$Duration,sep=': ')
Data[['Nonclinical Information']][[studyName]] <- list(
Species = input$Species,
Duration = input$Duration,
Notes = input$note_text,
check_note = input$notes,
nDoses = input$nDoses,
Doses = doseList,
nFindings = input$nFindings,
Findings = findingList
)
saveRDS(Data,values$Application)
showNotification("Saved", duration = 3)
studyList <- names(Data[['Nonclinical Information']])
updateSelectInput(session,'selectStudy',choices=studyList,selected=studyName)
input$refreshPlot
})
# second save study button ----
observeEvent(eventExpr = input$saveStudy_02, {
doseList <- as.list(seq(input$nDoses))
names(doseList) <- paste0('Dose',seq(input$nDoses))
for (i in seq(input$nDoses)) {
doseList[[i]] <- list(Dose=input[[paste0('dose',i)]],
NOAEL = input[[paste0('NOAEL',i)]],
Cmax = input[[paste0('Cmax',i)]],
AUC = input[[paste0('AUC',i)]]
)
}
findingList <- as.list(seq(input$nFindings))
names(findingList) <- paste0('Finding',seq(input$nFindings))
if (input$nFindings > 0) {
for (i in seq(input$nFindings)) {
severity <- list()
for (j in seq(input$nDoses)) {
severity[[paste0("Dose", j)]] <- input[[paste0("Severity", i, "_", j)]]
}
if ((is.null(input[[paste0('Finding',i)]])) | (input[[paste0('Finding',i)]]=='')) {
finding_null <- "No Finding"
} else {
finding_null <- input[[paste0('Finding',i)]]
}
findingList[[i]] <- list(Finding=finding_null,
Reversibility = input[[paste0('Reversibility',i)]],
# FindingDoses = input[[paste0('FindingDoses',i)]],
Severity = severity
)
}
} else {
findingList[[1]] <- NULL
}
Data <- getData()
studyName <- paste(input$Species,input$Duration,sep=': ')
Data[['Nonclinical Information']][[studyName]] <- list(
Species = input$Species,
Duration = input$Duration,
Notes = input$note_text,
check_note = input$notes,
nDoses = input$nDoses,
Doses = doseList,
nFindings = input$nFindings,
Findings = findingList
)
saveRDS(Data,values$Application)
showNotification("Saved", duration = 3)
studyList <- names(Data[['Nonclinical Information']])
updateSelectInput(session,'selectStudy',choices=studyList,selected=studyName)
input$refreshPlot
})
## save clinical information ----
observeEvent(input$saveClinicalInfo, {
Data <- getData()
clinData <- Data[['Clinical Information']]
if (input$MgKg==F) {
clinData[['HumanWeight']] <- input$HumanWeight
} else {
clinData[['HumanWeight']] <- NULL
}
clinData[['MgKg']] <- input$MgKg
if (length(input$clinDosing)>0) {
for (clinDose in input$clinDosing) {
clinDoseName <- gsub(' ','',clinDose)
if (input$MgKg==F) {
clinData[[clinDose]][[clinDoseName]] <- input[[clinDoseName]]
} else {
clinData[[clinDose]][[paste0(clinDoseName,'MgKg')]] <- input[[paste0(clinDoseName,'MgKg')]]
}
clinData[[clinDose]][[paste0(clinDoseName,'Cmax')]] <- input[[paste0(clinDoseName,'Cmax')]]
clinData[[clinDose]][[paste0(clinDoseName,'AUC')]] <- input[[paste0(clinDoseName,'AUC')]]
}
}
Data[['Clinical Information']] <- clinData
saveRDS(Data,values$Application)
showNotification("saved", duration = 3)
})
# click refresh button after save clinical information
observeEvent(input$saveClinicalInfo, {
click('refreshPlot')
})
## delete study ----
observeEvent(input$deleteStudy, {
showModal(modalDialog(
title="Delete Study?",
footer = tagList(modalButton("Cancel"),
actionButton("confirmRemove", "Delete")
)
))
})
# confirm delete study
observeEvent(input$confirmRemove, {
Data <- getData()
studyIndex <- which(names(Data[['Nonclinical Information']])==input$selectStudy)
restIndex <- seq(length(names(Data[['Nonclinical Information']])))[-studyIndex]
restNames <- names(Data[['Nonclinical Information']])[restIndex]
Data[['Nonclinical Information']] <- Data[['Nonclinical Information']][restNames]
saveRDS(Data,values$Application)
studyList <- names(Data[['Nonclinical Information']])
updateSelectInput(session,'selectStudy',choices=studyList,selected='New Study')
removeModal()
})
# title
output$studyTitle <- renderText({
paste(input$Species,input$Duration,sep=': ')
})
# display Studies ----
output$displayStudies <- renderUI({
req(input$clinDosing)
input$selectData
input$selectStudy
isolate(Data <- getData())
studyList <- names(Data[['Nonclinical Information']])
studyList <- studyList[-which(studyList=='New Study')]
studyList <- str_sort(studyList, numeric = T)
addUIDep(selectizeInput('displayStudies',label='Select and Order Studies to Display:',choices=studyList,
selected=studyList,
multiple=TRUE,width='100%',options=list(plugins=list('drag_drop','remove_button'))))
})
## display findings ----
output$displayFindings <- renderUI({
req(input$clinDosing)
input$selectData
input$selectStudy
data <- getPlotData()
find_fact <- as.factor(data$Findings)
findings <- unique(find_fact)
findings <- str_sort(findings, numeric = T)
addUIDep(selectizeInput('displayFindings', label = 'Select and Order Findings to Display:',
choice= findings, selected = findings,
multiple = TRUE, width = "100%",
options=list(plugins=list('drag_drop','remove_button' ))))
})
## output$Doses -----
output$Doses <- renderUI({
req(input$selectStudy)
cmax_unit <- paste0(" Cmax (", input$cmax_unit, ")")
auc_unit <- paste0(" AUC (", input$auc_unit, ")")
# <<<<<<< HEAD
# if (input$selectStudy=='New Study') {
# lapply(1:(4*input$nDoses), function(i) {
# I <- ceiling(i/4)
# if (i %% 4 == 1) {
# div(
# hr(style = "border-top: 1px dashed skyblue"),
# numericInput(paste0('dose',I),paste0('*Dose ',I,' (mg/kg/day):'), min = 0,NULL))
# } else if (i %% 4 == 2) {
# div(style="display: inline-block;vertical-align:top; width: 115px;",
# #numericInput(paste0('Cmax',I),paste0('Dose ',I,' Cmax (ng/mL):'), min = 0, NULL))
# numericInput(paste0('Cmax',I),paste0('Dose ',I, cmax_unit), min = 0, NULL))
# }
# else if (i %% 4 == 3) {
# div(style="display: inline-block;vertical-align:top; width: 115px;",
# numericInput(paste0('AUC',I),paste0('Dose ',I, auc_unit),min = 0, NULL))
# } else {
# div(checkboxInput(paste0('NOAEL',I),'NOAEL?',value=F))
# }
# })
# } else {
# Data <- getData()
# studyData <- Data[['Nonclinical Information']][[input$selectStudy]]
# lapply(1:(4*input$nDoses), function(i) {
# I <- ceiling(i/4)
# doseName <- names(studyData$Doses)[I]
# if (i %% 4 == 1) {
# div(hr(style = "border-top: 1px dashed skyblue"),
# numericInput(paste0('dose',I),paste0('*Dose ',I,' (mg/kg/day):'),studyData$Doses[[doseName]][['Dose']]))
# } else if (i %% 4 == 2) {
# div(style="display: inline-block;vertical-align:top; width: 115px;",
# numericInput(paste0('Cmax',I),paste0('Dose ',I, cmax_unit),studyData$Doses[[doseName]][['Cmax']]))
# }
# else if (i %% 4 == 3) {
# div(style="display: inline-block;vertical-align:top; width: 115px;",
# numericInput(paste0('AUC',I),paste0('Dose ',I, auc_unit),studyData$Doses[[doseName]][['AUC']]))
#
# } else {
# div(checkboxInput(paste0('NOAEL',I),'NOAEL?',value=studyData$Doses[[doseName]][['NOAEL']]))
# }
# })
# }
# =======
studyData <- values$tmpData
lapply(1:(4*input$nDoses), function(i) {
I <- ceiling(i/4)
doseName <- names(studyData$Doses)[I]
if (i %% 4 == 1) {
div(hr(style = "border-top: 1px dashed skyblue"),
numericInput(paste0('dose',I),paste0('*Dose ',I,' (mg/kg/day):'), min=0, value =studyData$Doses[[doseName]][['Dose']]))
} else if (i %% 4 == 2) {
div(style="display: inline-block;vertical-align:top; width: 115px;",
numericInput(paste0('Cmax',I),paste0('Dose ',I, cmax_unit), min=0, value=studyData$Doses[[doseName]][['Cmax']]))
}
else if (i %% 4 == 3) {
div(style="display: inline-block;vertical-align:top; width: 115px;",
numericInput(paste0('AUC',I),paste0('Dose ',I, auc_unit), min=0, value=studyData$Doses[[doseName]][['AUC']]))
} else {
div(checkboxInput(paste0('NOAEL',I),'NOAEL?',value=studyData$Doses[[doseName]][['NOAEL']]))
}
})
# >>>>>>> master
})
# Findings -----
output$Findings <- renderUI({
req(input$selectStudy)
studyData <- values$tmpData
if (input$nFindings>0) {
numerator <- 2 + input$nDoses
lapply(1:(numerator*input$nFindings), function(i) {
I <- ceiling(i/numerator)
if (i %% numerator == 1) {
findings <- str_sort(unique(values$Findings))
div(
hr(style = "border-top: 1px dashed skyblue"),
selectizeInput(paste0('Finding',I),paste0('*Finding ',I,':'), choices= findings,
selected = studyData$Findings[[paste0('Finding',I)]]$Finding,
options = list(create = TRUE)))
} else if (i %% numerator == 2) {
radioButtons(paste0('Reversibility',I),'Reversibility:',
choiceNames=c('Reversible [Rev]','Not Reversible [NR]',
'Partially Reversible [PR]','Not Assessed'),
choiceValues=c('[Rev]','[NR]','[PR]',''),
selected=studyData$Findings[[paste0('Finding',I)]]$Reversibility)
} else {
lapply(1:input$nDoses, function(j) {
if ((i %% numerator == 2+j)|((i %% numerator == 0)&(j==input$nDoses))) {
selectInput(inputId = paste0('Severity',I,'_',j),
label = paste0('Select Severity at Dose ',j,' (',input[[paste0('dose',j)]],' mg/kg/day)'),
choices = c('Absent','Present','Minimal','Mild','Moderate','Marked','Severe'),
selected=studyData$Findings[[paste0('Finding',I)]]$Severity[[paste0('Dose',j)]])
}
})
}
})
}
})
### add note for study ----
output$study_note <- renderUI({
req(input$selectStudy)
Data <- getData()
studyData <- Data[['Nonclinical Information']][[input$selectStudy]]
if (input$selectStudy=='New Study') {
if (input$notes ==T) {
textAreaInput("note_text", "Notes:", placeholder = "Enter Note here for this Study", height = "100px")
}
} else{
if (input$notes==T) {
textAreaInput("note_text", "Notes:", value = studyData$Notes, height = "100px")
}
}
})
# Create PlotData (changed) -----
getPlotData <- reactive({
Data <- getData()
plotData <- data.frame(matrix(ncol = 17 ))
column_names <- c("Study", "Dose",
"NOAEL", "Cmax", "AUC", "Findings",
"Reversibility", "Severity", "Value_order",
"SM", "HED_value", "SM_start_dose", "SM_MRHD", "noael_value", "Severity_max", "Severity_num", "Study_note")
colnames(plotData) <- column_names
count <- 1
for (Study in names(Data[["Nonclinical Information"]])) {
if (Study != "New Study") {
studyData <- Data[["Nonclinical Information"]][[Study]]
for (i in seq(studyData$nFindings)){
for (j in seq(studyData$nDoses)){
plotData[count, "Study"] <- Study
plotData[count, "Dose"] <- studyData[["Doses"]][[paste0("Dose", j)]][["Dose"]]
plotData[count, "NOAEL"] <- studyData[["Doses"]][[paste0("Dose",j)]][["NOAEL"]]
plotData[count, "Cmax"] <- studyData[["Doses"]][[paste0("Dose", j)]][["Cmax"]]
plotData[count, "AUC"] <- studyData[["Doses"]][[paste0("Dose", j)]][["AUC"]]
plotData[count, "Findings"] <- studyData[["Findings"]][[paste0("Finding", i)]][["Finding"]]
plotData[count, "Reversibility"] <- studyData[["Findings"]][[paste0("Finding", i)]][["Reversibility"]]
plotData[count, "Severity"] <- studyData[["Findings"]][[paste0("Finding", i)]][["Severity"]][[paste0("Dose", j)]]
plotData[count, "Value_order"] <- j
plotData[count, "SM"] <- NA
plotData[count, "HED_value"] <- NA
plotData[count, "SM_start_dose"] <- NA
plotData[count, "SM_MRHD"] <- NA
plotData[count, "noael_value"] <- NA
plotData[count, "Severity_max"] <- NA
plotData[count, "Severity_num"] <- NA
if (!is.null(studyData[["Notes"]])) {
plotData[count, "Study_note"] <- studyData[["Notes"]]
} else {plotData[count, "Study_note"] <- NA}
count <- count+1
}
}
}
}
plotData$Rev <- gsub("\\[|\\]", "", plotData$Reversibility)
plotData$Dose <- as.numeric(plotData$Dose)
plotData$Value <- 1
plotData$Rev[plotData$Rev == ""] <- "Not Assessed"
plotData$Rev[plotData$Rev == "Rev"] <- "Reversible"
plotData$Rev[plotData$Rev == "NR"] <- "Not Reversible"
plotData$Rev[plotData$Rev == "PR"] <- "Partially Reversible"
plotData <- plotData[which(plotData$Study %in% input$displayStudies),]
plotData$Severity <- factor(plotData$Severity,
levels= c('Absent','Present','Minimal', 'Mild',
'Moderate', 'Marked', 'Severe'), ordered = TRUE)
plotData$Severity_num <- as.numeric(plotData$Severity)
return(plotData)
})
## human dose ----
output$humanDosing <- renderUI({
req(input$clinDosing)
Data <- getData()
clinDosingNames <- input$clinDosing
names(clinDosingNames) <- clinDosingNames
if (length(clinDosingNames)>0) {
for (clinDose in input$clinDosing) {
if (Data[['Clinical Information']][['MgKg']]==F) {
names(clinDosingNames)[which(clinDosingNames==clinDose)] <- paste0(clinDose,
': (', Data[['Clinical Information']][[clinDose]][[paste0(unlist(strsplit(clinDose,' ')),
collapse='')]],' mg)')
} else {
names(clinDosingNames)[which(clinDosingNames==clinDose)] <- paste0(clinDose,': (', Data[['Clinical Information']][[clinDose]][[paste0(gsub(' ', '', clinDose), 'MgKg')]],' mg/kg)')
}
}
}
selectInput('humanDosing','Select Human Dose:',choices=clinDosingNames)
})
  ## filter NOAEL data preparation ----
  # Annotate each row of the plot data with its study's NOAEL dose
  # ("noael_value") and the study's maximum observed severity
  # ("Severity_max"). When a study has no dose flagged NOAEL, a sentinel of
  # (minimum dose - 1) is used instead.
  # NOTE(review): `count` tracks the absolute row offset of each study's
  # block, which assumes rows of the same study are contiguous in df_plot —
  # getPlotData() builds them that way; confirm before reordering rows.
  filter_NOAEL <- reactive({
    df_plot <- getPlotData()
    count <- 0
    for (i in unique(df_plot$Study)){
      ind <- which(df_plot$Study == i)
      study <- df_plot[ind,]
      max_severe <- max(study$Severity)
      row_num <- nrow(study)
      for (j in seq(nrow(study))) {
        if (any(study$NOAEL == TRUE)) {
          # A NOAEL dose was flagged: stamp it on this row.
          dose <- study$Dose[which(study$NOAEL == TRUE)]
          dose <- unique(dose)
          k <- count+j
          df_plot[k, "noael_value"] <- as.numeric(dose)
          df_plot[k, "Severity_max"] <- max_severe
        } else {
          # No NOAEL flagged: sentinel just below the lowest tested dose.
          dose <- min(study$Dose)
          dose <- as.numeric(dose) - 1
          k <- count + j
          df_plot[k, "noael_value"] <- as.numeric(dose)
          df_plot[k, "Severity_max"] <- max_severe
        }
      }
      count <- count +row_num
    }
    df_plot
  })
# ## calculate safety margin (SM) ------
#
calculateSM <- reactive({
Data <- getData()
plotData <- filter_NOAEL()
if (nrow(plotData)>0) {
for (i in seq(nrow(plotData))) {
if (input$SMbasis=='HED') {
Dose <- as.numeric(plotData[i,'Dose'])
} else if (input$SMbasis=='Cmax') {
Dose <- as.numeric(plotData[i,'Cmax'])
} else if (input$SMbasis=='AUC') {
Dose <- as.numeric(plotData[i,'AUC'])
}
Species <- unlist(strsplit(plotData[i,'Study'],':'))[1]
humanDoseName <- gsub(' ','',input$humanDosing)
if (input$SMbasis=='HED') {
HED <- Dose/speciesConversion[[Species]]
if (input$MgKg==F) {
humanDose <- Data[['Clinical Information']][[input$humanDosing]][[humanDoseName]]
HED <- HED*Data[['Clinical Information']][['HumanWeight']]
if (!is.null(Data[["Clinical Information"]][["Start Dose"]][["StartDose"]])) {
SM_start <- HED/(Data[["Clinical Information"]][["Start Dose"]][["StartDose"]])
} else {SM_start <- NA}
if (!is.null(Data[["Clinical Information"]][["MRHD"]][["MRHD"]])) {
SM_MRHD <- HED/(Data[["Clinical Information"]][["MRHD"]][["MRHD"]])
} else {SM_MRHD <- NA}
} else {
humanDose <- Data[['Clinical Information']][[input$humanDosing]][[paste0(humanDoseName, "MgKg")]]
if (!is.null(Data[["Clinical Information"]][["Start Dose"]][["StartDoseMgKg"]])){
SM_start <- HED/(Data[["Clinical Information"]][["Start Dose"]][["StartDoseMgKg"]])
} else {SM_start <- NA}
if (!is.null(Data[["Clinical Information"]][["MRHD"]][["MRHDMgKg"]])) {
SM_MRHD <- HED/(Data[["Clinical Information"]][["MRHD"]][["MRHDMgKg"]])
} else {SM_MRHD <- NA}
}
} else if (input$SMbasis=='Cmax') {
if (!is.null(Data[['Clinical Information']][[input$humanDosing]][[paste0(humanDoseName,input$SMbasis)]])) {
humanDose <- Data[['Clinical Information']][[input$humanDosing]][[paste0(humanDoseName,input$SMbasis)]]
} else {humanDose <- NA}
HED <- Dose
if (!is.null(Data[["Clinical Information"]][["Start Dose"]][["StartDoseCmax"]])) {
SM_start <- HED/(Data[["Clinical Information"]][["Start Dose"]][["StartDoseCmax"]])
} else {SM_start <- NA}
if (!is.null(Data[["Clinical Information"]][["MRHD"]][["MRHDCmax"]])) {
SM_MRHD <- HED/(Data[["Clinical Information"]][["MRHD"]][["MRHDCmax"]])
} else (SM_MRHD <- NA)
} else {
if (!is.null(Data[['Clinical Information']][[input$humanDosing]][[paste0(humanDoseName,input$SMbasis)]])) {
humanDose <- Data[['Clinical Information']][[input$humanDosing]][[paste0(humanDoseName,input$SMbasis)]]
} else {humanDose <- NA}
HED <- Dose
if (!is.null(Data[["Clinical Information"]][["Start Dose"]][["StartDoseAUC"]])) {
SM_start <- HED/(Data[["Clinical Information"]][["Start Dose"]][["StartDoseAUC"]])
} else (SM_start <- NA)
if (!is.null(Data[["Clinical Information"]][["MRHD"]][["MRHDAUC"]])) {
SM_MRHD <- HED/(Data[["Clinical Information"]][["MRHD"]][["MRHDAUC"]])
} else {SM_MRHD <- NA}
}
plotData[i, "HED_value"]<- round(HED, digits = 2) ##for table 03
plotData[i, "SM"] <- round(HED/humanDose, 2)
plotData[i, "SM_start_dose"] <- round(SM_start, digits = 2)
plotData[i, "SM_MRHD"] <- round(SM_MRHD, digits = 2)
}
}
return(plotData)
})
# table 01 ----
dt_01 <- reactive({
plotData_tab <- calculateSM()
plotData_tab <- plotData_tab %>%
mutate(Findings = as.factor(Findings),
Rev = as.factor(Rev),
Study = as.factor(Study),
Dose = as.numeric(Dose),
SM = as.numeric(SM),
Severity = as.factor(Severity))
plotData_tab <- plotData_tab %>%
select( Findings,Rev, Study, Dose, SM, Severity) %>%
filter(Severity != "Absent") %>%
select(-Severity) %>%
rename(Reversibility = Rev,
"Clinical Exposure Margin" = SM,
"Dose (mg/kg/day)" = Dose)
plotData_tab$Findings <- factor(plotData_tab$Findings,levels= input$displayFindings)
plotData_tab <- plotData_tab %>%
arrange(Findings)
plotData_tab
})
# table 01 UI side
# Renders dt_01() as an interactive DT table (only when clinical dosing
# information has been entered), with export buttons, reorderable columns and
# the RowsGroup plugin to visually merge repeated values in columns 0-2.
output$table_01 <- renderDT({
  data <- getData()
  clin_dose <- clin_data(data)
  if (clin_dose>0) {
    plotData_tab <- dt_01()
    plotData_tab <- datatable(plotData_tab, rownames = FALSE,
                              class = "cell-border stripe",
                              filter = list(position = 'top'),
                              extensions = list("Buttons" = NULL,
                                                "ColReorder" = NULL),
                              caption = htmltools::tags$caption(
                                style = "caption-side: top; text-align: center; font-size: 20px; color: black",
                                "Table :", htmltools::strong("Nonclinical Findings of Potential Clinical Relevance")
                              ),
                              options = list(
                                dom = "lfrtipB",
                                buttons = c("csv", "excel", "copy", "print"),
                                colReorder = TRUE,
                                scrollY = TRUE,
                                pageLength = 25,
                                columnDefs = list(list(className = "dt-center", targets = "_all")),
                                # Black header styling applied after the table initializes.
                                initComplete = JS(
                                  "function(settings, json) {",
                                  "$(this.api().table().header()).css({'background-color': '#000', 'color': '#fff'});",
                                  "}"),
                                rowsGroup = list(0,1,2))) %>%
      formatStyle(columns = colnames(plotData_tab), `font-size` = "18px")
    # Attach the RowsGroup JS plugin (not shipped with DT) as an html dependency.
    path <- "DT_extension" # folder containing dataTables.rowsGroup.js
    dep <- htmltools::htmlDependency(
      "RowsGroup", "2.0.0",
      path, script = "dataTables.rowsGroup.js")
    plotData_tab$dependencies <- c(plotData_tab$dependencies, list(dep))
    plotData_tab
  }})
# DT table to flex table
# Rows of dt_01() that survive the DT column filters currently applied in the UI.
filtered_tab_01 <- reactive({
  req(input$table_01_rows_all)
  dt_01()[input$table_01_rows_all, ]
})
# Flextable for docx file
# Converts the filtered clinical-relevance table to a flextable with merged
# repeated cells and a single spanning title row, ready for Word export.
dt_to_flex_01 <- reactive({
  filtered_tab_01() %>%
    dplyr::arrange(Findings, Reversibility, Study) %>%
    flextable() %>%
    merge_v(j = ~ Findings + Reversibility + Study) %>%
    flextable::autofit() %>%
    add_header_row(values = c("Nonclinical Findings of Potential Clinical Relevance"), colwidths = c(5)) %>%
    theme_box()
})
# download table 01
# Saves the clinical-relevance flextable as docx into the user's folder,
# then serves a copy of that file to the browser.
output$down_01_doc <- downloadHandler(
  filename = function() {
    Sys.sleep(2)  # brief pause before the download prompt (kept from original)
    "clinical_relevance.docx"
  },
  content = function(file) {
    dest <- file.path(user(), "clinical_relevance.docx")
    save_as_docx(dt_to_flex_01(), path = dest)
    file.copy(dest, file)
  }
)
#### table 02 ----
# Key-study-findings table: one row per NOAEL dose, joined with the distinct
# findings observed above the NOAEL in the same study; exposure columns carry
# the user-entered units and studies follow the display-filter order.
dt_02 <- reactive({
  sm_data <- calculateSM()
  # NOAEL rows only, ordered by study and dose.
  noael_rows <- sm_data %>%
    dplyr::select(Study, Dose, NOAEL, Cmax, AUC, SM) %>%
    filter(NOAEL == TRUE) %>%
    dplyr::select(-NOAEL) %>%
    dplyr::arrange(Study, Dose)
  # Findings seen at doses greater than each study's NOAEL.
  above_noael <- sm_data[which(sm_data$Dose > sm_data$noael_value), ]
  above_noael <- above_noael %>%
    select(Study, Findings) %>%
    distinct()
  cmax_label <- paste0("Cmax (", input$cmax_unit, ")")
  auc_label <- paste0("AUC (", input$auc_unit, ")")
  key_tab <- full_join(noael_rows, above_noael, by = "Study") %>%
    arrange(Study, Dose, Cmax, AUC, SM, Findings) %>%
    rename(
      "NOAEL (mg/kg/day)" = Dose,
      "Safety Margin" = SM,
      "Findings at Greater than NOAEL for the Study" = Findings
    ) %>%
    mutate(Study = as.factor(Study))
  # Attach the units to the exposure column headers.
  names(key_tab)[names(key_tab) == "Cmax"] <- cmax_label
  names(key_tab)[names(key_tab) == "AUC"] <- auc_label
  # Re-level so rows follow the study order selected in the display filter.
  key_tab$Study <- factor(key_tab$Study, levels = input$displayStudies)
  arrange(key_tab, Study)
})
# make column name same as flextable (add unit in DT table)
# Renders dt_02() as an interactive DT table with the RowsGroup plugin merging
# all six leading columns, shown only once clinical dosing has been entered.
output$table_02 <- renderDT({
  data <- getData()
  clin_dose <- clin_data(data)
  if (clin_dose>0) {
    plotData_tab <- dt_02()
    plotData_tab <- datatable(plotData_tab, rownames = FALSE, class = "cell-border stripe",
                              filter = list(position = 'top'),
                              extensions = list("Buttons" = NULL),
                              caption = htmltools::tags$caption(
                                style = "caption-side: top; text-align: center; font-size: 20px; color: black",
                                "Table :", htmltools::strong("Key Study Findings")
                              ),
                              options = list(
                                scrollY = TRUE,
                                pageLength = 100,
                                dom = "lfrtipB",
                                buttons = c("csv", "excel", "copy", "print"),
                                columnDefs = list(list(className = "dt-center", targets = "_all")),
                                # Black header styling applied after the table initializes.
                                initComplete = JS(
                                  "function(settings, json) {",
                                  "$(this.api().table().header()).css({'background-color': '#000', 'color': '#fff'});",
                                  "}"),
                                rowsGroup = list(0,1,2,3,4,5))) %>%
      formatStyle(columns = colnames(plotData_tab), `font-size` = "18px")
    # Attach the RowsGroup JS plugin (not shipped with DT) as an html dependency.
    path <- "DT_extension" # folder containing dataTables.rowsGroup.js
    dep <- htmltools::htmlDependency(
      "RowsGroup", "2.0.0",
      path, script = "dataTables.rowsGroup.js")
    plotData_tab$dependencies <- c(plotData_tab$dependencies, list(dep))
    plotData_tab
  }})
# get data from DT for flextable
# Rows of dt_02() that survive the DT column filters currently applied in the UI.
filtered_tab_02 <- reactive({
  req(input$table_02_rows_all)
  dt_02()[input$table_02_rows_all, ]
})
# flextable 02
# Builds the Word version of the key-findings table. Columns are temporarily
# renamed to plain identifiers (flextable's merge_v formula needs syntactic
# names), then the unit-labelled headers are restored via set_header_labels.
dt_to_flex_02 <- reactive({
  cmax_label <- paste0("Cmax (", input$cmax_unit, ")")
  auc_label <- paste0("AUC (", input$auc_unit, ")")
  flex_tab <- filtered_tab_02() %>%
    rename(
      "Dose" = "NOAEL (mg/kg/day)",
      "SM" = "Safety Margin",
      "Findings" = "Findings at Greater than NOAEL for the Study"
    )
  # Columns 3 and 4 carry the unit text in their names; reset to bare names.
  colnames(flex_tab)[3] <- "Cmax"
  colnames(flex_tab)[4] <- "AUC"
  flex_tab %>%
    flextable() %>%
    merge_v(j = ~ Study + Dose + Cmax + AUC + SM + Findings) %>%
    flextable::autofit() %>%
    set_header_labels("Dose" = "NOAEL (mg/kg/day)",
                      "Cmax" = cmax_label,
                      "AUC" = auc_label,
                      "Findings" = "Findings at Greater than NOAEL for the Study",
                      "SM" = "Safety Margin") %>%
    add_header_row(values = c("Key Study Findings"), colwidths = c(6)) %>%
    theme_box()
})
# download table 02
# Saves the key-findings flextable as docx into the user's folder,
# then serves a copy of that file to the browser.
output$down_02_doc <- downloadHandler(
  filename = function() {
    "key_findings.docx"
  },
  content = function(file) {
    dest <- file.path(user(), "key_findings.docx")
    save_as_docx(dt_to_flex_02(), path = dest)
    file.copy(dest, file)
  }
)
## table 03 ----
# Safety-margin table based on NOAELs: human-equivalent dose plus exposure
# margins versus the clinical start dose and the MRHD, one row per NOAEL dose.
dt_03 <- reactive({
  cmax_unit <- paste0("Cmax (", input$cmax_unit, ")")
  auc_unit <- paste0("AUC (", input$auc_unit, ")")
  plotData_03 <- calculateSM() %>%
    select(Study, NOAEL, Dose, HED_value, Cmax, AUC, SM_start_dose, SM_MRHD) %>%
    mutate(Study = as.factor(Study)) %>%
    unique() %>%
    filter(NOAEL == TRUE) %>%
    select(-NOAEL) %>%
    dplyr::rename("NOAEL (mg/kg/day)" = Dose,
                  "Safety Margin at Starting Dose" = SM_start_dose,
                  "Safety Margin at MRHD" = SM_MRHD)
  # Attach the user-entered units to the exposure column headers.
  names(plotData_03)[names(plotData_03) == "Cmax"] <- cmax_unit
  names(plotData_03)[names(plotData_03) == "AUC"] <- auc_unit
  # HED units depend on whether clinical dosing is entered in mg/kg.
  # (Was `input$MgKg == F`; `F` is reassignable, so test the logical directly.)
  if (!input$MgKg) {
    plotData_03 <- rename(plotData_03, "HED (mg/day)" = HED_value)
  } else {
    plotData_03 <- rename(plotData_03, "HED (mg/kg/day)" = HED_value)
  }
  # Re-level so rows follow the study order selected in the display filter.
  plotData_03$Study <- factor(plotData_03$Study, levels = input$displayStudies)
  arrange(plotData_03, Study)
})
# table 03 DT
# Renders dt_03() as an interactive DT table with export buttons and
# reorderable columns, shown only once clinical dosing has been entered.
output$table_03 <- renderDT({
  data <- getData()
  clin_dose <- clin_data(data)
  if (clin_dose>0) {
    plotData_03 <- dt_03()
    plotData_03 <- datatable(plotData_03,rownames = FALSE,
                             extensions = list("Buttons" = NULL,
                                               "ColReorder" = NULL),
                             class = "cell-border stripe",
                             filter = list(position = 'top'),
                             caption = htmltools::tags$caption(
                               style = "caption-side: top; text-align: center; font-size: 20px; color: black",
                               "Table :", htmltools::strong("Safety Margins Based on NOAEL from Pivotal Toxicology Studies")
                             ),
                             options = list(
                               dom = "lfrtipB",
                               buttons = c("csv", "excel", "copy", "print"),
                               colReorder = TRUE,
                               pageLength = 10,
                               columnDefs = list(list(className = "dt-center", targets = "_all")),
                               scrollY = TRUE,
                               # Black header styling applied after the table initializes.
                               initComplete = JS(
                                 "function(settings, json) {",
                                 "$(this.api().table().header()).css({'background-color': '#000', 'color': '#fff'});",
                                 "}"))) %>%
      formatStyle(columns = colnames(plotData_03), `font-size` = "18px")
    plotData_03
  }})
# get data from DT table
# Rows of dt_03() that survive the DT column filters currently applied in the UI.
filtered_tab_03 <- reactive({
  req(input$table_03_rows_all)
  dt_03()[input$table_03_rows_all, ]
})
# flextable for docx file
# Word version of the safety-margin table: two stacked header rows, the inner
# one splitting columns into "Nonclinical" (5) vs "Clinical Exposure Margins" (2).
dt_to_flex_03 <- reactive({
  filtered_tab_03() %>%
    flextable() %>%
    add_header_row(values = c("Nonclinical", "Clinical Exposure Margins"), colwidths = c(5, 2)) %>%
    add_header_row(values = c("Safety Margins Based on NOAEL from Pivotal Toxicology Studies"), colwidths = c(7)) %>%
    theme_box()
})
# download table 03
# Saves the safety-margin flextable as docx into the user's folder,
# then serves a copy of that file to the browser.
output$down_03_doc <- downloadHandler(
  filename = function() {
    "safety_margin.docx"
  },
  content = function(file) {
    dest <- file.path(user(), "safety_margin.docx")
    save_as_docx(dt_to_flex_03(), path = dest)
    file.copy(dest, file)
  }
)
## download all table
# Assembles all three flextables into one Word document, separated by
# three blank paragraphs each.
download_all <- reactive({
  add_spacing <- function(doc) {
    doc %>%
      body_add_par(" ") %>%
      body_add_par(" ") %>%
      body_add_par(" ")
  }
  read_docx() %>%
    body_add_flextable(dt_to_flex_01()) %>%
    add_spacing() %>%
    body_add_flextable(dt_to_flex_02()) %>%
    add_spacing() %>%
    body_add_flextable(dt_to_flex_03())
})
##
# Writes the combined document to the user's folder (print() on an rdocx
# object with `target=` saves it), then serves a copy to the browser.
output$down_all <- downloadHandler(
  filename = function() {
    "table_all.docx"
  },
  content = function(file) {
    dest <- file.path(user(), "table_all.docx")
    print(download_all(), target = dest)
    file.copy(dest, file)
  }
)
# create notes table ----
# One distinct (Notes, Study) row per study, ordered by the study order
# selected in the display filter.
all_study_notes <- reactive({
  notes_tab <- calculateSM() %>%
    dplyr::select(Study_note, Study) %>%
    dplyr::rename(Notes = Study_note)
  notes_tab$Study <- factor(notes_tab$Study, levels = input$displayStudies)
  notes_tab %>%
    distinct() %>%
    arrange(Study)
})
# output table for notes ----
# Plain HTML table of the per-study notes, shown only once clinical dosing
# has been entered.
output$table_note <- renderTable(
  {
    if (clin_data(getData()) > 0) {
      all_study_notes()
    }
  },
  bordered = TRUE,
  striped = TRUE,
  spacing = 'xs',
  width = '100%', align = 'lr'
)
## download notes table
# Word version of the notes table with a single spanning title row.
table_note_to_flex <- reactive({
  all_study_notes() %>%
    flextable() %>%
    add_header_row(values = c("Note for Studies"), colwidths = c(2)) %>%
    theme_box()
})
# download notes table
# Saves the notes flextable as docx into the user's folder, then serves a copy.
output$down_notes <- downloadHandler(
  filename = function() {
    "note_table.docx"
  },
  content = function(file) {
    dest <- file.path(user(), "note_table.docx")
    save_as_docx(table_note_to_flex(), path = dest)
    file.copy(dest, file)
  }
)
## filter NOAEL reactive ----
# Applies the NOAEL dose filter chosen in the UI (input$NOAEL_choices) to the
# computed safety-margin data. The original duplicated the calculateSM() call
# in every branch; it is hoisted here and only the filter differs per choice.
filtered_plot <- reactive({
  plot_data <- calculateSM()
  if (input$NOAEL_choices == "Less than or equal to NOAEL") {
    plot_data <- dplyr::filter(plot_data, Dose <= noael_value)
  } else if (input$NOAEL_choices == "Greater than NOAEL") {
    plot_data <- dplyr::filter(plot_data, Dose > noael_value)
  }
  # "ALL" (the remaining choice) returns the data unfiltered.
  plot_data
})
# plotheight ----
# Figure height scales linearly with the number of studies, using the
# per-study height chosen on the slider.
plotHeight <- reactive({
  n_studies <- length(unique(calculateSM()$Study))
  input$plotheight * n_studies
})
## figure -----
# Main interactive figure: a left panel (p) of dose/exposure-margin labels per
# study and a right panel (q) of stacked severity columns per finding, combined
# with patchwork and rendered via ggiraph. Shown only once clinical dosing exists.
output$figure <- renderGirafe({
  req(input$clinDosing)
  input$selectData  # taken as a reactive dependency so the plot refreshes on dataset change
  data <- getData()
  clin_dose <- clin_data(data)
  if (clin_dose>0) {
    plotData <- filtered_plot()
    plotData <- plotData[which(plotData$Findings %in% input$displayFindings),]
    plotData$Dose <- as.numeric(plotData$Dose)
    # Axis limits come from the UNfiltered data so they stay stable across filters.
    axis_limit <- calculateSM()
    suppressWarnings(SM_max <- max(axis_limit$SM))
    suppressWarnings(y_max <- as.numeric(max(axis_limit$Value_order)) +1)
    suppressWarnings(q_y_max <- as.numeric(max(axis_limit$Value_order)))
    finding_count <- length(unique(plotData$Findings))
    # column width
    if (finding_count < 4) {
      q_col_width <- 0.2* finding_count
    } else {
      q_col_width <- 0.9
    }
    # text size of finding plot
    if (finding_count < 6) {
      q_text_size <- 6
    } else {
      q_text_size <- 4
    }
    ## plotdata for p plot (changed) ----
    # One row per (Study, Dose) for the label panel; SM rounded to significant figures.
    plotData_p <- plotData
    plotData_p <- plotData_p %>%
      select(Study, Dose, SM, Value, NOAEL, Value_order, Study_note) %>%
      unique()
    plotData_p$SM <- lapply(plotData_p$SM, roundSigfigs)
    plotData_p$SM <- as.numeric(plotData_p$SM)
    #note
    # plotData_note <- plotData_p %>%
    #   select(Study, Study_note, SM, Value_order) %>%
    #   unique()
    if (nrow(plotData)>0) {
      # Factor levels follow the user-selected display order.
      plotData$Study <- factor(plotData$Study,levels= input$displayStudies)
      plotData_p$Study <- factor(plotData_p$Study,levels= input$displayStudies)
      #plotData_note$Study <- factor(plotData_note$Study, levels = input$displayStudies)
      plotData$Findings <- factor(plotData$Findings, levels = input$displayFindings)
      plotData$DoseLabel <- factor(paste(plotData$Dose,'mg/kg/day'),
                                   levels=unique(paste(plotData$Dose,'mg/kg/day'))[order(unique(as.numeric(plotData$Dose),decreasing=F))])
      # Longest newline-separated findings string, used nowhere below but kept as-is.
      maxFindings <- 1
      for (doseFinding in plotData$doseFindings) {
        nFindings <- str_count(doseFinding,'\n')
        if (nFindings > maxFindings) {
          maxFindings <- nFindings
        }
      }
      maxFindings <- maxFindings + 1
      #plotData$Findings <- as.factor(plotData$Findings)
      plotData$Severity <- as.factor(plotData$Severity)
      # make severity ordered factor
      plotData$Severity <- factor(plotData$Severity,
                                  levels= c('Absent','Present','Minimal', 'Mild',
                                            'Moderate', 'Marked', 'Severe'), ordered = TRUE)
      #color_manual <- c('transparent','grey','#feb24c','#fd8d3c','#fc4e2a','#e31a1c','#b10026')
      # Severity color scale: transparent for absent, warm ramp for increasing severity.
      color_manual <- c('Absent' = 'transparent',
                        'Present' = 'grey',
                        'Minimal' = '#feb24c',
                        'Mild' = '#fd8d3c',
                        'Moderate' = '#fc4e2a',
                        'Marked' = '#e31a1c',
                        'Severe' = '#b10026')
      # # safety margin plot ----
      color_NOAEL <- c("TRUE" = "#239B56", "FALSE" = "black")
      tooltip_css <- "background-color:#3DE3D8;color:black;padding:2px;border-radius:5px;"
      # Label layout depends on the display mode: dose only, dose+margin, or notes.
      if (input$dose_sm==1) {
        plot_p_label <- ggplot(plotData_p)+
          geom_label_interactive(aes(x = SM, y = Value_order,
                                     label = paste0(Dose, " mg/kg/day"),
                                     tooltip =paste0(SM, "x")), #DoseLabel changed
                                 color = "white",
                                 fontface = "bold",
                                 size = 6,
                                 fill= ifelse(plotData_p$NOAEL == TRUE, "#239B56", "black"),
                                 label.padding = unit(0.6, "lines")
          )
      } else if (input$dose_sm==2) {
        plot_p_label <- ggplot(plotData_p)+
          geom_label_interactive(aes(x = SM, y = Value_order,
                                     label = paste0(Dose, " mg/kg/day", "\n", SM, "x"),
                                     tooltip =paste0(Study_note)), #DoseLabel changed
                                 color = "white",
                                 fontface = "bold",
                                 size = 6,
                                 fill= ifelse(plotData_p$NOAEL == TRUE, "#239B56", "black"),
                                 label.padding = unit(0.6, "lines"))
      } else {
        plot_p_label <- ggplot(plotData_p)+
          geom_label_interactive(aes(x = SM, y = Value_order,
                                     label = paste0(Dose, " mg/kg/day", "\n", SM, "x"),
                                     tooltip =paste0(Study_note)), #DoseLabel changed
                                 color = "white",
                                 fontface = "bold",
                                 size = 6,
                                 fill= ifelse(plotData_p$NOAEL == TRUE, "#239B56", "black"),
                                 label.padding = unit(0.6, "lines")
          )+
          geom_text(data=plotData_p ,aes(x = 0.5*(SM_max), y=0.3 , label= Study_note),
                    color = "black",
                    size= 6)
      }
      # Left panel: log-scaled exposure margin axis, one facet row per study.
      p <- plot_p_label +
        scale_x_log10(limits = c(min(axis_limit$SM/2), max(axis_limit$SM*2)))+
        #scale_fill_manual(values = color_NOAEL)+
        ylim(0,y_max)+
        facet_grid( Study ~ .)+
        labs( title = " Summary of Toxicology Studies", x = "Exposure Margin")+
        theme_bw(base_size=12)+
        theme(
          axis.title.y = element_blank(),
          axis.ticks.y= element_blank(),
          axis.text.y = element_blank(),
          panel.grid.major = element_blank(),
          panel.grid.minor = element_blank(),
          plot.title = element_text(size= 20, hjust = 1),
          axis.title.x = element_text(size = 18, vjust = -0.9),
          axis.text.x = element_text(size = 16),
          legend.position = "none",
          strip.text.y = element_text(size=14, color="black"),
          strip.background = element_rect( fill = "white"))
      # findings plot ----
      # Right panel: per-finding stacked severity columns, dose printed inside each segment.
      q <- ggplot(plotData)+
        geom_col_interactive(aes(x= Findings, y = Value, fill = Severity, group = Dose, tooltip = Findings),
                             position = position_stack(reverse = TRUE),
                             color = 'transparent',
                             width = q_col_width)+
        geom_text_interactive(aes(x = Findings, y = Value, label = Dose, group = Dose, tooltip = Findings),
                              size = q_text_size,
                              color = 'white',
                              fontface = 'bold',
                              position = position_stack(vjust = 0.5, reverse = TRUE))+
        #scale_y_discrete(position = 'right')+
        ylim(0, q_y_max)+
        scale_fill_manual(values = color_manual)+
        facet_grid(Study ~ ., scales = 'free')+
        theme_bw(base_size=12)+
        theme(axis.title.y = element_blank(),
              strip.text.y = element_blank(),
              axis.ticks.y = element_blank(),
              axis.text.y = element_blank(),
              axis.title.x = element_blank(),
              axis.text.x = element_text(size= 16, angle = 90), #need to work
              #plot.title = element_text(size=20,hjust = 0.5),
              panel.grid.major.y = element_blank(),
              panel.grid.minor.y = element_blank(),
              panel.grid.major.x = element_line(),
              panel.grid.minor.x = element_blank(),
              legend.text = element_text(size = 14),
              legend.title = element_text(size = 16),
              legend.justification = "top")+
        #labs(title = '' )+
        guides(fill = guide_legend(override.aes = aes(label = "")))
      # Combine the two panels 3:1 and render interactively.
      girafe(code = print(p+q+plot_layout(ncol = 2, widths = c(3,1))),
             options = list(opts_tooltip(css = tooltip_css)),
             fonts = list(sans= "Roboto"),
             width_svg = 18, height_svg = plotHeight())
    }}
})
# Mirror the currently selected dataset into reactiveValues so other
# observers can reference it outside the input object.
observe({
  req(input$selectData)
  values$selectData <- input$selectData
})
## download rds file
# Dropdown of every saved program (.rds) in the user's folder; the displayed
# name is the file name with the .rds suffix removed.
output$download_rds <- renderUI({
  rds_paths <- grep('.rds', list.files(user(), full.names = TRUE), value = TRUE)
  names(rds_paths) <- basename(unlist(strsplit(rds_paths, '.rds')))
  selectInput("downloadRDS", "Select to Download an Application:", choices = rds_paths, selected = NULL)
})
# Serve the selected program's .rds file under its own base name.
output$down_btn <- downloadHandler(
  filename = function() {
    basename(input$downloadRDS)
  },
  content = function(file) {
    file.copy(input$downloadRDS, file)
  }
)
## upload file rds
# Copies an uploaded .rds program into the user's folder and refreshes the
# dataset picker so the new program can be selected.
observe({
  if (is.null(input$upload_rds)) return()
  file.copy(input$upload_rds$datapath, paste0(user(), "/", input$upload_rds$name))
  datasets <- c('blankData.rds', grep('.rds', list.files(user(), full.names = TRUE), value = TRUE))
  names(datasets) <- basename(unlist(strsplit(datasets, '.rds')))
  names(datasets)[which(datasets == 'blankData.rds')] <- 'New Program'
  # NOTE(review): the original also built a selectInput() tag here and
  # discarded the result; only updateSelectInput() has any effect, so the
  # dead call was removed.
  updateSelectInput(session, 'selectData', choices = datasets, selected = values$Application)
})
# download tar file ----
# Bundles the whole Applications/ tree into one tar archive and serves it.
# NOTE(review): the archive is written into the app's working directory and
# is not cleaned up afterwards -- confirm this is intended.
output$tar_file <- downloadHandler(
  filename = function() {
    "all_file.tar"
  },
  content = function(file) {
    # tar() is called for its side effect; the original assigned its return
    # value to an unused variable, which has been dropped.
    tar("all_file.tar", files = "Applications")
    file.copy("all_file.tar", file)
  }
)
# Enumerate every saved program (.rds) grouped by user folder under
# Applications/, as a two-column data frame (user, files) sorted by both.
# Rewritten from a grow-in-a-loop data.frame: rows are built per folder and
# bound once, which also removes the spurious all-NA row the original
# produced when no files existed.
dir_to_df <- reactive({
  folder_list <- tail(basename(list.dirs("Applications/")), -1)  # drop the root entry
  per_folder <- lapply(folder_list, function(folder) {
    rds_names <- grep(".rds", list.files(paste0("Applications/", folder)), value = TRUE)
    if (length(rds_names) == 0) {
      return(NULL)
    }
    data.frame(user = folder,
               # strip the .rds suffix (the original split on the unescaped
               # pattern ".rds", which also matched e.g. "xrds")
               files = sub("\\.rds$", "", rds_names),
               stringsAsFactors = FALSE,
               row.names = NULL)
  })
  df_files <- do.call(rbind, per_folder)
  if (is.null(df_files)) {
    df_files <- data.frame(user = character(0), files = character(0))
  }
  df_files %>%
    arrange(user, files)
})
###
# Admin view: DT table of every user's saved .rds programs, with the
# RowsGroup plugin merging repeated user names in column 0.
output$dir_list <- renderDT({
  dir_tab <- dir_to_df()
  dir_tab <- datatable(dir_tab, rownames = FALSE, class = "cell-border stripe",
                       filter = list(position = 'top'),
                       extensions = list("Buttons" = NULL),
                       caption = htmltools::tags$caption(
                         style = "caption-side: top; text-align: center; font-size: 20px; color: black",
                         "Table :", htmltools::strong("All the RDS Files")
                       ),
                       options = list(
                         scrollY = TRUE,
                         pageLength = 100,
                         dom = "lfrtipB",
                         buttons = c("csv", "excel", "copy", "print"),
                         columnDefs = list(list(className = "dt-center", targets = "_all")),
                         # Black header styling applied after the table initializes.
                         initComplete = JS(
                           "function(settings, json) {",
                           "$(this.api().table().header()).css({'background-color': '#000', 'color': '#fff'});",
                           "}"),
                         rowsGroup = list(0))) %>%
    formatStyle(columns = colnames(dir_tab), `font-size` = "18px")
  # Attach the RowsGroup JS plugin (not shipped with DT) as an html dependency.
  path <- "DT_extension" # folder containing dataTables.rowsGroup.js
  dep <- htmltools::htmlDependency(
    "RowsGroup", "2.0.0",
    path, script = "dataTables.rowsGroup.js")
  dir_tab$dependencies <- c(dir_tab$dependencies, list(dep))
  dir_tab
})
## save units for Cmax and AUC ----
# Persist the user-entered exposure units into the current program's RDS file.
observeEvent(input$save_units, {
  program <- getData()
  program[["CmaxUnit"]] <- input$cmax_unit
  program[["AUCUnit"]] <- input$auc_unit
  saveRDS(program, values$Application)
  showNotification("saved", duration = 3)
})
# Five non-breaking spaces, used to indent the unit labels rendered below.
five_space <- paste0(rep(HTML(' '), times = 5), collapse = "")
## start dose cmax and auc units
# The six label outputs below differed only in their prefix text and unit
# input; they are factored through one helper.
# Helper: indented, bolded label of the form "<prefix> (<unit>):".
exposure_label <- function(prefix, unit) {
  HTML(paste0(five_space, strong(paste0(prefix, " (", unit, "):"))))
}
output$start_cmax <- renderUI({
  exposure_label("Start Dose Cmax", input$cmax_unit)
})
output$start_auc <- renderUI({
  exposure_label("Start Dose AUC", input$auc_unit)
})
## MRHD dose cmax and auc unit
output$MRHD_cmax <- renderUI({
  exposure_label("MRHD Dose Cmax", input$cmax_unit)
})
output$MRHD_auc <- renderUI({
  exposure_label("MRHD Dose AUC", input$auc_unit)
})
## custom dose
output$custom_cmax <- renderUI({
  exposure_label("Custom Dose Cmax", input$cmax_unit)
})
output$custom_auc <- renderUI({
  exposure_label("Custom Dose AUC", input$auc_unit)
})
# output$menu function -----
# Sidebar menu rendered in one of three states:
#  (1) "blankData.rds" selected  -> minimal menu for creating a new program;
#  (2) an existing program selected -> full menu (units, clinical data,
#      nonclinical study entry);
#  (3) no selection yet -> same minimal menu as (1).
# NOTE(review): the commented-out "<<<<<<< HEAD"/"======="/">>>>>>> master"
# lines throughout are neutralized git merge-conflict residue, kept verbatim.
output$menu <- renderMenu({
  if (!is.null(input$selectData)) {
    if (input$selectData=='blankData.rds') {
      # State (1): new-program menu.
      sidebarMenu(id='menu',
                  menuItem('Data Selection',icon=icon('database'),startExpanded = T,
                           uiOutput('selectData'),
                           conditionalPanel('input.selectData=="blankData.rds"',
                                            # <<<<<<< HEAD
                                            textInput('newApplication','Enter Program Name:')
                           ),
                           actionButton('saveData','Open New Program',icon=icon('plus-circle')),
                           # =======
                           #                       textInput('newApplication','Enter New Application Number:')
                           #      ),
                           #      actionButton('saveData','Submit',icon=icon('plus-circle')),
                           # >>>>>>> master
                           br()
                  ),
                  # <<<<<<< HEAD
                  # br(),
                  # uiOutput('studyName'),
                  # br(),
                  hr(),
                  menuItem('Source Code',icon=icon('code'),href='https://github.com/phuse-org/phuse-scripts/blob/master/contributed/Nonclinical/R/toxSummary')
                  # =======
                  # hr(),
                  # menuItem('Questions/Feedback',icon=icon('envelope-square'),
                  #          href = 'mailto:kevin.snyder@fda.hhs.gov')
                  # >>>>>>> master
      )
    } else {
      # State (2): full menu for an existing program.
      sidebarMenu(id='menu',
                  menuItem('Data Selection',icon=icon('database'),startExpanded = T,
                           uiOutput('selectData'),
                           conditionalPanel('input.selectData=="blankData.rds"',
                                            # <<<<<<< HEAD
                                            textInput('newApplication','Enter Program Name:')
                                            # =======
                                            #                       textInput('newApplication','Enter New Application Number:')
                                            # >>>>>>> master
                           ),
                           actionButton('deleteData','Delete',icon=icon('minus-circle')),
                           br()
                  ),
                  hr(),
                  uiOutput('studyName'),
                  hr(),
                  # Exposure-unit entry, persisted via the save_units observer.
                  menuItem("Units for Cmax/AUC", icon = icon("balance-scale"),
                           textInput("cmax_unit", "*Insert Unit for Cmax:", value = "ng/mL"),
                           textInput("auc_unit", "*Insert Unit for AUC:", value = "ng*h/mL"),
                           actionButton('save_units','Save Units',icon=icon('plus-circle')),
                           br()),
                  # Clinical dosing entry; panels appear per selected dosing type.
                  menuItem('Clinical Data',icon=icon('user'),
                           checkboxGroupInput('clinDosing','Clinical Dosing:',clinDosingOptions),
                           conditionalPanel('condition=input.MgKg==false',
                                            numericInput('HumanWeight','*Human Weight (kg):',value=60, min=0)
                           ),
                           checkboxInput('MgKg','Dosing in mg/kg?',value=F),
                           conditionalPanel(
                             condition='input.clinDosing.includes("Start Dose")',
                             hr(),
                             #tags$hr(style="height:3px;border-width:0;color:white;background-color:green"),
                             h4('Start Dose Information:'),
                             conditionalPanel(condition='input.MgKg==true',
                                              numericInput('StartDoseMgKg','*Start Dose (mg/kg/day):',value=NULL,min=0)
                             ),
                             conditionalPanel(condition='input.MgKg==false',
                                              numericInput('StartDose','*Start Dose (mg/day):',value = NULL, min=0)
                             ),
                             uiOutput("start_cmax"),
                             numericInput('StartDoseCmax',NULL,value=NULL, min=0),
                             #numericInput('StartDoseCmax',paste0('Start Dose Cmax ', input$cmax_unit),value=NULL, min=0),
                             uiOutput("start_auc"),
                             numericInput('StartDoseAUC',NULL,value=NULL, min=0)
                           ),
                           conditionalPanel(
                             condition='input.clinDosing.includes("MRHD")',
                             hr(),
                             #tags$hr(style="height:3px;border-width:0;color:white;background-color:skyblue"),
                             h4('MRHD Information:'),
                             conditionalPanel(condition='input.MgKg==true',
                                              numericInput('MRHDMgKg','*MRHD (mg/kg):',value=NULL, min=0)
                             ),
                             conditionalPanel(condition='input.MgKg==false',
                                              numericInput('MRHD','*MRHD (mg):',value = NULL, min=0)
                             ),
                             uiOutput("MRHD_cmax"),
                             numericInput('MRHDCmax',NULL,value=NULL, min=0),
                             uiOutput("MRHD_auc"),
                             numericInput('MRHDAUC',NULL,value=NULL, min=0)
                           ),
                           conditionalPanel(
                             condition='input.clinDosing.includes("Custom Dose")',
                             hr(),
                             #tags$hr(style="height:3px;border-width:0;color:white;background-color:white"),
                             h4('Custom Dose Information:'),
                             conditionalPanel(condition='input.MgKg==true',
                                              numericInput('CustomDoseMgKg','*Custom Dose (mg/kg):',value=NULL, min=0)
                             ),
                             conditionalPanel(condition='input.MgKg==false',
                                              numericInput('CustomDose','*Custom Dose (mg):',value = NULL, min=0)
                             ),
                             uiOutput("custom_cmax"),
                             numericInput('CustomDoseCmax',NULL,value=NULL, min=0),
                             uiOutput("custom_auc"),
                             numericInput('CustomDoseAUC',NULL,value=NULL, min=0)
                           ),
                           actionButton('saveClinicalInfo','Save Clinical Information',icon=icon('plus-circle')),
                           br()
                  ),
                  # Study-level data entry: doses, findings, optional notes.
                  menuItem('Nonclinical Data',icon=icon('flask'),tabName = 'Nonclinical Info',
                           uiOutput('selectStudy'),
                           actionButton('saveStudy','Save Study',icon=icon('plus-circle')),
                           actionButton('deleteStudy','Delete Study',icon=icon('minus-circle')),
                           selectInput('Species','*Select Species:',choices=names(speciesConversion)),
                           textInput('Duration','*Study Duration/Description:'),
                           h4('Study Name:'),
                           verbatimTextOutput('studyTitle'),
                           hr(),
                           #tags$hr(style="height:3px;border-width:0;color:white;background-color:green"),
                           numericInput('nDoses','*Number of Dose Levels:',value=1,step=1,min=1),
                           uiOutput('Doses'),
                           hr(),
                           #tags$hr(style="height:3px;border-width:0;color:white;background-color:green"),
                           numericInput('nFindings','*Number of Findings:',value=1,step=1,min=1),
                           uiOutput('Findings'),
                           checkboxInput("notes", "Notes for Study?", value = FALSE),
                           uiOutput("study_note"),
                           actionButton('saveStudy_02','Save Study',icon=icon('plus-circle'))
                  ),
                  hr(),
                  h6('* Indicates Required Fields'),
                  # <<<<<<< HEAD
                  hr(),
                  menuItem('Source Code',icon=icon('code'),href='https://github.com/phuse-org/phuse-scripts/blob/master/contributed/Nonclinical/R/toxSummary')
                  # =======
                  # hr(),
                  # menuItem('Questions/Feedback',icon=icon('envelope-square'),href = 'mailto:kevin.snyder@fda.hhs.gov')
                  # >>>>>>> master
      )
    }
  } else {
    # State (3): nothing selected yet -- same minimal menu as state (1).
    sidebarMenu(id='menu',
                menuItem('Data Selection',icon=icon('database'),startExpanded = T,
                         uiOutput('selectData'),
                         conditionalPanel('input.selectData=="blankData.rds"',
                                          # <<<<<<< HEAD
                                          textInput('newApplication','Enter Program Name:')
                         ),
                         actionButton('saveData','Open New Program',icon=icon('plus-circle')),
                         # =======
                         #                       textInput('newApplication','Enter New Application Number:')
                         #      ),
                         #      actionButton('saveData','Submit',icon=icon('plus-circle')),
                         # >>>>>>> master
                         br()
                ),
                # br(),
                # uiOutput('studyName'),
                # br(),
                # <<<<<<< HEAD
                # br(),
                hr(),
                menuItem('Source Code',icon=icon('code'),href='https://github.com/phuse-org/phuse-scripts/blob/master/contributed/Nonclinical/R/toxSummary')
                # =======
                # # br()
                # hr(),
                # menuItem('Questions/Feedback',icon=icon('envelope-square'),href = 'mailto:kevin.snyder@fda.hhs.gov')
                # # tags$a(href='mailto:kevin.snyder@fda.hhs.gov?','Questions/Feedback')
                # >>>>>>> master
    )
  }
})
# Wrap the interactive figure in a loading spinner, sized from the study count.
output$renderFigure <- renderUI({
  fig_height <- paste0(100 * plotHeight(), 'px')
  withSpinner(girafeOutput('figure', width = '100%', height = fig_height))
})
}
# ui function ------
# shinydashboard page: header, dynamic sidebar (rendered server-side via
# output$menu), and a body with a filter row plus a tabset of figure/tables,
# visible only once a program and clinical dosing have been chosen.
# NOTE(review): the commented-out "<<<<<<< HEAD"/">>>>>>> master" lines are
# neutralized git merge-conflict residue, kept verbatim.
ui <- dashboardPage(
  dashboardHeader(title="Nonclinical Summary Tool",titleWidth = 250),
  dashboardSidebar(width = 250,
                   sidebarMenuOutput('menu'),
                   # Make the sidebar independently scrollable.
                   tags$head(
                     tags$style(
                       HTML(".sidebar {height: 94vh; overflow-y: auto;}")
                     )
                   )
  ),
  dashboardBody(
    useShinyjs(),
    # tags$head(
    #   tags$link(rel = "stylesheet", type = "text/css", href = "style.css")
    # ),
    # Top filter row: dosing basis, margin basis, study/finding display filters.
    fluidRow(
      column(2,
             uiOutput('humanDosing')
      ),
      column(2,
             conditionalPanel(
               'input.clinDosing != null && input.clinDosing != ""',
               selectInput('SMbasis','Base Exposure Margin on:',c('HED','Cmax','AUC'))
             )
      ),
      column(4,
             uiOutput('displayStudies')
      ),
      column(4,
             uiOutput('displayFindings'))
    ),
    # Main content, shown only for a real program with clinical dosing entered.
    conditionalPanel(
      condition='input.selectData!="blankData.rds" && input.clinDosing != null && input.clinDosing != ""',
      tabsetPanel(
        tabPanel('Figure',
                 fluidRow(
                   column(2,
                          actionButton('refreshPlot','Refresh Plot')),
                   column(3,
                          selectInput("NOAEL_choices", "Filter NOAEL:", choices = c("ALL", "Less than or equal to NOAEL", "Greater than NOAEL"),
                                      selected = "ALL")),
                   column(3,
                          # <<<<<<< HEAD
                          #        radioButtons("dose_sm", "Display Units:", choices = list("Show Dose Only"=1,
                          #                                                                 "Show Dose with SM"= 2,
                          #                                                                 "Notes" =3))),
                          # column(3,
                          #        sliderInput("plotheight", "Adjust Plot Height:", min = 1, max = 15, value = 4))),
                          # =======
                          radioButtons("dose_sm", "Display Dose/Exposure Margin/Notes:", choices = list("Show Dose Only"=1,
                                                                                                        "Show Dose with Exposure Margin"= 2,
                                                                                                        "Show Notes" =3))),
                   column(3,
                          sliderInput("plotheight", "Adjust Plot Height:", min = 1, max = 15, value = 6))),
                 # >>>>>>> master
                 br(),
                 # <<<<<<< HEAD
                 # withSpinner(girafeOutput('figure')),
                 uiOutput('renderFigure'),
                 br(),
                 hr(style = "border-top: 1px dashed black"),
                 fluidRow(
                   column(9,
                          tableOutput("table_note"),
                          h4("Click on button below to export the table in a docx file"),
                          downloadButton("down_notes", "Docx file download")
                   ))),
        tabPanel("Clinical Relevance Table",
                 DT::dataTableOutput('table_01'),
                 br(),
                 hr(style = "border-top: 1px dashed black"),
                 h4("Click on button below to export the table in a docx file"),
                 downloadButton("down_01_doc", "Docx file download"),
                 br()
        ),
        tabPanel("Key Findings Table",
                 DT::dataTableOutput('table_02'),
                 br(),
                 hr(style = "border-top: 1px dashed black"),
                 h4("Click on button below to export the table in a docx file"),
                 downloadButton("down_02_doc", "Docx file download"),
                 br()
        ),
        tabPanel("Safety Margin Table",
                 DT::dataTableOutput('table_03'),
                 br(),
                 hr(style = "border-top: 1px dashed black"),
                 h4("Click on button below to export the table in a docx file"),
                 downloadButton("down_03_doc", "Docx file download"),
                 br()
        ),
        tabPanel("All Table",
                 br(),
                 p("All three table can be downloaded in single docx file. Click button below to download."),
                 downloadButton("down_all", "Docx file download")),
        tabPanel("Download Program Data",
                 br(),
                 h4("Download Program Data in RDS format:"),
                 br(),
                 p("Program Data can be downloaded in RDS format to share with others"),
                 uiOutput("download_rds"),
                 downloadButton("down_btn", "Download Program Data"),
                 br(),
                 hr(style = "border-top: 1px dashed black"),
                 h4("Upload Program Data in RDS format:"),
                 fileInput("upload_rds", "Upload", accept = c(".rds"), multiple = F))
      ))))
# app running function ----
# Entry point: launch the Shiny app with the UI and server defined above.
shinyApp(ui = ui, server = server)
|
bcaab18c39788589c9bc155d09b632ce6d923736
|
f517f53080a1a833848b9fd3ff8cc2830a8d523c
|
/R/write_qvalue.R
|
564aa787e621188645e5b8306891f90ae624b9a2
|
[
"BSD-2-Clause"
] |
permissive
|
PNNL-Comp-Mass-Spec/Rodin
|
a2b3ddadd312dde9a00e9f03c8deb65a42293579
|
8f93bc5f9e007744d19e3d60c76973aa3e8a115e
|
refs/heads/master
| 2022-02-25T01:34:19.019930
| 2022-02-16T22:19:38
| 2022-02-16T22:19:38
| 144,644,879
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,780
|
r
|
write_qvalue.R
|
#' @title Write results to file
#' @description Write the results of the q-value object to a file.
#' @param x A q-value object.
#' @param file Output filename (optional).
#' @param sep Separation between columns.
#' @param eol Character to print at the end of each line.
#' @param na String to use when there are missing values.
#' @param row.names logical. Specify whether row names are to be printed.
#' @param col.names logical. Specify whether column names are to be printed.
#'
#' @details The output file includes: (i) p-values, (ii)
#' q-values (iii) local FDR values, and (iv) the estimate of \eqn{\pi_0}{pi_0},
#' one per line. If an FDR significance
#' level was specified in the call to \code{\link{qvalue}}, the significance
#' level is printed and an indicator of significance is included.
#'
#' @return Nothing of interest.
#'
#' @author John D. Storey, Andrew J. Bass
#' @seealso \code{\link{qvalue}}, \code{\link{plot.qvalue}},
#' \code{\link{summary.qvalue}}
#' @aliases write.qvalue
#' @keywords write.qvalue
#' @export
write.qvalue <- function(x, file = NULL, sep = " ", eol = "\n", na = "NA",
                         row.names = FALSE, col.names = TRUE) {
  # inherits() is the robust S3 class test: class(x) can have length > 1,
  # which makes `class(x) != "qvalue"` a vector condition (an error in if()
  # under R >= 4.2).
  if (!inherits(x, "qvalue")) {
    stop("x must be a qvalue object.")
  }
  # pi0 is a scalar and is recycled to the length of the p-value vector.
  d <- data.frame(pvalue = x$pval,
                  qvalue = x$qval,
                  lfdr = x$lfdr,
                  pi0 = x$pi0)
  # When qvalue() was run with an FDR level, also report the significance calls.
  # The write.table call itself was identical in both branches of the original,
  # so it is hoisted below the conditional.
  if (any(names(x) == "fdr.level")) {
    d$significant <- x$significant
    d$fdr.level <- x$fdr.level
  }
  write.table(as.matrix(d), file = file, sep = sep, eol = eol, na = na,
              row.names = row.names, col.names = col.names)
}
|
a40867301bacd56c1cffb28742cfc72df7b6ba9b
|
dc81c83a220d2ff0e11a1d33370b1c928a981c35
|
/Apple and Google/Google_playstore_analysis.R
|
9c096a9e804f8781bff423d3c3e31c4f347f1f46
|
[] |
no_license
|
visheshwar/Apple_google-EDA
|
104bc20ab219ba6c4f3b222162750ed57744c797
|
05a9b04caae9a9e503831506a72c90bbd02b9a7c
|
refs/heads/master
| 2020-07-18T18:57:16.192295
| 2019-09-04T10:46:37
| 2019-09-04T10:46:37
| 206,296,168
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,647
|
r
|
Google_playstore_analysis.R
|
# Load packages.
# NOTE(review): plyr is attached AFTER tidyverse (which attaches dplyr), so
# plyr masks several dplyr verbs (e.g. summarise, rename); the later
# library(dplyr) call does NOT re-attach an already-attached package.
# Confirm no masked verb is needed before relying on this order.
library(tidyverse)
library(plyr)
library(dplyr)
library(magrittr)
# Read the raw Play Store export (absolute local path -- machine-specific).
gplay <- read.csv("C:/Users/Shefali Kolge/Desktop/WORK/ML/googleplaystore.csv")
View(gplay)
dim(gplay)
colnames(gplay)
# Make column names syntactically valid (e.g. "Last Updated" -> "Last.Updated").
names(gplay) <- make.names(names(gplay), unique=TRUE)
str(gplay)
# Data Cleaning
# Standardize column types and strip formatting characters from the raw
# Play Store export. `=` assignments were normalized to `<-` throughout.
# Drop the known corrupt date value. The original assigned NULL into an
# atomic-vector subset, which is an error in R whenever the value matches;
# NA is the correct "missing" marker (consistent with the Category/Type
# corrections below).
gplay$Last.Updated[gplay$Last.Updated == "1.0.19 April 1"] <- NA
gplay$Last.Updated <- as.character(gplay$Last.Updated)
gplay$Last.Updated <- as.Date(gplay$Last.Updated, format = "%B %d, %Y", tryFormats = "%Y %m %B")
# NOTE(review): if Rating/Reviews were imported as factors, as.numeric()
# would yield level codes, not values -- verify the column classes above.
gplay$Rating <- as.numeric(gplay$Rating)
gplay$Reviews <- as.numeric(gplay$Reviews)
# For Installs: remove dots and thousands separators, then drop the last
# character (assumed to be the trailing "+"), then convert.
gplay$Installs <- gsub(",", "", gsub("\\.", "", gplay$Installs))
gplay$Installs <- as.character(gplay$Installs)
gplay$Installs <- substr(gplay$Installs, 1, nchar(gplay$Installs) - 1)
gplay$Installs <- as.numeric(gplay$Installs)
# For Size: same treatment (last character assumed to be a unit suffix).
gplay$Size <- gsub(",", "", gsub("\\.", "", gplay$Size))
gplay$Size <- as.character(gplay$Size)
gplay$Size <- substr(gplay$Size, 1, nchar(gplay$Size) - 1)
gplay$Size <- as.numeric(gplay$Size)
gplay$App <- as.character(gplay$App)
# Replace the known bad Category value with NA before re-factoring.
gplay$Category <- as.character(gplay$Category)
gplay$Category[gplay$Category == "1.9"] <- NA
gplay$Category <- as.factor(gplay$Category)
# Replace placeholder Type values with NA before re-factoring.
gplay$Type <- as.character(gplay$Type)
gplay$Type[gplay$Type == "0"] <- NA
gplay$Type[gplay$Type == "NaN"] <- NA
gplay$Type <- as.factor(gplay$Type)
# Strip the leading "$" from Price and convert to numeric.
gplay$Price <- as.character(gplay$Price)
gplay$Price <- as.numeric(gsub("\\$", "", gplay$Price))
gplay$Genres <- as.character(gplay$Genres)
# Month of last update, as an ordered set of labels.
# NOTE(review): factor(..., labels = <12 names>) requires exactly 12 distinct
# month values to be present in the data -- confirm before reuse.
gplay$Month <- format(gplay$Last.Updated, "%b")
gplay$Month <- factor(gplay$Month, labels = c('Jan','Feb','Mar','Apr','May','Jun','Jul','Aug','Sep','Oct','Nov','Dec'))
# No. of apps last updated each month
table(gplay$Month)
# De-duplicate and drop rows with any remaining NA.
gplay <- unique(gplay)
str(gplay)
summary(gplay)
colnames(gplay)
gplay <- na.omit(gplay)
sum(is.na(gplay))
summary(gplay$Rating)
##Let us analyze the type of Apps and their average rating
cat = gplay %>% group_by(Category) %>% select(c(App, Category, Rating))
cat = as.data.frame(cat)
str(cat$Rating)
table(cat$Rating)
#Checking for NA values
sum(is.na(cat$Rating))
#list of average rating per category
list<-aggregate(cat[,3], list(cat$Category), mean,na.rm=TRUE, na.action=na.pass)
View(list)
table(gplay$Type)
#Analyzing the Type of Apps and their Average Rating
type<- gplay %>% group_by(Type) %>% select(c(Rating,Type, Installs, Category, Price))
type=as.data.frame(type)
View(type)
table(gplay$Type)
#Installations by Type and Categories
ggplot(type, aes(x=type$Type, y=type$Installs, fill=type$Type))+geom_bar(stat="identity")+labs(x="Type",y="Installs",fill="Types",title="Installations by Type of Apps")
ggplot(type, aes(x=type$Category, y=type$Installs, fill=type$Category))+ geom_bar(stat="identity")+ theme(axis.text.x = element_text(angle = 90, vjust=0.5))+ labs(title="Installations by Category of Apps",x="Categories",y="Installs",fill="Categories")
#Since Game has more installations lets see the most installed Apps under it
gp1<- subset(gplay, Category=="GAME", select = c(App, Installs, Rating))
gp1<-top_n(gp1, 10)
ggplot(gp1, aes(x=gp1$App, y=gp1$Installs, fill=gp1$Rating))+geom_bar(stat="identity")+ theme(axis.text.x = element_text(angle = 90, vjust=0.5))+ labs(title="Most installed Apps under Game Category",subtitle="(All rated above 5)",x="Apps",y="Installs",fill="Rating")
#Most Family Apps installed
gp2<- subset(gplay, Category=="FAMILY", select = c(App, Installs, Rating))
gp2<-top_n(gp2, 10)
ggplot(gp2, aes(x=gp2$App, y=gp2$Installs, fill=gp2$Rating))+geom_bar(stat="identity")+ theme(axis.text.x = element_text(angle = 90, vjust=0.5))+ labs(title="Most installed Apps under Family Category",subtitle="(All rated above 5)",x="Apps",y="Installs",fill="Rating")
#Lets see how are the App's Priced?
ggplot(gplay, aes(x=gplay$Price))+geom_density(linetype="dashed", color="Black", fill="blue")+ theme(axis.text.x = element_text(angle = 90, vjust=0.5))+labs(title="Lets see how are the App's Priced?",x="App Prices")
paid_data<- subset(gplay, Type=="Paid", select = c(App, Price, Rating,Installs))
ggplot(paid_data, aes(x=paid_data$Installs))+geom_density(color="black",fill="blue")+labs(title="Installation density of Paid Apps",x="Installs")
#Let's Analyze content rating and Installs
ggplot(gplay, aes(x=gplay$Content.Rating, y=gplay$Installs, fill=gplay$Installs))+geom_bar(stat="identity")+labs(title="Content Rating and Installs",subtitle= "Analysis",x="Content Rating",y="Installs",fill="Installs")
# check what category has the highest rating
ggplot(gplay, aes(x=Rating, y=Category)) +
geom_segment(aes(yend=Category), xend=0, colour="grey50") +
geom_point(size=1, aes(colour=Type)) +
scale_colour_brewer(palette="Set1", limits=c("Free", "Paid"), guide=FALSE) +
theme_bw() +
theme(panel.grid.major.y = element_blank()) +
facet_grid(Type ~ ., scales="free_y", space="free_y") +
ggtitle("Checking which Category has the highest Rating between Free and Paid Apps")
#Ratings
ggplot(gplay, aes(x= Category, y= Rating, fill = Type)) +
geom_bar(position='dodge',stat='identity') +
coord_flip() +
ggtitle("Number Of App Ratings Based On Category and Type")
#Reviews
ggplot(gplay, aes(x= Category, y= Reviews, fill = Type)) +
geom_bar(position='dodge',stat='identity') +
coord_flip() +
ggtitle("Number Of App Reviews Based On Category and Type")
#Installs
ggplot(gplay, aes(x= Category, y= Installs, fill = Type)) +
geom_bar(position='dodge',stat='identity') +
coord_flip() +
ggtitle("Number Of App Installs Based On Category and Type")
|
eec5da5b9e237a8cb3ab73a5a4593c69417f541b
|
ce5e30194b3fa3aec3982e87bbf21f0e91891c6e
|
/HW1.R
|
291fbfffb2a73d440f5218c810a83e3940f3d563
|
[] |
no_license
|
Jonathonsun03/Home-work-1
|
8806a19e9bf9e1fe5efdddf33f73bfc03a8d0687
|
adb3edf0a97c0fb1e8ed3553098564293148cf3a
|
refs/heads/main
| 2023-02-27T20:10:49.502904
| 2021-02-03T23:37:17
| 2021-02-03T23:37:17
| 335,784,772
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 13,968
|
r
|
HW1.R
|
# Data-mining homework script covering three case studies:
#   1. Sirius/Wharton radio listener survey (cleaning + summaries + plots)
#   2. Degrees awarded to women in S&E vs Non-S&E fields
#   3. MLB payroll vs wins (wide -> long reshaping and payroll differences)
library(tidyverse)
library(ggplot2)
library(esquisse)
library(plotly)
library(formattable)
#Case Study 1 ------------------------------
dataset <- read.csv("E:/Documents/Penn-One-Drive/OneDrive - PennO365/2020-2021/SP/Datamining/Home work 1/data/Survey_results_final.csv",
na.strings = c(""))
head(dataset)
# NOTE(review): as.tibble() is deprecated in tibble -- prefer as_tibble().
Variables <- as.tibble(colnames(dataset))
View(Variables)
# Keep only the survey-answer and work-time columns, with readable names.
datasetClean <- dataset %>%
select(starts_with("answer.") | contains("worktime")) %>%
rename(Age = Answer.Age,
Education = Answer.Education,
Gender = Answer.Gender,
Income = Answer.HouseHoldIncome,
Sirius = Answer.Sirius.Radio,
Wharton = Answer.Wharton.Radio,
Worktime = WorkTimeInSeconds)
summary(datasetClean)
#Age cleaning -----------------------------
# Manually repair free-text Age entries.
# NOTE(review): AgeUnique[53]/[58]/[51] are magic positions into the unique()
# output -- they only point at the intended garbled entries for this exact
# input file; re-check if the data ever changes.
AgeUnique <- unique(datasetClean$Age)
x <- which(grepl(AgeUnique[53], datasetClean$Age))
datasetClean$Age[x] <- NA
x <- which(grepl('Eighteen', datasetClean$Age))
datasetClean$Age[x] <- 18
x <- which(grepl(AgeUnique[58], datasetClean$Age))
datasetClean$Age[x] <- 27
x <- which(grepl(AgeUnique[51], datasetClean$Age))
datasetClean$Age[x] <- 27
unique(datasetClean$Age)
# Drop any remaining rows whose Age contains no digit, then coerce.
datasetClean <- datasetClean %>%
filter(grepl("[[:digit:]]",Age))
datasetClean$Age <- as.numeric(datasetClean$Age)
summary(datasetClean)
#Education level Factor -----------------------
# Remove curly apostrophes so the level strings match consistently.
datasetClean$Education <- str_remove(datasetClean$Education, "’")
EducationUnique <- unique(datasetClean$Education)
EducationUnique <- as.tibble(EducationUnique) %>%
filter(!grepl("select one", value)) #This is a description line that we don't need
# Hand-assigned ordering of the 6 education levels (lowest = 1).
# NOTE(review): this assumes unique() returned the levels in a fixed order.
EducationUnique$Level <- c(3,5,4,2,1,6)
EducationUnique <- EducationUnique %>%
arrange(Level)
datasetClean <- datasetClean %>%
filter(Education %in% EducationUnique$value)
datasetClean$Education <- factor(datasetClean$Education, levels = EducationUnique$value)
#Gender clean -----------------------
#Income Level Factor ------------------------
# Same manual ordering trick for the 6 income brackets.
IncomeUnique <- unique(na.omit(datasetClean$Income))
IncomeUnique <- as.tibble(IncomeUnique)
IncomeUnique$Level <- c("3","2","4","6","1","5")
IncomeUnique <- IncomeUnique %>%
arrange(Level)
datasetClean <- datasetClean %>%
filter(Income %in% IncomeUnique$value)
datasetClean$Income <- factor(datasetClean$Income, levels = IncomeUnique$value)
#worktime clean -------------------------
datasetClean$Worktime <- as.numeric(datasetClean$Worktime)
#Shared Audience
# Respondents who listen to both Sirius and the Wharton channel.
Shared <- datasetClean %>%
filter(Sirius == "Yes" & Wharton == "Yes")
#Summary Statistics ---------------------------------
x <- na.omit(datasetClean) %>%
group_by(Education) %>%
summarize(Frequency = n(),
Mean_Age = mean(Age),
Median_Age = median(Age),
Mean_Worktime = mean(Worktime),
Median_Worktime = median(Worktime))
formattable(x, align = c("l","c","c","c","c"),
list('Frequency' = color_tile("white", "red")))
x <- na.omit(datasetClean) %>%
group_by(Gender) %>%
summarize(Frequency = n(),
Mean_Age = mean(Age),
Median_Age = median(Age),
Mean_Worktime = mean(Worktime),
Median_Worktime = median(Worktime))
x <- formattable(x, align = c("l","c","c","c","c"),
list('Frequency' = color_tile("white", "red")))
# NOTE(review): the Income summary below is printed but never assigned, so
# the formattable() call after it re-formats the *Gender* table stored in
# `x` -- likely a missing `x <-` here.
na.omit(datasetClean) %>%
group_by(Income) %>%
summarize(Frequency = n(),
Mean_Age = mean(Age),
Median_Age = median(Age),
Mean_Worktime = mean(Worktime),
Median_Worktime = median(Worktime))
formattable(x, align = c("l","c","c","c","c"),
list('Frequency' = color_tile("white", "red")))
#Charts Listeners -----------------------
# Age vs Income boxplots for the shared Sirius+Wharton audience.
ggplot(Shared) +
aes(x = Income, y = Age, fill = Gender) +
geom_boxplot() +
scale_fill_hue() +
theme_minimal() +
facet_wrap(vars(Education)) +
theme(strip.text.x = element_text(
size = 18, face = "bold"),
text = element_text(size = 18))
ggplot(datasetClean) +
aes(x = Age, fill = Income) +
geom_histogram(bins = 30L) +
scale_fill_hue() +
theme_minimal() +
facet_wrap(vars(Gender))
ggplot(datasetClean) +
aes(x = Age, fill = Income) +
geom_histogram(bins = 30L) +
scale_fill_hue() +
labs(title = "Age by Income") +
theme_minimal() +
facet_wrap(vars(Gender))
ggplot(datasetClean) +
aes(x = Income, y = Age, fill = Gender) +
geom_boxplot() +
scale_fill_hue() +
theme_minimal()
# Same filter as `Shared` above; kept for the plotting below.
f <- datasetClean %>%
filter(Sirius == "Yes" & Wharton == "Yes")
#Wharton Yes Sirius No
datasetClean %>%
filter(Sirius == "No" & Wharton == "Yes") %>%
ggplot() +
aes(x = Education, y = Age, fill = Income) +
geom_boxplot() +
scale_fill_hue() +
theme_minimal() +
facet_wrap(vars(Gender))
#wharton No sirius Yes
datasetClean %>%
filter(Sirius == "Yes" & Wharton == "No") %>%
ggplot() +
aes(x = Education, y = Age, fill = Income) +
geom_boxplot() +
scale_fill_hue() +
theme_minimal() +
facet_wrap(vars(Gender))
#Case Study 2 ----------------------------
library(readxl)
Casestudy2 <- read_excel("E:/Documents/Penn-One-Drive/OneDrive - PennO365/2020-2021/SP/Datamining/Home work 1/data/WomenData_06_16.xlsx")
head(Casestudy2)
Casestudy2 <- Casestudy2 %>%
rename("Field" = "Field and sex",
"Number" = "Degrees Awarded")
# Derive an S&E vs Non-S&E indicator: the logical is TRUE for Non-S&E rows,
# hence the FALSE -> 'S&E' / TRUE -> 'Non-S&E' relabelling below.
Casestudy2 <- Casestudy2 %>%
mutate('S&E' = Casestudy2$Field == 'Non-S&E')
Casestudy2 <- Casestudy2 %>%
mutate('S&E' = as.character(Casestudy2$`S&E`))
Casestudy2$`S&E` <- str_replace(Casestudy2$`S&E`, 'FALSE', 'S&E')
Casestudy2$`S&E`<- str_replace(Casestudy2$`S&E`, 'TRUE', 'Non-S&E')
Casestudy2$Degree <- factor(Casestudy2$Degree)
Casestudy2$Year <- factor(Casestudy2$Year)
# Exploratory totals.
Casestudy2%>%
filter(Year == 2015) %>%
filter(Sex == "Male") %>%
group_by(`S&E`)%>%
summarize(Total = sum(Number))
Casestudy2%>%
filter(Year == 2015) %>%
filter(Sex == "Male")
Casestudy2%>%
group_by(Year)%>%
summarize(Total = sum(Number))
Casestudy2%>%
group_by(Sex, Field)%>%
summarize(Total = sum(Number))
# Per-field totals by sex, and which sex was awarded more degrees.
x <- Casestudy2 %>%
pivot_wider(names_from = Sex, values_from = "Number") %>%
group_by(Field) %>%
summarize(Male = sum(Male),
Female = sum(Female))
x <- x %>%
mutate(More = x$Male > x$Female)
x$More <- as.character(x$More)
x$More <- str_replace(x$More, 'FALSE', 'Female')
x$More <- str_replace(x$More, 'TRUE','Male')
#Case Study 2 [3.2] --------------------------
ggplot(Casestudy2%>%
filter(Year == 2015) %>%
filter(Sex == "Male")) +
aes(x = `S&E`, fill = Field, weight = Number) +
geom_bar() +
scale_fill_brewer(palette = "RdGy") +
labs(title = "Non-S&E compared to S&E", subtitle = "Males in sciences related fields in 2015") +
ggthemes::theme_economist() +
facet_wrap(vars(Degree))
ggplot(Casestudy2%>%
filter(Year == 2015)) +
aes(x = `S&E`, fill = Sex, weight = Number) +
geom_bar() +
scale_fill_brewer(palette = "RdGy") +
labs(title = "Non-S&E compared to S&E", subtitle = "Males in sciences related fields in 2015") +
ggthemes::theme_economist() +
facet_wrap(vars(Degree))
ggplot(Casestudy2%>%
filter(Year == 2015)) +
aes(x = `S&E`, fill = Degree, weight = Number) +
geom_bar() +
scale_fill_brewer(palette = "RdGy") +
labs(title = "Non-S&E compared to S&E", subtitle = "Males in sciences related fields in 2015") +
ggthemes::theme_economist() +
facet_wrap(vars(Sex))
# NOTE(review): two fill scales are supplied below; the later
# scale_fill_brewer() replaces scale_fill_hue() (ggplot2 keeps the last).
Casestudy2 %>%
filter(Field %in% c("Computer sciences", "Mathematics and statistics")) %>%
ggplot() +
aes(x = Field, y = Number, fill = Sex) +
geom_boxplot() +
scale_fill_hue() +
scale_fill_brewer(palette = "RdGy") +
coord_flip() +
ggthemes::theme_economist()
#Non-S&E
Casestudy2 %>%
filter(Sex == 'Male') %>%
group_by(`S&E`) %>%
summarize(Frequency = sum(Number))
Casestudy2 %>%
filter(Sex == 'Male') %>%
filter(!Field == 'Non-S&E') %>%
group_by(Sex) %>%
summarize('Total S&E' = sum(Number))
#Case Study 2 [3.3] ---------------------------
library(ggplot2)
# NOTE(review): facet_grid(vars(Sex), vars()) passes an empty column spec --
# probably meant facet_grid(rows = vars(Sex)); also the trailing
# theme_minimal() on its own line is a no-op.
ggplot(Casestudy2) +
aes(x = Field, y = Number, fill = Degree) +
geom_boxplot() +
scale_fill_hue() +
theme_minimal() +
facet_grid(vars(Sex), vars())
theme_minimal()
#Case Study 3 ---------------------------
Casestudy3long <- read.csv("E:/Documents/Penn-One-Drive/OneDrive - PennO365/2020-2021/SP/Datamining/Home work 1/data/baseball.csv",
na.strings = c(""))
Casestudy3wide <- read.csv("E:/Documents/Penn-One-Drive/OneDrive - PennO365/2020-2021/SP/Datamining/Home work 1/data/MLPayData_Total.csv",
na.strings = c(""))
head(Casestudy3wide)
#Case Study 3 4.1 ---------------------------------
# Small worked example: 2012->2013 payroll difference and log-difference.
Casestudy3 <- Casestudy3wide %>%
select(Team.name.2014,p2013,p2012) %>%
mutate(Diff = p2013 - p2012,
Log_Diff = log(p2013) - log(p2012))
Casestudy3a <- Casestudy3wide %>%
select(Team.name.2014,X2013.pct,X2012.pct) %>%
pivot_longer(cols = starts_with("X"), names_to = "Year") %>%
rename(Win_percent = value)
Casestudy3a$Year <- str_remove(Casestudy3a$Year, "X")
# NOTE(review): ".pct" is a regex where "." matches any character; here it
# still removes the intended suffix, but "\\.pct" would be safer.
Casestudy3a$Year <- str_remove(Casestudy3a$Year, ".pct")
Casestudy3 <- pivot_longer(Casestudy3, cols = starts_with("p"), names_to = "Year")
Casestudy3 <- Casestudy3 %>%
rename(Payroll = value)
# NOTE(review): this strips "p" from Casestudy3a$Year (already clean) and
# assigns the result into Casestudy3 -- it only works if the two frames have
# identical row order/length; likely meant str_remove(Casestudy3$Year, "p").
Casestudy3$Year <- str_remove(Casestudy3a$Year, "p")
left_join(Casestudy3,Casestudy3a, by = "Year") %>%
select(!Team.name.2014.y)
# THE CODE FROM CLASS -----------------------
PayrollKey <- Casestudy3wide %>% # first create variable: payroll and year
select(Team.name.2014, p1998:p2014) %>%
mutate(`Diff1998-1999` = p1999 - p1998,
`Diff1999-2000` = p2000 - p1999,
`Diff2001-2002` = p2001 - p2000,
`Diff2002-2003` = p2003 - p2002,
`Diff2003-2004` = p2004 - p2003,
`Diff2005-2004` = p2005 - p2004,
`Diff2006-2005` = p2006 - p2005,
`Diff2007-2006` = p2007 - p2006,
`Diff2008-2007` = p2008 - p2007,
`Diff2009-2008` = p2009 - p2008,
`Diff2010-2009` = p2010 - p2009,
`Diff2011-2010` = p2011 - p2010,
`Diff2012-2011` = p2012 - p2011,
`Diff2013-2012` = p2013 - p2012,
`Diff2014-2013` = p2014 - p2013)
# NOTE(review): in both Diff tables below the year labels drift from the
# arithmetic (e.g. Diff2001 = p2001 - p2000, Diff2002 = p2003 - p2002) and
# the p2002 - p2001 difference is never computed -- verify before reuse.
payrollDiff <- Casestudy3wide %>% # first create variable: payroll and year
select(Team.name.2014, p1998:p2014) %>%
mutate(Diff1998 = p1999 - p1998,
Diff1999 = p2000 - p1999,
Diff2001 = p2001 - p2000,
Diff2002 = p2003 - p2002,
Diff2003 = p2004 - p2003,
Diff2005 = p2005 - p2004,
Diff2006 = p2006 - p2005,
Diff2007 = p2007 - p2006,
Diff2008 = p2008 - p2007,
Diff2009 = p2009 - p2008,
Diff2010 = p2010 - p2009,
Diff2011 = p2011 - p2010,
Diff2012 = p2012 - p2011,
Diff2013 = p2013 - p2012,
Diff2014 = p2014 - p2013)
payrollDiff <- payrollDiff %>%
select(Team.name.2014, Diff1998:Diff2014) %>%
pivot_longer(cols = starts_with("Diff"),
names_to = "year",
names_prefix = "Diff",
values_to = "Difference")
logpayrollDiff <- Casestudy3wide %>% # first create variable: payroll and year
select(Team.name.2014, p1998:p2014) %>%
mutate(Diff1998 = log(p1999) - log(p1998),
Diff1999 = log(p2000) - log(p1999),
Diff2001 = log(p2001) - log(p2000),
Diff2002 = log(p2003) - log(p2002),
Diff2003 = log(p2004) - log(p2003),
Diff2005 = log(p2005) - log(p2004),
Diff2006 = log(p2006) - log(p2005),
Diff2007 = log(p2007) - log(p2006),
Diff2008 = log(p2008) - log(p2007),
Diff2009 = log(p2009) - log(p2008),
Diff2010 = log(p2010) - log(p2009),
Diff2011 = log(p2011) - log(p2010),
Diff2012 = log(p2012) - log(p2011),
Diff2013 = log(p2013) - log(p2012),
Diff2014 = log(p2014) - log(p2013))
logpayrollDiff <- logpayrollDiff %>%
select(Team.name.2014, Diff1998:Diff2014) %>%
pivot_longer(cols = starts_with("Diff"),
names_to = "year",
names_prefix = "Diff",
values_to = "Log_Difference")
payroll <- Casestudy3wide %>% # first create variable: payroll and year
select(Team.name.2014, p1998:p2014) %>%
pivot_longer(cols = starts_with("p"),
names_to = "year",
names_prefix = "p",
values_to = "payroll")
payroll[1:3, 1:3] # show a few rows
win_num <- Casestudy3wide %>% # create variable: win_num and year
select(Team.name.2014, X1998:X2014) %>%
pivot_longer(cols = X1998:X2014,
names_to = "year",
names_prefix = "X",
values_to = "win_num")
#win_num[1:3, 1:3]
win_pct <- Casestudy3wide %>% # create variable: win_pct and year
select(Team.name.2014, X1998.pct:X2014.pct) %>%
pivot_longer(cols = X1998.pct:X2014.pct,
names_to = "year",
names_prefix = "X",
values_to = "win_pct") %>%
mutate(year = substr(year, 1, 4))
#win_pct[1:3, 1:3]
# join tables into team, year, payrow, win_num, win_pct
Casestudy3wide_long <- payroll %>%
inner_join(win_num, by = c("Team.name.2014", "year")) %>%
inner_join(win_pct, by = c("Team.name.2014", "year")) %>%
inner_join(payrollDiff, by = c("Team.name.2014", "year")) %>%
inner_join(logpayrollDiff, by = c("Team.name.2014", "year")) %>%
rename(Payrole_Diff = Difference,
Log_Payrole_Diff = Log_Difference)
head(Casestudy3wide_long, 2) # see first 2 rows
#Summary Statistics ---------------------
# NOTE(review): `year == 2010:2014` is an elementwise recycled comparison,
# not a membership test -- use `year %in% 2010:2014` to keep all rows in
# that range.
Casestudy3wide_long%>%
filter(year == 2010:2014) %>%
group_by(Team.name.2014) %>%
summarize(Change_Payroll = sum(Log_Payrole_Diff),
Wins = sum(win_num),
Wins_Pct = mean(win_pct)) %>%
arrange(desc(Change_Payroll))
#PLOTS --------------------------
# Interactive per-team log payroll-difference trajectories.
f <- Casestudy3wide_long %>%
ggplot(aes(x = year, y = Log_Payrole_Diff, group = Team.name.2014, col = Team.name.2014)) +
geom_line() +
geom_point() +
theme_bw()
ggplotly(f +
theme(legend.position = "none"))
|
3a8f89f189ab934d4b56cdd3890ff02b60faffff
|
742fe8b884db01ac7b293b87485fe69e5d40ba02
|
/man/SplitLoss.Rd
|
be90907eb62984596322726434fd9e801436a285
|
[] |
no_license
|
lorenzha/hdcd
|
47d3f8557ea3f2b6b6fbbc892592662d4d6436a1
|
23be900a3414c3e3e25865df00ceb5279a2637fb
|
refs/heads/master
| 2021-07-03T02:59:52.456658
| 2018-09-01T15:21:20
| 2018-09-01T15:21:20
| 111,222,046
| 0
| 3
| null | 2018-12-22T18:00:17
| 2017-11-18T16:47:41
|
R
|
UTF-8
|
R
| false
| true
| 698
|
rd
|
SplitLoss.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/loss_funs.R
\name{SplitLoss}
\alias{SplitLoss}
\title{SplitLoss}
\usage{
SplitLoss(x, split_point, SegmentLossFUN, start, end)
}
\arguments{
\item{x}{A n times p matrix or data frame.}
\item{split_point}{Index on which to split the segment}
\item{SegmentLossFUN}{A loss function as created by closure \code{\link{SegmentLoss}}.}
\item{start}{The start index of the given segment \code{x}.}
\item{end}{The end index of the given segment \code{x}.}
}
\value{
Sum of the loss for both new segments after split.
}
\description{
Calculates the sum of the loss that results from splitting the segment at the given point.
}
|
aff618dbf69ef6dd8804bce512b317131b09ec2f
|
fe905f8afb90a22c5efb31680cc25db4f9a5f5e7
|
/R/module_overview_table.R
|
6aa87d9f717d8128a88735f26e3e3d02926adb34
|
[] |
no_license
|
voutcn/pavian
|
cb7ecae6bb6bbd78ae45412e05ae17b6f4bd4243
|
614ce7062f596f9252935bc225c8a552f731561c
|
refs/heads/master
| 2020-07-13T18:31:46.059286
| 2016-11-16T05:13:44
| 2016-11-16T05:13:44
| 73,886,274
| 1
| 0
| null | 2016-11-16T05:10:15
| 2016-11-16T05:10:14
| null |
UTF-8
|
R
| false
| false
| 5,033
|
r
|
module_overview_table.R
|
#' Build the UI for the report overview module
#'
#' Produces a checkbox that toggles percentage display and a horizontally
#' scrollable placeholder for the samples-overview data table, with all
#' input/output ids namespaced under \code{id}.
#'
#' @param id Shiny namespace id.
#'
#' @return A \code{shiny::tagList} of UI elements for the report overview module.
#' @export
#' @import shiny
reportOverviewModuleUI <- function(id) {
  ns <- shiny::NS(id)
  # Toggle between raw read counts and per-sample percentages.
  percent_toggle <- checkboxInput(
    ns("opt_samples_overview_percent"),
    label = "Show percentages instead of number of reads",
    value = TRUE
  )
  # Wrap the table so wide tables scroll instead of overflowing the page.
  table_panel <- div(
    style = 'overflow-x: scroll',
    DT::dataTableOutput(ns('dt_samples_overview'))
  )
  shiny::tagList(percent_toggle, table_panel)
}
#' Shiny modules to display an overview of metagenomics reports
#'
#' Server half of the report overview module: summarizes each report into one
#' row, then renders the combined table with color bars, optionally expressing
#' counts as percentages of the first count column.
#'
#' @param input Shiny input object.
#' @param output Shiny output object.
#' @param session Shiny session object.
#' @param sample_data Samples \code{data.frame}.
#' @param reports List of reports.
#' @param datatable_opts Additional options for datatable.
#'
#' @return Report overview module server functionality.
#' @export
#' @import shiny
reportOverviewModule <- function(input, output, session, sample_data, reports, datatable_opts = NULL) {
#r_state <- list()
# Placeholder observer: previously persisted the DT search/sort state when
# the percent checkbox was toggled (see commented-out code).
observeEvent(input$opt_samples_overview_percent, {
## save state of table
#r_state <<- list(
# search_columns = input$dt_samples_overview_search_columns,
# state = input$dt_samples_overview_state
# )
# utils::str(input$dt_samples_overview_state)
})
# Reactive: one summary row per report (via summarize_report, defined
# elsewhere in the package), bound into a single data.frame with a leading
# "Name" column.
get_samples_summary <- reactive( {
validate(need(sample_data(), message = "No data available."))
validate(need(reports(), message = "No data available."))
withProgress({
## Create summaries of all reports
#str(reports())
samples_summary <- do.call(rbind, lapply(reports(), summarize_report))
samples_summary$Name <- rownames(samples_summary)
#samples_summary$FileName <- sample_data()[,"ReportFile"]
extra_cols <- c("Name")
# Move the extra columns to the front, preserving the rest of the order.
samples_summary <- samples_summary[,c(extra_cols, setdiff(colnames(samples_summary),extra_cols))]
colnames(samples_summary) <- beautify_string(colnames(samples_summary))
samples_summary
}, message = "Summarizing sample contents ... ")
})
## Samples overview output
output$dt_samples_overview <- DT::renderDataTable({
samples_summary <- get_samples_summary()
start_color_bar_at <- 2 ## length of extra_cols + 1
number_range <- c(0, max(samples_summary[, start_color_bar_at], na.rm = TRUE))
if (isTRUE(input$opt_samples_overview_percent)) {
## add a custom renderer.
# In percent mode, keep the first count column as raw reads and express
# every later column as a percentage of it (rounded to 4 sig. digits).
start_color_bar_at <- start_color_bar_at + 1
number_range <- c(0, 100)
samples_summary[, start_color_bar_at:ncol(samples_summary)] <-
100 * signif(sweep(samples_summary[, start_color_bar_at:ncol(samples_summary)], 1, samples_summary[, start_color_bar_at], `/`), 4)
## TODO: Define columnDefs and give read counts on mouse-over
}
# Variant of DT::styleColorBar whose generated JS skips non-numeric cells.
styleColorBar2 = function(data, color, angle=90) {
rg = range(data, na.rm = TRUE, finite = TRUE)
r1 = rg[1]; r2 = rg[2]; r = r2 - r1
htmlwidgets::JS(sprintf(
"isNaN(parseFloat(value)) || value <= %s ? '' : 'linear-gradient(%sdeg, transparent ' + (%s - value)/%s * 100 + '%%, %s ' + (%s - value)/%s * 100 + '%%)'",
r1, angle, r2, r, color, r2, r
))
}
# Columns before this index get blue bars; from here on, green bars
# (presumably the microbial-content columns -- verify column layout).
microbial_col <- start_color_bar_at + 5
dt <- DT::datatable(
samples_summary
, rownames = FALSE
, selection = 'single'
,extensions = c('Buttons')
, options = list(
dom = 'Bfrtip'
, buttons = c('pageLength','pdf', 'excel' , 'csv', 'copy', 'colvis')
, lengthMenu = list(c(10, 25, 100, -1), c('10', '25', '100', 'All'))
, pageLength = 25
# NOTE(review): an `options` entry nested inside `options` is likely
# unintended -- datatable_opts and stateSave probably belong at the
# top level of this list; verify against DT's API.
, options = c(datatable_opts, list(stateSave = TRUE))
)
) %>%
DT::formatStyle(
colnames(samples_summary)[seq(from=start_color_bar_at, to=microbial_col-1)],
background = styleColorBar2(number_range, 'lightblue')
) %>%
DT::formatStyle(colnames(samples_summary)[seq(from=microbial_col,to=ncol(samples_summary))],
background = DT::styleColorBar(c(0, max(
samples_summary[, microbial_col], na.rm = TRUE
)), 'lightgreen'))
#formatString <- function(table, columns, before="", after="") {
# DT:::formatColumns(table, columns, function(col, before, after)
# sprintf("$(this.api().cell(row, %s).node()).html((%s + data[%d]) + %s); ",col, before, col, after),
# before, after
# )
#}
# Number formatting: thousands separators for raw counts, "%" suffix for
# percentage columns.
if (isTRUE(input$opt_samples_overview_percent)) {
dt <- dt %>%
DT::formatCurrency(start_color_bar_at - 1, currency = '', digits = 0) %>%
DT::formatString(seq(from=start_color_bar_at, to=ncol(samples_summary)),
suffix = '%') ## TODO: display as percent
# ## not implemented for now as formatPercentage enforces a certain number of digits, but I like to round
# ## with signif.
} else {
dt <-
dt %>% DT::formatCurrency(seq(from=start_color_bar_at, to=ncol(samples_summary)),
currency = '',
digits = 0)
}
dt
})
}
|
f92a05d7cdd0921b033e014958cc5ec32d60cf7e
|
f544305bed485204ab990f54775d7250224bd73d
|
/Station_Flux_Data/Recalculate_Hourly_Flux.R
|
fd8dd5929ed09704764299f98369c9006b6c776b
|
[] |
no_license
|
mikeramsey21/Nice-Ride-Routing-Update
|
4687c632c19cab89771d70dff5d458798da7a492
|
c12e4a438c64b44790c8781806c0bf989989d187
|
refs/heads/master
| 2020-03-28T21:40:37.012185
| 2018-09-17T19:41:38
| 2018-09-17T19:41:38
| 149,173,651
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,232
|
r
|
Recalculate_Hourly_Flux.R
|
#############################################
# The Fundamental Group
# Date Created: 6/15/18
# Last Updated: 9/14/18
# Joint work completed by:
# Michael E. Ramsey, University of Colorado, Boulder
# Eric Roberts, University of California, Merced
# Olivia Cannon, University of Minnesota, Twin Cities
# Ariel Bowman, University of Texas at Arlington
# Elizabeth Wicks, University of Washington
# Sheng Zhang, Purdue University
#############################################
# This is an R script to reformulate the data present in "stationFluxAvgHour17.csv"
# This csv file contains the calculated average hourly flux for each station for
# each day of the week. The flux is defined as the number of rides out of the
# station minus the number of rides coming into the station.
# In it's current form, this R-script recalculates the flux to be viewed in the
# form of morning, afternoon, and evening. This corresponding to new routes being
# made 3 times a day for each shuttler.
####### Workspace Items #######
# data: The original average hourly flux calculation for each day of the week
# monday: The updated flux calculation by Morning, Afternoon, and Evening
#############################################
# Load necessary packages
library(tidyr)
library(dplyr)
# Load the data
data <- read.csv("Station_Flux_Data/Average_Hourly_Flux_17.csv",row.names = 1)
# Define new column for morning, afternoon, and evening:
# M = hours [3, 11), A = [11, 19), E = everything else (19:00-02:59).
data <- data %>%
mutate(timezone = ifelse(hour >= 3 & hour < 11, "M",
ifelse(hour >= 11 & hour < 19, "A", "E")))
# Add up the fluxes for each time period
data <- data %>%
group_by(Stationnumber,weekday, timezone) %>%
summarise(RidesOut = sum(RidesOut),
RidesIn = sum(RidesIn),
Rideoutminusin = sum(Rideoutminusin))
# Round the flux column
data$Rideoutminusin <- round(data$Rideoutminusin)
# Reformat the data frame:
# NOTE(review): only Monday is reshaped here, although the header describes
# every day of the week -- repeat (or loop) this block for the other days.
Monday <- data %>%
filter(weekday == "Mon") %>%
select(Stationnumber,timezone,Rideoutminusin) %>%
spread(timezone,Rideoutminusin)
# Replace NAs with zeros
Monday[is.na(Monday)] <- 0
# Write to a csv
#write.csv(Monday, "Average_Hourly_Flux_17_Mon_MAE.csv")
|
3e7045fa7f469d64d89e608e51dadbc342ab389b
|
bd0ff0492d12431e88ae8b1d4a471c15ab26e76b
|
/man/load.menus.Rd
|
2907a3d307a60b90dd60ccff51f668f4c8727380
|
[] |
no_license
|
cran/CADStat
|
b01fe3c32c4e7ab24a592c744e259e6ed67aa6d4
|
2c8110fb89a8b7c9706359c00d12ccae399f276f
|
refs/heads/master
| 2021-01-18T14:20:35.989297
| 2017-05-10T01:09:22
| 2017-05-10T01:09:22
| 17,717,270
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 114
|
rd
|
load.menus.Rd
|
\name{load.menus}
\alias{load.menus}
\title{Load new CADStat menus}
\description{
Internal function
}
|
dfc47cabde20f1cb10b9a2f1f74fce1fad4acbb5
|
2891723cf9b7af5bca36338a768faeab0f974a09
|
/analysis_for_AlleleHMM_manuscript/Performance_test_with_GRO-seq_data/Compare_allele-specificity_between_gene_AlleleHMM_AlleleDB.R
|
4315520bace0f6e50916c38ee65326e5b526a976
|
[
"BSD-2-Clause"
] |
permissive
|
Danko-Lab/AlleleHMM
|
66dcb04df4edbc3680407c743b05828fb730c716
|
03c3ac6d50d81cf9329fa1ac2bf0e7b89b39ba79
|
refs/heads/master
| 2022-07-14T07:18:13.070456
| 2019-10-09T14:57:05
| 2019-10-09T14:57:05
| 144,310,749
| 4
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,205
|
r
|
Compare_allele-specificity_between_gene_AlleleHMM_AlleleDB.R
|
#Fig4 D,E, Sup_fig7
# Reproduces three manuscript figures comparing allele-specificity calls
# between AlleleHMM and AlleleDB on GRO-seq data. Groups:
#   H = AlleleHMM only, A = both tools, D = AlleleDB only.
#Figure4D
pdf("fig4D.pdf")
par(mfrow=c(1,3))
# Simple Pie Chart
lbls <- c("Concordant", "Discordant", "Symmetric")
# NOTE(review): counts are hard-coded from a prior analysis run -- keep in
# sync with the upstream tables they were derived from.
H <- c( 7957, 399, 2250)
A <- c( 5274, 512, 1993)
D <- c( 15926, 7681, 34356)
#par(mfrow=c(1,3))
pie(H, labels = lbls, main="H")
pie(A, labels = lbls, main="A")
pie(D, labels = lbls, main="D")
dev.off()
#Sup_fig7
## read counts of SNPs in H (AlleleHMM, not AlleleDB), A (AlleleHMM and AlleleDB), and D (AlleleDB, not AlleleHMM)
pdf("sup_fig7.pdf")
H_Con=read.table("H_Concordant_counts.txt")
H_Dis=read.table("H_Discordant_counts.txt")
H_Sym=read.table("H_Symmetric_counts.txt")
A_Con=read.table("A_Concordant_counts.txt")
A_Dis=read.table("A_Discordant_counts.txt")
A_Sym=read.table("A_Symmetric_counts.txt")
D_Con=read.table("D_Concordant_counts.txt")
D_Dis=read.table("D_Discordant_counts.txt")
D_Sym=read.table("D_Symmetric_counts.txt")
# Pool concordant/discordant/symmetric read counts per group.
H=c(H_Con$V1, H_Dis$V1, H_Sym$V1)
A=c(A_Con$V1, A_Dis$V1, A_Sym$V1)
D=c(D_Con$V1, D_Dis$V1, D_Sym$V1)
# Cap counts at u so the histogram tail collapses into the last bin.
u=100
H[H>=u]=u
A[A>=u]=u
D[D>=u]=u
par(mfrow=c(3,1))
b=1
hist(H, freq=F, col="red", breaks = seq(1,u+b,b), xlab="Read counts per SNP", ylab="fraction of SNPs" )
hist(A, freq=F, col="purple", breaks = seq(1,u+b,b), xlab="Read counts per SNP", ylab="fraction of SNPs")
hist(D, freq=F, col="blue", breaks = seq(1,u+b,b), xlab="Read counts per SNP", ylab="fraction of SNPs")
dev.off()
#Fig4E
# Distribution of the number of maternal/paternal switches per region for
# the three groups, capped at 10 switches.
pdf("Fig4E.pdf")
A=read.table("interestingHets_AlleleDB_in_AlleleHMM_MP_switch_counts.txt")
D=read.table("interestingHets_AlleleDB_out_AlleleHMM_MP_switch_counts.txt")
H=read.table("counts_noX_MinCount1_inAlleleHMM_t1e-05_interestingHets_outAlleleDB_switch_counts.txt")
# xmax is taken before capping so the break grid covers the raw range.
xmax = max(c(A$V1, D$V1, H$V1))
par(mfrow=c(3,1))
par(cex.lab=2.2, cex.axis=2.2)
u=10
H$V1[H$V1>=u]=u
A$V1[A$V1>=u]=u
D$V1[D$V1>=u]=u
# V1 - 1 shifts the counts down by one before plotting -- presumably V1
# counts runs rather than switches; TODO confirm against the upstream
# counting script.
hist(H$V1-1, breaks = seq(-0.5,xmax,1), freq=F,col="red", xlim=c(0,10), ylim=c(0,1), main=NA, xlab="number of switches")
hist(A$V1-1, breaks = seq(-0.5,xmax,1), freq=F,col="purple", xlim=c(0,10), ylim=c(0,1), main=NA, xlab="number of switches") #,density=20,angle=180
hist(D$V1-1, breaks = seq(-0.5,xmax,1), freq=F,col="blue", xlim=c(0,10), ylim=c(0,1), main=NA, xlab="number of switches") #,density=20,angle=45
dev.off()
|
8cf04b58b720e788b919caaa42f17a68ce934a1e
|
d102b1c77e63801d3d5d401ac15f22ae46b394e4
|
/agrpfunc.R
|
852f331d8c480c97c85346da700306e430e0101e
|
[] |
no_license
|
hqwang126/aGRP
|
ff52d7cbb27d7e53ffaaa5639407e3a724a8862c
|
93f2781d8080c4e5ca6662f61441a25da2ba2b76
|
refs/heads/master
| 2020-03-31T17:47:10.073879
| 2018-10-11T05:11:25
| 2018-10-11T05:11:25
| 152,434,114
| 1
| 0
| null | null | null | null |
ISO-8859-9
|
R
| false
| false
| 904
|
r
|
agrpfunc.R
|
## aGRP statistic for two-group differential expression.
##
## Input : d1 - expression matrix (genes x samples) of the cancer class,
##         d2 - expression matrix (genes x samples) of normal tissues.
##         A single gene may be supplied as a plain numeric vector; it is
##         promoted to a 1-row matrix.
## Output: a list with components
##         pud     - per-gene GRP statistic, P(up) - P(down),
##         p.value - per-gene two-sided p-value from a normal approximation.
agrpfunc <- function(d1, d2) {
  # The original test `class(d1) == "numeric"` breaks in R >= 4.0, where
  # class(matrix) is the length-2 vector c("matrix", "array"); test the
  # shape explicitly instead. This also accepts integer vectors, which the
  # class-string comparison rejected.
  if (is.numeric(d1) && !is.matrix(d1)) d1 <- matrix(d1, nrow = 1)
  if (is.numeric(d2) && !is.matrix(d2)) d2 <- matrix(d2, nrow = 1)
  g <- nrow(d1)

  # Per gene: fraction of the other group's samples ranked above/below.
  # matrix(..., nrow = g) keeps a 1-row matrix in the single-gene case,
  # where apply() would collapse to a vector and rowMeans() would error.
  up1 <- matrix(apply(d1, 2, function(x) rowSums(x >= d2) / ncol(d2)), nrow = g)
  up2 <- matrix(apply(d2, 2, function(x) rowSums(x <= d1) / ncol(d1)), nrow = g)
  pu <- (ncol(d1) * rowMeans(up1) + ncol(d2) * rowMeans(up2)) / (ncol(d1) + ncol(d2))

  down1 <- matrix(apply(d1, 2, function(x) rowSums(x <= d2) / ncol(d2)), nrow = g)
  down2 <- matrix(apply(d2, 2, function(x) rowSums(x > d1) / ncol(d1)), nrow = g)
  pd <- (ncol(d1) * rowMeans(down1) + ncol(d2) * rowMeans(down2)) / (ncol(d1) + ncol(d2))

  pud <- pu - pd
  n <- ncol(d1)
  m <- ncol(d2)
  # Two-sided p-value under the null; variance formula as in the original.
  p.value <- 2 * (1 - pnorm(abs(pud), 0,
                            ((n^2 + m^2) / (2 * n * m * (n + m)) / 0.75)^(1 / 2)))
  list(pud = pud, p.value = p.value)
}
|
cc7d45f6fba62eacad795236f1e8ddbc48bf33c2
|
9982a8150a01cc0214dc05d36960e12b252457a7
|
/data_flow/a_import/code/merge_all.R
|
0a17ee96569b6d1381e957b060c849d922ec0030
|
[] |
no_license
|
JeffreyUslan/PrototypicalProject
|
7eaf13fe036808833ffad211fcd40c6b7036cd9b
|
71d8f0271c802fe90a0b81e6db382dae5184a470
|
refs/heads/master
| 2021-01-19T05:25:53.869721
| 2015-07-06T17:39:52
| 2015-07-06T17:39:52
| 34,831,099
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 102
|
r
|
merge_all.R
|
# Merge the per-sensor data tables on their timestamp column, keeping all
# rows from both sides (full outer join).
# NOTE(review): 'file001', 'file003' and 'file250' are not defined in this
# script -- presumably data frames loaded by an earlier import step, each
# with a "time.UTC." column; confirm against the loading code.
file=merge(file001,file003,by="time.UTC.",all=TRUE)
file=merge(file,file250,by="time.UTC.",all=TRUE)
|
ae1d86ece8560e7b66678903a63f6a01ee370aa0
|
158af21f249555f32e12889c7344f06042120748
|
/man/join_expression.Rd
|
1340c0506512a4e495a9d7884bb735cb5b08f16e
|
[
"MIT"
] |
permissive
|
RubD/Giotto
|
cd441655913246190f5097d4fbff869a9e8d7d0a
|
3e6671a2512484a7b90b421b7e697d1abc2ec760
|
refs/heads/master
| 2023-09-01T06:21:39.024110
| 2023-04-19T14:34:40
| 2023-04-19T14:34:40
| 547,482,695
| 8
| 5
|
MIT
| 2023-04-04T17:56:36
| 2022-10-07T19:03:41
|
R
|
UTF-8
|
R
| false
| true
| 482
|
rd
|
join_expression.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/image_registration.R
\name{join_expression}
\alias{join_expression}
\title{join_expression}
\usage{
join_expression(expression_list, z_vals)
}
\arguments{
\item{expression_list}{list of expression values to merge}
\item{z_vals}{z values to use when z-stacking the expression values}
}
\description{
joins expression list together while appending z value to cellIDs to ensure unique colnames
}
\keyword{internal}
|
c0280759dbf88873d1557be05182b6efd588ee0c
|
66cf4adc85683e73290da2139c071365b40b6209
|
/R/addPhantomSummary.R
|
b0444feee7028fa0deab51937dad289bd8257a41
|
[
"MIT"
] |
permissive
|
dpelegri/EASIER
|
db51d623eae85300b4b63f30f97ac93c9676a004
|
ce9101b990874c13f6d8564867fdb3cbdc5a7841
|
refs/heads/main
| 2023-08-21T21:33:43.968027
| 2021-10-22T15:28:07
| 2021-10-22T15:28:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 624
|
r
|
addPhantomSummary.R
|
#' Add phantom summary
#'
#' Derive a simplified summary column (\code{Phantom_S}) from the annotated
#' \code{Phantom} column: \code{"low"} if the annotation contains "low",
#' \code{"high"} if it contains "high", and \code{""} otherwise. A value
#' containing both strings is classified as \code{"low"} because that test
#' runs first.
#'
#' @param data dataframe with a \code{Phantom} character column
#'
#' @return Original dataframe with an added \code{Phantom_S} column holding
#'   \code{"low"}, \code{"high"} or \code{""}
#'
#'
#' @export
addPhantomSummary <- function(data)
{
  if(! "Phantom" %in% colnames(data))
    stop("'Phantom' not found in data")

  # Adding phantom summary
  data$Phantom_S <- ifelse(grepl("low", data$Phantom), "low",
                        ifelse(grepl("high", data$Phantom),"high", ""))

  # Fix: the original called `retur(data)` (typo), which errored at runtime.
  return(data)
}
|
9ed9288e23bd71008e94d7c36ad94972ce35a549
|
0eafe565b2b81d5a75393ecf8e1332f9a17118fd
|
/donga_news_crawling_frequency_analysis.R
|
61cc7ba4e4426da1a0b022ebae7330c2716b1cc8
|
[] |
no_license
|
LeeSuA/TextMining
|
b548564bdd7313190f4de279749a31b380aa7538
|
f53591cf286c3a5aae47732dece4b96ff0c0d3cd
|
refs/heads/master
| 2021-01-04T11:04:51.915383
| 2020-02-14T14:00:40
| 2020-02-14T14:00:40
| 240,519,343
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,035
|
r
|
donga_news_crawling_frequency_analysis.R
|
# Crawl Dong-A Ilbo news search results for the keyword embedded in the URL
# (query=코로나), extract each article's body text, strip non-Korean
# characters, and save the raw article texts for later frequency analysis.
library(rvest)
library(dplyr)

# Search URL parts; results are paginated by offset (15 results per page).
urlPart1 <- "http://www.donga.com/news/search?p="
urlPart3 <- "&query=코로나&check_news=1&more=1&sorting=1&search_date=1&v1=&v2=&range=1"

# Build the first six result-page URLs (offsets 1, 16, 31, ...).
urls <- NULL
for(x in 0:5){
  urls[x+1] <- paste0(urlPart1, as.character(x*15+1), urlPart3)
}
urls

# Collect the article links from every result page.
links <- NULL
for(url in urls){
  html <- read_html(url)
  links <- c(links, html %>%
               html_nodes('.searchCont') %>%
               html_nodes('a') %>%
               html_attr('href') %>%
               unique())
}
links
# Drop links to PDF editions.
# Fix: the original `links[-grep("pdf", links)]` silently drops EVERY link
# when there is no match (x[-integer(0)] selects nothing); logical indexing
# with grepl() behaves correctly in both cases.
links <- links[!grepl("pdf", links)]

# Fetch each article and extract its body text.
txts <- NULL
for(link in links){
  html <- read_html(link)
  txts <- c(txts, html %>%
              html_nodes('.article_txt') %>%
              html_text())
}
txts[3]

# Clean-up: remove Latin letters, digits, punctuation and layout whitespace
# so that (mostly) Korean words remain for the frequency analysis.
news <- txts
news <- gsub("[A-z]", "", news)
news <- gsub("[0-9]", "", news)
news <- gsub("[/;:|)*~'!^-_+<>@#$%&({}]", "", news)
news <- gsub("\r", "", news)
news <- gsub(" \n", "", news)
news <- gsub("..\n", "", news)
news <- gsub(", , ", "", news)
# NOTE(review): pattern and replacement appear identical here -- possibly a
# non-breaking space or doubled space lost in transit; verify the original.
news <- gsub(" ", " ", news)
news

write.table(txts, "news.txt")
|
2e182ec27caaa6eb8652bb7451bd736f37d565af
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/dobson/examples/aids.Rd.R
|
d058d0f658476c1f8f69967905f202ef52df79d4
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 156
|
r
|
aids.Rd.R
|
# Example script extracted from the 'dobson' package documentation
# for the 'aids' dataset.
library(dobson)
### Name: aids
### Title: AIDS data from table 4.5
### Aliases: aids
### Keywords: datasets

### ** Examples

# Load the dataset and print a summary.
data(aids)
summary(aids)
|
a6770e84a902521cff097e394e7fdb38260e9f2e
|
495aed58f478cfac14b775386d2da971355e7442
|
/run_analysis.R
|
6bd54b911d6628deaa74aab80b3d9375fc932c96
|
[] |
no_license
|
Sanguinius1/Getting-and-Cleaning-Data-Course-Project
|
e96a2e7c955b89596ab69f040e51eeacde788e21
|
b2fece9deb569a931988e0a4799fdf13236e1a96
|
refs/heads/master
| 2020-03-18T03:58:45.921201
| 2018-05-21T14:15:32
| 2018-05-21T14:15:32
| 134,265,502
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,963
|
r
|
run_analysis.R
|
## Getting and Cleaning Data course project: build a tidy data set holding
## the average of every mean()/std() measurement in the UCI HAR data, per
## subject and activity, and write it to "Tidydata.txt".

##Load library
library(dplyr)

#1. Download and unzip the Dataset:
URL <- "http://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
zipFile <- "UCI HAR Dataset.zip"

if (!file.exists(zipFile)) {
  download.file(URL, zipFile, mode = "wb")
}

dataset <- "UCI HAR Dataset"
if(!file.exists(dataset)) {unzip(zipFile)}

#2. Read in the files:
#2.1 Test Set Files:
TestValues <- read.table("./UCI HAR Dataset/test/X_test.txt")
TestActivities <- read.table("./UCI HAR Dataset/test/y_test.txt")
TestSubjects <- read.table("./UCI HAR Dataset/test/subject_test.txt")

#2.2 Training Set Files:
TrainingValues <- read.table("./UCI HAR Dataset/train/X_train.txt")
TrainingActivities <- read.table("./UCI HAR Dataset/train/y_train.txt")
TrainingSubjects <- read.table("./UCI HAR Dataset/train/subject_train.txt")

#2.3 Read in Features (as.is keeps feature names as character):
features <- read.table("./UCI HAR Dataset/features.txt", as.is = TRUE)

#2.4 Read in Activites:
activities <- read.table("./UCI HAR Dataset/activity_labels.txt")
colnames(activities) <- c("ActivityID", "ActivityName")
activities[,2] <- as.character(activities[,2])

#3. Merge the data into one dataset (training rows first, then test):
Activity <- rbind(cbind(TrainingSubjects, TrainingActivities, TrainingValues), cbind(TestSubjects, TestActivities, TestValues))

#3.1 Assign column names to "Activity":
colnames(Activity) <- c("Subjects", "Activities", features[,2])

#4. Extract mean and standard deviation measurements
#   (the escaped parentheses match only "mean()"/"std()" features):
ExtractColumns <- grep("Subject|Activities|mean\\(\\)|std\\(\\)", colnames(Activity), value = TRUE )

#4.1 Save these columns to "Activity":
Activity <- Activity[, ExtractColumns]

#5. Substitute variable names with descriptive names:
#5.1 Remove special characters
colnames(Activity) <- gsub("[\\(\\)-]", "", colnames(Activity))
colnames(Activity) <- gsub("^t", "timeDomain", colnames(Activity))
colnames(Activity) <- gsub("^f", "frequencyDomain", colnames(Activity))
colnames(Activity) <- gsub("Acc", "Accelerometer", colnames(Activity))
colnames(Activity) <- gsub("Gyro", "Gyroscope", colnames(Activity))
colnames(Activity) <- gsub("Mag", "Magnitude", colnames(Activity))
colnames(Activity) <- gsub("mean", "Mean", colnames(Activity))
colnames(Activity) <- gsub("std", "StandardDeviation", colnames(Activity))

#5.2 Fix Typo:
colnames(Activity) <- gsub("BodyBody", "Body", colnames(Activity))

#6. Create second data set with the average of each variable for each activity and each subject:
Tidydata <- Activity %>%
  group_by(Subjects, Activities) %>%
  summarise_all(.funs = mean)

#6.1 Use descriptive activity names to name the activities in the data
#    set (map the numeric activity codes to their labels):
Tidydata$Activities <- factor(Tidydata$Activities, levels = activities[,1], labels = activities[,2])

# output "Tidydata.txt"
write.table(Tidydata, "Tidydata.txt", row.names = FALSE,
            quote = FALSE)
|
f6f6c703f71c640900b84150d267e4c3b5b941ce
|
613f8e9b13208eaad1c973f66cd8f207ea40f719
|
/zillow/zillow2.R
|
0814282b11349dcf4c2b61d53b0fc766da3bb7a3
|
[] |
no_license
|
nav711/kaggle
|
9b0c087d138d8fb439b1c479b3a0401dead68696
|
bce225239a86029fee5ec52cd69ff447cfe271ae
|
refs/heads/master
| 2021-09-02T14:43:47.857468
| 2018-01-03T08:25:11
| 2018-01-03T08:25:11
| null | 0
| 0
| null | null | null | null |
UHC
|
R
| false
| false
| 15,060
|
r
|
zillow2.R
|
#########################################
# Topic: house price prediction (Zillow Kaggle competition)
# Last modified: 2017.09.20
# Author: Yoon Il-geun
#########################################
# Load packages
library(reshape2)
library(data.table)
library(ggplot2)
library(dplyr)
library(caret)
library(e1071)
library(class)
library(h2o)
localH2O=h2o.init()

# Using the 2016/01-2016/12 price data, predict logerror for 2016/10-12 and 2017/10-12
setwd("C:/Users/il-geun/Desktop/zillow")
train1 <- fread("train_2016_v2.csv",header=T) # parcels with transactions => how to handle parcels sold more than once? room attributes do not change
train2 <- fread("train_2017.csv",header=T)
train <- rbind(train1, train2)
properties1 <- fread("properties_2016.csv", header = T, stringsAsFactors = T)
properties2 <- fread("properties_2017.csv", header = T, stringsAsFactors = T)
properties <- rbind(properties1, properties2)
summary(properties)
# Many fields are missing in 2016 but present in 2017 -> treat as corrections;
# also consider remodelling after 2016 as a possible cause of changed values
sub <- fread("sample_submission.csv",header=T)

# For parcels whose coordinates are missing in one year, substitute the
# other year's row (geographic attributes should be identical).
pro_na2 <- properties1[properties1$parcelid %in% properties2[is.na(properties2$latitude),]$parcelid ,]
properties2 <- rbind(properties2[!is.na(properties2$latitude),], pro_na2)
pro_na1 <- properties2[properties2$parcelid %in% properties1[is.na(properties1$latitude),]$parcelid ,]
properties1 <- rbind(properties1[!is.na(properties1$latitude),], pro_na1)
properties1$censustractandblock[properties1$censustractandblock == -1] <- NA # only 3 rows => treat as missing
properties2$censustractandblock[properties2$censustractandblock == -1] <- NA

#summary(properties1$assessmentyear)
#p1 <- properties %>%
#  group_by(parcelid) %>%
#  summarise(diff(structuretaxvaluedollarcnt))
#summary(p1)
#diff(c(NA,99))
#properties <- properties[duplicated(properties) == F ,] #5966749

# Derive year and month of each transaction.
train$year <- as.factor(substr(train$transactiondate,1,4))
train$month <- as.factor(as.numeric(substr(train$transactiondate,6,7)))

# Copy every known logerror value from train into the submission frame.
colnames(train)[1] <- "ParcelId"
sub <- merge(sub, subset(train[train$year=="2016" & train$month == "10",], select = c("ParcelId", "logerror") ), by = "ParcelId", all.x = T)
colnames(sub)[ncol(sub)] <- "log201610"
sub <- merge(sub, subset(train[train$year=="2016" & train$month == "11",], select = c("ParcelId", "logerror") ), by = "ParcelId", all.x = T)
colnames(sub)[ncol(sub)] <- "log201611"
sub <- merge(sub, subset(train[train$year=="2016" & train$month == "12",], select = c("ParcelId", "logerror") ), by = "ParcelId", all.x = T)
colnames(sub)[ncol(sub)] <- "log201612"

m_y <- train %>%
  group_by(year, month) %>%
  summarise(mm = mean(logerror))
plot(m_y$mm) # a month-to-month trend is visible

properties <- properties1

# Convert column types
properties$rawcensustractandblock <- as.factor(properties$rawcensustractandblock)
properties$censustractandblock <- as.factor(properties$censustractandblock)
properties$regionidzip <- as.factor(properties$regionidzip)
properties$regionidcounty <- as.factor(properties$regionidcounty)
properties$regionidcity <- as.factor(properties$regionidcity) # 1803 missing
properties$fips <- as.factor(properties$fips)# Federal Information Processing Standard code
properties$propertylandusetypeid <- as.factor(properties$propertylandusetypeid) # no missing values
#properties$propertycountylandusecode <- as.factor(properties$propertycountylandusecode) # too many distinct codes (1)
#properties$propertyzoningdesc <- as.factor(properties$propertyzoningdesc) # too many distinct codes (31962)
#properties$hashottuborspa <- as.factor(properties$hashottuborspa) # spa flag
#properties$fireplaceflag <- as.factor(properties$fireplaceflag)
#properties$taxdelinquencyflag <- as.factor(properties$taxdelinquencyflag)

# Log-transform latitude/longitude
properties$latitude <- log(properties$latitude)
properties$longitude <- log(abs(properties$longitude))

# Drop unneeded columns
#properties <- subset(properties,select=c(-assessmentyear))
summary(properties)

# Recode NA as an explicit "na" level for the categorical type-id columns.
properties$storytypeid[is.na(properties$storytypeid)] <- "na"
properties$storytypeid <- as.factor(properties$storytypeid)
properties$pooltypeid2[is.na(properties$pooltypeid2)] <- "na"
properties$pooltypeid2 <- as.factor(properties$pooltypeid2)
properties$pooltypeid7[is.na(properties$pooltypeid7)] <- "na"
properties$pooltypeid7 <- as.factor(properties$pooltypeid7)
properties$pooltypeid10[is.na(properties$pooltypeid10)] <- "na"
properties$pooltypeid10 <- as.factor(properties$pooltypeid10)
properties$decktypeid[is.na(properties$decktypeid)] <- "na"
properties$decktypeid <- as.factor(properties$decktypeid)
properties$buildingclasstypeid[is.na(properties$buildingclasstypeid)] <- "na"
properties$buildingclasstypeid <- as.factor(properties$buildingclasstypeid)
#
properties$airconditioningtypeid[is.na(properties$airconditioningtypeid)] <- "na"
properties$airconditioningtypeid <- as.factor(properties$airconditioningtypeid)
properties$architecturalstyletypeid[is.na(properties$architecturalstyletypeid)] <- "na"
properties$architecturalstyletypeid <- as.factor(properties$architecturalstyletypeid)
properties$buildingqualitytypeid[is.na(properties$buildingqualitytypeid)] <- "na"
properties$buildingqualitytypeid <- as.factor(properties$buildingqualitytypeid)
properties$heatingorsystemtypeid[is.na(properties$heatingorsystemtypeid)] <- "na"
properties$heatingorsystemtypeid <- as.factor(properties$heatingorsystemtypeid)
properties$typeconstructiontypeid[is.na(properties$typeconstructiontypeid)] <- "na"
properties$typeconstructiontypeid <- as.factor(properties$typeconstructiontypeid)
properties$regionidneighborhood[is.na(properties$regionidneighborhood)] <- "na"
properties$regionidneighborhood <- as.factor(properties$regionidneighborhood)
# NOTE(review): typeconstructiontypeid is recoded a second time below --
# redundant but harmless.
properties$typeconstructiontypeid[is.na(properties$typeconstructiontypeid)] <- "na"
properties$typeconstructiontypeid <- as.factor(properties$typeconstructiontypeid)

# For rows missing in one year, substitute the other year's values
# (geographic attributes should match).
#properties1[is.na(properties1$fips),]
#properties2[is.na(properties2$fips),]
#properties1[is.na(properties1$latitude),]
#properties2[is.na(properties2$latitude),]
summary(properties)

properties$censustractandblock <- as.character(properties$censustractandblock)
properties$censustractandblock[is.na(properties$censustractandblock)] <- "na"
properties$censustractandblock <- as.factor(properties$censustractandblock)
censustractandblock # NOTE(review): stray bare symbol -- errors ("object not found") when the script is sourced; likely a leftover

# Numeric variables.
# Outliers and missing values must be handled case by case => requires an
# understanding of the data; each variable is treated on its own logic.
# regionidcounty: original note says parcels with roomcnt == 0 concentrate
# in county 3101 (the original comment is ambiguous -- verify).
# typeconstructiontypeid and fireplaceflag only occur where roomcnt >= 1.
summary(as.factor(properties[properties$roomcnt == 0, ]$typeconstructiontypeid ))
properties$typeconstructiontypeid[properties$roomcnt == 0 & properties$typeconstructiontypeid =="11" ] <- NA
summary(as.factor(properties[properties$roomcnt == 0, ]$fireplaceflag )) # part of the outliers above => needs fixing
properties$fireplaceflag[properties$roomcnt == 0 & properties$fireplaceflag != "" ] <- ""

# Per-variable missing-value treatment follows.
# NOTE(review): 'set' is not defined in this script -- presumably an
# interactive merge of train and properties; confirm before sourcing.
plot(basementsqft~logerror,set) # little variance (90232 NA) => set to 0 (assume no basement)
properties$basementsqft[is.na(properties$basementsqft)] <- 0

# finishedsquarefeet 6/12/13/15 are all sub-measures of
# calculatedfinishedsquarefeet (cor ~ 1) => convert them to presence flags.
plot(calculatedfinishedsquarefeet~logerror,set) # pyramid shape (661 NA)
plot(finishedsquarefeet6~logerror,set) #Base unfinished and finished area (89854) # mostly missing, nearly flat
plot(finishedsquarefeet12~logerror,set) #Finished living area # pyramid shape (4679 NA)
plot(finishedsquarefeet13~logerror,set) #Perimeter living area (90242) # mostly missing, flat
plot(finishedsquarefeet15~logerror,set) #Total area (86711) # mostly missing, pyramid shape
cor(properties[,c(12,17)],use ="complete.obs")
cor(properties[,c(12,13)],use ="complete.obs")
cor(properties[,c(12,14)],use ="complete.obs")
cor(properties[,c(12,15)],use ="complete.obs")
properties$finishedsquarefeet6[!is.na(properties$finishedsquarefeet6)] <- 1
properties$finishedsquarefeet6[is.na(properties$finishedsquarefeet6)] <- 0
properties$finishedsquarefeet6 <- as.factor(properties$finishedsquarefeet6)
properties$finishedsquarefeet12[!is.na(properties$finishedsquarefeet12)] <- 1
properties$finishedsquarefeet12[is.na(properties$finishedsquarefeet12)] <- 0
properties$finishedsquarefeet12 <- as.factor(properties$finishedsquarefeet12)
properties$finishedsquarefeet13[!is.na(properties$finishedsquarefeet13)] <- 1
properties$finishedsquarefeet13[is.na(properties$finishedsquarefeet13)] <- 0
properties$finishedsquarefeet13 <- as.factor(properties$finishedsquarefeet13)
properties$finishedsquarefeet15[!is.na(properties$finishedsquarefeet15)] <- 1
properties$finishedsquarefeet15[is.na(properties$finishedsquarefeet15)] <- 0
properties$finishedsquarefeet15 <- as.factor(properties$finishedsquarefeet15)

plot(finishedfloor1squarefeet~logerror,set) # nearly flat, teardrop shape (83419 NA)
plot(finishedsquarefeet50~logerror,set) # Size of the finished living area on the first (entry) floor of the home (83419) # mostly missing, flat
cor(properties[,c(11,16)],use ="complete.obs")
properties$finishedfloor1squarefeet[is.na(properties$finishedfloor1squarefeet)] <- 0
properties <- subset(properties, select= -c(finishedsquarefeet50))

# garagecarcnt: when absent, the garage sqft is 0 as well => unify missing values as -999
boxplot(logerror ~ addNA(garagecarcnt), set)
plot(garagecarcnt~logerror,set) # mostly missing (60338), pyramid shape
plot(garagetotalsqft~logerror,set) # mostly missing (60338), pyramid shape
properties$garagecarcnt[is.na(properties$garagecarcnt)] <- -999
properties$garagetotalsqft[is.na(properties$garagetotalsqft)] <- -999

#summary(properties$fireplaceflag) ,nrow(properties[!is.na(properties$fireplacecnt),]) # why do fireplace flag and count disagree?
plot(fireplacecnt~logerror,set) # mostly missing (80668), pyramid shape; missing fireplace count -> 0
properties$fireplacecnt[is.na(properties$fireplacecnt)] <- 0

# Pool-related variables (mostly uninformative)
summary(properties[,28:32])
summary(properties[is.na(properties$poolcnt)]$poolsizesum)
plot(poolsizesum~logerror,set) # mostly missing (89306), nearly flat
properties$poolsizesum[is.na(properties$poolcnt)] <- 0
properties$poolcnt[is.na(properties$poolcnt)] <- 0

# Yard / storage-shed areas => assume missing means 0
summary(properties[,46:47])
cor(properties[,c(46:47)],use ="complete.obs")
which(colnames(properties) == "yardbuildingsqft26")
plot(yardbuildingsqft17~logerror,set) # nearly flat, teardrop (87629 NA); patio in yard
plot(yardbuildingsqft26~logerror,set) # nearly flat (90180 NA); storage shed / building in yard
properties$yardbuildingsqft17[is.na(properties$yardbuildingsqft17)] <- 0
properties$yardbuildingsqft26[is.na(properties$yardbuildingsqft26)] <- 0

#plot(regionidneighborhood~logerror,set) # (54263) mostly similar -> neighborhood id; probably not needed
#summary(as.factor(properties$regionidneighborhood))
plot(unitcnt~logerror,set) # pyramid shape (31922 NA)
plot(numberofstories~logerror,set) # 69705 NA; values 1,2,3,4
plot(lotsizesquarefeet~logerror,set) # pyramid shape (10150 NA); unexplained
properties$unitcnt[is.na(properties$unitcnt)] <- 0
properties$numberofstories[is.na(properties$numberofstories)] <- 0
properties$lotsizesquarefeet[is.na(properties$lotsizesquarefeet)] <- 0

summary(set[is.na(set$calculatedbathnbr),]$bathroomcnt)
plot(threequarterbathnbr~logerror,set) # (78266 NA), pyramid; 3/4 bath (shower + sink + toilet) => values start at 1, so missing assumed 0
properties$threequarterbathnbr[is.na(properties$threequarterbathnbr)] <- 0
plot(fullbathcnt~logerror,set) # pyramid shape (1182 NA) => 1165 of them have no bathroom at all, leaving 17 true NAs
plot(calculatedbathnbr~logerror, tr) # pyramid shape (1182 NA) => same: 1165 no-bathroom rows, 17 remain ('tr' undefined here -- verify)
properties$calculatedbathnbr[properties$bathroomcnt==0] <- 0
properties$fullbathcnt[properties$bathroomcnt==0] <- 0

plot(yearbuilt~logerror,set) # 756 NA; the older the home, the smaller the error; otherwise unremarkable
boxplot(logerror ~ addNA(yearbuilt), set) # median imputation looks reasonable
properties$yearbuilt[is.na(properties$yearbuilt)] <- median(properties$yearbuilt, na.rm=T)

# Assume a missing delinquency year means "no delinquency"
plot(taxdelinquencyyear~logerror,set)# year the unpaid property tax was due: inverse pyramid (88492 NA); values 6~15, 99, NA
max(properties[properties$taxdelinquencyyear<50,]$taxdelinquencyyear)
properties$taxdelinquencyyear[properties$taxdelinquencyyear>50] <- 0
properties$taxdelinquencyyear[is.na(properties$taxdelinquencyyear)] <- 0
summary(properties)

# Handle rows where ~99% of fields are missing
summary(properties[is.na(properties$roomcnt),]) # rows with propertyzoningdesc present / regionidcounty == 3101: treat roomcnt as 0
properties <- as.data.frame(properties)
# NOTE(review): `2:which(...)-1` parses as (2:k) - 1, i.e. 1:(k-1), which
# includes column 1 (parcelid) -- confirm whether that was intended.
for(i in c(2:which(colnames(properties)=="roomcnt")-1, (which(colnames(properties)=="roomcnt")+1):ncol(properties))) {
  if(is.numeric(properties[,i])) {
    properties[,i][is.na(properties$roomcnt)] <- median(unlist(properties[,i]), na.rm = T)
  }
}
for(i in c(2:which(colnames(properties)=="roomcnt")-1, (which(colnames(properties)=="roomcnt")+1):ncol(properties))) {
  if(is.factor(properties[,i])) {
    properties[,i] <- as.character(properties[,i])
    properties[,i][is.na(properties$roomcnt)] <- "na"
    properties[,i] <- as.factor(properties[,i])
  }
}
properties$roomcnt[is.na(properties$roomcnt)] <- median(properties$roomcnt, na.rm = T)

# Columns with few NAs are imputed from predictions
# (many of these values are highly correlated)
cor(as.matrix(subset(set, select = c( taxamount, landtaxvaluedollarcnt, taxvaluedollarcnt, structuretaxvaluedollarcnt))), use = "complete.obs")
plot(calculatedfinishedsquarefeet~logerror,set) # pyramid shape (661 NA)
plot(structuretaxvaluedollarcnt~logerror,set)# pyramid shape (380 NA)
plot(taxvaluedollarcnt~logerror,set)# pyramid shape (1 NA)
plot(landtaxvaluedollarcnt~logerror,set)# pyramid shape (1 NA)
plot(taxamount~logerror,set) # pyramid shape; the smaller the tax, the larger the variance (6 NA)
library(DMwR)
imp <- knnImputation(subset(properties, select = c(calculatedbathnbr, calculatedfinishedsquarefeet,fullbathcnt,poolsizesum,
                                                   taxamount, landtaxvaluedollarcnt, taxvaluedollarcnt, structuretaxvaluedollarcnt)))

real_set <- merge(train, properties, by="parcelid", all.x = T)
|
88c4117e4ea4243191a74e28df01327859495838
|
2469e9f76fb9454942ee729b4ebb86ab1bb72920
|
/Shiny Web App.R
|
527b8afdf5f273bd2efdb0cbd21d6cf58b8df2e4
|
[] |
no_license
|
armanaghamyan/business_intelligence_project
|
5be96a5927f6f9f4c7999b4e278f0a0ef9857d3f
|
dd92c6e2c6e803a103e18abc81cd20cbaf36cc96
|
refs/heads/master
| 2020-12-21T05:19:08.652829
| 2020-01-26T14:04:30
| 2020-01-26T14:04:30
| 236,320,052
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,283
|
r
|
Shiny Web App.R
|
# Shiny dashboard comparing South Caucasus countries (Armenia, Georgia,
# Azerbaijan) across population, GDP, tourism and sports indicators.
library("shinydashboard")
library("shiny")
library("leaflet")
# The sourced scripts define the map and plot objects used in the server
# (armenia_map, *_density, *_plot, ...) -- presumably they also load plotly,
# since plotlyOutput/renderPlotly are used without library(plotly) here.
source("maps.R")
source("Plots.R")

# UI: sidebar radio buttons select which plot each comparison tab shows.
ui <- dashboardPage(skin = "black",
                    dashboardHeader(title = "South Caucasus"),
                    dashboardSidebar(
                      sidebarMenu(
                        menuItem("Population",
                                 radioButtons("btn",
                                              label=NULL,
                                              choices=list("Population"= "pop5" ,
                                                           "Migration"= "mig" ,
                                                           "Urban Population"="up"))),
                        menuItem("GDP",
                                 radioButtons("btn2",
                                              label=NULL,
                                              choices=list("Nominal"= "nominal" ,
                                                           "Per Capita"="pc",
                                                           "GDP Change"="gdp"))),
                        menuItem("Tourism",
                                 radioButtons("btn1",
                                              label=NULL,
                                              choices=list("Visitors"= "visitors" ,
                                                           "Revenue"="revenue"))),
                        menuItem("Sports",
                                 radioButtons("btn3",
                                              label = NULL,
                                              choices=list("Chess"= "chess" ,
                                                           "Medals"="medals")
                                 ))
                      )),
                    # NOTE(review): dashboardBody() has no 'color' argument in
                    # shinydashboard -- it is passed through as a tag attribute;
                    # verify it has the intended effect.
                    dashboardBody(color = "blue",
                                  fluidRow(tabBox(width = 12,
                                                  # Overview tab: one country map + one density plot per row.
                                                  tabPanel("Overview",
                                                           fluidRow(column(4,box(width =12,height = "455px",
                                                                                 status = "primary",
                                                                                 leafletOutput("map1"))),
                                                                    column(8,box(width =12,height = "455px",
                                                                                 status = "primary",
                                                                                 plotlyOutput("dens1")
                                                                    ))),
                                                           fluidRow(column(4,box(width =12,height = "455px",
                                                                                 status = "primary",
                                                                                 leafletOutput("map2"))),
                                                                    column(8,box(width =12,height = "455px",
                                                                                 status = "primary",
                                                                                 plotlyOutput("dens2")
                                                                    ))),
                                                           fluidRow(column(4,box(width =12,height = "455px",
                                                                                 status = "primary",
                                                                                 leafletOutput("map3"))),
                                                                    column(8,box(width =12,height = "455px",
                                                                                 status = "primary",
                                                                                 plotlyOutput("dens3")
                                                                    )))
                                                  ),
                                                  # Comparison tab: one sub-tab per indicator group.
                                                  tabPanel("Comparison",
                                                           tabBox(width = 12,
                                                                  tabPanel("Population",
                                                                           fluidRow(column(1),
                                                                                    column(10,box(width =12,height = "455px",
                                                                                                  status = "primary",
                                                                                                  plotlyOutput("output1")
                                                                                    )),
                                                                                    column(1))),
                                                                  tabPanel("GDP",
                                                                           fluidRow(column(1),
                                                                                    column(10,box(width =12,height = "455px",
                                                                                                  status = "primary",
                                                                                                  plotOutput("output2")
                                                                                    )),
                                                                                    column(1))),
                                                                  tabPanel("Military Budget",
                                                                           fluidRow(column(1),
                                                                                    column(10,box(width =12,height = "455px",
                                                                                                  status = "primary",
                                                                                                  plotlyOutput("output3")
                                                                                    )),
                                                                                    column(1))),
                                                                  tabPanel("Crime",
                                                                           fluidRow(column(1),
                                                                                    column(10,box(width =12,height = "455px",
                                                                                                  status = "primary",
                                                                                                  plotlyOutput("output4")
                                                                                    )),
                                                                                    column(1))),
                                                                  tabPanel("Tourism",
                                                                           fluidRow(column(1),
                                                                                    column(10,box(width =12,height = "455px",
                                                                                                  status = "primary",
                                                                                                  plotlyOutput("output5")
                                                                                    )),
                                                                                    column(1))),
                                                                  tabPanel("Sports",
                                                                           fluidRow(column(1),
                                                                                    column(10,box(width =12,height = "455px",
                                                                                                  status = "primary",
                                                                                                  plotlyOutput("output6")
                                                                                    )),
                                                                                    column(1))))
                                                  )
                                  )
                                  ),
                                  # Custom background color for the dashboard body.
                                  tags$head(
                                    tags$style(HTML("
.content-wrapper {
background-color: #2F4858 !important;
}
"))
                                  )
                    )
)

server <- function(input,output){
  # Population tab: pick the plot matching the selected radio button.
  output$output1 <- renderPlotly({
    if (input$btn == "mig"){
      migration_plot
    } else if (input$btn =="up"){
      urban_plot
    }else if (input$btn =="pop5"){
      population_plot
    }
  })
  # GDP tab (note: rendered with renderPlot, matching plotOutput("output2")).
  output$output2 <- renderPlot({
    if (input$btn2 == "nominal"){
      nominal_gdp
    } else if (input$btn2 =="gdp"){
      change_plot
    } else if(input$btn2 =="pc"){
      pc_plot
    }
  })
  output$output3 <- renderPlotly(military_plot)
  output$output4 <- renderPlotly(crime_plot)
  output$output5 <- renderPlotly({
    if (input$btn1 == "visitors"){
      tourism_plot
    } else if (input$btn1 =="revenue"){
      revenue_plot
    }
  })
  output$output6 <- renderPlotly({
    if (input$btn3 == "chess"){
      chess_plot
    } else if(input$btn3 == "medals"){
      medal_plot
    }
  })
  # Country maps for the Overview tab.
  output$map1 <- renderLeaflet(armenia_map)
  output$map2 <- renderLeaflet(georgia_map)
  output$map3 <- renderLeaflet(azer_map)
  # NOTE(review): the UI declares plotlyOutput("dens1"..."dens3") but these
  # are rendered with renderLeaflet -- widget types must match for the
  # outputs to display; verify what *_density objects are (leaflet vs plotly).
  output$dens1 <- renderLeaflet(am_density)
  output$dens2 <- renderLeaflet(ge_density)
  output$dens3 <- renderLeaflet(az_density)
}

shinyApp(ui = ui, server = server)
|
07ca561f48b69cf5a7886e854aeba245081619e0
|
8df4db71973c9688b731d3ac146f841bf463b4d7
|
/plot1.R
|
cfbe0356026c500df9df9d8b1a609aae095e7844
|
[] |
no_license
|
DanielFletcher1/ExData_Plotting1
|
33baa90eeed16b8206193ed11442d36a929b7fd0
|
54264e65cfe2f9a379507b5fe5e88803f8f0e97e
|
refs/heads/master
| 2021-01-24T02:52:51.862958
| 2014-07-13T07:33:28
| 2014-07-13T07:33:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,305
|
r
|
plot1.R
|
## Exploratory Data Analysis course project, plot 1: histogram of
## Global Active Power for the dates 2007-02-01 and 2007-02-02.

## Download and unzip the file:
download.file("https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip", "explor1.zip")
unzip("explor1.zip")

## Identify the dataset's column classes and header names for faster/easier import:
tab5rows <- read.table("household_power_consumption.txt", header = TRUE, nrows = 5, sep = ";")
classes <- sapply(tab5rows, class)
colnames <- names(tab5rows)

## Use pipe() with findstr to subset the data to only dates 1/2/2007 and 2/2/2007
## NOTE: findstr within pipe() works only for Windows - alternative is to read in full
## dataset and then subset using subset <- data[grep("^[1-2]/2/2007", data[,1]),]
data <- read.table(pipe("findstr /B /R ^[1-2]/2/2007 household_power_consumption.txt"),
                   header = F, sep=";", na.strings = "?", colClasses = classes,
                   col.names = colnames)

## Convert "Date" and "Time" variables from class == factor to class == date & time
data[,1] <- as.Date(data[,1], "%d/%m/%Y")
## NOTE(review): this stores a one-element list of POSIXlt into the column;
## a plain POSIXct vector would be more conventional -- verify downstream use.
data[,2] <- list(strptime(paste(data[,1], data[,2]), "%Y-%m-%d %H:%M:%S"))

## Create histogram from the Global_active_power variable
png(file = "plot1.png")
with(data, hist(data$Global_active_power, main = "Global Active Power", xlab = "Global Active Power (kilowatts)", col = "red"))
dev.off()
|
ac8693b98b22c398edd7c6803522b1f339812425
|
f2afa5bca4d388d0bbb712d7a05db90f6bc987ba
|
/man/CalcOnGroups2.Rd
|
de0b9b408fe74e2a14f216e4d5c896559cdddc09
|
[] |
no_license
|
khharut/HistAvg2
|
273b404233f8ea4151785d086309f11eb5ed4686
|
3767d5f4b8fdd24906ca6731ccbfe5884dae483c
|
refs/heads/master
| 2021-10-10T16:32:37.059879
| 2019-01-13T22:52:38
| 2019-01-13T22:52:38
| 88,608,726
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 774
|
rd
|
CalcOnGroups2.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Divider.R
\name{CalcOnGroups2}
\alias{CalcOnGroups2}
\title{Does calculation on groups}
\usage{
CalcOnGroups2(div_groups, func_calc, func_def = function(w) median(w, na.rm =
TRUE))
}
\arguments{
\item{div_groups}{list of vectors on which statistical calculation should be done}
\item{func_calc}{name of a statistical or other function that can be applied to a vector of values}
\item{func_def}{a function name that should be used in case when func_calc gives NA values, default value is median}
}
\value{
results of calculation for each group
}
\description{
Calculates some statistics defined by functions on list of vectors and returns it as a list of vectors
}
\author{
Harutyun Khachatryan
}
|
dd070da7880bcf525de691d5af5ef7786b1e859d
|
c1f2a0b1726295d8a698be6b386e7e5d6cb485da
|
/tentativafracassada.R
|
dd87cb7a043559119efa3a3e6001a01d5e7ba38e
|
[] |
no_license
|
cissaco/gee-negative-binomial
|
bdaf85ad2e491ae781e7dbf4ef2e1f6784d6ec86
|
e1de98aeddac9b08a52a9a3779461f272e439166
|
refs/heads/master
| 2021-01-10T13:41:44.243713
| 2016-01-21T19:55:41
| 2016-01-21T19:55:41
| 50,131,935
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 767
|
r
|
tentativafracassada.R
|
# "tentativa fracassada" (Portuguese: "failed attempt"): exploratory GEE /
# negative-binomial fit on the German health-care panel data (rwm5yr).
rm(list=ls())  # NOTE(review): clearing the workspace inside a script is discouraged
library(COUNT)
library(geepack)
library(gee)
data(rwm5yr)
attach(rwm5yr)
# NOTE(review): this call cannot succeed as written -- 'gee2' is not a
# function exported by gee/geepack, 'nu' is only defined further down, and
# negative.binomial() comes from MASS, which is not loaded here. Presumably
# this is the "failed attempt" the file name refers to.
tent1 = gee2(docvis ~ female + factor(edlevel), data=rwm5yr,family = negative.binomial(nu),id=id,corstr = "independence")

nbg <- glm.nb(docvis ~ female + factor(edlevel), data=rwm5yr)
summary(nbg) # Theta is the 1/alpha of the book, i.e. nu
nu = summary(nbg)$theta
alpha = 1/nu
c = coef(nbg)
# mu_ij-hat is obtained via fitted.values (manual computation shown for comparison)
mu = exp(c[1]+c[2]*female+c[3]*edlevel2+c[4]*edlevel3+c[5]*edlevel4)
head(fitted.values(nbg))
head(mu)
names(nbg)
names(summary(nbg))
# Pearson residuals
r = residuals(nbg,type="pearson")
head(residuals(nbg,type="pearson"))
# For computing phi
N = length(docvis)
p = length(c)
phi = 1/(sum(r^2)/(N-p))
phi
# For computing alpha (never completed)
|
a98cc5d1cd4a317f81fc3d78a461a78c2528528a
|
404c1773175c9504cf4114b24e8343933a3e7d9b
|
/Rcode/R/sportradar/arkiv/regression_16.R
|
11fca241a91bf9081b9584f99d483d591a2d58e4
|
[] |
no_license
|
willisgram/master_thesis
|
4e0cb52c6d19cb97948bb4839d38efd3fe343d12
|
f6243ad401622e35370aecdd112e802c88ab756d
|
refs/heads/master
| 2021-05-02T06:12:15.785795
| 2018-05-03T16:08:00
| 2018-05-03T16:08:00
| 120,882,958
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,175
|
r
|
regression_16.R
|
#################
# Create regressors DF
#################
# Column-bind the per-round feature tables (points, opponent, team, cost,
# position, transfers in/out) built elsewhere into one regressor table.
regressors_16 <- cbind(points_round_k_16,opponent_round_k_16,team_round_k_16,cost_round_k_16,
pos_round_k_16,trans_in_round_k_16,trans_out_round_k_16)
regressors_16$index <- as.factor(regressors_16$index)
# Hard-coded positional train/test split — needs generalization.
regressors_train_16 <- regressors_16[1:6250,] %>% na.omit()
regressors_test_data_16 <- regressors_16[6251:6875,] %>% na.omit()
# Keep only test rows whose player index was seen during training.
regressors_test_data_16 <- regressors_test_data_16 %>% filter(index %in% regressors_train_16$index)
regressors_test_16 <- regressors_test_data_16[,names(regressors_test_data_16) != "realized"]
#################
# Fit regression model
#################
## Fit linear model of realized points on player index plus lagged features.
## NOTE(review): trans_in_prev_2 appears twice and trans_in_prev_1 is
## absent from the formula — likely a typo; confirm intended regressors.
options(stringsAsFactors = T)
model_3 <- lm(realized ~ index + prev_2 + prev_1 + opponent + team + cost + pos + trans_in_prev_2 +
trans_in_prev_2 + trans_out_prev_1 + trans_out_prev_2,
data = regressors_train_16)
summary(model_3)
## Predict (to find best out-of-sample fit)
predictions <- predict(object = model_3,newdata = regressors_test_16)
compare <- data.frame(pred = predictions, real = regressors_test_data_16$realized )
|
3f76df1970d56b6e459b078d31e340c75c59fd55
|
038795d15a01edf43666d2cce836612660a50ad0
|
/man/ol_user_get_apps.Rd
|
9d24c5aa36d2a5a30316a78f070a2361b4b3698e
|
[
"MIT"
] |
permissive
|
akgold/onelogin
|
1d919d9863ed9877d1a2b2ef583136e1159dcaae
|
3d5a5f26f8893e2a3faa6ab24d73d9f4b23d7e79
|
refs/heads/master
| 2020-06-23T03:17:25.428226
| 2019-08-29T21:08:57
| 2019-08-29T21:08:57
| 198,491,663
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 424
|
rd
|
ol_user_get_apps.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/users.R
\name{ol_user_get_apps}
\alias{ol_user_get_apps}
\title{Get Apps for User}
\usage{
ol_user_get_apps(con, user_id)
}
\arguments{
\item{con}{a 'OneLogin' connection}
\item{user_id}{user id}
}
\value{
A tibble of user data (one row)
}
\description{
Get Apps for User
}
\examples{
if (interactive()) ol_user_get_apps(onelogin(), 54400533)
}
|
79738f61a4f4f1ea361e112c47b717a10b1fb201
|
db218968de67312de6ccc2318d08384d5b4fa602
|
/AfSoilGrids250mValidation/AfSoilGrids_OFRA_comparison.R
|
e42ba3a4246d6293c56c8cbc6793cb3154660555
|
[] |
no_license
|
lsuiyanka/ISRIC-AfSIS
|
8952f14121d94997e897682b5316939899f974d3
|
463afed3b76ec1bcfa639631fcfa4bfca3db0e7c
|
refs/heads/master
| 2020-04-07T19:20:12.541470
| 2015-09-28T07:28:14
| 2015-09-28T07:28:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,276
|
r
|
AfSoilGrids_OFRA_comparison.R
|
## Comparison AfSoilGrids250m validation using OFRA samples:
## by Tom.Hengl@isric.org
library(rgdal)
library(plotKML)
library(GSIF)
library(RSAGA)
library(raster)
library(plyr)
library(sp)
library(hexbin)
library(gridExtra)
library(lattice)
library(grDevices)
rm(list=ls(all=TRUE))
load("OFRA.Rdata")
# Read OFRA soil-pH point observations (Windows network drive path).
ofra <- read.csv("G:\\soilstorage\\SoilData\\OFRA\\OFRA_RespFuncRev_9-10-15.csv")
str(ofra)
val.PHI <- ofra[,c("Text_ID_Fixed","Latitude","Longitude","Soil.pH")]
val.PHI$Longitude <- as.numeric(paste(val.PHI$Longitude))
# Keep points with valid coordinates inside an African bounding box and pH > 2.
sel <- !is.na(val.PHI$Longitude) & val.PHI$Latitude>-90 & val.PHI$Latitude<35 & val.PHI$Longitude > -20 & val.PHI$Longitude < 120 & val.PHI$Soil.pH > 2 & !is.na(val.PHI$Soil.pH)
val.PHI <- val.PHI[sel,]
coordinates(val.PHI) <- ~ Longitude + Latitude
proj4string(val.PHI) <- CRS("+proj=longlat +datum=WGS84")
## plot in Google Earth:
# Classify pH*10 into the GSIF PHIHOX legend breaks and export a KML.
PHI.brks <- c(soil.legends[["PHIHOX"]]$MIN[1], soil.legends[["PHIHOX"]]$MAX)
val.PHI$Value <- cut(val.PHI$Soil.pH*10, PHI.brks)
PHI.pal <- as.character(soil.legends[["PHIHOX"]]$COLOR)
kml(val.PHI, folder.name = "pH (0-20 cm)", subfolder.name = "Observed", shape=shape, colour=Value, colour_scale=PHI.pal, file.name=paste0("OFRA_PHIHOX_20cm.kml"), labels=Value,size=.7, kmz=TRUE)
## Overlay / compare pH:
# Reproject to Lambert azimuthal equal area and extract AfSoilGrids values.
val.PHI.xy <- spTransform(val.PHI, CRS("+proj=laea +lat_0=5 +lon_0=20 +x_0=0 +y_0=0 +units=m +ellps=WGS84 +datum=WGS84"))
ov <- extract.list(path="H:\\AFSIS\\zipped", y=c("af_PHIHOX_T__M_sd1_250m.tif", "af_PHIHOX_T__M_sd2_250m.tif"), x=val.PHI.xy, ID="Text_ID_Fixed")
# Depth-weighted average of the two standard depths, back-scaled from pH*10.
# NOTE(review): sd1 is used in BOTH terms although sd2 was extracted above —
# the second term presumably should use af_PHIHOX_T__M_sd2_250m.tif; confirm.
ov$Soil.pH.soilgrids <- ((0.05*ov$af_PHIHOX_T__M_sd1_250m.tif + 0.1*ov$af_PHIHOX_T__M_sd1_250m.tif)/.15)/10
ov$Soil.pH <- val.PHI$Soil.pH
## RMSE:
sqrt(sum((ov$Soil.pH.soilgrids-ov$Soil.pH)^2, na.rm=TRUE)/nrow(ov))
## 0.70
## plot the differences:
# Panel function: hexbin scatter plus the 1:1 reference line.
pfun <- function(x,y, ...){
panel.hexbinplot(x,y, ...)
panel.abline(0,1,lty=1,lw=2,col="black")
}
plt.OFRA <- hexbinplot(ov$Soil.pH.soilgrids~ov$Soil.pH, colramp=colorRampPalette(R_pal[["bpy_colors"]]), main="Soil pH (2197 points)", xlab="measured (OFRA)", ylab="predicted (AfSoilGrids250m)", type="g", lwd=1, lcex=8, inner=.2, cex.labels=.8, xlim=c(3,11), ylim=c(3,11), asp=1, xbins=25, density=40, panel=pfun) ## range(ov$Soil.pH)
plot(plt.OFRA)
save.image("OFRA.Rdata")
|
699073b42ce35edc2c7f3b3ca1f6d4a67a750812
|
f673b6686a887ec0b15da60b236d9a194d09e318
|
/man/survival_simulation_driver.Rd
|
4cd5fb4d53826bb1d3a8626415093bbb71f4f9b8
|
[
"MIT"
] |
permissive
|
mrguyperson/predpackplus
|
353a241ec0ae4a53da5521d387a2718434a1b33e
|
bbdc1ff517355a555753ce13cba71b5ec783a105
|
refs/heads/master
| 2023-07-19T05:24:59.312787
| 2021-09-16T15:20:24
| 2021-09-16T15:20:24
| 406,389,150
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,268
|
rd
|
survival_simulation_driver.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/prey_survival_simulation.R
\name{survival_simulation_driver}
\alias{survival_simulation_driver}
\title{Run a Full Simulation}
\source{
defaults based on Steel et al. 2020. "Applying the mean free-path length model to juvenile Chinook salmon migrating in the Sacramento River, California"
and Michel et al. 2018. "Non-native fish predator density and molecular-based diet estimates suggest differing effects of predator species on Juvenile Salmon in the San Joaquin River, California"
}
\usage{
survival_simulation_driver(
number_of_fish,
mean_length,
n_transects,
sd_length = (1.7/14) * mean_length,
transect_length = 1000,
lit_zone_size = 5,
channel_width = 100,
grid_size = 15,
reaction_dis = 0.5
)
}
\arguments{
\item{number_of_fish}{number (pos integer) of prey fish desired}
\item{mean_length}{mean length of fish in cm}
\item{n_transects}{integer of transects in the model}
\item{sd_length}{std dev of fish length in cm; default is (1.7/14) of the mean length, so it scales with the mean}
\item{transect_length}{length of each transect in meters; default is 1000}
\item{lit_zone_size}{the size of the littoral zone (i.e., nearshore area) in meters; default is 5}
\item{channel_width}{width of the channel in meters; default is 100}
\item{grid_size}{length of side of raster grid in meters; default is 15}
\item{reaction_dis}{maximum distance (in m) away from a predator that can trigger an encounter; default is 0.50}
}
\value{
the proportion of surviving fish
}
\description{
Runs a full simulation with a user-specified number of fish. Users can also adjust fish
mean length and sd, environment size, grid size, and predator reaction distance.
The model runs through the following:
calculates predators and their positions,
calculates grid cells and their encounter probabilities,
calculates a unique path for each fish,
simulates and resolves encounters for each fish in each cell,
determines survival after all fish have gone through each cell.
}
\details{
The return value is the proportion of survivors.
}
\note{
this function can be parallelized; e.g., by setting plan(multisession)
}
\examples{
survival_simulation_driver (number_of_fish = 20, mean_length = 10, n_transects = 5)
}
|
67fcc40a5e1133d0ef1da9609f90035daa73538d
|
6fb04083c9d4ee38349fc04f499a4bf83f6b32c9
|
/man/t.test.Rd
|
b0b1e7b6afd1c92b2934f524b5aece33701c39ce
|
[] |
no_license
|
phani-srikar/AdapteR
|
39c6995853198f01d17a85ac60f319de47637f89
|
81c481df487f3cbb3d5d8b3787441ba1f8a96580
|
refs/heads/master
| 2020-08-09T10:33:28.096123
| 2017-09-07T09:39:25
| 2017-09-07T09:39:25
| 214,069,176
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 905
|
rd
|
t.test.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/t.test.R
\name{t.test}
\alias{t.test}
\title{Student's t-Test}
\usage{
t.test(x, y = NULL, alternative = c("two.sided"), mu = 0,
paired = FALSE, var.equal = FALSE, conf.level = 0.95, tails = 2, ...)
}
\arguments{
\item{x}{FLVector}
\item{y}{FLVector}
\item{mu}{The value of hypothesized mean}
\item{var.equal}{a logical variable indicating whether to treat the two variances as being equal.
If TRUE then the pooled variance is used to estimate the
variance otherwise the Welch (or Satterthwaite) approximation to the degrees of freedom is used.}
}
\value{
A list with class "htest" containing the statistic and p-values.
}
\description{
Performs one and two sample t-tests on vectors of data.
}
\examples{
flx<-as.FLVector(rnorm(100))
fly<-as.FLVector(rnorm(100))
t.test(flx)
t.test(flx,fly)
t.test(flx,fly,var.equal=FALSE)
}
|
1c607ff3024fde9c09580e894975c9ae3b7707de
|
19c861d31f78661a83c38a133edd8c4f6eac0336
|
/man/quantileSE.Rd
|
8efe2f5dca49e4a7a6a4f2562c28289618c8f6e8
|
[] |
no_license
|
cran/broman
|
8db38ff459ffda1645c01cb145b15aa4ea8e3647
|
90ae16237e25ee75600b31d61757f09edf72ad91
|
refs/heads/master
| 2022-07-30T16:46:40.198509
| 2022-07-08T14:30:09
| 2022-07-08T14:30:09
| 17,694,892
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,430
|
rd
|
quantileSE.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/quantileSE.R
\name{quantileSE}
\alias{quantileSE}
\title{Sample quantiles and their standard errors}
\usage{
quantileSE(x, p = 0.95, bw = NULL, na.rm = TRUE, names = TRUE)
}
\arguments{
\item{x}{Numeric vector whose sample quantiles are wanted.}
\item{p}{Numeric vector with values in the interval [0,1]}
\item{bw}{Bandwidth to use in the density estimation.}
\item{na.rm}{Logical; if true, and \code{NA} and \code{NaN}'s are
removed from \code{x} before the quantiles are computed.}
\item{names}{Logical; if true, the column names of the result are set to
the values in \code{p}.}
}
\value{
A matrix of size 2 x \code{length(p)}. The first row contains the
estimated quantiles; the second row contains the corresponding
estimated standard errors.
}
\description{
Calculate sample quantiles and their estimated standard errors.
}
\details{
The sample quantiles are calculated with the function
\code{\link[stats:quantile]{stats::quantile()}}.
Standard errors are obtained by the asymptotic approximation described
in Cox and Hinkley (1974). Density values are estimated using a
kernel density estimate with the function \code{\link[stats:density]{stats::density()}}.
}
\examples{
quantileSE(rchisq(1000,4), c(0.9,0.95))
}
\seealso{
\code{\link[stats:quantile]{stats::quantile()}}, \code{\link[stats:density]{stats::density()}}
}
\keyword{univar}
|
0d162a624954447b873a2917c81271449566f150
|
e05e827a1720dc0e28a5a62e14ea3d7bc7b5f787
|
/files/RClient/Ejercicio_solucion.R
|
854136c4614b2f43af69b5f1687eecacd3177e72
|
[] |
no_license
|
maorjuela73/SQL-CD
|
7af7748aa52200b8c96179a347293c38b72780f4
|
17a64c953e0bab448372e12889a2a768ece3fa32
|
refs/heads/master
| 2020-09-05T11:03:50.828292
| 2020-04-28T22:31:53
| 2020-04-28T22:31:53
| 220,084,006
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,167
|
r
|
Ejercicio_solucion.R
|
# Connect to the PostgreSQL "dvdrental" sample database and plot revenue
# aggregates with ggplot2.
library(DBI)
library(RPostgres)
library(ggplot2)
# NOTE(review): hard-coded credentials — acceptable for a class exercise only.
pw<- {
"postgres"
}
con <- dbConnect(RPostgres::Postgres()
, host='localhost'
, port='5432'
, dbname='dvdrental'
, user='postgres'
, password=pw)
rm(pw) # removes the password
dbListTables(con)
dbListFields(con, "payment")
dbReadTable(con, "payment")
# Revenue by customer status (active/inactive)
res <- dbSendQuery(con, "select active, sum(amount) from payment pay join customer cus on pay.customer_id = cus.customer_id group by active;")
data <- dbFetch(res)
plot <- ggplot(data) + geom_col(aes(x = active, y = sum))
dbClearResult(res)
# Top 10 revenue by country
res <- dbSendQuery(con, "select country, sum(amount) from payment pay join customer cus on pay.customer_id = cus.customer_id
join address adr on cus.address_id=adr.address_id join city cty on adr.city_id=cty.city_id join country ctr on cty.country_id = ctr.country_id
group by country
order by sum(amount) desc
limit 10;")
data <- dbFetch(res)
plot <- ggplot(data) + geom_col(aes(x = country, y = sum))
dbClearResult(res)
# Disconnect from the database
dbDisconnect(con)
|
04abcaca824e27c1a779842364a9f890cb9b8a48
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/mlbench/examples/plot.mlbench.Rd.R
|
2673959ee3c92ff9409ce410d94986bfa881bb1d
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 250
|
r
|
plot.mlbench.Rd.R
|
library(mlbench)
### Name: plot.mlbench
### Title: Plot mlbench objects
### Aliases: plot.mlbench
### Keywords: hplot
### ** Examples
# 6 normal classes
p <- mlbench.2dnormals(500,6)
plot(p)
# 4-dimensional XOR
p <- mlbench.xor(500,4)
plot(p)
|
8e5db85f2e7cd7f6328b792adcf1d97ec487edfb
|
c02b1b6252a59c992a0f3ebb542f08fb0cf261a4
|
/R/get_tournaments_pro.R
|
39c38f3a3053de0ff5c87c870e8eaa1174711db8
|
[] |
no_license
|
systats/lolR
|
d57b04d592b40906b70f0da1acc9a332b965aa23
|
f2b38453460cac1c9fe24861603e75bebf549669
|
refs/heads/master
| 2020-03-18T07:13:38.225502
| 2018-06-02T17:13:56
| 2018-06-02T17:13:56
| 134,439,850
| 0
| 2
| null | 2018-05-31T01:11:19
| 2018-05-22T15:58:05
|
HTML
|
UTF-8
|
R
| false
| false
| 1,046
|
r
|
get_tournaments_pro.R
|
#' get_tourn_match_list
#'
#' Get all matches for a single tournament and attach the tournament
#' metadata to every match row.
#'
#' @param x a one-row tournament data frame; must contain a `tourn_url` column
#' @return the matches scraped from `tourn_url` (see `get_matches` for the
#'   columns), with the tournament columns of `x` column-bound to every row
#' @export
get_tourn_match_list <- function(x){
  # Scrape the match table from the tournament page.
  core <- x$tourn_url %>%
    xml2::read_html() %>%
    get_matches
  # Replicate the single tournament row once per scraped match so it can be
  # column-bound. seq_len() is used instead of 1:nrow(core): for a
  # tournament with zero matches, 1:0 would yield c(1, 0) and replicate rows.
  rep_tourn <- seq_len(nrow(core)) %>%
    map(~ x) %>%
    bind_rows()
  out <- core %>%
    dplyr::bind_cols(rep_tourn)
  return(out)
}
#' get_tourn_matches
#'
#' Get all matches from every tournament in a long tournament dataset.
#'
#' @param data long tournament dataset, one row per tournament
#' @return a list with one element per tournament row; each element is the
#'   `purrr::safely()` result (`$result` / `$error`) of scraping that
#'   tournament via `get_tourn_match_list`
#' @export
get_tourn_matches <- function(data){
  # Wrap the scraper so one failing tournament does not abort the whole run,
  # and add a progress bar sized to the number of tournaments.
  get_tourn_match_list_safely <- purrr::safely(get_tourn_match_list)
  get_tourn_match_list_pro <- lolR::progressively(get_tourn_match_list_safely, nrow(data))
  # Split into one-row data frames and scrape each. seq_len() is safe for
  # zero-row input, where 1:nrow(.) would yield c(1, 0).
  tourn_list <- data %>%
    split(seq_len(nrow(.))) %>%
    map(get_tourn_match_list_pro)
  return(tourn_list)
}
|
3bd6c4a792c9a40f5ec05def2e789ae38cc2d3f5
|
b2f61fde194bfcb362b2266da124138efd27d867
|
/code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Database/Kronegger-Pfandler-Pichler/dungeon/dungeon_i25-m12-u5-v0.pddl_planlen=147/dungeon_i25-m12-u5-v0.pddl_planlen=147.R
|
25ee36c530f39ae859e9e1e92e7d0d19b41d86b6
|
[] |
no_license
|
arey0pushpa/dcnf-autarky
|
e95fddba85c035e8b229f5fe9ac540b692a4d5c0
|
a6c9a52236af11d7f7e165a4b25b32c538da1c98
|
refs/heads/master
| 2021-06-09T00:56:32.937250
| 2021-02-19T15:15:23
| 2021-02-19T15:15:23
| 136,440,042
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 94
|
r
|
dungeon_i25-m12-u5-v0.pddl_planlen=147.R
|
82f042c7afcaa486cf2b1201c9cbea6f dungeon_i25-m12-u5-v0.pddl_planlen=147.qdimacs 110583 1002496
|
47ec3b956e2b01293ed2a895acbdde15b9c13931
|
32d76c621b28f4f3653e709631045f2741ea9aaa
|
/app_comp.R
|
593e725bade8c8024bb98066917dd3e92d362296
|
[] |
no_license
|
williamsbenjamin/nesting-topics
|
7fb42fc58120cd74b414992da4b0cb6fa6152615
|
550c0aa4f1c232c814b2ca9a733fd61787a67882
|
refs/heads/master
| 2021-09-08T08:50:54.315293
| 2018-03-08T22:48:22
| 2018-03-08T22:48:22
| 114,163,457
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 981
|
r
|
app_comp.R
|
#
# Shiny dashboard app that renders a sunburst plot (sunburstR) from a
# pre-computed nesting CSV, using a fixed color palette read from disk.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
library(sunburstR)
library(shinydashboard)
library(readr)
library(nlme) # NOTE(review): nlme does not appear to be used below; confirm
# UI: a single sidebar entry leading to one tab box holding the sunburst.
ui <- dashboardPage(
dashboardHeader(),
dashboardSidebar(
sidebarMenu(
menuItem("Sunburst Plot", tabName = "sunbrstPlot")
)
),
dashboardBody( tabBox(id = "sunbrstPlot", width = "100%", height = "1000px",
sunburstOutput("sunburstPlot", height = "750", width = "100%")
)
)
)
server <- function(input, output) {
# Color palette; expected alongside the app (colors_124.csv).
cols_124_test <- read.csv("colors_124.csv")
# Create Sunburst plot; drops the first column (presumably a row-id column
# added on export — confirm against lev_sunburst_with_names.csv).
output$sunburstPlot <- renderSunburst({
computernested <- read_csv("lev_sunburst_with_names.csv")
sunburst(computernested[,-1],colors=cols_124_test)
})
}
# Run the application
shinyApp(ui = ui, server = server)
|
ce58c09ce09c43e7accc43f94899cc0f49b688b3
|
203053106dccd4b971fdef22956df0281dfc6f0e
|
/sobel.R
|
977156228d142006727579f491e9258dea21a110
|
[] |
no_license
|
JoonsukPark/examples
|
31918dcbdcd98be4796eb96e84990852b802b915
|
91e1f3a1c1396762530269e1338728783c981235
|
refs/heads/master
| 2020-06-06T13:52:01.439264
| 2019-08-11T17:45:56
| 2019-08-11T17:45:56
| 192,757,363
| 3
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 502
|
r
|
sobel.R
|
# Sobel test for mediation, computed by hand on the iris columns
# (X = Sepal.Length, M = Sepal.Width, Y = Petal.Length), then cross-checked
# against multilevel::sobel.
X <- iris[,1]
M <- iris[,2]
Y <- iris[,3]
# Mediator model (a path) and outcome model (b path, controlling for X).
fit1 <- lm(M ~ X)
fit2 <- lm(Y ~ X + M)
a_hat <- coef(fit1)[2]
b_hat <- coef(fit2)[3]
a_hat
b_hat
# Indirect effect a*b. NOTE(review): `mean` shadows base::mean here.
mean <- a_hat*b_hat
mean
sigma2_a_hat <- vcov(fit1)[2,2]
sigma2_b_hat <- vcov(fit2)[3,3]
sigma2_a_hat
sigma2_b_hat
# Sobel standard error of the indirect effect (first-order delta method).
se_ab <- sqrt(a_hat^2*sigma2_b_hat + b_hat^2*sigma2_a_hat)
se_ab
# Approximate 95% normal CI for the indirect effect.
c(mean-1.96*se_ab, mean+1.96*se_ab)
# Cross-check with the packaged implementation.
library(multilevel)
fit <- sobel(X, M, Y)
fit$Indirect.Effect
fit$SE
c(fit$Indirect.Effect-1.96*fit$SE, fit$Indirect.Effect+1.96*fit$SE)
|
0ac3b75068cf275287315cf3126f78e7e81cfdc3
|
a43de5791440989fe91b07052b492355bd6f2eab
|
/Rscripts/analysis_of_types_of_compounds.R
|
403c884b8974e1feabefd96e00f78678f98a4f4d
|
[] |
no_license
|
quimaguirre/CAMDA2019-DILI
|
6a44fa618eca61a061f551c46da7a4cb1c60028c
|
ec4525a5c28e09af082ddad10afef624cb623513
|
refs/heads/master
| 2023-03-30T16:10:12.643580
| 2021-03-22T11:53:44
| 2021-03-22T11:53:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 16,640
|
r
|
analysis_of_types_of_compounds.R
|
### Load packages ###
library(cmapR)
library(ggplot2)
library(caret)
### Define variables ###
place = "home" #home or work
# Whether to drop the two outlier compounds below from the drug dataset.
remove.outliers = FALSE
outliers = c('daunorubicin', 'vorinostat')
# Cross-validation folds / repetitions and train fraction for the splits.
number.cv = 10
number.repetitions = 10
fraction_train = 0.7
# Machine-specific data locations, selected by `place`.
if (place=="work"){
main_directory = "/home/quim/PHD/Projects/camda"
bigmem_directory = "/sbi/users/interchange/emre/quim/camda"
} else {
main_directory = "/Users/quim/Dropbox/UPF/PhD/Projects/camda"
bigmem_directory = "/Users/quim/Documents/DATA/camda"
}
### Define files ###
# Data files
drugs_file <- paste(main_directory, "camda_data/CAMDA_l1000_1314compounds-pert_iname.rda", sep="/")
drug_info_file <- paste(main_directory, "camda_data/CAMDA_l1000_1314compounds-info.rda", sep="/")
dilirank_file <- paste(main_directory, "camda_data/CAMDA_l1000_1314compounds-dilirank.v2.rda", sep="/")
expression_file <- paste(bigmem_directory, "CAMDA_l1000_1314compounds-GSE92742_Level5_gct.rda", sep="/")
landmark_genes_file <- paste(main_directory, "additional_data/GSE92742_Broad_LINCS_gene_info_delta_landmark.txt", sep="/")
gene_info_file <- paste(main_directory, "additional_data/GSE92742_Broad_LINCS_gene_info.txt", sep="/")
cell_info_file <- paste(main_directory, "additional_data/GSE92742_Broad_LINCS_cell_info.txt", sep="/")
functions_file <- paste(main_directory, "Rscripts/camda_functions.R", sep="/")
# Specific data files
phenotype2gene_file <- paste(main_directory, "guildify_data/phenotype2gene.tsv", sep="/")
redundantphenotypes_file <- paste(main_directory, "guildify_data/redundant_phenotypes.tsv", sep="/")
wilcox_file <- paste(main_directory, "results/reverse_engineering/reverse_signature_phh_notcorrected.txt", sep="/")
# NOTE(review): leading "/" in the relative part yields a double slash after
# paste(sep="/") — harmless on POSIX paths, but inconsistent with the rest.
tanimoto_file <- paste(main_directory, "/additional_data/tanimoto_smiles.tsv", sep="/")
targets_file <- paste(main_directory, "additional_data/targets/targets_dgidb_hitpick_sea.tsv", sep="/")
# Output files
output.cv.rf <- paste(main_directory, "results/crossvalidation/cv_landmark_rf.txt", sep="/")
output.cv.gbm <- paste(main_directory, "results/crossvalidation/cv_landmark_gbm.txt", sep="/")
output.ind.rf = paste(main_directory, "camda_data/independent_validation/JAguirre_predictions_landmark_rf.txt", sep="/")
output.ind.gbm = paste(main_directory, "camda_data/independent_validation/JAguirre_predictions_landmark_gbm.txt", sep="/")
### Load files ###
# Project helper functions (subset.drug.dataset, subset.expression, ...).
source(functions_file)
load(drugs_file)
load(drug_info_file)
load(dilirank_file)
load(expression_file) # Requires cmapR
#### Subset drugs ####
# Build the working drug dataset (optionally removing the outlier compounds).
drug.dataset <- subset.drug.dataset(drank.sel, outliers=outliers, remove.outliers=remove.outliers)
#### Get landmark genes ####
gene_info_df <- read.csv(gene_info_file, header=TRUE, sep="\t")
# L1000 landmark genes are flagged with pr_is_lm == 1.
landmark_genes <- gene_info_df$pr_gene_id[gene_info_df$pr_is_lm==1]
### Get genes associated to phenotypes from DisGeNET ###
phenotype2gene <- read.csv(phenotype2gene_file, header=TRUE, sep="\t")
phenotypes <- unique(phenotype2gene$diseaseid)
curated_phenotypes <- unique(phenotype2gene$diseaseid[phenotype2gene$source == "CURATED"])
### Prepare SMILES data ###
# Tanimoto similarity matrix between compounds, plus DILI annotation columns.
tanimoto_df <- read.csv(tanimoto_file, header=TRUE, sep="\t", stringsAsFactors=FALSE)
tanimoto_df$DILIConcern[tanimoto_df$DILIConcern == "No-DILI-concern"] <- "No-DILI-Concern" # Correct the 4 entries with lowercase concern
tanimoto_df$pert_iname = rownames(tanimoto_df)
colnames(tanimoto_df)[match("DILIConcern", colnames(tanimoto_df))] <- "dilirank"
tanimoto_df$vDILIConcern <- NULL
# NOTE(review): row selector uses colnames() and column selector uses
# rownames() — for a symmetric similarity matrix this may be intended, but
# it reads swapped; confirm against the tanimoto_smiles.tsv layout.
tanimoto_ind_df <- tanimoto_df[colnames(tanimoto_df) %in% drug.dataset$independent_drugs, rownames(tanimoto_df) %in% drug.dataset$drugs]
tanimoto_df<-tanimoto_df[colnames(tanimoto_df) %in% drug.dataset$drugs, rownames(tanimoto_df) %in% drug.dataset$drugs]
### Prepare targets info ###
# Read targets and map them to dilirank info (the dilirank info on targets is wrong!)
targets_df <- read.csv(targets_file, header=TRUE, sep="\t", stringsAsFactors=FALSE)
colnames(targets_df)[match("drug", colnames(targets_df))] <- "pert_iname" # Change name to pert_iname
targets_ind_df <- targets_df[targets_df$pert_iname %in% drug.dataset$independent_drugs,] # Get independent set
# Re-annotate DILI concern/severity from the curated dilirank table.
targets_df <- merge(x = targets_df, y = drug.dataset$dilirank_df[c("pert_iname", "DILIConcern", "Severity.Class")], by = "pert_iname")
targets_df$DILI <- NULL
targets_df$severity <- NULL
targets_ind_df$DILI <- NULL
targets_ind_df$severity <- NULL
colnames(targets_df)[match("DILIConcern", colnames(targets_df))] <- "dilirank"
colnames(targets_df)[match("Severity.Class", colnames(targets_df))] <- "severity"
targets_df <- targets_df[targets_df$pert_iname %in% drug.dataset$drugs,]
### Load wilcoxon test analysis ###
# First column holds the selected gene ids from the reverse-signature analysis.
wilcox_df = read.csv(wilcox_file, header = FALSE, sep = "\t")
#selected_genes <- unique(wilcox_df$gene_id[wilcox_df$p.value<0.05 & wilcox_df$landmark==TRUE])
selected_genes <- unique(wilcox_df[,1])
### Subset GCT object by landmark genes ###
# Subset the GCT object by cell ID PHH, 10 µM, 24 h
expression_df <- subset.expression(gct, landmark_genes, drug.dataset$drugs, drug.dataset$dilirank_df, cell_id="PHH", pert_idose="10 µM", pert_itime="24 h", merge_samples = TRUE)
# Subset gene expression for independent drugs as well
expression_ind_df <- subset.expression(gct, landmark_genes, drug.dataset$independent_drugs, drug.dataset$independent_df, cell_id="PHH", pert_idose="10 µM", pert_itime="24 h", merge_samples = TRUE)
### Subset GCT object by wilcoxon genes ###
# Subset the GCT object by cell ID PHH, 10 µM, 24 h
expression_wilcox_df <- subset.expression(gct, selected_genes, drug.dataset$drugs, drug.dataset$dilirank_df, cell_id="PHH", pert_idose="10 µM", pert_itime="24 h", merge_samples = TRUE)
# Subset gene expression for independent drugs as well
expression_wilcox_ind_df <- subset.expression(gct, selected_genes, drug.dataset$independent_drugs, drug.dataset$independent_df, cell_id="PHH", pert_idose="10 µM", pert_itime="24 h", merge_samples = TRUE)
### Prepare balanced machine learning datasets ###
# One balanced train/test split per repetition for each feature set
# (landmark expression, wilcoxon-selected expression, SMILES Tanimoto, targets).
datasets.list.landmark <- prepare.balanced.datasets(expression_df, number.repetitions, drug.dataset$most_concern_drugs, drug.dataset$less_concern_drugs, drug.dataset$no_concern_drugs, type_analysis = "discrete", fraction_train=fraction_train)
datasets.list.wilcoxon <- prepare.balanced.datasets(expression_wilcox_df, number.repetitions, drug.dataset$most_concern_drugs, drug.dataset$less_concern_drugs, drug.dataset$no_concern_drugs, type_analysis = "discrete", fraction_train=fraction_train)
datasets.list.smiles <- prepare.balanced.datasets(tanimoto_df, number.repetitions, drug.dataset$most_concern_drugs, drug.dataset$less_concern_drugs, drug.dataset$no_concern_drugs, type_analysis = "discrete", fraction_train=fraction_train)
datasets.list.targets <- prepare.balanced.datasets(targets_df, number.repetitions, drug.dataset$most_concern_drugs, drug.dataset$less_concern_drugs, drug.dataset$no_concern_drugs, type_analysis = "discrete", fraction_train=fraction_train)
datasets.list.targets <- prepare.balanced.datasets(targets_df, number.repetitions, drug.dataset$most_concern_drugs, drug.dataset$less_concern_drugs, drug.dataset$no_concern_drugs, type_analysis = "discrete", fraction_train=fraction_train)
### Define balanced drugs for machine learning datasets (disgenet/guildify) ###
balanced.drugs.list <- prepare.balanced.drugs(number.repetitions, drug.dataset$most_concern_drugs, drug.dataset$less_concern_drugs, drug.dataset$no_concern_drugs, fraction_train=fraction_train)
### Count the number of different drugs in each dataset ###
# Build a summary table: for every feature dataset, how many drugs of each
# DILIrank class appear in the full, independent, training and testing sets.
# The original built each row with one enormous c(...) call per dataset; the
# helpers below factor out that repetition without changing the counts.

# Counts c(total, Most, Less, No) for a data frame carrying a `dilirank` column.
count.dilirank <- function(df) {
  c(nrow(df),
    nrow(df[df$dilirank == "Most-DILI-Concern", ]),
    nrow(df[df$dilirank == "Less-DILI-Concern", ]),
    nrow(df[df$dilirank == "No-DILI-Concern", ]))
}

# Counts c(total, Most, Less, No) for a plain vector of drug names, classified
# against the reference drug lists in `drug.dataset`.
count.drug.vector <- function(drugs) {
  c(length(drugs),
    length(drugs[drugs %in% drug.dataset$most_concern_drugs]),
    length(drugs[drugs %in% drug.dataset$less_concern_drugs]),
    length(drugs[drugs %in% drug.dataset$no_concern_drugs]))
}

# One 14-element table row for a dataset represented as data frames.
# Mixing the character label with numeric counts coerces the whole row to
# character, exactly as the original row-wise c(...) calls did.
dataset.row <- function(label, full_df, ind_df, train_df, test_df) {
  c(label, count.dilirank(full_df), nrow(ind_df),
    count.dilirank(train_df), count.dilirank(test_df))
}

# One 14-element table row for the datasets defined only by drug-name vectors
# (DisGeNET / GUILDify share the same balanced drug splits).
drug.vector.row <- function(label) {
  c(label,
    length(drug.dataset$drugs), length(drug.dataset$most_concern_drugs),
    length(drug.dataset$less_concern_drugs), length(drug.dataset$no_concern_drugs),
    length(drug.dataset$independent_drugs),
    count.drug.vector(balanced.drugs.list$drugs_training_list[[1]]),
    count.drug.vector(balanced.drugs.list$drugs_testing))
}

cols <- c("Type of drug", "Num. DILIrank drugs", "Num. Most drugs", "Num. Less drugs", "Num. No drugs", "Num. independent drugs", "Num. train drugs", "Num. Most train drugs", "Num. Less train drugs", "Num. No train drugs", "Num. test drugs", "Num. Most test drugs", "Num. Less test drugs", "Num. No test drugs")
compounds_df <- data.frame(matrix(ncol = length(cols), nrow=0))
colnames(compounds_df) <- cols
compounds_df[nrow(compounds_df)+1,] <- dataset.row("Landmark", expression_df, expression_ind_df, datasets.list.landmark$training_datasets_with_names[[1]], datasets.list.landmark$testing_with_names)
compounds_df[nrow(compounds_df)+1,] <- dataset.row("DILI Landmark", expression_wilcox_df, expression_wilcox_ind_df, datasets.list.wilcoxon$training_datasets_with_names[[1]], datasets.list.wilcoxon$testing_with_names)
compounds_df[nrow(compounds_df)+1,] <- drug.vector.row("DisGeNET")
compounds_df[nrow(compounds_df)+1,] <- drug.vector.row("GUILDify")
compounds_df[nrow(compounds_df)+1,] <- dataset.row("SMILES", tanimoto_df, tanimoto_ind_df, datasets.list.smiles$training_datasets_with_names[[1]], datasets.list.smiles$testing_with_names)
compounds_df[nrow(compounds_df)+1,] <- dataset.row("Targets", targets_df, targets_ind_df, datasets.list.targets$training_datasets_with_names[[1]], datasets.list.targets$testing_with_names)
compounds_df
### Calculate the drugs without targets ###
# Report which modelling / independent drugs carry no target annotation in the
# targets tables prepared above (printed interactively).
drugs_no_target <- drug.dataset$drugs[!(drug.dataset$drugs %in% targets_df$pert_iname)]
independent_drugs_no_target <- drug.dataset$independent_drugs[!(drug.dataset$independent_drugs %in% targets_ind_df$pert_iname)]
drugs_no_target
independent_drugs_no_target
# Same check broken down by DILI-concern class.
drug.dataset$most_concern_drugs[!(drug.dataset$most_concern_drugs %in% targets_df$pert_iname)]
drug.dataset$less_concern_drugs[!(drug.dataset$less_concern_drugs %in% targets_df$pert_iname)]
drug.dataset$no_concern_drugs[!(drug.dataset$no_concern_drugs %in% targets_df$pert_iname)]
|
f8c25ef36b8669135a1cf059307ff3204fdb285a
|
c7318557494fab35a01fd0ca8e239a060fc573ab
|
/Manuals/LR/2-3/transform_lr2.R
|
578e07971f7501f94ef241dae2efc63042a6f2ec
|
[] |
no_license
|
Fissssshhhhh/YSRS
|
e310aff19214c9139d634e7b7231ea5b7afb6619
|
f6c92a3a0dcb40e5c29e95570014a4013801de28
|
refs/heads/master
| 2023-09-06T04:21:11.634227
| 2021-11-12T11:53:14
| 2021-11-12T11:53:14
| 427,340,929
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,105
|
r
|
transform_lr2.R
|
## dplyr transformation walk-through (R for Data Science, data-transform
## chapter) on the nycflights13 `flights` table, loaded from a local snapshot.
library(dplyr)
# Fix: the plotting sections below call ggplot()/geom_*()/aes(), but ggplot2
# was never loaded, so this script errored at the first plot.
library(ggplot2)
# library(nycflights13)
# NOTE(review): file has an .RData extension but is read with readRDS(), so it
# must have been written with saveRDS() -- confirm how the snapshot was made.
flights <- readRDS("Data/flights.RData")
flights

# filter -----------------------------------------------------------------
jan1 <- filter(flights, month == 1, day == 1)
# Wrapping an assignment in parentheses both assigns and prints the result.
(dec25 <- filter(flights, month == 12, day == 25))
# Floating point numbers: exact `==` comparisons fail; use near() instead.
sqrt(2) ^ 2 == 2
1/49 * 49 == 1
near(sqrt(2) ^ 2, 2)
near(1 / 49 * 49, 1)
# The following code finds all flights that departed in November or December:
filter(flights, month == 11 | month == 12)
# Common mistake: `11 | 12` evaluates to TRUE (i.e. 1), so this finds January:
filter(flights, month == 11 | 12)
# %in% is the safe way to test membership in a set.
nov_dec <- filter(flights, month %in% c(11, 12))
# Flights that weren't delayed (on arrival or departure) by more than two
# hours; the two forms are equivalent by De Morgan's law.
filter(flights, !(arr_delay > 120 | dep_delay > 120))
filter(flights, arr_delay <= 120, dep_delay <= 120)
# filter() drops rows where the condition is NA; keep them explicitly:
df <- tibble(x = c(1, NA, 3))
filter(df, x > 1)
filter(df, is.na(x) | x > 1)

# arrange ----------------------------------------------------------------
arrange(flights, year, month, day)
arrange(flights, desc(arr_delay))
# Missing values are always sorted at the end:
df <- tibble(x = c(5, 2, NA))
arrange(df, x)
arrange(df, desc(x))

# select -----------------------------------------------------------------
select(flights, year, month, day)
# Select all columns between year and day (inclusive)
select(flights, year:day)
# Select all columns except those from year to day (inclusive)
select(flights, -(year:day))
# Rename variable
rename(flights, tail_num = tailnum)
# To move variables to the start of the data frame
select(flights, time_hour, air_time, everything())

# mutate -----------------------------------------------------------------
flights_sml <- select(flights,
  year:day,
  ends_with("delay"),
  distance,
  air_time
)
mutate(flights_sml,
  gain = arr_delay - dep_delay,
  speed = distance / air_time * 60
)
# One can refer to columns that you've just created:
mutate(flights_sml,
  gain = arr_delay - dep_delay,
  hours = air_time / 60,
  gain_per_hour = gain / hours
)
# To keep only the new variables use transmute()
transmute(flights,
  gain = arr_delay - dep_delay,
  hours = air_time / 60,
  gain_per_hour = gain / hours
)
# Integer division / modulo split dep_time (stored as HHMM) into hour/minute.
transmute(flights,
  dep_time,
  hour = dep_time %/% 100,
  minute = dep_time %% 100
)

# summarise --------------------------------------------------------------
summarise(flights, delay = mean(dep_delay, na.rm = TRUE))
by_day <- group_by(flights, year, month, day)
View(summarise(by_day, delay = mean(dep_delay, na.rm = TRUE)))

# pipes ------------------------------------------------------------------
by_dest <- group_by(flights, dest)
delay <- summarise(by_dest,
  count = n(),
  dist = mean(distance, na.rm = TRUE),
  delay = mean(arr_delay, na.rm = TRUE)
)
delay <- filter(delay, count > 20, dest != "HNL")
# It looks like delays increase with distance up to ~750 miles
# and then decrease. Maybe as flights get longer there's more
# ability to make up delays in the air?
ggplot(data = delay, mapping = aes(x = dist, y = delay)) +
  geom_point(aes(size = count), alpha = 1/3) +
  geom_smooth(se = FALSE)
# The same with pipes:
delays <- flights %>%
  group_by(dest) %>%
  summarise(
    count = n(),
    dist = mean(distance, na.rm = TRUE),
    delay = mean(arr_delay, na.rm = TRUE)
  ) %>%
  filter(count > 20, dest != "HNL")
# Not cancelled
not_cancelled <- flights %>%
  filter(!is.na(dep_delay), !is.na(arr_delay))
# %>%
#   group_by(year, month, day) %>%
#   summarise(mean = mean(dep_delay))
delays <- not_cancelled %>%
  group_by(tailnum) %>%
  summarise(
    delay = mean(arr_delay)
  )
ggplot(data = delays, mapping = aes(x = delay)) +
  geom_freqpoly(binwidth = 10)
# Per-plane mean delay together with the number of flights (n):
delays <- not_cancelled %>%
  group_by(tailnum) %>%
  summarise(
    delay = mean(arr_delay, na.rm = TRUE),
    n = n()
  )
ggplot(data = delays, mapping = aes(x = n, y = delay)) +
  geom_point(alpha = 1/10)
# Filter out planes with few flights before plotting:
delays %>%
  filter(n > 25) %>%
  ggplot(mapping = aes(x = n, y = delay)) +
  geom_point(alpha = 1/10)
# To remove grouping
daily <- group_by(flights, year, month, day)
daily %>%
  ungroup() %>%  # no longer grouped by date
  summarise(flights = n())
|
968a52d02e58ced79787400ab492bdfdb8564463
|
69f18ac969d8061c6cce6b47cc479a528195e864
|
/src/main/resources/OccuranceOfWord.R
|
728f89e7f9396260c787c5598a4e7e3b1d38e950
|
[] |
no_license
|
sandy-sm/r-storm
|
99717538b1055d2fd470815ce92bbaf34939c9fc
|
709f9f5a915522ca73a1c1d00ff41d3ebe274b5e
|
refs/heads/master
| 2021-01-19T00:39:31.313853
| 2017-04-17T04:19:02
| 2017-04-17T04:19:02
| 87,197,126
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,629
|
r
|
OccuranceOfWord.R
|
## Count the occurrences of `word` (interpreted as a case-insensitive regular
## expression) in the string `text`. Returns an integer count (0 if no match).
## Fix: regmatches() must extract from `text` (the string that was searched by
## gregexpr), not from `word`; the original passed `word` as the first
## argument, which only produced the right count by accident.
occurance.word <- function(word, text) {
  lengths(regmatches(text, gregexpr(word, text, ignore.case = TRUE)))
}
## Percentage of words in `text` that match `word` (case-insensitive regex):
## 100 * (number of matches of `word`) / (total number of \w+ tokens).
## Fix: as in occurance.word(), regmatches() in the numerator must extract
## from `text`, not from `word`.
proportion.word <- function(word, text) {
  100 * lengths(regmatches(text, gregexpr(word, text, ignore.case = TRUE))) /
    lengths(regmatches(text, gregexpr("\\w+", text)))
}
## Compute proportion.word() for every pattern in `list` against `text`,
## returning the proportions followed by a trailing 0 (the "not spam" class
## placeholder expected by the prediction frame built below).
## Fix: the original grew the result with c() inside a loop (O(n^2) copies);
## vapply preallocates and also guarantees a numeric result.
proportion.word.bulk <- function(list, text) {
  props <- vapply(list, function(w) proportion.word(w, text),
                  numeric(1), USE.NAMES = FALSE)
  c(props, 0)  # trailing 0 = "Not SPAM" label slot
}
# Sample text used for manual testing of the word-frequency helpers above.
text <- "Hello, this is test word for testing text,email, but text is now longer"
#word <- "test"
#occurance.word(word, text)
#proportion.word(word, text)
# Feature vocabulary; entries are used as regular expressions, hence the
# escaping of (, [, $, # and the \W{8}/\W{10}/\W{15} runs of non-word chars.
# NOTE(review): appears to mirror the UCI spambase attribute list -- confirm.
list_data <- c("make","address","all","3d","our","over","remove","internet", "order", "mail", "receive", "will",
"people", "report", "addresses", "free", "business","email", "you", "credit", "your"
,"font", "000","money","hp","hpl","george","650","lab", "labs", "telnet", "857", "data",
"415", "85", "technology", "1999", "parts", "pm", "direct", "cs", "meeting", "original",
"project", "re", "edu", "table", "conference",";", '\\(', '\\[','!','\\$','\\#',"\\W{8}","\\W{10}", "\\W{15}")
#print(list_data)
#proportion.word.bulk(list_data, text)
# Build a single prediction row shaped like the training data.
# NOTE(review): `dataTest` (a 58-column feature frame) and the fitted model `x`
# are assumed to exist from an earlier script/session -- confirm upstream.
newDataTest <- dataTest[1,1:58]
newRecordVector <- proportion.word.bulk(list_data, text)
newDataTest[1,1:58] <- newRecordVector
## Initialize prediction to 0 ("Not SPAM") before overwriting with the model output.
pred <- 0
# Columns 1:57 are the features; column 58 is the class label, so it is
# excluded from the prediction input.
pred <- predict(x, newDataTest[1,1:57])
print(pred)
|
ace163f334ff3df95e5d53bd9195b377e5306539
|
06638f78ad22f0b25fbab84855b632f05c82b650
|
/man/stepjglm.Rd
|
6a1660358f37c36d6d6849670827893c53de292f
|
[] |
no_license
|
cran/stepjglm
|
46fd47372db977ae5bb24e37802768ca4a67f8fc
|
7860f6781066ace2504ce32b41168be2ffb4ffa2
|
refs/heads/master
| 2023-08-23T15:24:00.828128
| 2021-10-25T05:50:02
| 2021-10-25T05:50:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 5,996
|
rd
|
stepjglm.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/stepjglm.R
\name{stepjglm}
\alias{stepjglm}
\title{Variable selection in joint modeling of mean and dispersion}
\usage{
stepjglm(model,alpha1,alpha2,datafram,family,lambda1=1,lambda2=1,startmod=1,
interations=FALSE)
}
\arguments{
\item{model}{an object of class "formula" (or one that can be coerced to that class): a symbolic description of the model to be fitted. if \code{datafram} is a mixture data, \code{datafram} doesn't contain the principal mixture components.}
\item{alpha1}{significance level for testing add new terms on the mean models.}
\item{alpha2}{significance level for testing add new terms on the dispersion models.}
\item{datafram}{a data frame containing the data.}
\item{family}{a character string naming a family function or the result of a call to a family function. For \code{glm.fit} only the third option is supported. (See \code{family} for details of family functions). Describe the family function for the mean model (families implemented by package \code{stats}). For the dispersion model, the Gamma family whit log link is assumed.}
\item{lambda1}{some function of the sample size to calculate the \eqn{\tilde{R}_m^{2}} (See Pinto and Pereira (in press) and Zhang (2017) for more details). If equal to 1 (default), uses the standard correction for the \eqn{\tilde{R}_m^{2}}. If equal to "EAIC", uses the \eqn{EAIC} criterion.}
\item{lambda2}{some function of the sample size to calculate the \eqn{\tilde{R}_d^{2}} (See Pinto and Pereira (in press) and Zhang (2017) for more details). If equal to 1 (default), uses the standard correction for the \eqn{\tilde{R}_d^{2}}. If equal to "AIC", uses the corrected \eqn{AIC_c} criterion.}
\item{startmod}{if \code{datafram} is a mixture data, \code{startmod} is the principal mixture components, else, \code{startmod} must be equal to 1 (default).}
\item{interations}{if \code{TRUE}, shows the output of the iterative procedure step by step. The default is \code{FALSE}.}
}
\value{
\tabular{ll}{
\code{ model.mean} \tab a \code{glm} object with the adjustments for the mean model. \cr
\tab \cr
\code{model.disp} \tab a \code{glm} object with the adjustments for the dispersion model. \cr
\tab \cr
\code{EAIC} \tab a numeric object containing the \emph{Extended Akaike Information Criterion}. \cr
\tab For details, see Wang and Zhang (2009). \cr
\tab \cr
\code{EQD} \tab a numeric object containing the \emph{Extended Quasi Deviance}. \cr
\tab For details, see Nelder and Lee (1991). \cr
\tab \cr
\code{R2m} \tab a numeric object containing the standard correction for the \eqn{\tilde{R}_m^{2}}. \cr
\tab For details, see Pinto and Pereira (in press). \cr
\tab \cr
\code{R2d} \tab a numeric object containing the standard correction for the \eqn{\tilde{R}_d^{2}}. \cr
\tab For details, see Pinto and Pereira (in press). \cr
}
}
\description{
A procedure for selecting variables in the JMMD (including mixture models), based on hypothesis testing and the quality of the model's fit.
}
\details{
The function implements a method for selection of variables for both the mean and dispersion models in the JMMD introduced by Nelder and Lee (1991)
considering the \emph{Adjusted Quasi Extended Likelihood} introduced by Lee and Nelder (1998).
The method is a procedure for selecting variables, based on hypothesis testing and the quality of the model's fit.
A criterion for checking the goodness of fit is used, in each iteration of the selection process,
as a filter for choosing the terms that will be evaluated by a hypothesis test. For more details on selection algorithms, see Pinto and Pereira (in press).
}
\examples{
# Application to the bread-making problem:
data(bread_mixture)
Form =
as.formula(y~ x1:x2+x1:x3+x2:x3+x1:x2:(x1-x2)+x1:x3:(x1-x3)+
+ x1:z1+x2:z1+x3:z1+x1:x2:z1
+ x1:x3:z1+x1:x2:(x1-x2):z1
+ x1:x3:(x1-x3):z1
+ x1:z2+x2:z2+x3:z2+x1:x2:z2
+ x1:x3:z2+x1:x2:(x1-x2):z2
+x1:x3:(x1-x3):z2)
object=stepjglm(Form,0.1,0.1,bread_mixture,gaussian,sqrt(90),"AIC","-1+x1+x2+x3")
summary(object$modelo.mean)
summary(object$modelo.disp)
object$EAIC # Print the EAIC for the final model
# Application to the injection molding data:
form = as.formula(Y ~ A*M+A*N+A*O+B*M+B*N+B*O+C*M+C*N+C*O+D*M+D*N+D*O+
E*M+E*N+E*O+F*M+F*N+F*O+G*M+G*N+G*O)
data(injection_molding)
obj.dt = stepjglm(form, 0.05,0.05,injection_molding,gaussian,sqrt(nrow(injection_molding)),"AIC")
summary(obj.dt$modelo.mean)
summary(obj.dt$modelo.disp)
obj.dt$EAIC # Print the EAIC for the final model
obj.dt$EQD # Print the EQD for the final model
obj.dt$R2m # Print the R2m for the final model
obj.dt$R2d # Print the R2d for the final model
}
\references{
Hu, B. and Shao, J. (2008). Generalized linear model selection using \eqn{R^2}. \emph{Journal of Statistical Planning and Inference}, 138, 3705-3712.
Lee, Y., Nelder, J. A. (1998). Generalized linear models for analysis of quality improvement experiments. \emph{The Canadian Journal of Statistics}, v. 26, n. 1, pp. 95-105.
Nelder, J. A., Lee, Y. (1991). Generalized linear models for the analysis of Taguchi-type experiments. \emph{Applied Stochastic Models and Data Analysis}, v. 7, pp. 107-120.
Pinto, E. R., Pereira, L. A. (in press). On variable selection in joint modeling of mean and dispersion. \emph{Brazilian Journal of Probability and Statistics}. Preprint at \url{https://arxiv.org/abs/2109.07978} (2021).
Wang, D. and Zhang, Z. (2009). Variable selection in joint generalized linear models. \emph{Chinese Journal of Applied Probability and Statistics}, v. 25, pp.245-256.
Zhang, D. (2017). A coefficient of determination for generalized linear models. \emph{The American Statistician}, v. 71, 310-316.
}
\seealso{
\code{\link[stats]{glm}}
\code{\link[stats]{summary.glm}}
}
\author{
Leandro Alves Pereira, Edmilson Rodrigues Pinto.
}
|
757fdef137fab049d654b29c28076a3e64dcedb1
|
c053cc97c204c6af25664cf337d6dd94d984c591
|
/man-roxygen/return-prob.R
|
81bd9be97af35b637c8d13df27c4f1bc15702004
|
[
"MIT"
] |
permissive
|
tidymodels/yardstick
|
1b2454ae37da76b6c5c2b36682d573c7044767a7
|
e5c36f206fb737fc54b1a6161c09bc0d63b79beb
|
refs/heads/main
| 2023-08-19T03:29:20.953918
| 2023-08-08T21:32:57
| 2023-08-08T21:32:57
| 108,898,402
| 294
| 55
|
NOASSERTION
| 2023-08-08T21:32:59
| 2017-10-30T19:26:54
|
R
|
UTF-8
|
R
| false
| false
| 245
|
r
|
return-prob.R
|
#' @return
#'
#' A `tibble` with columns `.metric`, `.estimator`,
#' and `.estimate` and 1 row of values.
#'
#' For grouped data frames, the number of rows returned will be the same as
#' the number of groups.
# these don't have _vec() methods
|
3d41728b617f5189d2f8564771ecf85e3ae6252b
|
385e441bb73fec6fd36f1cebd67a6f94ff50b096
|
/run_analysis.R
|
6f827862a7f8f0fe01fbe3867100dc822a863f83
|
[] |
no_license
|
marcelotournier/HAR_UCI
|
b75c3b00809b995b09acf9376939435c85b2f823
|
45404788687969be4c99bc994c19ffa9a18f7a67
|
refs/heads/master
| 2021-01-10T07:13:31.564146
| 2015-11-20T03:38:41
| 2015-11-20T03:38:41
| 46,536,836
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,032
|
r
|
run_analysis.R
|
## Getting & Cleaning Data course project: build tidy datasets from the
## UCI Human Activity Recognition (HAR) raw files in the working directory.
library(dplyr)
library(data.table)

# Preparing observations from group 'test' --------------------------------
subject_test <- read.fwf("subject_test.txt", header=FALSE, widths = 2, sep ='\n' ,col.names = "subject")
subject_test$Group <- 'test'
Y_test <- read.fwf("y_test.txt", header=FALSE, widths = 1, sep ='\n' ,col.names = "activity")
# features.txt: feature index (3 chars) followed by the feature name.
featurelist <- read.fwf("features.txt", header=FALSE, widths = c(3,600), sep ="")
x_test <- fread('X_test.txt', sep = " ",sep2 = "\n")
names(x_test) <- as.character(featurelist$V2)
# Keep only the mean() and std() measurements (assignment step 2).
x_test_mean <- select(x_test, contains("mean()"),contains("-std()"))
test_table <- cbind(subject_test,Y_test,x_test_mean)

# Preparing observations from group 'train' -------------------------------
subject_train <- read.fwf("subject_train.txt", header=FALSE, widths = 2, sep ='\n' ,col.names = "subject")
subject_train$Group <- 'train'
Y_train <- read.fwf("y_train.txt", header=FALSE, widths = 1, sep ='\n' ,col.names = "activity")
x_train <- fread('X_train.txt', sep = " ",sep2 = "\n")
names(x_train) <- as.character(featurelist$V2)
x_train_mean <- select(x_train, contains("mean()"),contains("-std()"))
train_table <- cbind(subject_train,Y_train,x_train_mean)

# Joining subsets from "test" and "train" groups:
uci <- rbind(test_table,train_table)

# Renaming activity labels (codes 1-6 from activity_labels.txt):
uci$activity <- gsub ("1","WALKING", uci$activity)
uci$activity <- gsub ("2","WALKING_UPSTAIRS", uci$activity)
uci$activity <- gsub ("3","WALKING_DOWNSTAIRS", uci$activity)
uci$activity <- gsub ("4","SITTING", uci$activity)
uci$activity <- gsub ("5","STANDING", uci$activity)
uci$activity <- gsub ("6","LAYING", uci$activity)

# Final touch (step 4): order observations by subject.
uci <- arrange(uci,subject)

# Step 5 - From the data set in step 4, creates a second, independent tidy data
# set with the average of each variable for each activity and each subject.
uci_means <- select(uci, contains("subject"),contains("Group"),contains("activity"), contains("mean()"))
# Fix: the original called plyr::ddply()/numcolwise() without ever loading
# plyr, so the script errored here. The dplyr equivalent below averages every
# remaining (numeric feature) column per (subject, Group, activity) group.
uci_means <- uci_means %>%
  group_by(subject, Group, activity) %>%
  summarise(across(everything(), mean), .groups = "drop") %>%
  as.data.frame()
names(uci_means) <- gsub("-mean()","",names(uci_means))
# Fix: spell out row.names (the original relied on partial matching of
# `row.name=`).
write.table(uci,"uci.txt",row.names=FALSE)
write.table(uci_means,"uci_means.txt",row.names=FALSE)
|
bd265939f426664d2a6e59c1b185e2d63e0fb6bf
|
b15ab0ad95b50fc6ed4fc3172631792ba0363b07
|
/PE 58 - Spiral primes.R
|
569b64ae7d7c837d85d8e0be0dd60758b06fa077
|
[] |
no_license
|
stephen-kane/R-Repo
|
31d75765dec12f36ce41af5edcbc10e5bc487ea0
|
5c87594a83efd268479397977546bc30ddb8f625
|
refs/heads/master
| 2021-01-01T20:18:27.934671
| 2017-08-07T05:57:38
| 2017-08-07T05:57:38
| 79,204,275
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 439
|
r
|
PE 58 - Spiral primes.R
|
# Project Euler 58: find the side length of the Ulam (number) spiral at which
# the proportion of primes on the diagonals first drops below 10%.
library(gmp)
# Iterate over odd side lengths i = 3, 5, 7, ...
for(i in seq(3,40000,2)){
z=seq(1,i,2)   # odd side lengths of every completed ring so far
a=seq(0,i,2)   # offsets: the four corners of ring z are z^2, z^2-a, z^2-2a, z^2-3a
# isprime returns 0.5 for big numbers so I replaced them with 1s
# (gmp::isprime returns 0 = composite, 1 = probably prime, 2 = definitely
# prime; dividing by 2 maps these to 0 / 0.5 / 1, and probable primes are
# then promoted to 1). The z^2 corner itself is an odd square, never prime,
# so only the other three diagonals are summed.
b=sum(replace(isprime(z^2-a)/2,which(isprime(z^2-a)/2==0.5),1))+sum(replace(isprime(z^2-2*a)/2,which(isprime(z^2-2*a)/2==0.5),1))+sum(replace(isprime(z^2-3*a)/2,which(isprime(z^2-3*a)/2==0.5),1))
s=b/(2*i-1) # 2*i-1 is the total number of diagonal numbers
# Stop at the first side length where primes make up less than 10% of the
# diagonal values.
if(s<0.1){
print(i)
break
}
}
|
3099e51e2c4b366599fda08482670fff46361baa
|
9f587d2c0badc8c2ea944a228663a1d4fdadfcff
|
/R/usmap.R
|
23b44a1020c79c696f07f4f8164d343b32532a53
|
[] |
no_license
|
cran/usmap
|
60219762dd7c5dc1fca750288c1ea0221a1cecf1
|
dec1bbba20dda7c7e1d60cd25192ceed3f0fc8c0
|
refs/heads/master
| 2023-06-23T21:16:30.289265
| 2023-06-13T07:50:02
| 2023-06-13T07:50:02
| 80,337,684
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,837
|
r
|
usmap.R
|
#' usmap: US maps including Alaska and Hawaii
#'
#' @description
#' It is usually difficult or inconvenient to create US maps that
#' include both Alaska and Hawaii in a convenient spot. All map
#' data frames produced by this package use the Albers Equal Area
#' projection.
#'
#' @section Map data frames:
#' Alaska and Hawaii have been manually moved to a new location so that
#' their new coordinates place them to the bottom-left corner of
#' the map. These maps can be accessed by using the [us_map] function.
#'
#' The function provides the ability to retrieve maps with either
#' state borders or county borders using the \code{regions} parameter
#' for convenience.
#'
#' States (or counties) can be included and excluded using the provided
#' \code{include} and \code{exclude} parameters. These parameters can be used
#' together with any combination of names, abbreviations, or FIPS code to
#' create more complex maps.
#'
#' @section FIPS lookup tools:
#' Several functions have been included to lookup the US state or county
#' pertaining to a FIPS code.
#'
#' Likewise a reverse lookup can be done where a FIPS code can be used to
#' retrieve the associated state(s) or county(ies). This can be useful when
#' preparing data to be merged with the map data frame.
#'
#' @section Plot US map data:
#' A convenience function [plot_usmap] has been included which
#' takes similar parameters to [us_map] and returns a [ggplot2::ggplot2]
#' object. Since the output is a \code{ggplot} object, other layers can be
#' added such as scales, themes, and labels. Including data in the function call
#' will color the map according to the values in the data, creating a choropleth.
#'
#' @author Paolo Di Lorenzo \cr
#' \itemize{
#' \item Email: \email{paolo@@dilorenzo.pl}
#' \item Website: \url{https://dilorenzo.pl}
#' \item GitHub: \url{https://github.com/pdil/}
#' }
#'
#' @seealso
#' Helpful links:
#' \itemize{
#' \item FIPS code information \cr
#' \url{https://en.wikipedia.org/wiki/FIPS_county_code}
#' \url{https://en.wikipedia.org/wiki/FIPS_state_code}
#' \item US Census Shapefiles \cr
#' \url{https://www.census.gov/geographies/mapping-files/time-series/geo/cartographic-boundary.html}
#' \item Map Features \cr
#' \url{https://en.wikipedia.org/wiki/Map_projection}
#' \url{https://en.wikipedia.org/wiki/Albers_projection}
#' \url{https://en.wikipedia.org/wiki/Choropleth}
#' }
#'
#' @references
#' Rudis, Bob. "Moving The Earth (well, Alaska & Hawaii) With R."
#' Blog post. Rud.is., 16 Nov. 2014. Web. 10 Aug. 2015.
#' \url{https://rud.is/b/2014/11/16/moving-the-earth-well-alaska-hawaii-with-r/}.
#'
#' @docType package
#' @name usmap
"_PACKAGE"
## Prevent R CMD check from catching the following variables
## as missing global variables (used in ggplot2::aes)
# globalVariables() only exists on R >= 2.15.1, hence the version guard.
if (getRversion() >= "2.15.1")
  utils::globalVariables(c("abbr", "county", "group", "x", "y"))
#' Retrieve US map data
#'
#' @param regions The region breakdown for the map, can be one of
#' (\code{"states"}, \code{"state"}, \code{"counties"}, \code{"county"}).
#' The default is \code{"states"}.
#' @param include The regions to include in the resulting map. If \code{regions} is
#' \code{"states"}/\code{"state"}, the value can be either a state name, abbreviation or FIPS code.
#' For counties, the FIPS must be provided as there can be multiple counties with the
#' same name. If states are provided in the county map, only counties in the included states
#' will be returned.
#' @param exclude The regions to exclude in the resulting map. If \code{regions} is
#' \code{"states"}/\code{"state"}, the value can be either a state name, abbreviation or FIPS code.
#' For counties, the FIPS must be provided as there can be multiple counties with the
#' same name. The regions listed in the \code{include} parameter are applied first and the
#' \code{exclude} regions are then removed from the resulting map. Any excluded regions
#' not present in the included regions will be ignored.
#'
#' @seealso [usmapdata::us_map()] of which this function is a wrapper for.
#'
#' @return A data frame of US map coordinates divided by the desired \code{regions}.
#'
#' @examples
#' str(us_map())
#'
#' df <- us_map(regions = "counties")
#' west_coast <- us_map(include = c("CA", "OR", "WA"))
#'
#' south_atl_excl_FL <- us_map(include = .south_atlantic, exclude = "FL")
#' @export
us_map <- function(regions = c("states", "state", "counties", "county"),
                   include = c(),
                   exclude = c()) {
  # `usmapdata` hosts the actual map data; fail fast with an actionable
  # message when it is not installed.
  # Fix: the error message previously named `plot_usmap` (copy-paste from the
  # sibling plotting function) instead of `us_map`.
  if (!requireNamespace("usmapdata", quietly = TRUE)) {
    stop("`usmapdata` must be installed to use `us_map`.
         Use: install.packages(\"usmapdata\") and try again.")
  }
  # Delegate entirely to usmapdata, forwarding the arguments unchanged.
  usmapdata::us_map(regions = regions, include = include, exclude = exclude)
}
|
8bfad5b57ab2c2ad1eae3a5513f7c1bfae156fe6
|
eee0f8efd9e287df7381a1d7d9ca4e4ef7b103f4
|
/bin/knitLesson05.R
|
bded1a419ebc9d7dee0d8e570bc4f870d90c8635
|
[
"MIT",
"LicenseRef-scancode-public-domain",
"CC-BY-4.0"
] |
permissive
|
uw-madison-comps/R-plant-lesson
|
b0d178ee33720a0620a77e4f9de1cf7774d793e8
|
eb0fa2530082c5b4ebcb87524146fe63e8c344a3
|
refs/heads/gh-pages
| 2023-07-19T15:36:49.973948
| 2019-09-23T18:19:46
| 2019-09-23T18:19:46
| 198,238,729
| 0
| 3
|
NOASSERTION
| 2021-09-15T16:14:13
| 2019-07-22T14:22:05
|
HTML
|
UTF-8
|
R
| false
| false
| 181
|
r
|
knitLesson05.R
|
## Knit lesson episode 5 from R Markdown into the markdown used by the site.
# Fix: use library() instead of require(); require() merely warns and returns
# FALSE when knitr is missing, so the failure would surface later and less
# clearly. (The namespaced knitr::knit() call below would also work alone.)
library(knitr)
src_rmd <- "_episodes_rmd/05-advanced-lesson.Rmd"
dest_md <- "_episodes/05-advanced-lesson.md"
## knit the Rmd into markdown
knitr::knit(src_rmd, output = dest_md)
|
855d0927f9f5d95dc86f958f1987ec43b70239d9
|
4adcf1c34f50716e6a2a04922f261891b2e7469d
|
/MACHINE-LEARNING/LABS-progs/ann/All_Labs/7Lab_cluster/pluton.r
|
9cf652275631f5a2a949c93b7fa0588e740706dd
|
[] |
no_license
|
BC30138/Sem10
|
913bf40a52dce308f46d0deb3aaa1f8124231faf
|
490748c3420cdb3458e418ec6556d6fb4951e5ba
|
refs/heads/master
| 2020-04-22T09:22:02.992576
| 2019-07-02T23:27:42
| 2019-07-02T23:27:42
| 170,269,664
| 1
| 0
| null | 2019-03-25T19:59:35
| 2019-02-12T07:06:10
|
TeX
|
UTF-8
|
R
| false
| false
| 338
|
r
|
pluton.r
|
#install.packages("cluster")
library(cluster)
# Lab demo: how kmeans cluster assignments depend on the iteration budget.
# `pluton` ships with the cluster package; the first two columns are
# dropped — presumably to cluster on the remaining variables only; TODO
# confirm intent.
# NOTE(review): no set.seed(), so assignments differ between runs.
data(pluton)
pluton <- pluton[,-c(1,2)]
# iter.max = 1 stops the algorithm before convergence (kmeans warns),
# giving a deliberately rough baseline partition.
cl <- kmeans(pluton, 3, iter.max = 1)
plot(pluton,col = cl$cluster)
# Re-fit with generous iteration budgets and overlay those assignments
# as progressively larger symbols on the same scatter plot.
cl2 <- kmeans(pluton, 3, iter.max = 1000)
points(pluton, col = cl2$cluster, cex = 4)
cl3 <- kmeans(pluton, 3, iter.max = 2000)
points(pluton, col = cl3$cluster, cex = 7)
|
fe8314c52de1a3713c5f5c6451cdbe4e651100a1
|
361106bd7dfe02dd5b2633571370b3274e263ead
|
/data-raw/example_model.R
|
9b77d6381a9d81ae27b346966f607e4d73b45ef3
|
[
"CC-BY-4.0",
"MIT"
] |
permissive
|
dynverse/dyngen
|
10f7d469b10bfb5d431bbddd73469acd34e6af69
|
714475df2785413a8957a2a16eb6d668c9e5df01
|
refs/heads/master
| 2022-11-08T17:09:49.119807
| 2022-10-26T18:55:02
| 2022-10-26T18:55:02
| 84,569,175
| 66
| 11
|
NOASSERTION
| 2022-10-26T18:55:03
| 2017-03-10T14:40:49
|
R
|
UTF-8
|
R
| false
| false
| 998
|
r
|
example_model.R
|
library(tidyverse)
# Build the small `example_model` dataset shipped with dyngen.
# Seed fixed so the generated model is reproducible.
set.seed(1)
# Bifurcating trajectory backbone from dyngen.
backbone <- backbone_bifurcating()
# NOTE(review): total_time is computed but not used below — confirm it
# is needed (simtime may be derived internally by initialise_model).
total_time <- simtime_from_backbone(backbone)
out <-
  initialise_model(
    backbone = backbone,
    num_tfs = nrow(backbone$module_info),
    num_targets = 10,
    num_hks = 10,
    verbose = FALSE,
    num_cells = 100,
    gold_standard_params = gold_standard_default(tau = .1, census_interval = 100),
    simulation_params = simulation_default(
      ssa_algorithm = ssa_etl(tau = .1),
      census_interval = 500,
      experiment_params = simulation_type_wild_type(num_simulations = 100),
      compute_cellwise_grn = TRUE,
      compute_rna_velocity = TRUE
    )
  ) %>%
  generate_dataset(format = "none")
# Keep only the model object; normalise machine-specific fields so the
# stored data object is portable.
example_model <- out$model
example_model$num_cores <- 1L
example_model$download_cache_dir <- NULL
# Print per-element object sizes (smallest first) to sanity-check the
# footprint of the object about to be serialised.
map_df(
  names(example_model),
  function(nm) {
    tibble(name = nm, size = (pryr::object_size(example_model[[nm]])))
  }
) %>% arrange(size)
# Serialise into data/ with xz compression for the package.
usethis::use_data(example_model, compress = "xz", overwrite = TRUE)
|
0b521d2a9e0dad5be06d1fd9c769e2d306394733
|
06b9d2ece554bda6b4402785bc9c7b7a627a6c2f
|
/man/runSequence.Rd
|
b2b9c37eb5e1ef8d522b90a139c967aa9a0dbceb
|
[
"MIT"
] |
permissive
|
wStockhausen/rTCSAM2015
|
4f2dd392b32d9a3ea9cce4703e25abde6440e349
|
7cfbe7fd5573486c6d5721264c9d4d6696830a31
|
refs/heads/master
| 2020-12-26T04:56:03.783011
| 2016-09-30T01:59:06
| 2016-09-30T01:59:06
| 26,103,387
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,739
|
rd
|
runSequence.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/runSequence.R
\name{runSequence}
\alias{runSequence}
\title{Function to run a sequence of TCSAM2015 models.}
\usage{
runSequence(os = "osx", path = ".", model = "tcsam2015",
path2model = "", configFile = "", numRuns = 4, mcmc = FALSE,
mc.N = 1e+06, mc.save = 1000, mc.scale = 1000, plotResults = FALSE)
}
\arguments{
\item{os}{- 'win' or 'mac' or 'osx'}
\item{path}{- path for model output}
\item{model}{- TCSAM2015 model executable name}
\item{path2model}{- path to model executable}
\item{configFile}{- full (absolute) path to model configuration file}
\item{numRuns}{- number of runs in sequence to make}
\item{mcmc}{- flag (T/F) to run mcmc on "best" model}
\item{mc.N}{- number of mcmc iterations to make}
\item{mc.save}{- number of iterations to skip when saving mcmc calculations}
\item{mc.scale}{- number of iterations to adjust scale for mcmc calculations}
\item{plotResults}{- T/F to plot final results using \code{plotTCSAM2013I}}
}
\value{
- list indicating index of best run, the folder with the best run, and a list of results
from the parameter files for each model run.
}
\description{
This function runs a sequence of TCSAM2015 models.
}
\details{
This function creates a shell script ('./tmp.sh') in the
working directory and uses it to run a version of the TCSAM2015 model. Pin files
are copied from the previous run's par file. The file 'best.txt' identifies the run
with the best objective function value. The "best" sub-folder contains results from
re-running the best run, this time estimating the hessian and obtaining the std file
(if the hessian is invertible).\cr\cr
Uses function \code{wtsUtilities::formatZeros()}.
}
|
03e6f59675a71cf481abe732084f9148d0d723fa
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/netassoc/examples/plot_netassoc_matrix.Rd.R
|
336bc3e40df7a717574aff0dc4fffc8f00ffea1c
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 566
|
r
|
plot_netassoc_matrix.Rd.R
|
library(netassoc)
### Name: plot_netassoc_matrix
### Title: Plots matrix with colormap
### Aliases: plot_netassoc_matrix

### ** Examples

# Community dimensions for the simulated example.
n_sp <- 10
n_site <- 30

# Observation matrix: Poisson(10) counts for each species at each site.
obs <- matrix(rpois(n = n_sp * n_site, 10),
              nrow = n_sp, ncol = n_site,
              dimnames = list(paste("Species", 1:n_sp), paste("Site", 1:n_site)))
plot_netassoc_matrix(obs, onesided = TRUE, col = heat.colors(5))

# Interaction matrix: standard-normal species-by-species strengths,
# drawn with a diverging red-white-blue colormap.
int <- matrix(rnorm(n = n_sp^2),
              nrow = n_sp, ncol = n_sp,
              dimnames = list(paste("Species", 1:n_sp), paste("Species", 1:n_sp)))
plot_netassoc_matrix(int, onesided = FALSE,
                     col = colorRampPalette(c("red", "white", "blue"))(50))
|
efefaabdc004e22a6535af0ff747ca8d4aec9640
|
0bebbba10f446ec5be7a378937be76c5e7610ab1
|
/tables.R
|
4e6780a513e06e02142a15c56499ee6cbc58f519
|
[] |
no_license
|
songxh0424/bankruptcy
|
c3920ca513fce346bb7a3cca35aa3a35fc0293e8
|
004aad6aefb8a3c706db2f2cb180e6e013587f52
|
refs/heads/master
| 2021-08-24T02:29:49.794192
| 2017-12-07T17:17:04
| 2017-12-07T17:17:04
| 113,479,011
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,887
|
r
|
tables.R
|
perform <- function(model) {
  # Summarise the train/test confusion matrices carried by `model` into a
  # one-row data frame of accuracy, sensitivity and specificity.
  #
  # model: a list (or model wrapper) with
  #   $traintable - 2x2 training confusion matrix (positives in row/col 1)
  #   $table      - 2x2 test confusion matrix, same layout
  # Returns a 1-row data.frame with columns train.accu, train.sens,
  # train.spec, test.accu, test.sens, test.spec.
  #
  # Fix: direct element access replaces the original attach()/detach()
  # pair, which mutated the search path and would leave it dirty if any
  # expression errored in between.
  traintable <- model$traintable
  table <- model$table
  temp <- data.frame("train accu" = (traintable[1, 1] + traintable[2, 2]) / sum(traintable),
                     "train sens" = traintable[1, 1] / sum(traintable[1, ]),
                     "train spec" = traintable[2, 2] / sum(traintable[2, ]),
                     "test accu" = (table[1, 1] + table[2, 2]) / sum(table),
                     "test sens" = table[1, 1] / sum(table[1, ]),
                     "test spec" = table[2, 2] / sum(table[2, ]))
  return(temp)
}
cvvalues <- function(model) {
  # Return the last six columns of the cross-validation results row with
  # the highest ROC (i.e. the summary metrics of the best tuning run).
  results <- model$model$results
  best_row <- which.max(results$ROC)
  last_six <- (ncol(results) - 5):ncol(results)
  results[best_row, last_six]
}
# Build accuracy/sensitivity/specificity tables (via perform()) and CV ROC
# tables (via cvvalues()) for each preprocessing variant, then print them
# with knitr::kable. The model.* objects are fitted elsewhere and must be
# in the workspace.
# NOTE(review): row 7 is AdaBoost; the `1 - table[7, ]` flips appear to
# convert an error-rate-style output to match the other rows — confirm.
# original data
model.orig = list(model.lda, model.logis, model.knn, model.svm, model.nn, model.rf, model.ada, model.gbm)
table.orig = bind_rows(lapply(model.orig, perform))
table.orig = round(table.orig, digits = 4)
table.orig[7, ] = 1 - table.orig[7, ]
metnames = c("LDA", "Logistic", "kNN", "SVM", "NN", "RF", "AdaBoost", "GBM")
rownames(table.orig) = metnames
kable(table.orig)
# pca
model.pca = list(model.lda.pca, model.logis.pca, model.knn.pca, model.svm.pca, model.nn.pca, model.rf.pca, model.ada.pca, model.gbm.pca)
table.pca = bind_rows(lapply(model.pca, perform))
table.pca = round(table.pca, digits = 4)
table.pca[7, ] = 1 - table.pca[7, ]
rownames(table.pca) = metnames
kable(table.pca)
# smote
model.smote = list(model.na.lda.smote, model.na.logis.smote, model.na.knn.smote, model.na.svm.smote, model.na.nn.smote, model.na.rf.smote, model.na.ada.smote, model.na.gbm.smote)
table.smote = bind_rows(lapply(model.smote, perform))
table.smote = round(table.smote, digits = 4)
table.smote[7, ] = 1 - table.smote[7, ]
roc.smote = bind_rows(lapply(model.smote, cvvalues))
roc.smote = round(roc.smote, digits = 4)
roc.smote[7, 2:3] = 1 - roc.smote[7, 2:3]
rownames(table.smote) = metnames
rownames(roc.smote) = metnames
kable(table.smote)
kable(roc.smote)
# up
model.up = list(model.na.lda.up, model.na.logis.up, model.na.knn.up, model.na.svm.up, model.na.nn.up, model.na.rf.up, model.na.ada.up, model.na.gbm.up)
table.up = bind_rows(lapply(model.up, perform))
table.up = round(table.up, digits = 4)
table.up[7, ] = 1 - table.up[7, ]
roc.up = bind_rows(lapply(model.up, cvvalues))
roc.up = round(roc.up, digits = 4)
roc.up[7, 2:3] = 1 - roc.up[7, 2:3]
rownames(table.up) = metnames
rownames(roc.up) = metnames
kable(table.up)
kable(roc.up)
# down
model.down = list(model.na.lda.down, model.na.logis.down, model.na.knn.down, model.na.svm.down, model.na.nn.down, model.na.rf.down, model.na.ada.down, model.na.gbm.down)
table.down = bind_rows(lapply(model.down, perform))
table.down = round(table.down, digits = 4)
table.down[7, ] = 1 - table.down[7, ]
roc.down = bind_rows(lapply(model.down, cvvalues))
roc.down = round(roc.down, digits = 4)
roc.down[7, 2:3] = 1 - roc.down[7, 2:3]
rownames(table.down) = metnames
rownames(roc.down) = metnames
kable(table.down)
kable(roc.down)
|
29876ff5371796893d92278680ae3f192a0279b8
|
ea9c4fad1f9bcd50270f9cb4b20deb0b1afbe647
|
/man/Schumaker.Rd
|
8d83c24d07e13a23073e2df58d5829922900da35
|
[] |
no_license
|
s-baumann/schumaker
|
351522233711e7aaef898ba2f67e42bea6caf63a
|
1058e4313b6111851d36e956296bf58ed2db0cd8
|
refs/heads/master
| 2021-01-18T23:02:09.936450
| 2019-01-05T01:54:41
| 2019-01-05T01:54:41
| 40,301,831
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,846
|
rd
|
Schumaker.Rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/Schumaker.R
\name{Schumaker}
\alias{Schumaker}
\title{Create a Schumaker Spline}
\usage{
Schumaker(x, y, ff = "Not-Supplied", Vectorised = TRUE,
Extrapolation = c("Curve", "Constant", "Linear"))
}
\arguments{
\item{x}{A vector of x coordinates}
\item{y}{A corresponding vector of y coordinates}
\item{ff}{(Optional) A corresponding vector of gradients at the data points. If not supplied this is estimated.}
\item{Vectorised}{This is a boolean parameter. Set to TRUE if you want to be able to input vectors to the created spline. If you will only input single values set this to FALSE as it is a bit faster.}
\item{Extrapolation}{This determines how the spline function responds when an input is received outside the domain of x. The options are "Curve" which outputs the result of the point on the quadratic curve at the nearest interval, "Constant" which outputs the y value at the end of the x domain and "Linear" which extends the spline using the gradient at the edge of x.}
}
\value{
A list with 3 spline functions. The first spline is for the input points, the second spline is the first derivative of the first spline, the third spline is the second derivative. Each function takes an x value (or vector if Vectorised = TRUE) and outputs the interpolated y value (or relevant derivative).
}
\description{
Create a Schumaker Spline
}
\examples{
x = seq(1,6)
y = log(x)
SSS = Schumaker(x,y, Vectorised = TRUE)
Spline = SSS[[1]]
SplineD = SSS[[2]]
Spline2D = SSS[[3]]
xarray = seq(1,6,0.01)
Result = Spline(xarray)
Result2 = SplineD(xarray)
Result3 = Spline2D(xarray)
plot(xarray, Result, ylim=c(-0.5,2))
lines(xarray, Result2, col = 2)
lines(xarray, Result3, col = 3)
}
\references{
Judd (1998). Numerical Methods in Economics. MIT Press
}
|
1116f040fa25fe68e779e9046b8ef2565dd4dea7
|
441d6db1aa56908b413437afe99ba9a7e32cd30e
|
/02_code/utils.R
|
7a88c1906632843a1fe0d20ab7d1a539bcd87efd
|
[] |
no_license
|
jzhao0802/hcv
|
461e4eabd0d69edf9511b392faaa39e5ebca44c2
|
c053d6a083fc64f421f0d2e89a73e43f9498e21e
|
refs/heads/master
| 2021-09-05T12:18:06.876356
| 2018-01-09T10:54:39
| 2018-01-09T10:54:39
| 119,167,599
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,709
|
r
|
utils.R
|
summary_table <- function(data){
  # Build a 4-row cohort summary comparing HCV patients (label == 1)
  # with non-HCV patients (label == 0): counts, age, gender mix, and
  # share treated for HCV.
  #
  # data: data.frame with columns label (0/1), PATIENT_AGE (numeric),
  #       PAT_GENDER_CD ("M"/"F"), TREAT_FOR_HCV (0/1).
  # Returns a data.frame with columns Variable, HCV, non-HCV.
  #
  # Fixes vs. original:
  #  - the "Treated for HCV" row overwrote the "Gender" row (both wrote
  #    res[3, ]); it now occupies row 4.
  #  - `sum(...) / data$label == 1` divided by the label VECTOR and then
  #    compared to 1 (precedence bug); the denominator is now the group
  #    size sum(data$label == 1) / sum(data$label == 0).
  is_hcv <- data$label == 1
  is_non <- data$label == 0
  n_hcv <- sum(is_hcv)
  n_non <- sum(is_non)
  res <- data.frame(matrix(ncol = 3, nrow = 4))
  colnames(res) <- c("Variable", "HCV", "non-HCV")
  # counts
  res[1, 1] <- "Patient counts"
  res[1, 2] <- n_hcv
  res[1, 3] <- n_non
  # age, reported as mean +/- sd
  res[2, 1] <- "Age"
  res[2, 2] <- paste0(round(mean(data$PATIENT_AGE[is_hcv]), 1),
                      " +/- ", round(sd(data$PATIENT_AGE[is_hcv]), 1))
  res[2, 3] <- paste0(round(mean(data$PATIENT_AGE[is_non]), 1),
                      " +/- ", round(sd(data$PATIENT_AGE[is_non]), 1))
  # gender mix (percentages within each cohort)
  res[3, 1] <- "Gender"
  res[3, 2] <- paste0("M: ", round(100 * (sum(data$PAT_GENDER_CD[is_hcv] == "M") / n_hcv), 1),
                      "% , F: ", round(100 * (sum(data$PAT_GENDER_CD[is_hcv] == "F") / n_hcv), 1), "%")
  res[3, 3] <- paste0("M: ", round(100 * (sum(data$PAT_GENDER_CD[is_non] == "M") / n_non), 1),
                      "% , F: ", round(100 * (sum(data$PAT_GENDER_CD[is_non] == "F") / n_non), 1), "%")
  # share of each cohort treated for HCV
  res[4, 1] <- "Treated for HCV"
  res[4, 2] <- paste0(round(100 * sum(data$TREAT_FOR_HCV[is_hcv] == 1) / n_hcv, 1), "%")
  res[4, 3] <- paste0(round(100 * sum(data$TREAT_FOR_HCV[is_non] == 1) / n_non, 1), "%")
  return(res)
}
get_curve <- function(prob, truth, x_metric, y_metric){
  # Build an (x, y, thresh) performance curve via ROCR for the metric
  # pair requested, dropping any points containing NaN/Inf values.
  # (Originally based on Hui's code.)
  if (length(prob) != length(truth)) {
    stop("Length of prob and truth should be the same!")
  }
  pred_obj <- ROCR::prediction(prob, truth)
  curve <- ROCR::performance(pred_obj, y_metric, x_metric)
  xs <- curve@x.values[[1]]
  ys <- curve@y.values[[1]]
  cutoffs <- curve@alpha.values[[1]]
  # Keep only finite, well-defined points.
  keep <- (!is.nan(xs) & !is.nan(ys) & !is.nan(cutoffs) &
             !is.infinite(xs) & !is.infinite(ys) & !is.infinite(cutoffs))
  data.frame(x = xs[keep], y = ys[keep], thresh = cutoffs[keep])
}
bin_curve <- function(curve_df, bin_num, agg_func=mean){
  # Aggregate a performance curve (columns x, y, thresh) into `bin_num`
  # equal-width bins over x in (0, 1], applying `agg_func` (default mean,
  # with na.rm = TRUE) to every column within each bin.
  # NOTE(review): summarise_each()/funs() are long-deprecated dplyr APIs;
  # left unchanged here to preserve exact behavior — consider across().
  curve_df <- curve_df %>%
    dplyr::group_by(x_binned=cut(x, breaks = seq(0, 1, by=1/bin_num))) %>%
    dplyr::summarise_each(funs(agg_func(., na.rm = TRUE)))
  # move x_binned column to the end
  curve_df <- curve_df[, c(2,3,4,1)]
  # The assignment above is the function's last expression, so the
  # reordered data frame is returned (invisibly) to the caller.
}
perf_binned_perf_curve <- function(pred, bin_num = 20, x_metric = "rec",
                                   y_metric = "prec", agg_func = mean, subdiv=200){
  # Compute a binned performance curve (default precision vs. recall) and
  # its AUC from a prediction object, delegating to palabmod helpers.
  # Returns list(curve = <binned data.frame>, auc = <numeric>).
  # NOTE(review): `:::` reaches into palabmod's unexported namespace —
  # fragile across package versions; confirm these helpers still exist.
  # get probabilities and truth
  tp <- palabmod:::get_truth_pred(pred)
  # compute and bin curve
  curve_df <- palabmod:::get_curve(tp$prob, tp$truth, x_metric, y_metric)
  # get auc (computed on the un-binned curve with `subdiv` subdivisions)
  auc <- palabmod:::auc_curve(curve_df, subdiv)
  curve_df <- palabmod:::bin_curve(curve_df, bin_num, agg_func)
  # prepare df that we return
  curve_df <- as.data.frame(curve_df[,c("x_binned", "y", "thresh")])
  colnames(curve_df) <- c(paste(x_metric, "_binned", sep=""), y_metric, "thresh")
  return(list(curve=curve_df, auc=auc))
}
date_format <- function(input_data, date_pattern) {
  # Parse every column whose name contains `date_pattern` from
  # month/day/year strings into Date columns (requires lubridate's mdy
  # to be attached by the caller).
  # Fix: the original rebuilt the same data frame three times
  # (lapply -> as.data.frame -> data.frame); a single pass suffices and
  # returns the identical result.
  date_data <- dplyr::select(input_data, dplyr::contains(date_pattern))
  as.data.frame(lapply(date_data, mdy))
}
date_format_dmy <- function(input_data, date_pattern) {
  # Same as date_format(), but parses day/month/year strings (lubridate
  # dmy). Kept as a separate function to preserve the existing interface.
  # Fix: collapse the redundant as.data.frame/data.frame double
  # conversion of the original; the returned data frame is identical.
  date_data <- dplyr::select(input_data, dplyr::contains(date_pattern))
  as.data.frame(lapply(date_data, dmy))
}
create_date_diffs <- function(input, index_col = "index_date") {
  # For every date column in `input` other than `index_col`, compute the
  # difference (index date minus that column) in days.
  #
  # input:     data.frame of Date columns including `index_col`
  # index_col: name of the reference date column (default "index_date")
  # Returns a data.frame (one row per input row) of numeric day counts,
  # one column per non-index date column.
  stopifnot(index_col %in% colnames(input))
  # drop = FALSE keeps a data.frame even when only one date column
  # remains; the original `[ , -which(...)]` collapsed it to a vector,
  # which made sapply iterate over individual dates instead of columns.
  date_cols <- input[, -which(colnames(input) == index_col), drop = FALSE]
  # lapply yields one result per column regardless of row count (the
  # original sapply simplification mangled single-row inputs);
  # as.numeric keeps the original numeric (days) output of sapply.
  date_diffs <- as.data.frame(lapply(date_cols, function(x) {
    as.numeric(input[[index_col]] - x)
  }))
  return(date_diffs)
}
|
d38cfe8a94b36f8b91d522cfcf95b678b6f89999
|
c0c5b3c15b163596eb3aa3f6d1f7332c3b868e6a
|
/Scripts/corp_default_forecast_insample.R
|
41a2da761b1c9f1aca5607a1d8a2e65ba1099652
|
[
"MIT"
] |
permissive
|
lnsongxf/BottomUpMacroIndicators
|
0077d3386c37fa1c17415633bf2b237b837167a0
|
f7ef3f4f0aae14a4050121a4a54bd673ee054204
|
refs/heads/main
| 2023-01-23T04:38:15.230487
| 2020-12-04T18:20:29
| 2020-12-04T18:20:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 17,774
|
r
|
corp_default_forecast_insample.R
|
# File: corp_default_forecast_insample.R
# Authors: Clay Wagar, Tyler Pike
# Date:
# Note(s): Perform in-sample forecasting exercises
# Users will need access to FRB serves to access data, but may substitute public sources as desired
# clear enviroment
rm(list=ls())
# import libraries
library(lubridate)
library(dplyr)
library(tis)
library(stfm.helper, lib.loc="/stfm/shared1/R")
library(data.table)
#--------------------------------------------------------------------
# Pull other data for comparisons
#--------------------------------------------------------------------
# NFCH
nfch = read.csv('./Data/Ouput/indexes_aggregate.csv',stringsAsFactors = F) %>%
mutate(date = lubridate::ymd(date),
date = ceiling_date(date, unit = 'months')) %>%
select(date, meanDef = NFCH)
day(nfch$date) = day(nfch$date) - 1
# Chicago NFCI
quantmod::getSymbols('NFCI', src='FRED')
chicago = data.table(date = index(NFCI), coredata(NFCI))
chi = chicago[,list(nfci = mean(NFCI)),by = list(quarter(date),year(date))]
chi$quarter = paste0(chi$year,".",chi$quarter)
chi$year <- NULL
# Treasury yields
treas3month = getfame_dt("sveny0025","yields")
treas3month$year = year(treas3month$date)
treas3month$quarter = quarter(treas3month$date)
treas3month = na.omit(treas3month)
t3 = treas3month[,.SD[which.max(date),],by=list(quarter,year)]
t3$date <- NULL #since it will be different than other data sets
t3$quarter = paste0(t3$year,".",t3$quarter)
t3$year <- NULL
treas10year = getfame_dt("sveny1000","yields")
treas10year$year = year(treas10year$date)
treas10year$quarter = quarter(treas10year$date)
treas10year = na.omit(treas10year)
t10 = treas10year[,.SD[which.max(date),],by=list(quarter,year)]
t10$date <- NULL
t10$quarter = paste0(t10$year,".",t10$quarter)
t10$year <- NULL
treasury = merge(t3,t10,by="quarter")
# Federal funds rate
ff = getfame_dt("rifspff_n.b","us")
ff = ff[,list(ff = mean(rifspff_n.b,na.rm=TRUE)),by=list(quarter(date),year(date))]
ff$quarter = paste0(ff$year,".",ff$quarter)
ff$year <- NULL
# Core PCE inflation
pce = getfame_dt("ph.ce@maafe.q","us")
names(pce)[2] = "pce"
pce$quarter = paste0(year(pce$date),".",quarter(pce$date))
# EBP and GZ spread
gz = read.csv("./Input/ebp_public.csv")
gz = data.table(gz)
gz$date = as.character(gz$date)
gz$date = paste0(gz$date,"01")
gz$date = as.Date(gz$date,"%b%Y%d")
gz$quarter = quarter(gz$date)
gz$year = year(gz$date)
GZ = gz[,list(ebp = mean(ebp)),by = list(quarter,year)]
GZ$quarter = paste0(GZ$year,".",GZ$quarter)
GZ$year <- NULL
# Nonfarm payroll employment
payroll = getfame_dt("ee.q","us")
names(payroll)[2] = "payroll"
# Industrial production: pull the monthly IP index from FAME and
# aggregate to quarterly means, keyed by a "year.quarter" string.
# Fix: the aggregation statement was duplicated verbatim in the original;
# one pass is sufficient (the second run was a redundant recomputation).
ip = getfame_dt("jqi_i12ymf.m","us")
ip$year = year(ip$date)
ip$quarter = quarter(ip$date)
IP = ip[,list(ip = mean(jqi_i12ymf.m)),by = list(quarter,year)]
IP$quarter = paste0(IP$year,".",IP$quarter)
IP$year = NULL
# Real GDP
gdp = getfame_dt("gdp_xcw_09.q","us")
names(gdp)[2] = "gdp"
# Unemployment
unemp = getfame_dt("ruc.q","us")
names(unemp)[2] = "unemp"
# merge data
data = purrr::reduce(list(treasury,chi,ff,pce, GZ,IP), inner_join, by = 'quarter')
data = purrr::reduce(list(data, gdp, unemp, payroll, nfch), inner_join, by = 'date')
# filter by date
data = filter(data, date < as.Date('2020-01-01'))
# cast data as data table
data = data.table(data)
#--------------------------------------------------------------------
# Transform data and create controls
#--------------------------------------------------------------------
# term spread, defined as the 10-year minus the 3-month Treasury yield
data[,TS := sveny1000 - sveny0025]
# real federal funds rate is defined as the average effective federal funds rate less realized inflation
# is given by the log difference between core PCE in period t-1 and its value lagged a year earlier
data[,pce.lag := shift(pce,1,type="lag")]
data[,pce.lag.lag4 := shift(pce.lag,4,type="lag")]
data[,realizedInflation := log(pce.lag / pce.lag.lag4)*100]
data[,RFF := ff - realizedInflation]
# divide by standard deviation so we may interpret unit increases as a standard deviation increase
# (TRUE spelled out: the original used the reassignable alias `T`)
data[, meanDef := meanDef/sd(meanDef, na.rm = TRUE)]
data[, ebp := ebp/sd(ebp, na.rm = TRUE)]
data[, nfci := nfci/sd(nfci, na.rm = TRUE)]
# create leads and lags, using Egon and Simon's timing and annualization
# NOTE: all dependent variables for regresion will be denoted gP where g means gradient
# Payroll
data[,payroll.lag := shift(payroll,1,type="lag")]
data[,payroll.lag2 := shift(payroll,2,type="lag")]
data[,payroll.lag3 := shift(payroll,3,type="lag")]
data[,payroll.lag4 := shift(payroll,4,type="lag")]
data[,payroll.lag5 := shift(payroll,5,type="lag")]
data[,payroll.l1 := shift(payroll,1,type="lead")]
data[,payroll.l2 := shift(payroll,2,type="lead")]
data[,payroll.l3 := shift(payroll,3,type="lead")]
data[,payroll.l4 := shift(payroll,4,type="lead")]
data[,gPayroll := 400/(1+0) * log(payroll / payroll.lag)] # h = 0
data[,gPayroll.l1 := 400/(1+1) * log(payroll.l1 / payroll.lag)] # h = 1
data[,gPayroll.l2 := 400/(1+2) * log(payroll.l2 / payroll.lag)] # h = 2
data[,gPayroll.l3 := 400/(1+3) * log(payroll.l3 / payroll.lag)] # h = 3
data[,gPayroll.l4 := 400/(1+4) * log(payroll.l4 / payroll.lag)] # h = 4
data[,gPayroll.lag1 := 400/(1+0) * log(payroll.lag / payroll.lag2)]
data[,gPayroll.lag2 := 400/(1+0) * log(payroll.lag2 / payroll.lag3)]
data[,gPayroll.lag3 := 400/(1+0) * log(payroll.lag3 / payroll.lag4)]
data[,gPayroll.lag4 := 400/(1+0) * log(payroll.lag4 / payroll.lag5)]
# Industrial production
data[,ip.lag := shift(ip,1,type="lag")]
data[,ip.lag2 := shift(ip,2,type="lag")]
data[,ip.lag3 := shift(ip,3,type="lag")]
data[,ip.lag4 := shift(ip,4,type="lag")]
data[,ip.lag5 := shift(ip,5,type="lag")]
data[,ip.l1 := shift(ip,1,type="lead")]
data[,ip.l2 := shift(ip,2,type="lead")]
data[,ip.l3 := shift(ip,3,type="lead")]
data[,ip.l4 := shift(ip,4,type="lead")]
data[,gIp := 400/(1+0) * log(ip / ip.lag)] # h = 0
data[,gIp.l1 := 400/(1+1) * log(ip.l1 / ip.lag)] # h = 1
data[,gIp.l2 := 400/(1+2) * log(ip.l2 / ip.lag)] # h = 2
data[,gIp.l3 := 400/(1+3) * log(ip.l3 / ip.lag)] # h = 3
data[,gIp.l4 := 400/(1+4) * log(ip.l4 / ip.lag)] # h = 4
data[,gIp.lag1 := 400/(1+0) * log(ip.lag / ip.lag2)]
data[,gIp.lag2 := 400/(1+0) * log(ip.lag2 / ip.lag3)]
data[,gIp.lag3 := 400/(1+0) * log(ip.lag3 / ip.lag4)]
data[,gIp.lag4 := 400/(1+0) * log(ip.lag4 / ip.lag5)]
# Real gdp
data[,gdp.lag := shift(gdp,1,type="lag")]
data[,gdp.lag2 := shift(gdp,2,type="lag")]
data[,gdp.lag3 := shift(gdp,3,type="lag")]
data[,gdp.lag4 := shift(gdp,4,type="lag")]
data[,gdp.lag5 := shift(gdp,5,type="lag")]
data[,gdp.l1 := shift(gdp,1,type="lead")]
data[,gdp.l2 := shift(gdp,2,type="lead")]
data[,gdp.l3 := shift(gdp,3,type="lead")]
data[,gdp.l4 := shift(gdp,4,type="lead")]
data[,gGdp := 400/(1+0) * log(gdp / gdp.lag)] # h = 0
data[,gGdp.l1 := 400/(1+1) * log(gdp.l1 / gdp.lag)] # h = 1
data[,gGdp.l2 := 400/(1+2) * log(gdp.l2 / gdp.lag)] # h = 2
data[,gGdp.l3 := 400/(1+3) * log(gdp.l3 / gdp.lag)] # h = 3
data[,gGdp.l4 := 400/(1+4) * log(gdp.l4 / gdp.lag)] # h = 4
data[,gGdp.lag1 := 400/(1+0) * log(gdp.lag / gdp.lag2)]
data[,gGdp.lag2 := 400/(1+0) * log(gdp.lag2 / gdp.lag3)]
data[,gGdp.lag3 := 400/(1+0) * log(gdp.lag3 / gdp.lag4)]
data[,gGdp.lag4 := 400/(1+0) * log(gdp.lag4 / gdp.lag5)]
# Unemployment
data[,unemp.lag := shift(unemp,1,type="lag")]
data[,unemp.lag2 := shift(unemp,2,type="lag")]
data[,unemp.lag3 := shift(unemp,3,type="lag")]
data[,unemp.lag4 := shift(unemp,4,type="lag")]
data[,unemp.lag5 := shift(unemp,5,type="lag")]
data[,unemp.l1 := shift(unemp,1,type="lead")]
data[,unemp.l2 := shift(unemp,2,type="lead")]
data[,unemp.l3 := shift(unemp,3,type="lead")]
data[,unemp.l4 := shift(unemp,4,type="lead")]
data[,gUnemp := 400/(1+0) * log(unemp / unemp.lag)] # h = 0
data[,gUnemp.l1 := 400/(1+1) * log(unemp.l1 / unemp.lag)] # h = 1
data[,gUnemp.l2 := 400/(1+2) * log(unemp.l2 / unemp.lag)] # h = 2
data[,gUnemp.l3 := 400/(1+3) * log(unemp.l3 / unemp.lag)] # h = 3
data[,gUnemp.l4 := 400/(1+4) * log(unemp.l4 / unemp.lag)] # h = 4
data[,gUnemp.lag1 := 400/(1+0) * log(unemp.lag / unemp.lag2)]
data[,gUnemp.lag2 := 400/(1+0) * log(unemp.lag2 / unemp.lag3)]
data[,gUnemp.lag3 := 400/(1+0) * log(unemp.lag3 / unemp.lag4)]
data[,gUnemp.lag4 := 400/(1+0) * log(unemp.lag4 / unemp.lag5)]
# ensure the same sample across tests
data = na.omit(data)
#--------------------------------------------------------------------
# Estimate regressions (in-sample forecasts)
#--------------------------------------------------------------------
# Payroll
fPay.4 <- lm(gPayroll.l4 ~ TS + RFF + gPayroll.lag1 + gPayroll.lag2 + gPayroll.lag3 + gPayroll.lag4 + nfci + ebp + meanDef ,data = data)
fPay.4.nfci <- lm(gPayroll.l4 ~ TS + RFF + gPayroll.lag1 + gPayroll.lag2 + gPayroll.lag3 + gPayroll.lag4 + nfci ,data = data)
fPay.4.nfch <- lm(gPayroll.l4 ~ TS + RFF + gPayroll.lag1 + gPayroll.lag2 + gPayroll.lag3 + gPayroll.lag4 + meanDef ,data = data)
fPay.4.ebp <- lm(gPayroll.l4 ~ TS + RFF + gPayroll.lag1 + gPayroll.lag2 + gPayroll.lag3 + gPayroll.lag4 + ebp ,data = data)
fPay.4.ebp.nfch <- lm(gPayroll.l4 ~ TS + RFF + gPayroll.lag1 + gPayroll.lag2 + gPayroll.lag3 + gPayroll.lag4 + ebp + meanDef,data = data)
fPay.4.nfci.nfch <- lm(gPayroll.l4 ~ TS + RFF + gPayroll.lag1 + gPayroll.lag2 + gPayroll.lag3 + gPayroll.lag4 + nfci + meanDef,data = data)
fPay.4.vcov <- lmtest::coeftest(fPay.4,vcov=sandwich::NeweyWest(fPay.4,prewhite=FALSE))
fPay.4.nfci.vcov <- lmtest::coeftest(fPay.4.nfci,vcov = sandwich::NeweyWest(fPay.4.nfci,prewhite = FALSE))
fPay.4.nfch.vcov <- lmtest::coeftest(fPay.4.nfch,vcov = sandwich::NeweyWest(fPay.4.nfch,prewhite = FALSE))
fPay.4.ebp.vcov <- lmtest::coeftest(fPay.4.ebp,vcov = sandwich::NeweyWest(fPay.4.ebp,prewhite = FALSE))
fPay.4.ebp.nfch.vcov <- lmtest::coeftest(fPay.4.ebp.nfch,vcov = sandwich::NeweyWest(fPay.4.ebp.nfch,prewhite = FALSE))
fPay.4.nfci.nfch.vocv <- lmtest::coeftest(fPay.4.nfci.nfch,vcov = sandwich::NeweyWest(fPay.4.nfci.nfch,prewhite = FALSE))
# Industrial Production
fIp.4 <- lm(gIp.l4 ~ TS + RFF + gIp.lag1 + gIp.lag2 + gIp.lag3 + gIp.lag4 + nfci + ebp + meanDef ,data = data)
fIp.4.nfci <- lm(gIp.l4 ~ TS + RFF + gIp.lag1 + gIp.lag2 + gIp.lag3 + gIp.lag4 + nfci ,data = data)
fIp.4.nfch <- lm(gIp.l4 ~ TS + RFF + gIp.lag1 + gIp.lag2 + gIp.lag3 + gIp.lag4 + meanDef ,data = data)
fIp.4.ebp <- lm(gIp.l4 ~ TS + RFF + gIp.lag1 + gIp.lag2 + gIp.lag3 + gIp.lag4 + ebp ,data = data)
fIp.4.ebp.nfch <- lm(gIp.l4 ~ TS + RFF + gIp.lag1 + gIp.lag2 + gIp.lag3 + gIp.lag4 + ebp + meanDef,data = data)
fIp.4.nfci.nfch <- lm(gIp.l4 ~ TS + RFF + gIp.lag1 + gIp.lag2 + gIp.lag3 + gIp.lag4 + nfci + meanDef,data = data)
fIp.4.vcov <- lmtest::coeftest(fIp.4,vcov=sandwich::NeweyWest(fIp.4,prewhite=FALSE))
fIp.4.nfci.vcov <- lmtest::coeftest(fIp.4.nfci,vcov = sandwich::NeweyWest(fIp.4.nfci,prewhite = FALSE))
fIp.4.nfch.vcov <- lmtest::coeftest(fIp.4.nfch,vcov = sandwich::NeweyWest(fIp.4.nfch,prewhite = FALSE))
fIp.4.ebp.vcov <- lmtest::coeftest(fIp.4.ebp,vcov = sandwich::NeweyWest(fIp.4.ebp,prewhite = FALSE))
fIp.4.ebp.nfch.vcov <- lmtest::coeftest(fIp.4.ebp.nfch,vcov = sandwich::NeweyWest(fIp.4.ebp.nfch,prewhite = FALSE))
fIp.4.nfci.nfch.vocv <- lmtest::coeftest(fIp.4.nfci.nfch,vcov = sandwich::NeweyWest(fIp.4.nfci.nfch,prewhite = FALSE))
# Real GDP
fGdp.4 <- lm(gGdp.l4 ~ TS + RFF + gGdp.lag1 + gGdp.lag2 + gGdp.lag3 + gGdp.lag4 + nfci + ebp + meanDef ,data = data)
fGdp.4.nfci <- lm(gGdp.l4 ~ TS + RFF + gGdp.lag1 + gGdp.lag2 + gGdp.lag3 + gGdp.lag4 + nfci ,data = data)
fGdp.4.nfch <- lm(gGdp.l4 ~ TS + RFF + gGdp.lag1 + gGdp.lag2 + gGdp.lag3 + gGdp.lag4 + meanDef ,data = data)
fGdp.4.ebp <- lm(gGdp.l4 ~ TS + RFF + gGdp.lag1 + gGdp.lag2 + gGdp.lag3 + gGdp.lag4 + ebp ,data = data)
fGdp.4.ebp.nfch <- lm(gGdp.l4 ~ TS + RFF + gGdp.lag1 + gGdp.lag2 + gGdp.lag3 + gGdp.lag4 + ebp + meanDef,data = data)
fGdp.4.nfci.nfch <- lm(gGdp.l4 ~ TS + RFF + gGdp.lag1 + gGdp.lag2 + gGdp.lag3 + gGdp.lag4 + nfci + meanDef,data = data)
fGdp.4.vcov <- lmtest::coeftest(fGdp.4,vcov=sandwich::NeweyWest(fGdp.4,prewhite=FALSE))
fGdp.4.nfci.vcov <- lmtest::coeftest(fGdp.4.nfci,vcov = sandwich::NeweyWest(fGdp.4.nfci,prewhite = FALSE))
fGdp.4.nfch.vcov <- lmtest::coeftest(fGdp.4.nfch,vcov = sandwich::NeweyWest(fGdp.4.nfch,prewhite = FALSE))
fGdp.4.ebp.vcov <- lmtest::coeftest(fGdp.4.ebp,vcov = sandwich::NeweyWest(fGdp.4.ebp,prewhite = FALSE))
fGdp.4.ebp.nfch.vcov <- lmtest::coeftest(fGdp.4.ebp.nfch,vcov = sandwich::NeweyWest(fGdp.4.ebp.nfch,prewhite = FALSE))
fGdp.4.nfci.nfch.vocv <- lmtest::coeftest(fGdp.4.nfci.nfch,vcov = sandwich::NeweyWest(fGdp.4.nfci.nfch,prewhite = FALSE))
# Unemployment
fUnemp.4 <- lm(gUnemp.l4 ~ TS + RFF + gUnemp.lag1 + gUnemp.lag2 + gUnemp.lag3 + gUnemp.lag4 + nfci + ebp + meanDef ,data = data)
fUnemp.4.nfci <- lm(gUnemp.l4 ~ TS + RFF + gUnemp.lag1 + gUnemp.lag2 + gUnemp.lag3 + gUnemp.lag4 + nfci ,data = data)
fUnemp.4.nfch <- lm(gUnemp.l4 ~ TS + RFF + gUnemp.lag1 + gUnemp.lag2 + gUnemp.lag3 + gUnemp.lag4 + meanDef ,data = data)
fUnemp.4.ebp <- lm(gUnemp.l4 ~ TS + RFF + gUnemp.lag1 + gUnemp.lag2 + gUnemp.lag3 + gUnemp.lag4 + ebp ,data = data)
fUnemp.4.ebp.nfch <- lm(gUnemp.l4 ~ TS + RFF + gUnemp.lag1 + gUnemp.lag2 + gUnemp.lag3 + gUnemp.lag4 + ebp + meanDef,data = data)
fUnemp.4.nfci.nfch <- lm(gUnemp.l4 ~ TS + RFF + gUnemp.lag1 + gUnemp.lag2 + gUnemp.lag3 + gUnemp.lag4 + nfci + meanDef,data = data)
fUnemp.4.vcov <- lmtest::coeftest(fUnemp.4,vcov=sandwich::NeweyWest(fUnemp.4,prewhite=FALSE))
fUnemp.4.nfci.vcov <- lmtest::coeftest(fUnemp.4.nfci,vcov = sandwich::NeweyWest(fUnemp.4.nfci,prewhite = FALSE))
fUnemp.4.nfch.vcov <- lmtest::coeftest(fUnemp.4.nfch,vcov = sandwich::NeweyWest(fUnemp.4.nfch,prewhite = FALSE))
fUnemp.4.ebp.vcov <- lmtest::coeftest(fUnemp.4.ebp,vcov = sandwich::NeweyWest(fUnemp.4.ebp,prewhite = FALSE))
fUnemp.4.ebp.nfch.vcov <- lmtest::coeftest(fUnemp.4.ebp.nfch,vcov = sandwich::NeweyWest(fUnemp.4.ebp.nfch,prewhite = FALSE))
fUnemp.4.nfci.nfch.vocv <- lmtest::coeftest(fUnemp.4.nfci.nfch,vcov = sandwich::NeweyWest(fUnemp.4.nfci.nfch,prewhite = FALSE))
#--------------------------------------------------------------------
# Create latex output
#--------------------------------------------------------------------
# Payroll Employment
stargazer::stargazer(
fPay.4.ebp,
fPay.4.nfci,
fPay.4.nfch,
fPay.4.ebp.nfch,
fPay.4.nfci.nfch,
fPay.4,
title = "Forecast Results: Payroll Employment",
align=TRUE,
#add.lines = list(c("Fixed effects?", "No", "No")),
se = list(fPay.4.ebp.vcov[,2],fPay.4.nfci.vcov[,2],fPay.4.nfch.vcov[,2],fPay.4.ebp.nfch.vcov[,2],fPay.4.nfci.nfch.vocv[,2],fPay.4.vcov[,2]),
omit = c("lag","Constant","TS","RFF"),
omit.stat = c("rsq","f","ser","n"),
model.numbers = FALSE,
dep.var.caption = "Payroll Employment",
dep.var.labels = "",
covariate.labels = c("EBP","NFCI",'NFCD'),
notes.align = "l",
omit.table.layout = "n",
star.cutoffs = c(0.1,0.05,0.01),
p = list(fPay.4.ebp.vcov[,4],fPay.4.nfci.vcov[,4],fPay.4.nfch.vcov[,4],fPay.4.ebp.nfch.vcov[,4],fPay.4.nfci.nfch.vocv[,4],fPay.4.vcov[,4]),
style = "aer")
# Industrial Production
stargazer::stargazer(
fIp.4.ebp,
fIp.4.nfci,
fIp.4.nfch,
fIp.4.ebp.nfch,
fIp.4.nfci.nfch,
fIp.4,
title = "Forecast Results: Industrial Production",
align=TRUE,
#add.lines = list(c("Fixed effects?", "No", "No")),
se = list(fIp.4.ebp.vcov[,2],fIp.4.nfci.vcov[,2],fIp.4.nfch.vcov[,2],fIp.4.ebp.nfch.vcov[,2],fIp.4.nfci.nfch.vocv[,2],fIp.4.vcov[,2]),
omit = c("lag","Constant","TS","RFF"),
omit.stat = c("rsq","f","ser","n"),
model.numbers = FALSE,
dep.var.caption = "Payroll Employment",
dep.var.labels = "",
covariate.labels = c("EBP","NFCI",'NFCD'),
notes.align = "l",
omit.table.layout = "n",
star.cutoffs = c(0.1,0.05,0.01),
p = list(fIp.4.ebp.vcov[,4],fIp.4.nfci.vcov[,4],fIp.4.nfch.vcov[,4],fIp.4.ebp.nfch.vcov[,4],fIp.4.nfci.nfch.vocv[,4],fIp.4.vcov[,4]),
style = "aer")
# Real GDP
stargazer::stargazer(
fGdp.4.ebp,
fGdp.4.nfci,
fGdp.4.nfch,
fGdp.4.ebp.nfch,
fGdp.4.nfci.nfch,
fGdp.4,
title = "Forecast Results: Industrial Production",
align=TRUE,
#add.lines = list(c("Fixed effects?", "No", "No")),
se = list(fGdp.4.ebp.vcov[,2],fGdp.4.nfci.vcov[,2],fGdp.4.nfch.vcov[,2],fGdp.4.ebp.nfch.vcov[,2],fGdp.4.nfci.nfch.vocv[,2],fGdp.4.vcov[,2]),
omit = c("lag","Constant","TS","RFF"),
omit.stat = c("rsq","f","ser","n"),
model.numbers = FALSE,
dep.var.caption = "Payroll Employment",
dep.var.labels = "",
covariate.labels = c("EBP","NFCI",'NFCD'),
notes.align = "l",
omit.table.layout = "n",
star.cutoffs = c(0.1,0.05,0.01),
p = list(fGdp.4.ebp.vcov[,4],fGdp.4.nfci.vcov[,4],fGdp.4.nfch.vcov[,4],fGdp.4.ebp.nfch.vcov[,4],fGdp.4.nfci.nfch.vocv[,4],fGdp.4.vcov[,4]),
style = "aer")
# Unemployment
# Same six-model layout as the other outcome variables; robust SEs (vcov
# col 2) and p-values (vcov col 4) supplied explicitly.
stargazer::stargazer(
  fUnemp.4.ebp,
  fUnemp.4.nfci,
  fUnemp.4.nfch,
  fUnemp.4.ebp.nfch,
  fUnemp.4.nfci.nfch,
  fUnemp.4,
  # BUG FIX: title previously said "Industrial Production" (copy-paste error);
  # this table reports unemployment forecasts.
  title = "Forecast Results: Unemployment",
  align = TRUE,
  # `fUnemp.4.nfci.nfch.vocv` (sic) is the object's actual name in this file.
  se = list(fUnemp.4.ebp.vcov[, 2], fUnemp.4.nfci.vcov[, 2], fUnemp.4.nfch.vcov[, 2],
            fUnemp.4.ebp.nfch.vcov[, 2], fUnemp.4.nfci.nfch.vocv[, 2], fUnemp.4.vcov[, 2]),
  omit = c("lag", "Constant", "TS", "RFF"),
  omit.stat = c("rsq", "f", "ser", "n"),
  model.numbers = FALSE,
  # BUG FIX: caption previously said "Payroll Employment" (copy-paste error).
  dep.var.caption = "Unemployment",
  dep.var.labels = "",
  covariate.labels = c("EBP", "NFCI", "NFCD"),
  notes.align = "l",
  omit.table.layout = "n",
  star.cutoffs = c(0.1, 0.05, 0.01),
  p = list(fUnemp.4.ebp.vcov[, 4], fUnemp.4.nfci.vcov[, 4], fUnemp.4.nfch.vcov[, 4],
           fUnemp.4.ebp.nfch.vcov[, 4], fUnemp.4.nfci.nfch.vocv[, 4], fUnemp.4.vcov[, 4]),
  style = "aer")
|
355ce22654b5654809979e10c4db01769e6e87f7
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/PortRisk/examples/portvol.Bayes.Rd.R
|
bbef8b3e4150470e5b97fa63c5105963034caa5a
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,255
|
r
|
portvol.Bayes.Rd.R
|
library(PortRisk)
### Name: portvol.Bayes, mctr.Bayes, cctr.Bayes
### Title: Portfolio Volatility and Contribution to Total Volatility Risk
###   (MCTR & CCTR): Bayesian Approach
### Aliases: portvol.Bayes mctr.Bayes cctr.Bayes
### ** Examples

data(SnP500Returns)
# consider the portfolio containing the first 4 stocks
pf <- colnames(SnP500Returns)[1:4]
st <- "2013-01-01" # start date
en <- "2013-01-31" # end date
# suppose the amount of investments in the above stocks are
# $1,000, $2,000, $3,000 & $1,000 respectively
wt <- c(1000, 2000, 3000, 1000) # weights
# portfolio volatility for the portfolio 'pf' with equal (default) weights
pv1 <- portvol(pf, start = st, end = en,
               data = SnP500Returns)
# portfolio volatility for the portfolio 'pf' with weights as 'wt'
pv2 <- portvol(pf, weights = wt, start = st, end = en,
               data = SnP500Returns)
# similarly,
# mctr for the portfolio 'pf' with weights as 'wt'
mc <- mctr(pf, weights = wt, start = st, end = en,
           data = SnP500Returns)
# cctr for the portfolio 'pf' with weights as 'wt'
cc <- cctr(pf, weights = wt, start = st, end = en,
           data = SnP500Returns)
# BUG FIX: the cctr values sum to the portfolio volatility only up to
# floating-point rounding, so an exact `==` comparison can spuriously return
# FALSE; compare with a numeric tolerance instead.
isTRUE(all.equal(sum(cc), pv2))
# note that, sum of the cctr values is the portfolio volatility
|
0f33319d2e3b4a80507d7ed00648f4d347461a17
|
07b9c2784bf81e3706e257d7988eeb4f369d3cb3
|
/lessons/06_week_examDetailsAndReview/sandbox/practical1_inclass.R
|
58a3452a6724a5b1fde39ffc0df26aa7d3e88c0e
|
[] |
no_license
|
Allegheny-Computer-Science-301-F2020/classDocs
|
fa4f838ae510b6bee3a705f94b3fb601bac04478
|
27dff60aee59967217326d9803142e28aad5823d
|
refs/heads/main
| 2023-01-23T00:36:10.054814
| 2020-12-01T04:48:05
| 2020-12-01T04:48:05
| 292,433,841
| 0
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,259
|
r
|
practical1_inclass.R
|
# practical_01
# name: # TODO
# In-class practical: basic indexing, dplyr verbs, and ggplot2/plotly plotting
# exercises on the nycflights13 `flights` data set. Intended to be run
# interactively, top to bottom; most lines print or plot rather than assign.
rm(list = ls()) # remove all variables in memory (classroom convenience; avoid in reusable scripts)
#install.packages("tidyverse")
library(tidyverse)
#install.packages("nycflights13")
library(nycflights13)
#install.packages("plotly")
library(plotly)
###################################
# View the dataset, flights
View(flights)
# 1. Show code to view the column header names
names(flights)
# 2. Show code to obtain the first row of the flights data set
flights[1,]
# 3. Show code to obtain the first column ("year") of the flights data set
# (note: the line below actually selects columns 1 through 4, not only "year")
flights[,c(1:4)]
c(1:4) # gives 1, 2, 3, 4
# 4. Show code to obtain a new variable called, "myData" from the first, second and third rows of the columns, "dep_time", "sched_dep_time" and "dep_delay". Hint: Use numbered sequences. The columns are numbered; 4 through 6 and the rows are numberd 1 through 3. Your code will create a 3 x 3 matrix.
# TODO
# Two equivalent ways of writing rows 1-3 / columns 4-6 (results are printed,
# not assigned to `myData`):
flights[c(1:3),c(4:6)]
flights[c(1,2,3),c(4,5,6)]
# Indexing with pre-built index vectors:
myRows <- c(3,4,5)
myCols <- c(7,8,9)
flights[myRows, myCols]
# 5. Show code to use filter() to select the rows of data for which the departure delay is greater than 800.
filter(flights, dep_delay > 800)
# 6. Show code to use select() to obtain cols of departure and arrival data (i.e., sched_dep_time, dep_delay, sched_arr_time and arr_time). Hint: to combine these columns, you will beed to create a vector using the vector making function, c().
# TODO
select(flights, c(sched_dep_time, dep_delay, sched_arr_time, arr_time))
names(flights)
###################################
# Plotting
# 7. Show code to use ggplot() and geom_line() to prepare plots of x = Sched_dep_time vs. y = Sched_arr_time, in red. Devise an explanation for his plot's pattern
?ggplot()
ggplot(flights) + geom_line(mapping = aes(x = sched_dep_time, y = sched_arr_time), color = "red") + ylab("sched_dep_time vs sched_arr_time")
# 8. Show code to use ggplot() and geom_line() to prepare plots of of x = dep_time vs. y = arr_time, in blue. Devise an explanation for his plot's pattern
ggplot(flights) + geom_line(mapping = aes(x = dep_time, y = arr_time), color = "blue") + ylab("dep_time vs arr_time")
# 9. Show code to combine both of these ggplot() and geom_line() plots (the red and blue, from above). Explain what you see.
# Layering two geom_line() calls overlays both series in one plot.
ggplot(flights) +
  geom_line(mapping = aes(x = sched_dep_time, y = sched_arr_time), color = "red") +
  geom_line(mapping = aes(x = dep_time, y = arr_time), color = "blue") + ylab("RED: sched_dep_time vs sched_arr_time AND BLUE: dep_time vs arr_time")
# 10. Show code to use ggplot() and geom_point() to prepare plots of of x = dep_time vs. y = arr_time, in red Devise an explanation for his plot's pattern
ggplot(flights) +
  geom_point(mapping = aes(x = dep_time, y = arr_time), color = "red") +
  ylab("dep_tim vs arr_time")
# 11. Show code to use ggplot() and geom_point() to prepare plots of of x = sched_dep_time vs. y = sched_arr_time, in blue. Devise an explanation for his plot's pattern
ggplot(flights) +
  geom_point(mapping = aes(x = sched_dep_time, y = sched_arr_time), color = "blue") +
  ylab("sched_dep_tim vs sched_arr_time")
# 12. Show code to combine both of these ggplot() and geom_point() plots (the red and blue, from above). Explain what you see.
ggplot(flights) +
  geom_point(mapping = aes(x = dep_time, y = arr_time), color = "red") +
  geom_point(mapping = aes(x = sched_dep_time, y = sched_arr_time), color = "blue") +
  ylab("red: dep_tim vs arr_time, blue: sched_dep_tim vs sched_arr_time")
# 13. Create a dataset for which the sched_dep_time is less than 1000 and then use the plotly library to plot x = dep_tim vs. y = sched_dep_time
dat <- filter(flights, flights$sched_dep_time<1000)
# scatter: Sched_dep_tim vs Sched_dep_time using plotly
p <- ggplot(dat, aes(x = dep_time, y = sched_dep_time)) +
  geom_point(color = "blue" ) +
  ylab("dep_time vs sched_dep_time")
# explain this pattern
# ggplotly() converts a static ggplot object into an interactive plotly widget.
p <- ggplotly(p)
p
###################################
###################################
# plotly example code
###################################
# ref: https://plot.ly/ggplot2/stat_smooth/
#install.packages("plotly")
p <- ggplot(mpg, aes(displ, hwy, color = cty))
p <- p + geom_point() + stat_smooth()
p <- ggplotly(p)
p
|
ad3215a5f332e652438a2c4014fcfe95092a194e
|
82cd98324bf3a372d0efaf11d1595a52974dae39
|
/The reproducibility and comabatibility of RFs.R
|
f711eb71577fef5b1bceb98c6fb1c4f1a647982e
|
[] |
no_license
|
AbdallaIbrahim/The-reproducibility-and-ComBatability-of-Radiomic-features
|
0748d2d29922147bf4e878cd97667a817a46c5a4
|
6453dda2e67347a1efb46d482c52672a9476a3cd
|
refs/heads/main
| 2023-07-19T07:46:38.448772
| 2021-09-11T13:28:39
| 2021-09-11T13:28:39
| 334,902,956
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,571
|
r
|
The reproducibility and comabatibility of RFs.R
|
# Pairwise reproducibility (CCC) and ComBat-harmonizability of radiomic
# features across acquisition batches.
#
# For every ordered pair of batches (A[i] < B[j]) this script:
#   1. computes the overall concordance correlation coefficient (CCC) of each
#      feature between the two batches and counts features with CCC > 0.90
#      ("reproducible"), and
#   2. ComBat-harmonizes the pair and re-counts features with CCC > 0.90
#      after harmonization ("ComBatable").
# Results are written to two pairwise matrices (CSV).
#
# Assumptions (TODO confirm against the input file): columns 1-5 are metadata
# (including `Batch`), features start at column 6, and each batch contains
# exactly 160 scans (the hard-coded nrow = 160 below).
library(caret)
library(epiR)
library(sva)
# LOAD THE DATA FILE
Data <- read.csv("Pyradiomics_CCR1_OriginalFts.csv", header = TRUE, sep = ",",
                 quote = "\"", dec = ".", fill = TRUE, comment.char = "",
                 stringsAsFactors = FALSE)
# DEFINE THE BATCHES BASED ON WHICH SCANS WILL BE ASSESSED AND HARMONIZED
A <- unique(Data$Batch)
B <- unique(Data$Batch)
# CREATE THE MATRICES TO RECORD THE NUMBER OF FEATURES IN EACH PAIRWISE SCENARIO
Mat_Rep <- as.data.frame(matrix(0, ncol = length(A), nrow = length(B)),
                         row.names = as.character(B))
colnames(Mat_Rep) <- c(A)
Mat_ComBat <- as.data.frame(matrix(0, ncol = length(A), nrow = length(B)),
                            row.names = as.character(B))
colnames(Mat_ComBat) <- c(A)
# RUN THE CCC AND COMBAT ON EACH PAIR (upper triangle only: A[i] < B[j])
for (i in seq_along(A)) {
  for (j in seq_along(B)) {
    if (A[i] < B[j]) {
      print(paste(paste("Iteration no ", i, sep = ""), paste("Row no ", j, sep = "")))
      Batches <- c(A[i], B[j])
      Features_All <- colnames(Data[, 6:(dim(Data)[2])])
      # BUG FIX: this table was originally created as `OCC_All`, but every
      # subsequent reference uses `CCC_All`, so the script errored on first use.
      CCC_All <- as.data.frame(matrix(0, ncol = 2, nrow = length(Features_All)))
      CCC_All$V1 <- as.character(Features_All)
      # Per-feature CCC between the two batches.
      for (l in seq_along(Features_All)) {
        Data2 <- as.data.frame(matrix(0, ncol = (length(Batches) + 1), nrow = 160))
        Data2$V1 <- as.character(Features_All[l])
        for (m in 2:dim(Data2)[2]) {
          k <- m - 1
          Data2[, m] <- Data[Data$Batch == Batches[k], Features_All[l]]
        }
        CCC <- epi.occc(Data2[, 2:(length(Batches) + 1)], pairs = TRUE)
        CCC_All[l, 2] <- CCC$pairs
      }
      # Count features that are reproducible without harmonization.
      # (NOTE(review): because is.na(NaN) is TRUE, the 'NaN' branch below is
      # unreachable; kept to preserve the original counting behavior.)
      Rep_All_CCC <- c()
      for (n in seq_along(Features_All)) {
        if (!(is.na(CCC_All[n, 2]))) {
          if (CCC_All[n, 2] == 'NaN') {
            Rep_All_CCC <- append(Rep_All_CCC, CCC_All[n, 1])
          } else if (CCC_All[n, 2] > 0.90) {
            Rep_All_CCC <- append(Rep_All_CCC, CCC_All[n, 1])
          }
        }
      }
      Mat_Rep[j, i] <- length(Rep_All_CCC)
      # START THE COMBAT PROCESS
      Harmonize <- Data[Data$Batch %in% Batches, ]
      # REMOVE VARIABLES WITH (NEAR-)ZERO VARIANCE, sparing metadata cols 1-5
      badCols <- nearZeroVar(Harmonize)
      to_remove <- c()
      if (length(badCols) > 0) {
        for (z in seq_along(badCols)) {
          if (badCols[z] > 5) {
            to_remove <- append(to_remove, badCols[z])
          }
        }
      }
      # BUG FIX: original tested `length(to_remove > 0)` (the length of a
      # logical vector) instead of `length(to_remove) > 0`.
      if (length(to_remove) > 0) {
        Harmonize <- Harmonize[, -to_remove]
      }
      # REMOVE FEATURES WITH NA VALUES
      Harmonize <- Harmonize[, colMeans(is.na(Harmonize)) == 0]
      # ComBat can fail (e.g. degenerate variance); on error, `Harmonized` is
      # simply never created and the pair is recorded as 'Error' below.
      tryCatch({
        Harmonized <- as.data.frame(t(ComBat(as.matrix(t(Harmonize[, 6:(dim(Harmonize)[2])])),
                                             batch = Harmonize$Batch, mod = NULL,
                                             par.prior = TRUE)))
      }, error = function(e) {})
      if (!(exists("Harmonized"))) {
        Mat_ComBat[j, i] <- as.character('Error')
      } else if ((dim(Harmonized[, colMeans(is.na(Harmonized)) == 0])[2]) > 0) {
        Features_All <- colnames(Harmonized)
        # NOTE(review): cbind of columns 1:6 re-attaches the raw first feature
        # column (col 6) alongside its harmonized copy; looks like it should be
        # 1:5. Left as-is to preserve the original output -- TODO confirm.
        Harmonized <- cbind(Harmonize[, 1:6], Harmonized)
        CCC_All_ComBatted <- as.data.frame(matrix(0, ncol = 2, nrow = length(Features_All)))
        CCC_All_ComBatted$V1 <- as.character(Features_All)
        # Per-feature CCC after harmonization.
        for (h in seq_along(Features_All)) {
          Data2 <- as.data.frame(matrix(0, ncol = (length(Batches) + 1), nrow = 160))
          Data2$V1 <- as.character(Features_All[h])
          for (o in 2:dim(Data2)[2]) {
            v <- o - 1
            Data2[, o] <- Harmonized[Harmonized$Batch == Batches[v], Features_All[h]]
          }
          CCC <- epi.occc(Data2[, 2:(length(Batches) + 1)], pairs = TRUE)
          CCC_All_ComBatted[h, 2] <- CCC$pairs
        }
        ComBatable_All_CCC <- c()
        for (q in seq_along(Features_All)) {
          # ROBUSTNESS FIX: mirror the NA guard used in the pre-harmonization
          # loop; an NA CCC previously crashed this `if` with "missing value
          # where TRUE/FALSE needed".
          if (!(is.na(CCC_All_ComBatted[q, 2]))) {
            if (CCC_All_ComBatted[q, 2] == 'NaN') {
              ComBatable_All_CCC <- append(ComBatable_All_CCC, CCC_All_ComBatted[q, 1])
            } else if (CCC_All_ComBatted[q, 2] > 0.9) {
              ComBatable_All_CCC <- append(ComBatable_All_CCC, CCC_All_ComBatted[q, 1])
            }
          }
        }
        Mat_ComBat[j, i] <- paste(as.character(length(ComBatable_All_CCC)),
                                  as.character(length(Features_All)), sep = "/")
      }
      # BUG FIX: `rm(Harmonized)` originally ran only on the success path, so
      # a stale `Harmonized` from a previous pair could survive into the next
      # iteration and defeat the exists() failure check above. Remove it
      # unconditionally at the end of every iteration.
      if (exists("Harmonized")) {
        rm(Harmonized)
      }
    }
  }
}
# SAVE THE MATRICES TO CSV FILES
write.csv(Mat_Rep, "reproducible_CCR1_pairs_0.9.csv")
write.csv(Mat_ComBat, "ComBatable_CCR1_pairs_0.9.csv")
|
582e5fe010e412c44b39da0fb548cb1935ed0f50
|
5079a09c258edac7c1e73e3383f17073dbe7c877
|
/scripts/process.ecosis_californiatraits.R
|
c266f17d38538301d8bd1a777c78f348ff1103ac
|
[] |
no_license
|
ashiklom/rspecan
|
acc6f62091da259864248f552bb6d70c7ec3b4e7
|
2376e273103d88a7a63b83cfefe6952e4c7783c3
|
refs/heads/master
| 2021-09-20T08:28:42.694220
| 2017-11-19T22:04:36
| 2017-11-19T22:04:36
| 111,217,867
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,122
|
r
|
process.ecosis_californiatraits.R
|
# Process the ECOSIS "California traits" download (fresh-leaf spectra plus
# leaf traits) into the project's standard datalist RDS format.
library(rspecan)
sethere()
data_name <- "ecosis_californiatraits"
data_longname <- "Fresh Leaf Spectra to Estimate Leaf Traits for California Ecosystems"
ecosis_file <- "raw_data/fresh-leaf-spectra-to-estimate-leaf-traits-for-california-ecosystems.csv"
dat_full <- read_csv(ecosis_file)
# Wavelength columns are named by bare integers (e.g. "400"); this regex is
# used both to drop them from the metadata table and to extract the spectra.
wave_rxp <- "^[[:digit:]]+$"
dat_sub <- dat_full %>%
  select(-matches(wave_rxp))
# Normalize the metadata/trait columns to the project's standard names.
# Backtick-quoted names on the right-hand side are the raw ECOSIS headers.
dat <- dat_sub %>%
  transmute(
    # metadata
    data_name = !!data_name,
    sample_id = `sample name`,
    spectra_id = spectra,
    spectra_type = recode(measurement, `REFL` = "reflectance"),
    replicate = `Replicate`,
    # 41365 days after 1900-01-01 -- presumably an Excel-style serial date for
    # the (single) collection date; TODO confirm the origin convention.
    collection_date = 41365 + lubridate::as_date("1900-01-01"),
    latitude = Latitude,
    longitude = Longitude,
    instrument = `Instrument Model`,
    genus = `Latin Genus`,
    species = `Latin Species`,
    USDA_code = `species`,
    # traits (units recorded alongside each value column)
    cellulose = Cellulose,
    cellulose_unit = "%",
    LMA = `Leaf mass per area`,
    LMA_unit = "g m-2",
    Nmass = `Leaf nitrogen content per leaf dry mass`,
    Nmass_unit = "%",
    LWC_pct = `Leaf relative water content`,
    lignin = `Lignin`,
    lignin_unit = "%",
    target_type = `Target Type`,
    leaf_age = `age`
  )
# Extract the wavelength x observation spectra matrix from the raw table.
spectra <- dat2specmat(dat_full, "spectra", wave_rxp)
str(spectra)
wl <- getwl(spectra)
# Interactive-only sanity plot (never runs when sourced).
if (FALSE) {
  matplot(wl, spectra, type = "l")
}
# Keep only the PROSPECT-model wavelength range (400-2500 nm); `wl_bad` is a
# placeholder mask for known-bad bands (none flagged for this data set).
wl_prospect <- wl >= 400 & wl <= 2500
wl_bad <- FALSE
wl_keep <- wl_prospect & !wl_bad
data_wl_inds <- which(wl_keep)
wl_kept <- wl[wl_keep]
# `prospect_wl` comes from the rspecan package namespace.
prospect_wl_inds <- which(prospect_wl %in% wl_kept)
sp_good <- spectra[data_wl_inds, ]
if (FALSE) {
  matplot(wl_kept, sp_good, type = "l")
}
store_path <- file.path(processed_dir, paste0(data_name, ".rds"))
# Assemble the standard datalist structure expected by downstream scripts.
datalist <- list(
  data_name = data_name,
  data_longname = data_longname,
  data_filename = ecosis_file,
  self_filename = store_path,
  metadata = dat,
  spectra = spectra,
  data_wl_inds = data_wl_inds,
  prospect_wl_inds = prospect_wl_inds
)
check_datalist(datalist)
# Submission manifest: reflectance spectra only.
submit_df <- dat %>%
  filter(spectra_type == "reflectance") %>%
  select(data_name, spectra_id)
saveRDS(datalist, store_path)
write_submit_file(submit_df, data_name)
|
1310d07b1e4923d5e2f9c3645feaf4d73b96eedb
|
9fc8774d1c5a7d7f823de9f520822cfb9acd2e62
|
/Parallel_Computing/R/Large_Data_Processing_R/parallel_computation/R/plot_scaling_data.R
|
306f48731c3748b21edb49bedff7b94c863a12b5
|
[] |
no_license
|
fasrc/User_Codes
|
8827335131925da4c47b1f00943895a6d20c84b5
|
3f0f2cae6a194f719a764293204e6cd3ccaf2f58
|
refs/heads/master
| 2023-08-31T07:10:34.249138
| 2023-08-15T17:40:13
| 2023-08-15T17:40:13
| 44,996,115
| 65
| 20
| null | 2022-06-01T14:41:09
| 2015-10-26T20:43:56
|
Jupyter Notebook
|
UTF-8
|
R
| false
| false
| 324
|
r
|
plot_scaling_data.R
|
# Plot wall-clock scaling of the MPI benchmark on log-log axes and fit a
# power law: the slope of log2(wc) ~ log2(ncore) estimates the scaling
# exponent (ideal strong scaling would give a slope of -1).
# BUG FIX: ggplot2 was used below but never attached, so the script failed
# when run standalone (e.g. via Rscript).
library(ggplot2)
data <- read.csv("mpi_data.csv")
ggplot(data, aes(log2(ncore), log2(wc))) +
  geom_point() +
  geom_smooth(method = "lm") +
  coord_fixed(ratio = 1, xlim = NULL, ylim = NULL,
              expand = TRUE, clip = "on")
# Print the fitted power-law coefficients.
lm(data = data, formula = log2(wc) ~ log2(ncore))
|
38421afc1d951fb018ad59c687e1e79fa09b5d4e
|
6bd2964d55729982d61c10bde2dbfa2054325226
|
/man/gammaqInv.Rd
|
3c88546b8c7d3b2a964aba5fc8139b69ef666322
|
[] |
no_license
|
cran/RxODE
|
b18e443eba330ec58501739afb9925ed37a1d636
|
e3df2ae1504238cfd6cde29522e5a47352df1264
|
refs/heads/master
| 2022-05-18T00:18:01.276190
| 2022-03-23T00:10:12
| 2022-03-23T00:10:12
| 48,087,995
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,016
|
rd
|
gammaqInv.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{gammaqInv}
\alias{gammaqInv}
\alias{gammaqInva}
\title{gammaqInv and gammaqInva: Inverses of normalized gammaq function}
\usage{
gammaqInv(a, q)
gammaqInva(x, q)
}
\arguments{
\item{a}{The numeric 'a' parameter in the upper
incomplete gamma}
\item{q}{The numeric 'q' parameter in the upper
incomplete gamma}
\item{x}{The numeric 'x' parameter in the upper incomplete gamma}
}
\value{
inverse gammaq results
}
\description{
gammaqInv and gammaqInva: Inverses of normalized gammaq function
}
\details{
With the equation:
q = gammaq(a, x)
The 'gammaqInv' function returns a value 'x' that satisfies the
equation above
The 'gammaqInva' function returns a value 'a' that satisfies the
equation above
NOTE: gammaqInva is slow
}
\examples{
gammaqInv(1:3, 0.5)
gammaqInv(1, 1:3 / 3)
gammaqInv(1:3, 1:3 / 3.1)
gammaqInva(1:3, 1:3 / 3.1)
}
\author{
Matthew L. Fidler
}
|
1e3298b7130ae18fcff2e7f0f6f8dde39258e3cb
|
1a92a72c70efb815245ed345aca0b109b40d5741
|
/Scripts/GetKMStuff.R
|
2a4e2993539bb9e3960107e241232b52d74e57af
|
[] |
no_license
|
shenmskcc/LungIMPACT
|
0569a564fbf176c66a77d11f6a2b716e512d405f
|
b5ee5ff20d96168a37bf57a84cc0470b69b5d697
|
refs/heads/master
| 2021-03-30T16:31:45.420593
| 2018-05-16T16:10:37
| 2018-05-16T16:10:37
| 103,300,529
| 3
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 677
|
r
|
GetKMStuff.R
|
# Wrapper around MakeKM(): coerce the numeric arguments, run the KM pipeline,
# and return only the outputs currently consumed downstream (the risk-group
# pie chart and the gene list used). The other MakeKM outputs (KM plot,
# mutation plot, survival summary, co-mutation plot) were extracted here
# historically but are intentionally not returned; see the commented lines.
#
# Args:
#   data         - input data passed through to MakeKM (structure defined there)
#   average.risk - per-sample risk scores passed through to MakeKM
#   topHits      - top-ranked features/genes passed through to MakeKM
#   numGroups    - number of risk groups (coerced to numeric)
#   cuts         - risk-score cut point(s) for grouping (coerced to numeric)
#   geneList     - genes to consider, passed through to MakeKM
# Returns: list with elements `PieChart` and `GenesUsed`.
KMStuff <- function(data, average.risk, topHits, numGroups, cuts, geneList) {
  # Was `LT = T`: use `<-` and the non-reassignable TRUE literal.
  # Presumably a left-truncation flag consumed by MakeKM -- TODO confirm.
  LT <- TRUE
  cuts <- as.numeric(cuts)
  numGroups <- as.numeric(numGroups)
  OUT <- MakeKM(data, average.risk, topHits, LT, numGroups, cuts, geneList)
  # Previously extracted but unused:
  #   OUT$KM_Plot, OUT$mut_Plot, OUT$SurvSum, OUT$MajorCasesKM, OUT$coMutation
  list(
    PieChart  = OUT$PieChart,
    GenesUsed = OUT$GenesUsed
  )
}
#test2 <- KMStuff(test$data.out,test$average.risk,test$topHits,2,0.5)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.