blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
90dfb52d559076b6d148e4da721161d8c2188f1f
|
4cb5426e8432d4af8f6997c420520ffb29cefd3e
|
/P6.R
|
047efecbcdd36924cd64e63fa1eee9bf0829aea5
|
[
"CC0-1.0"
] |
permissive
|
boyland-pf/MorpheusData
|
8e00e43573fc6a05ef37f4bfe82eee03bef8bc6f
|
10dfe4cd91ace1b26e93235bf9644b931233c497
|
refs/heads/master
| 2021-10-23T03:47:35.315995
| 2019-03-14T21:30:03
| 2019-03-14T21:30:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,274
|
r
|
P6.R
|
# making table data sets
# Builds the benchmark-6 input/output CSVs under data-raw/ and the matching
# .rdata files under data/ for the MorpheusData package.
library(dplyr)
library(tidyr)
library(MorpheusData)
#############benchmark 1
# NOTE(review): the header row names 5 columns but every data row has 6
# fields, so read.table() treats the first field (the gene symbol) as row
# names and shifts the numeric columns left under the header names --
# confirm this layout is intended.
dat <- read.table(text=
"GeneID D.1 T.1 D.8 T.8
A2M 8876.5 510.5 4318.3 8957.7 4092.4
ABL1 2120.8 480.3 1694.6 2471 1784.1
ACP1 1266.6 213.8 1337.9 831.5 814.1
", header=T)
write.csv(dat, "data-raw/p6_input1.csv", row.names=FALSE)
# Reshape columns 2:4 to long form, split names like "D.1" into type ("D")
# and patient number ("1"), then sum values per GeneID/type.
# (gather/separate are superseded by pivot_longer/separate_wider_delim.)
df_out = dat %>%
gather(pt.num.type, value, 2:4) %>%
separate(pt.num.type, c("type", "pt.num")) %>%
group_by(GeneID, type) %>%
summarise(sum = sum(value))
write.csv(df_out, "data-raw/p6_output1.csv", row.names=FALSE)
# Re-read each CSV and normalise column types (factor -> character,
# integer -> numeric) before saving the package data objects.
p6_output1 <- read.csv("data-raw/p6_output1.csv", check.names = FALSE)
fctr.cols <- sapply(p6_output1, is.factor)
int.cols <- sapply(p6_output1, is.integer)
p6_output1[, fctr.cols] <- sapply(p6_output1[, fctr.cols], as.character)
p6_output1[, int.cols] <- sapply(p6_output1[, int.cols], as.numeric)
save(p6_output1, file = "data/p6_output1.rdata")
p6_input1 <- read.csv("data-raw/p6_input1.csv", check.names = FALSE)
fctr.cols <- sapply(p6_input1, is.factor)
int.cols <- sapply(p6_input1, is.integer)
p6_input1[, fctr.cols] <- sapply(p6_input1[, fctr.cols], as.character)
p6_input1[, int.cols] <- sapply(p6_input1[, int.cols], as.numeric)
save(p6_input1, file = "data/p6_input1.rdata")
|
28deabd1d0cb7b2fbf60669961eda591d9f8080e
|
04f6c2eb3c2dca28f79094b05297aa1a182d4695
|
/01-data_cleaning-post-strat1.R
|
a2d473bc02c4d79ccd2f751c87ae2f0f30c4ede6
|
[
"MIT"
] |
permissive
|
jingwennnn/Prediction-of-2020-United-States-Presidential-Election-Result
|
a7deb28efc7a5e0e486bf28df62394281f8250d8
|
e67164a478b9332b5cb232cc46e4919520bffe3f
|
refs/heads/main
| 2023-01-05T02:41:46.372103
| 2020-11-03T04:08:02
| 2020-11-03T04:08:02
| 309,485,797
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,186
|
r
|
01-data_cleaning-post-strat1.R
|
#### Preamble ####
# Purpose: Prepare and clean the survey data downloaded from https://usa.ipums.org/usa/index.shtml
# Author: Yuchen Cong, Jingwen Deng, Ruoxi Guan, Yuwei Sun
# Date: 2 November 2020
# Contact: jingwen.deng@mail.utoronto.ca
# License: MIT
#### Workspace setup ####
library(haven)
library(tidyverse)
# Read in the raw data.
raw_census <- read_dta("/Users/macbookair/Desktop/STA304/PS3/usa_00001.dta")
colnames(raw_census)
# Add the labels
raw_census <- labelled::to_factor(raw_census)
# Just keep some variables that may be of interest (change
# this depending on your interests)
reduced_census <-
  raw_census %>%
  select(perwt,
         citizen,
         age,
         sex,
         race,
         educd,
         statefip,
         labforce)  # FIX: removed trailing comma, which errors in older dplyr
# Count respondents per age level.
# FIX: originally piped from `reduced_data`, which does not exist at this
# point in the script (object-not-found error); the counts must be built
# from `reduced_census`.
reduced_data <-
  reduced_census %>%
  count(age) %>%
  group_by(age)
# Drop the two non-numeric age labels so age can be parsed as an integer.
reduced_data <-
  reduced_data %>%
  filter(age != "less than 1 year old") %>%
  filter(age != "90 (90+ in 1980 and 1990)")
# FIX: age is a factor here (labelled::to_factor above), and as.integer()
# on a factor returns level codes rather than the ages themselves; convert
# through character first.
reduced_data$age <- as.integer(as.character(reduced_data$age))
# Saving the census data as a csv file in my
# working directory
# NOTE(review): `reduced_data` (the age counts) is never written out -- only
# the variable-reduced census; confirm this is the intended output.
write_csv(reduced_census, "/Users/macbookair/Desktop/STA304/PS3/census_data.csv")
|
feac75c5a62db13de8a9580992dd4a1fcb5a27e3
|
0b8c23f9e629e3063eeaccf12d74f0c1423ebb1a
|
/man/GradientDescent.Rd
|
98ceff4afd6b4a7663fcfe83b1235914f4bd5d3c
|
[] |
no_license
|
sh0829kk/R-package-imbedding-Newton-Nestrov-Gradient
|
53830dde2f036ba512f6bb940dc5a5062f53e2a5
|
15b690df77832c7d1d9ccd4aa8d19e3cd8f2f73a
|
refs/heads/master
| 2023-08-10T16:48:05.995506
| 2021-01-25T18:14:37
| 2021-01-25T18:14:37
| 331,316,612
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,156
|
rd
|
GradientDescent.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/GradientDescent.R
\name{GradientDescent}
\alias{GradientDescent}
\title{GradientDescent}
\usage{
GradientDescent(
beta,
X,
y,
maxLength = 10000,
learningRate = 0.001,
sigma = 0.001,
regulation = 0,
lamda = 0.5
)
}
\arguments{
\item{beta}{beta is a matrix}
\item{X}{X is a dataframe, it is the attribute of the data set}
\item{y}{y is a dataframe, it is the label of the data set}
\item{maxLength}{Maximum number of iterations of gradient descent (default 10000).}
\item{learningRate}{Step size used for each gradient descent update (default 0.001).}
\item{sigma}{sigma is the threshold set to judge the stop condition.}
\item{regulation}{regulation is whether to add regularization to the model.}
\item{lamda}{lamda is the coefficient of the regulation term.}
}
\value{
a number
}
\description{
It is used to perform the GradientDescent model.
}
\details{
This function performs gradient descent optimisation.
You can choose whether to add regularization to the model by changing the
default \code{regulation} value from 0 to 2, which selects the gradient
descent model with an L2-norm regularization term.
}
|
3d37c7ef5b5c1b5c0833f4869ee721b529545218
|
fc54f6fcc80d7f63b3cde85346408814e19a1f3a
|
/R/solver.mst.R
|
315148d95416b46ea4e5a7661121678e3df33777
|
[] |
no_license
|
MartinWolke/salesperson
|
888422ffb435171b96cda19da003e41861dfd46b
|
7ec9e0075dc3cb74e5ed3fb5344c00c8fa1abe41
|
refs/heads/master
| 2021-01-17T05:43:28.267481
| 2015-11-26T15:30:26
| 2015-11-26T15:30:26
| 38,509,913
| 0
| 0
| null | 2015-07-03T20:43:51
| 2015-07-03T20:43:50
|
R
|
UTF-8
|
R
| false
| false
| 468
|
r
|
solver.mst.R
|
#' @export
makeTSPSolver.mst = function() {
  # Registration record for the MST construction heuristic; assembled as a
  # named list and handed to the internal solver factory unchanged.
  solver.spec = list(
    cl = "mst",
    short.name = "MST",
    name = "MST (minimum spanning tree) heuristic",
    properties = c("euclidean", "external", "requires.tsplib", "deterministic"),
    par.set = makeParamSet()
  )
  do.call(makeTSPSolverInternal, solver.spec)
}
#' @export
# @interface see runTSPSolver
# Run the MST heuristic on a TSP instance by delegating to the shared
# external-solver interface.
run.mst = function(solver, instance, solver.pars, ...) {
  # NOTE(review): the solver id "2APP" looks copied from another solver's
  # run method -- confirm it is the correct binary switch for the MST
  # heuristic and not the 2-approximation variant only.
  callAustralianSolverInterface(instance, solver.pars, bin = solver$bin, solver = "2APP")
}
|
6b70c63dd8836ad69bfd5b9a7f7200b004f2444d
|
a2513ff9dc0be7fb01034bd50207199fd5d25d8c
|
/inputs/caribou_ranges.R
|
429cecd0ee736bd3717c367b25021e6155153ec6
|
[] |
no_license
|
fRI-Research/LandWeb
|
fd4003a50e77670a35fbdac874f1e7601463e11c
|
4640c2dcd92324c9d5dde4ea414eca26427aa9c4
|
refs/heads/master
| 2020-09-11T05:37:34.543009
| 2019-11-15T15:32:49
| 2019-11-15T15:32:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,310
|
r
|
caribou_ranges.R
|
### LandWeb was previously only using Boreal Caribou Ranges for results
### This adds Mountain Caribou Ranges for AB and BC, as requested by several partners
library(magrittr)
library(map)
library(stringr)
## Existing LandWeb layer: Boreal Caribou Ranges (also supplies the target CRS).
ml <- mapAdd(layerName = "Boreal Caribou Ranges",
             useSAcrs = TRUE, poly = TRUE, overwrite = TRUE,
             url = "https://drive.google.com/file/d/1PYLou8J1wcrme7Z2tx1wtA4GvaWnU1Jy/view?usp=sharing",
             columnNameForLabels = "Name", isStudyArea = FALSE, filename2 = NULL)
prj <- proj4string(ml[["Boreal Caribou Ranges"]])
## Info: https://geodiscover.alberta.ca/geoportal/catalog/search/resource/details.page?uuid=%7BA4588C9B-3310-46D6-A37C-F2C6AB1B86A2%7D
## Data: https://extranet.gov.ab.ca/srd/geodiscover/srd_pub/LAT/FWDSensitivity/CaribouRange.zip
## See Also: https://open.alberta.ca/dataset/932d6c22-a32a-4b4e-a3f5-cb2703c53280/resource/8335d979-394e-4959-ac5c-014dc2106df9/download/albertacaribouranges-map-nov2017.pdf
ab.caribou <- shapefile("~/GitHub/LandWeb/inputs/CaribouRange/Caribou_Range.shp") %>%
  spTransform(., prj)
## TODO: need to extract the non-boreal ranges and add them to the caribou range map
## Strip the " (ABC)" suffix and " River" so AB subunit names can be matched.
boreal.sub.ab <- ml[["Boreal Caribou Ranges"]][["Name"]] %>%
  str_remove(., ".\\([A-Z]{3}\\)") %>%
  str_remove(., ".River")
## Rows of ab.caribou to drop: already-present boreal ranges, inactive
## ranges, East Side Athabasca, and the Bischto subunit.
omit.ab <- pmatch(boreal.sub.ab, ab.caribou[["SUBUNIT"]]) %>% na.omit() %>%
  c(., which(ab.caribou[["STATUS"]] != "Active")) %>%
  c(., which(ab.caribou[["LOCALRANGE"]] == "East Side Athabasca")) %>%
  c(., which(ab.caribou[["SUBUNIT"]] == "Bischto"))
ab.caribou[-omit.ab,]$SUBUNIT
## Info: https://catalogue.data.gov.bc.ca/dataset/caribou-herd-locations-for-bc
bc.caribou <- shapefile("~/GitHub/LandWeb/inputs/BCGW_7113060B_1561059191195_8572/GCPB_CARIBOU_POPULATION_SP/GCBP_CARIB_polygon.shp") %>%
  spTransform(., prj)
## TODO: need to extract the non-boreal ranges and add them to the caribou range map
boreal.sub.bc <- ml[["Boreal Caribou Ranges"]][["Name"]] %>% str_remove(., ".\\([A-Z]{3}\\)")
omit.bc <- pmatch(boreal.sub.bc, bc.caribou[["HERD_NAME"]]) %>% na.omit() %>%
  c(., which(bc.caribou[["ECOTYPE"]] == "Boreal")) %>%
  c(., which(bc.caribou[["HERD_STAT"]] != "Herd"))
## FIX: was `bc.caribou[-omit,]` -- `omit` is never defined in this script;
## the BC herds to drop are indexed by `omit.bc` (as used in the plots below).
bc.caribou[-omit.bc,][["HERD_NAME"]]
## Plotting
plot(ml[["Boreal Caribou Ranges"]], col = "pink")
plot(ab.caribou[-omit.ab,], add = TRUE, col = "blue")
plot(bc.caribou[-omit.bc,], add = TRUE, col = "orange")
### merge these 3 layers, keeping herd names and matching shinyLabel
# drop all columns except 'SUBUNIT' in ab.caribou; rename to 'Name'
ab.caribou.new <- ab.caribou[-omit.ab, which(names(ab.caribou) == "SUBUNIT")]
names(ab.caribou.new) <- "Name"
# drop all columns except 'HERD_NAME' in bc.caribou; rename to 'Name'
bc.caribou.new <- bc.caribou[-omit.bc, which(names(bc.caribou) == "HERD_NAME")]
names(bc.caribou.new) <- "Name"
ab.bc.caribou <- rbind(ab.caribou.new, bc.caribou.new, makeUniqueIDs = TRUE)
lw.caribou.new <- ml[["Boreal Caribou Ranges"]][, which(names(ml[["Boreal Caribou Ranges"]]) == "Name")]
lw.caribou <- rbind(ab.bc.caribou, lw.caribou.new, makeUniqueIDs = TRUE)
lw.caribou$shinyLabel <- lw.caribou[["Name"]]
plot(lw.caribou, col = "lightblue")
dd <- "~/GitHub/LandWeb/inputs/Caribou_Ranges_LandWeb"
if (!dir.exists(dd)) dir.create(dd)
shapefile(lw.caribou, file = file.path(dd, "caribou_landweb.shp"))
|
1cfbf3dcc9d46a64c73aad2d6a2c8aab221cc782
|
d161a144cca6f876557c5f716d43e4fc40fe0eb9
|
/R/MCMC_functions.R
|
f82608884f77e5b48cea973fdd6e26309bc7306b
|
[] |
no_license
|
SimoneTiberi/BANDITS
|
d57c02cf85ec56c87900265ed3264d106480640d
|
3c42091edf5533197695b2d8bf2a1e22d7cc754d
|
refs/heads/master
| 2022-06-19T01:23:45.396288
| 2022-05-20T14:56:55
| 2022-05-20T14:56:55
| 178,011,248
| 19
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,678
|
r
|
MCMC_functions.R
|
##############################################################################################################################
# General MCMC functions:
##############################################################################################################################
# initialize pi new (matrix) object: an N-by-K[g] matrix whose entries are
# all set to the uniform probability 1/K[g] for gene/group g.
create_pi_new = function(g, K, N){
  k.g = K[g]
  matrix(rep(1 / k.g, N * k.g), nrow = N, ncol = k.g)
}
# Fast posterior mode computation: the x-location of the highest point of a
# kernel density estimate of the sample.  Extra arguments are forwarded to
# stats::density().
find.mode <- function(x, adjust, ...) {
  dens <- density(x, adjust = adjust, ...)
  peak <- which.max(dens$y)
  dens$x[peak]
}
# fast heidelberg diagnostic computation:
# Trimmed-down reimplementation of the Heidelberger-Welch stationarity test
# (cf. coda::heidel.diag) for a single chain: repeatedly discards a growing
# burn-in prefix and applies the Cramer-von-Mises test until it passes or
# half the chain has been consumed.
#   x      - chain values (numeric vector / ts of length R)
#   R      - total number of iterations in x
#   by.    - step between candidate burn-in start points
#   pvalue - significance level of the stationarity test
# Returns c(converged, nstart, 1 - p): converged flag, first start index at
# which stationarity was accepted (NA if never), and the complement of the
# Cramer-von-Mises CDF value at the final statistic.
my_heidel.diag = function(x, R, by., pvalue = 0.01){
start.vec <- seq(from = 1, to = R/2, by = by.)
# Spectral density at frequency zero, estimated from the second half only.
S0 <- my_spectrum0.ar(window(x, start = R/2), R/2+1)
converged <- FALSE
for(i in seq(along = start.vec)){
# Drop the current burn-in prefix and recompute the test statistic.
x <- window(x, start = start.vec[i])
n <- R + 1 - start.vec[i] # niter(x)
# Brownian-bridge of the cumulative sums, scaled by the spectral estimate.
B <- cumsum(x) - sum(x) * seq_len(n)/n
Bsq <- (B * B)/(n * S0)
I <- sum(Bsq)/n
p = my_pcramer(I)
# Accept stationarity when the statistic is not in the upper pvalue tail.
if(converged <- !is.na(I) && p < 1 - pvalue){
break
}
}
if( !converged || is.na(I) ) {
nstart <- NA
}else {
nstart <- start.vec[i]
}
return(c(converged, nstart, 1 - p))
}
# Cramer-von-Mises distribution function, truncating the series expansion at
# k = 3 (as coda::pcramer does); used by the stationarity diagnostic above.
#   q   - test statistic (scalar)
#   eps - tolerance below which a series term is treated as exactly zero
my_pcramer = function(q, eps = 1e-05){
  log.eps <- log(eps)
  term.for.k <- function(k){
    z <- gamma(k + 0.5) * sqrt(4 * k + 1)/(gamma(k + 1) *
      pi^(3/2) * sqrt(q))
    u <- (4 * k + 1)^2/(16 * q)
    # terms whose exponential factor is below eps are dropped as zero
    ifelse(u > -log.eps, 0, z * exp(-u) * besselK(x = u, nu = 1/4))
  }
  terms <- vapply(0:3, term.for.k, FUN.VALUE = numeric(1))
  sum(terms)
}
# Spectral density of x at frequency zero via an AR fit (cf.
# coda::spectrum0.ar).  If x is an exact linear trend (zero-sd residuals of
# a linear fit against 1..R) the estimate is 0; otherwise it is the AR
# innovation variance divided by (1 - sum of AR coefficients)^2.
my_spectrum0.ar = function(x, R){
  trend.fit <- lm(x ~ seq_len(R))
  no.residual.spread <- isTRUE(all.equal(sd(residuals(trend.fit)), 0))
  if (no.residual.spread) {
    v0 <- 0
  } else {
    ar.fit <- ar(x, aic = TRUE)
    v0 <- ar.fit$var.pred/(1 - sum(ar.fit$ar))^2
  }
  v0
}
# char_in_char looks for string a in b.
# Returns TRUE when a occurs as a literal (non-regex) substring of b.
char_in_char = function(a, b){
  # grepl(fixed = TRUE) performs the same literal substring scan as the
  # original sliding-window vapply loop, and additionally returns FALSE --
  # instead of erroring on seq_len() with a negative length -- when a is
  # longer than b.
  grepl(a, b, fixed = TRUE)
}
|
9efcee796ba34d00f7eab27107e89cff4c77dc8b
|
7898b2dfe59e4dda0456d68f015ae22448159520
|
/plot3.R
|
630b62afe0eb4ba6ef42069986e9e09362866515
|
[] |
no_license
|
creato-zoom/ExData_Plotting1
|
359c103ac1921249f4233c84933daa4521b0b04a
|
69ddbc5119aee35eeb20e934a83538bd3dd6de77
|
refs/heads/master
| 2021-01-16T18:14:10.509777
| 2015-10-10T20:48:14
| 2015-10-10T20:48:14
| 43,940,631
| 0
| 0
| null | 2015-10-09T07:45:57
| 2015-10-09T07:45:57
| null |
UTF-8
|
R
| false
| false
| 1,360
|
r
|
plot3.R
|
# Draws the "Energy sub metering" line chart for 2007-02-01..2007-02-02 from
# household_power_consumption.txt (read from the working directory) and
# writes it to plot3.png.  If the input file is missing, prints guidance
# messages and returns without plotting.
plot3 <- function()
{
if(!file.exists("household_power_consumption.txt")){
message("File household_power_consumption.txt does not exist in working directory.")
message("Please add household_power_consumption.txt to working directory and try again")
return()
}
#Data Setup
# read.csv2 defaults to sep=";" which matches this file; '?' marks NA.
powerData <- read.csv2(file = "household_power_consumption.txt", header=TRUE, na.strings = '?')
powerData$Date <- as.Date(powerData$Date, format = "%d/%m/%Y")
# Keep only the two target days, dropping rows with missing Global_active_power.
subPowerData <- powerData[powerData$Date>=as.Date("2007-02-01") & powerData$Date<=as.Date("2007-02-02") & !is.na(powerData$Global_active_power),]
# Combine Date ("%Y-%m-%d" after as.Date) and Time into a POSIXlt timestamp.
subPowerData$DateTime <- strptime(paste(subPowerData$Date,subPowerData$Time), format = "%Y-%m-%d %H:%M:%S")
#Draw Plot
png(file="plot3.png")
# Empty frame first (type="n"), then one line per sub-metering channel.
plot(subPowerData$DateTime,
as.numeric(as.character(subPowerData$Sub_metering_1)),
type="n",
xlab="",
ylab="Energy sub metering")
lines(subPowerData$DateTime,as.numeric(as.character(subPowerData$Sub_metering_1)))
lines(subPowerData$DateTime,as.numeric(as.character(subPowerData$Sub_metering_2)),col='red')
lines(subPowerData$DateTime,as.numeric(as.character(subPowerData$Sub_metering_3)),col='blue')
legend(x="topright", legend = c('Sub_metering_1','Sub_metering_2','Sub_metering_3'),
lty=c(1,1,1),
col=c('black','red','blue'))
dev.off()
}
plot3()
|
8cb8a8f7f292158350789f003df184aa30316920
|
828ab27d1bf27f9ba5f18542af72a8e3de1ef567
|
/plot3.R
|
d9681be1cacd38cc1cb97679dfb296cc38e77a19
|
[] |
no_license
|
ntquyen/ExData_Plotting1
|
930b44b3d532e275826c240f9ac47b083aba6e41
|
464f0e07783e261744eacb4bcda4eb2187d46e45
|
refs/heads/master
| 2020-12-25T04:01:17.772883
| 2015-06-11T01:00:33
| 2015-06-11T01:00:33
| 31,745,264
| 0
| 0
| null | 2015-03-06T00:57:01
| 2015-03-06T00:57:01
| null |
UTF-8
|
R
| false
| false
| 91
|
r
|
plot3.R
|
# source("download_data.R")
# Open the PNG device first so plot3() draws straight into plot3.png.
png("plot3.png")
# plotting.R is expected to define plot3() and the pre-subset data `subData`.
source("plotting.R")
plot3(subData)
dev.off()
|
dbc2719f8e99db04cb6ff2b10602636beaec1ac4
|
0a906cf8b1b7da2aea87de958e3662870df49727
|
/borrowr/inst/testfiles/matchesToCor/libFuzzer_matchesToCor/matchesToCor_valgrind_files/1609957931-test.R
|
cdd9cd36e842387de9b4574fe29f2d8778eef664
|
[] |
no_license
|
akhikolla/updated-only-Issues
|
a85c887f0e1aae8a8dc358717d55b21678d04660
|
7d74489dfc7ddfec3955ae7891f15e920cad2e0c
|
refs/heads/master
| 2023-04-13T08:22:15.699449
| 2021-04-21T16:25:35
| 2021-04-21T16:25:35
| 360,232,775
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,315
|
r
|
1609957931-test.R
|
# Auto-generated libFuzzer/valgrind regression input for borrowr:::matchesToCor.
# The numeric vector reproduces the exact IEEE-754 values (denormals, -Inf,
# NaN) that triggered the original fuzz finding -- do not reformat or round.
testlist <- list(x = structure(c(2.12199581912701e-314, 4.94065645841247e-324, 4.94065645841247e-324, 4.94065645841247e-324, 4.94065645841247e-324, 4.94065645841247e-324, 4.62895107548006e-299, 2.46679008847656e-308, 4.52353163074667e-310, 6.21470200082845e+228, 2.02822087723472e-110, 7.2846496044813e+199, 2.34729120679865e+251, 4.94065645841247e-324, 4.94065645841247e-324, 4.94065645841247e-324, 4.94065645841247e-324, 4.94065645841247e-324, 1.25316159904095e-304, 1.38828157879118e-307, 1.39137530284047e+93, 2.66174494183479e+233, 3.94604863549254e-114, 1.16665664795483e+224, 4.99772448688146e-310, 2.07222913602242e-314, 4.94065645841247e-324, 4.94065645841247e-324, 4.94065645841247e-324, 4.94065645841247e-324, 3.78262556195887e-307, 4.55281492642709e-320, 2.90440397967035e-316, 4.94065645841247e-324, -Inf, 2.52778488871817e-34, 5.56268464626775e-308, 7.21408662741556e-229, 2.07238799541986e-317, -Inf, 1.09415410343162e-314, 1.44027374932068e+277, 2.48670778993655e-316, NaN, 1.46814865197002e-76, NaN, 1.38137765183616e-314, 2.6855396515741e+122, NaN, 2.61841934347842e+122, 2.45066441650175e-319, 7.04152911727793e-09, 1.78049325879469e-307, 1.33547805083667e-307, 7.21408662741556e-229, 131072.040659609), .Dim = 8:7))
# Invoke the internal C++ entry point with the fuzz matrix and print the
# structure of whatever it returns (crash/UB detection, not value checking).
result <- do.call(borrowr:::matchesToCor,testlist)
str(result)
|
4f2dcd4e332dedc0dca8f06a8faa46d01ec87340
|
7afe5683bcecc755ab598a9d21400dd35e33bd50
|
/resources/make.assocplot.R
|
857f84232f58344b0142a0c0b89737f4f2100a31
|
[] |
no_license
|
AnalysisCommons/assocplot
|
9d03a3010923445b221b1ad624e51822a67718a8
|
376e88a6ff21492b3b4c19fd0d973b99ad61f1f3
|
refs/heads/master
| 2020-06-27T10:13:09.928600
| 2017-06-15T20:35:18
| 2017-06-15T20:35:18
| 94,249,200
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,187
|
r
|
make.assocplot.R
|
# Script generalized from 'make.plot_P.value_forManning.R' script written by Jaeyoung Hong
# Builds a regional association plot around an index SNP, colouring points by
# LD with that variant.  LOCALTEST toggles a hard-coded interactive setup
# versus command-line argument parsing (DNAnexus applet mode).
LOCALTEST <- FALSE
if (LOCALTEST) {
## Load libraries and source files
library(data.table)
setwd("/home/mbrown/DNAnexus/assocplot/resources/")
source("modified.assocplot_P.value.R")
## Setup paths
datafile <- "/home/mbrown/DNAnexus/data/mb_test_data_chr8.Rda"
# index.path
ld.path <- "/home/mbrown/DNAnexus/"
# ld.path <- "/home/mbrown/DNAnexus/test.ped.LD"
# ld.path <- "/home/mbrown/DNAnexus/ppp1r3b_locus_EU.ped.LD.txt"
# plot.path <- getwd()
## Setup parameters
snp.name <- "Name"
gene.name <- "gene"
chr.name <- "chr"
pos.name <- "pos"
p.name <- "p"
freq.name <- "maf"
df.name <- "HetDF"
wh.snp <- "8:9044912"
pop <- "CEU"
traitname <- ""
groupname <- ""
} else {
## Parse the arguments
# Arguments arrive as a single ';'-separated string; the positions are
# fixed and mapped by the index assignments below.
args <- commandArgs(trailingOnly=TRUE)
args <- paste(args,collapse=" ")
print(args)
args <- unlist(strsplit(args,";"))
print(args)
## Mandatory parameters
datafile <- args[1]
ld.path <- args[2]
## Optional parameters
ld.type <- args[3]
snp.name <- args[4]
gene.name <- args[5]
chr.name <- args[6]
pos.name <- args[7]
p.name <- args[8]
freq.name <- args[9]
df.name <- args[10]
wh.snp <- args[11]
region.width <- as.numeric(args[12])
pop <- args[13]
chr.num <- args[14]
wh.gene <- args[15]
wh.region <- args[16]
traitname <- args[17]
groupname <- args[18]
output.type <- args[19]
output.args <- args[20]
## Load libraries and source files
# NOTE(review): installs bundled tarballs at run time from the filesystem
# root -- assumes the applet has no network access and / is readable.
install.packages("/chron_2.3-47.tar.gz", repos=NULL, type="source")
install.packages("/data.table_1.9.6.tar.gz", repos=NULL, type="source")
library(data.table)
source("/modified.assocplot_P.value.R")
}
# Functions
# Guess the field delimiter of a text file from its first line: a space if
# one is present, otherwise a comma if present, otherwise a tab.
get.delim <- function(f) {
  first.line <- readLines(f, n = 1)
  has.space <- grepl(" ", first.line)
  has.comma <- grepl(",", first.line)
  ifelse(has.space, " ", ifelse(has.comma, ",", "\t"))
}
## Load data -- current datatype is seqMeta file
print("Reading in data...")
# Check filetype
# NOTE(review): the load() branch assumes the .Rda file contains an object
# named `sing` (used unconditionally below) -- confirm with the data files.
if (grepl("Rda$",datafile)) {
load(datafile)
} else {
delim <- get.delim(datafile)
sing <- read.table(datafile, header=T, as.is=T, sep=delim)
}
# data0 <- fread(datafile,data.table = FALSE)
# i <- which(data0$MarkerName==wh.snp)
# Locate the index SNP row and pull its position/chromosome/gene/p-value.
i <- which(sing[,snp.name]==wh.snp)
# snp0 <- data0[i,]$indexsnp
# pos0 <- data0[i,]$pos
# chr0 <- data0[i,]$chr
# gene0 <- data0[i,]$Locus
# pval0<-min(data0[i,]$P.value.index.aa, data0[i,]$Pval.index.ma)
snp0 <- sing[i,snp.name]
pos0 <- sing[i,pos.name]
chr0 <- sing[i,chr.name]
gene0 <- sing[i,gene.name]
pval0 <- sing[i,p.name]
# data1 <- data0[,c("MarkerName","Chr","Pos","P.value")]
# data1$neg.log10.pval <- -log10(data1$P.value)
data1 <- sing[,c(snp.name,chr.name,pos.name,p.name)]
data1$logp <- -log10(sing[,p.name])
# data <- data1[data1$pos>=pos0-250000 & data1$pos<=pos0+250000 & !is.na(data1$pos) & data1$chr==chr0,]
# names(data) <- c("SNP","CHR","POS","P.value","neg.log10.pval")
# data <- data[order(data$P.value),]
# Keep variants within +/-250 kb of the index SNP on the same chromosome.
data <- data1[data1[,pos.name]>=pos0-250000 & data1[,pos.name]<=pos0+250000 & !is.na(data1[,pos.name]) & data1[,chr.name]==chr0,]
names(data) <- c("SNP","CHR","POS","P.value","neg.log10.pval")
data <- data[order(data$P.value),]
# ld <- read.table(paste(ld.path,"ld",snp0,"_",pop,".txt",sep=""), header=T, sep="\t")
# Read in the LD data
ld <- read.table(ld.path, header=T, as.is=T)
# Normalise the LD table to columns L1 (index SNP), L2 (partner), r.2.
if (ld.type=="PLINK") {
ld1 <- ld[,c("SNP_A","SNP_B","R2")]
names(ld1) <- c("L1","L2","r.2")
} else {
names(ld)[3] <- "D"
ld1 <- ld[ld$L1==snp0,c("L1","L2","r.2")]
# Add the index SNP's self-pair with r^2 = 1 so it merges through.
ld1 <- rbind(c(ld1$L1[1],ld1$L1[1],1),ld1)
}
print("Preparing data files")
locus <- merge(data,ld1,by.x="SNP",by.y="L2")
names(locus)[which(names(locus)=="r.2")] <- "RSQR"
# Backfill the index SNP's CHR/POS if lost during the merge.
if(is.na(locus[locus$SNP==snp0,"CHR"])) locus[locus$SNP==snp0,"CHR"] <- chr0
if(is.na(locus[locus$SNP==snp0,"POS"])) locus[locus$SNP==snp0,"POS"] <- pos0
locus <- locus[!is.na(locus$POS),]
locus <- locus[order(-locus$neg.log10.pval),]
# Missing r^2 values are flagged with the sentinel -1 for the plot code.
for(r in 1:nrow(locus)){
if(is.na(locus$RSQR[r])) {locus$RSQR[r] <- -1}
} # for r
ldpop <- pop
# plot y-axis #
topPval <- -log10( min( data$P.value, na.rm=T ))
yrange<-topPval+topPval/10
yaxis_scale<-ceiling(yrange/3)
# Assemble the extra device arguments (user-supplied or per-type defaults).
# NOTE(review): scalar `&` works here but `&&` is the idiomatic choice, and
# is.null() on a character arg can never be TRUE at this point.
if (output.args!="" & !is.na(output.args) & !is.null(output.args) & output.args!="NA" & output.args!="NULL") {
output.args <- paste(",",output.args)
} else {
if (output.type=="pdf") {
output.args <- ", width=7,height=4.67"
}
else {
output.args <- ", width=720,height=480,units=\"px\""
}
}
print("Making plot")
# png("assoc.png",width=720,height=480)
# NOTE(review): eval(parse()) builds the device call, e.g. pdf("assoc.pdf", ...).
# This is needed because output.args is a raw argument string, but it will
# evaluate arbitrary user input -- consider do.call() with a parsed arg list.
eval(parse(text=paste0(output.type,"(\"assoc.",output.type,"\"",output.args,")")))
make.plot(snp0,gene0,chr0,locus,region.width,yrange,yaxis_scale,traitname=traitname,groupname=groupname,ldpop,ld.type,LOCALTEST)
dev.off()
|
385bf7d3c6aed41ceba01e7d1c27159b859a3603
|
0f9f7c99088cf725acd5cf435840044ab2b12e03
|
/src/engine_all_dataset.R
|
9bb8070613677ae2ccce024b93d684f1bd52832e
|
[
"MIT"
] |
permissive
|
hiplot/gmiec-shiny
|
64d20b21d8be64517cd1b701968ac9002cea4cf9
|
9c88d89c7b8223ef4b56e7597659c06999e77e2a
|
refs/heads/master
| 2023-03-09T18:02:03.778476
| 2021-02-28T14:24:59
| 2021-02-28T14:24:59
| 343,124,446
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,398
|
r
|
engine_all_dataset.R
|
# Cluster the 0/1 attribute matrix with kmodes (klaR), derive a per-cluster
# "rule" string from the per-attribute sums, and merge the rules plus the
# patient-level data back onto the clustered table.
#   input_for_klar2          - data.frame; column 1 is an identifier, the
#                              remaining columns are the attributes clustered
#   dfPatientForAnalysis_GAC - patient-level data merged onto the result
#   clusters                 - number of clusters requested from kmodes
# Returns the clustered table with a "rule" column ("name:count;..."),
# merged with the patient data and sorted by cluster id.
engine_all_dataset<-function(input_for_klar2,dfPatientForAnalysis_GAC,clusters){
  print("Step 6: Run klaR")
  print(dim(input_for_klar2[,-1]))
  print(dim(unique(input_for_klar2[,-1])))
  resKLAR = kmodes(input_for_klar2[,-1], clusters)
  ifkl<-cbind(clusters=resKLAR$cluster,input_for_klar2)
  list_rules<-list()
  #Find the rules for each clusters
  unique_clusters<-unique(ifkl[,"clusters"])
  print(unique_clusters)
  for(cksearch in seq_along(unique_clusters)){  # FIX: seq_along, not 1:length()
    ck<-unique_clusters[cksearch]
    # FIX: compute the per-attribute gene counts once and take names() of
    # the result; previously the identical apply() ran a second time just
    # to recover the column names.
    current_rule_value<-apply(ifkl[ifkl[,"clusters"]==ck,-c(1,2)],2,FUN=function(X){sum(as.numeric(X))})#compute number of genes with a property
    current_rule_names<-names(current_rule_value)
    current_rules_defined<-paste(current_rule_names,current_rule_value,sep=":",collapse=";")
    list_rules[[cksearch]]<-current_rules_defined
  }
  ###create a data.frame with two columns: clusters, rule
  df_clusters_rules<-data.frame(clusters=unique_clusters,rule=unlist(list_rules))
  mergeGAC_COM<-merge(ifkl,df_clusters_rules,by="clusters")
  mergeGAC_COM_sort<-mergeGAC_COM[order(mergeGAC_COM$clusters,decreasing=FALSE),]
  rb1<-merge(dfPatientForAnalysis_GAC,mergeGAC_COM_sort) #by default use the same columns
  mergeGAC_COM_res_K_2<-rb1
  return(mergeGAC_COM_res_K_2)
}
|
3b9ce5fd17140646a7fd8b98c4d90dfe1d1780e8
|
061a7c01302d3d865869e45448b9c6d10bb02b87
|
/man/assign_label_cex.Rd
|
f69d8c8bc608eb4e245e83406932c5de3a5df3ac
|
[] |
no_license
|
naikai/sake
|
4da891e257ff551adbfc7e01c7dcbc8627ed5e8f
|
68b8c688c2eaf985de96d020deaaa7af319079f4
|
refs/heads/master
| 2023-02-19T22:41:38.865443
| 2023-02-08T18:47:21
| 2023-02-08T18:47:21
| 60,200,366
| 29
| 14
| null | 2021-03-18T23:37:38
| 2016-06-01T18:14:57
|
R
|
UTF-8
|
R
| false
| true
| 407
|
rd
|
assign_label_cex.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot_utils.R
\name{assign_label_cex}
\alias{assign_label_cex}
\title{Adjust label cex based on its number}
\usage{
assign_label_cex(n.samples)
}
\arguments{
\item{n.samples}{Number of samples used to determine the label cex.}
}
\description{
Adjust the label text size (cex) based on the number of samples.
}
\examples{
assign_label_cex(100)
}
\keyword{cats}
|
320088858bb1510d053b0a1d071c711c96c6629f
|
f5d2dd91994929a25bd36dc78b246bee85202adf
|
/R/RcppExports.R
|
732344bd320d15798e3cdeab8526c1c0a4372fb5
|
[] |
no_license
|
environmentalinformatics-marburg/Reot
|
1350feb80c342aa6c94172d68c58d5e55ae8ad1c
|
1a3e09b08e960b80b236d571d3c637b8e29272fd
|
refs/heads/master
| 2020-04-24T22:32:13.185940
| 2014-08-25T09:29:07
| 2014-08-25T09:29:07
| 11,943,730
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 783
|
r
|
RcppExports.R
|
# This file was generated by Rcpp::compileAttributes
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
# NOTE(review): thin auto-generated R wrappers around the compiled C++
# routines of the 'Reot' package; edit the C++ sources and re-run
# Rcpp::compileAttributes() rather than changing these by hand.
corC <- function(x, y) {
.Call('Reot_corC', PACKAGE = 'Reot', x, y)
}
lmC <- function(x, y) {
.Call('Reot_lmC', PACKAGE = 'Reot', x, y)
}
predRsquaredSum <- function(pred_vals, resp_vals, standardised) {
.Call('Reot_predRsquaredSum', PACKAGE = 'Reot', pred_vals, resp_vals, standardised)
}
respLmParam <- function(x, y, cell) {
.Call('Reot_respLmParam', PACKAGE = 'Reot', x, y, cell)
}
findudC <- function(x) {
.Call('Reot_findudC', PACKAGE = 'Reot', x)
}
iodaC <- function(x, y) {
.Call('Reot_iodaC', PACKAGE = 'Reot', x, y)
}
iodaSumC <- function(pred_vals, resp_vals) {
.Call('Reot_iodaSumC', PACKAGE = 'Reot', pred_vals, resp_vals)
}
|
61b5b3b10fd0b672d8bd9e4efdefb48a206fe522
|
8707c19f9ba5faafa304dde5b00fab2ab345dd2e
|
/plot2.R
|
fb0b019f3bb0632a4107d045e3400f6ad9aeae4a
|
[] |
no_license
|
LiuyinC/ExData_Plotting1
|
e0a314896385b60e7f40eef319f977ca721ecab9
|
39a787f831e797e1636a067a8b0b0c14667ff3d4
|
refs/heads/master
| 2021-01-18T02:10:46.093926
| 2014-06-02T19:45:22
| 2014-06-02T19:45:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 467
|
r
|
plot2.R
|
# plot2.R: line plot of Global_active_power over 1-2 Feb 2007, saved to plot2.png.
powerData <- read.table("./data/household_power_consumption.txt", header = TRUE, sep = ";", na.strings = "?")
# Date is still a d/m/Y character column here, so match the raw strings.
plotdata <- subset(powerData, Date == "2/2/2007" | Date == "1/2/2007")
TimeDataCha <- paste(plotdata[["Date"]], plotdata[["Time"]], sep=" ")
TimeData <- strptime(TimeDataCha, format = "%d/%m/%Y %H:%M:%S")
png(filename = "plot2.png")
plot(TimeData, plotdata[["Global_active_power"]],type = "l", xlab = "", ylab = "Global active power (kilowatts)")
dev.off()
|
63b36c615a05a9bcbc9c2009da65c7b37a703c5b
|
5331c39119c2e02eb2d91016f2201be33609c9b4
|
/extras/plot-aucs.R
|
aff7d3959f2d51bdf8aa0c07be0f7f1bd93c73f2
|
[
"MIT"
] |
permissive
|
project-aero/measles-ews
|
034957e3247b19419181aacf54cbb0cf520aa763
|
a67845c1b056299cd2896263f584b55aba6bf24b
|
refs/heads/master
| 2023-04-14T01:36:24.801773
| 2021-11-23T13:07:11
| 2021-11-23T13:07:11
| 136,212,776
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,772
|
r
|
plot-aucs.R
|
# plot-aucs.R
# Script to plot the AUC results from emergence and endemic simulation
# tests of EWS.
#
# Author:
# Andrew Tredennick (atredenn@gmail.com)
# Load libraries ----------------------------------------------------------
library(tidyverse)
library(viridis)
# Load results ------------------------------------------------------------
emergence_aucs <- read.csv("../results/emergence-grid-aucs.csv")
elimination_aucs <- read.csv("../results/elimination-grid-aucs.csv")
# Positions of the "*" annotations drawn on the Niamey facet of the
# emergence heatmap (geom_text below); x matches the factor level 1e-04.
star_tbl <- tibble(
city = "Niamey",
x = as.factor(rep(1e-04, 2)),
y = c(5.7, 7.7)
)
# Make the plots ----------------------------------------------------------
# Heatmaps of |AUC - 0.5| (distance from no-signal) per metric and scenario
# parameter, faceted by city.
emerge_plot <- ggplot() +
geom_tile(data = emergence_aucs, aes(x = as.factor(susc_discount), y = metric, fill = abs(AUC-0.5))) +
geom_text(data = star_tbl, aes(x = x, y = y, label = "*"), color = "white", size = 6) +
scale_fill_viridis(limits = c(0,0.5), direction = -1, option = "C", name = "| AUC - 0.5 |") +
facet_wrap(~city, nrow = 1) +
labs(x = "Level of susceptible depletion", y = NULL) +
theme_minimal() +
theme(axis.text.x = element_text(angle = 45, hjust = 1)) +
theme(panel.spacing = unit(1, "lines"),
plot.title = element_text(size = 11, face = "bold")) +
ggtitle("Anticipating emergence")
eliminate_plot <- ggplot() +
geom_tile(data = elimination_aucs, aes(x = as.factor(vacc_speed*10000), y = metric, fill = abs(AUC-0.5))) +
scale_fill_viridis(limits = c(0,0.5), direction = -1, option = "C", name = "| AUC - 0.5 |") +
facet_wrap(~city, nrow = 1) +
labs(x = expression(paste("Rate to full vaccine coverage (", phantom()%*%phantom(), 10^4, ")")), y = NULL) +
theme_minimal() +
theme(axis.text.x = element_text(angle = 45, hjust = 1)) +
theme(panel.spacing = unit(1, "lines"),
plot.title = element_text(size = 11, face = "bold")) +
ggtitle("Anticipating elimination")
auc_plot <- cowplot::plot_grid(emerge_plot, eliminate_plot, nrow = 2, align = "v", labels = "AUTO", label_size = 12)
ggsave(filename = "../figures/simulation-grid-aucs.pdf", plot = auc_plot, width = 8.5, height = 5.5, units = "in")
# Load moving window results ----------------------------------------------
# Same plots again for the moving-window variant of the analysis; the
# *_aucs objects are deliberately reused/overwritten here.
emergence_aucs <- read.csv("../results/emergence-mvw-grid-aucs.csv")
elimination_aucs <- read.csv("../results/elimination-mvw-grid-aucs.csv")
# Make the plots ----------------------------------------------------------
emerge_plot <- ggplot() +
geom_tile(data = emergence_aucs, aes(x = as.factor(susc_discount), y = metric, fill = abs(AUC-0.5))) +
scale_fill_viridis(limits = c(0,0.5), direction = -1, option = "C", name = "| AUC - 0.5 |") +
facet_wrap(~city, nrow = 1) +
labs(x = "Level of susceptible depletion", y = NULL) +
theme_minimal() +
theme(axis.text.x = element_text(angle = 45, hjust = 1)) +
theme(panel.spacing = unit(1, "lines"),
plot.title = element_text(size = 11, face = "bold")) +
ggtitle("Anticipating emergence over a moving window")
eliminate_plot <- ggplot() +
geom_tile(data = elimination_aucs, aes(x = as.factor(vacc_speed*10000), y = metric, fill = abs(AUC-0.5))) +
scale_fill_viridis(limits = c(0,0.5), direction = -1, option = "C", name = "| AUC - 0.5 |") +
facet_wrap(~city, nrow = 1) +
labs(x = expression(paste("Rate to full vaccine coverage (", phantom()%*%phantom(), 10^4, ")")), y = NULL) +
theme_minimal() +
theme(axis.text.x = element_text(angle = 45, hjust = 1)) +
theme(panel.spacing = unit(1, "lines"),
plot.title = element_text(size = 11, face = "bold")) +
ggtitle("Anticipating elimination over a moving window")
auc_mvw_plot <- cowplot::plot_grid(emerge_plot, eliminate_plot, nrow = 2, align = "v", labels = "AUTO", label_size = 12)
ggsave(filename = "../figures/simulation-mvw-grid-aucs.pdf", plot = auc_mvw_plot, width = 8.5, height = 5.5, units = "in")
# Retired single-run version kept for reference:
# endemic_aucs <- read.csv("../results/endemic-aucs.csv") %>%
# mutate(simulation = "Endemic")
#
# emergence_aucs <- read.csv("../results/emergence-aucs.csv") %>%
# mutate(simulation = "Emergence")
#
# auc_tbl <- bind_rows(
# endemic_aucs,
# emergence_aucs
# )
#
#
# # Plot and save -----------------------------------------------------------
#
# auc_plot <- ggplot(auc_tbl, aes(x = metric, y = abs(AUC-0.5), fill = AUC)) +
# geom_col(position = position_dodge()) +
# scale_y_continuous(limits = c(0,0.5)) +
# scale_fill_viridis_c(limits = c(0,1), direction = -1, option = "C") +
# facet_grid(simulation~city) +
# theme_minimal() +
# labs(x = NULL, y = "| AUC - 0.5 |")+
# theme(axis.text.x = element_text(angle = 45, hjust = 1)) +
# theme(panel.spacing = unit(1, "lines"))
#
# ggsave(filename = "../figures/simulation-aucs.pdf", plot = auc_plot, width = 8.5, height = 5, units = "in")
|
575c82a84260f1321962a6ae4a6603824d402767
|
6b28896f46eabbddaf8f14cd08554cfeab591263
|
/R/fem.main.R
|
591b40060c6e0c12e6649ee86a5f5c032e9c60a5
|
[] |
no_license
|
cran/FisherEM
|
3b565591d45e456884b8296af38047421f8ade93
|
4ff2e8e609a42942801b74290158e79e3c18c751
|
refs/heads/master
| 2021-06-02T08:28:58.187744
| 2020-09-28T13:10:02
| 2020-09-28T13:10:02
| 17,679,260
| 0
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,748
|
r
|
fem.main.R
|
# Main fitting loop of the Fisher-EM algorithm: alternates an F step
# (estimation of the discriminative subspace V), an M step (parameter
# estimation) and an E step (posterior probabilities) until the
# Aitken-accelerated log-likelihood stabilises.
#
# Y      : data matrix (n x p)
# K      : number of groups
# init   : initialisation type ('user', 'hclust', 'kmeans' or 'random')
# nstart : number of restarts for the 'kmeans'/'random' initialisations
# maxit  : maximum number of F/M/E iterations
# eps    : convergence threshold on the accelerated log-likelihood
# Tinit  : user-supplied initial posterior matrix when init == 'user'
# model  : covariance model, forwarded to fem.mstep
# kernel : kernel argument forwarded to the F-step functions
# method : F-step variant ('svd', 'gs' or 'reg')
fem.main <- function(Y,K,init,nstart,maxit,eps,Tinit,model,kernel='',method){
  # Initialization
  colnames = colnames(Y)
  Y = as.matrix(Y)
  n = nrow(Y)
  p = ncol(Y)
  d = min((K-1),(p-1))  # dimension of the discriminative subspace
  # Compute the empirical covariance matrix S of the centred data
  m = colMeans(Y)
  XX = as.matrix(Y - t(m*t(matrix(1,n,p))))
  S = t(XX) %*% XX /n
  # Trace of observed log-likelihoods, one slot per iteration
  Lobs = rep(c(-Inf),1,(maxit+1))
  # Initialization of the n x K posterior matrix T
  # (NB: the local T also masks R's TRUE shorthand inside this function)
  if (init=='user'){ T = Tinit}
  else if (init=='hclust'){
    T = matrix(0,n,K)
    ind = cutree(hclust(dist(Y),method='ward.D2'),K)
    for (i in 1:n){ T[i,ind[i]] = 1 }
  }
  else if (init=='kmeans' || init=='random'){
    Ltmp = rep(NA,nstart); TT = list()
    for (i in 1:nstart){
      if (init=='random'){TT[[i]] = t(rmultinom(n,1,c(rep(1/K,K))))}
      else{
        T = matrix(0,n,K)
        ind = kmeans(Y,K,nstart=10)$cluster
        # BUG FIX: this inner loop previously reused `i` as its index,
        # clobbering the restart counter so every restart wrote its
        # result into TT[[n]] / Ltmp[n] instead of the current slot.
        # A distinct index `j` is used instead.
        for (j in 1:n){ T[j,ind[j]] = 1 }
        TT[[i]] = T
      }
      # Score this initialisation with one F/M/E pass
      V = switch(method,
                 'svd'= fstep.fisher(XX,TT[[i]],S,kernel),
                 'gs'= fstep.GramSc(XX,TT[[i]],S,kernel),
                 'reg' = fstep.qiao(Y,TT[[i]],kernel))
      prms = fem.mstep(Y,V,TT[[i]],model=model,method=method)
      res.estep = fem.estep(prms,Y,V)
      Ltmp[i] = res.estep$loglik
    }
    # Keep the restart with the best log-likelihood
    T = TT[[which.max(Ltmp)]]
  }
  # One F/M/E pass on the chosen initialisation to seed the main loop
  V = switch(method,'svd'= fstep.fisher(XX,T,S,kernel),
             'gs'= fstep.GramSc(XX,T,S,kernel),
             'reg' = fstep.qiao(Y,T,kernel))
  prms = fem.mstep(Y,V,T,model=model,method=method)
  res.estep = fem.estep(prms,Y,V)
  Lobs[1] = res.estep$loglik
  # Main loop
  Linf_new = Lobs[1]
  for (i in 1:maxit){
    # The three main steps F, M and E
    V = switch(method,
               'svd'= fstep.fisher(XX,T,S,kernel),
               'gs'= fstep.GramSc(XX,T,S,kernel),
               'reg' = fstep.qiao(Y,T,kernel))
    prms = fem.mstep(Y,V,T,model=model,method=method)
    if (prms$test !=0) {warning("some classes become empty\n",call.=F); break}
    res.estep = fem.estep(prms,Y,V)
    T = res.estep$T
    Lobs[i+1] = res.estep$loglik
    # Stop criterion: Aitken acceleration of the log-likelihood sequence
    if (i>=3){
      acc = (Lobs[i+1] - Lobs[i]) / (Lobs[i] - Lobs[i-1])
      Linf_old = Linf_new
      Linf_new <- try( Lobs[i] + 1/(1-acc) * (Lobs[i+1] - Lobs[i]))
      if (is.na(Linf_new)){warning("some classes become empty\n",call.=F); break}
      if (abs(Linf_new - Linf_old) < eps) {break}
    }
  }
  # Returning the results
  cls = max.col(T)
  crit = fem.criteria(Lobs[(i+1)],T,prms,n)
  rownames(V) = colnames
  colnames(V) = paste('U',1:d,sep='')
  list(model=prms$model,K=K,cls=cls,P=T,U=V,mean=prms$my,prop=prms$prop,D=prms$D,beta=prms$b,
       aic=crit$aic,bic=crit$bic,icl=crit$icl,comp=crit$comp,loglik.all=Lobs[2:(i+1)],
       loglik=Lobs[i+1],method=method)
}
|
d4ff529833bf6c764de41396a15e10b5fbf1c509
|
fe612f81a3118bf3ebef644bae3281bd1c156442
|
/man/h2o.removeAll.Rd
|
3bf89913be2dd243bc31559531a13e2adf6e3b92
|
[] |
no_license
|
cran/h2o
|
da1ba0dff5708b7490b4e97552614815f8d0d95e
|
c54f9b40693ae75577357075bb88f6f1f45c59be
|
refs/heads/master
| 2023-08-18T18:28:26.236789
| 2023-08-09T05:00:02
| 2023-08-09T06:32:17
| 20,941,952
| 3
| 3
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,141
|
rd
|
h2o.removeAll.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/kvstore.R
\name{h2o.removeAll}
\alias{h2o.removeAll}
\title{Remove All Objects on the H2O Cluster}
\usage{
h2o.removeAll(timeout_secs = 0, retained_elements = c())
}
\arguments{
\item{timeout_secs}{Timeout in seconds. Default is no timeout.}
\item{retained_elements}{Instances or ids of models and frames to be retained. Combination of instances and ids in the same list is also a valid input.}
}
\description{
Removes the data from the h2o cluster, but does not remove the local references.
Retains models, frames and vectors specified in retained_elements argument.
Retained elements must be instances/ids of models and frames only. For models retained, training and validation frames are retained as well.
Cross-validation models of a retained model are NOT retained automatically; these must be specified explicitly.
}
\examples{
\dontrun{
library(h2o)
h2o.init()
prostate_path <- system.file("extdata", "prostate.csv", package = "h2o")
prostate <- h2o.uploadFile(path = prostate_path)
h2o.ls()
h2o.removeAll()
h2o.ls()
}
}
\seealso{
\code{\link{h2o.rm}}
}
|
bfc39c11504a111f9a4d0da5dc7d31258f02fbfe
|
b12382e16a602599d0725b5446bb6e5680da3e66
|
/R/Newton-Raphson/NLS_NR/Functions/expitm1.R
|
057531f490db4f1a0104c8aff84bf0489afa50a7
|
[] |
no_license
|
hhnguyen2/Summer-2016
|
edd9de12fff00d5af3c058c30a7d07e71ba4289b
|
bccd6810ce3c49f8a0925b1954cff7643eb9648c
|
refs/heads/master
| 2021-01-20T17:20:31.080061
| 2016-08-04T20:11:56
| 2016-08-04T20:11:56
| 60,541,681
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 131
|
r
|
expitm1.R
|
# expitm1 ("expit minus one"): evaluates (exp(x)-1)/(exp(x)+1),
# computed in the numerically equivalent form 1 - 2/(exp(x)+1).
expitm1 <- function(x) {
  1 - 2 / (exp(x) + 1)
}
|
f7f256d189751fdef1678e14866a35962351c0d9
|
596338eae0a9ad8879e01ace95ab52027c8795cc
|
/toRun.R
|
f4b1463c719031d9755b0c2a3ddb20b1c4644100
|
[] |
no_license
|
databio/Methylation_Age_Prediction
|
ddb92f93d90348e505f98e45d380725e176318a9
|
b369c15e0bb72e2ff02105130a7466303cf4e0bc
|
refs/heads/master
| 2020-05-02T20:57:52.790718
| 2019-04-25T13:53:04
| 2019-04-25T13:53:04
| 178,206,628
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 876
|
r
|
toRun.R
|
## Driver script: renders the age-prediction R Markdown report for the
## mouse epigenetic clock package.
## Install dependencies; commented out in case they are already present.
#source("https://bioconductor.org/biocLite.R")
# install.packages("knitr", repos='http://cran.us.r-project.org')
# install.packages("rmarkdown", repos='http://cran.us.r-project.org')
#biocLite("preprocessCore")
#install.packages("installr")
#require(installr)
#install.pandoc()
#Put here the location of the package
#setwd("./PredictionPackage_20170215/")
# NOTE(review): hard-coded absolute path -- edit to match your machine.
setwd("/Users/anant/MouseEpigeneticClock-master")
RdataF = "./PredictionPackage_20170215.Rdata"  # fitted predictor objects
functionFile = "./PredictorFunctions.R"        # helper functions used by the Rmd
covFol = "./bismarkFiles/"                     # folder of bismark coverage files
readDepth = 5                                  # passed as ReadDepth -- presumably a minimum read-depth filter; TODO confirm
# Bundle the inputs as parameters for the parameterised R Markdown document
params = list(covFolder = covFol, RdataFile =RdataF, sourceFunctions=functionFile, ReadDepth = readDepth)
# Render the markdown report to HTML
rmarkdown::render('./PredictingAgeLeftOutSample.Rmd', params = params,output_file = paste('./PredictingAgeLeftOutSample2.html',sep=""))
|
e255a6f164a38409871b12875de6be11bef0d495
|
c746b5f40c118fb4f41a2d7cb88024738476d40f
|
/Data_Generation/Results/combine_adaelnet75_500.R
|
56773119bc84dc3b6886c63fa540a692bab49c4b
|
[] |
no_license
|
multach87/Dissertation
|
5548375dac9059d5d582a3775adf83b5bc6c0be7
|
d20b4c6d3087fd878a1af9bc6e8543d2b94925df
|
refs/heads/master
| 2023-06-25T20:09:25.902225
| 2021-07-23T18:51:07
| 2021-07-23T18:51:07
| 281,465,959
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,423
|
r
|
combine_adaelnet75_500.R
|
# Combine per-iteration adaptive elastic net (alpha = 0.75) simulation
# results for the n = 500 condition into one data frame, recording which
# iterations errored.
# NOTE(review): paths are absolute and machine-specific.
# Load the generated data sets and the raw per-iteration model results
half.data <- readRDS("/Users/Matt Multach/Desktop/Dissertation/Dissertation_Git/Data_Generation/Data_Storage/500_data_10052020.RData")
adaelnet75.final <- readRDS("/Users/Matt Multach/Dropbox/USC_Grad2/Courses/Dissertation/Dissertation_Git/Data_Storage/MainResults_Storage/adaelnet75_resultmain_500.RData")
# Initialize the results data frame with the same columns as one $info row
adaelnet75.results <- data.frame(matrix(ncol = ncol(adaelnet75.final[[1]]$info)))
colnames(adaelnet75.results) <- colnames(adaelnet75.final[[1]]$info)
# Indices of iterations whose fit produced an error
adaelnet75.errors <- numeric()
# Fill results: successful fits contribute their full info row; failed fits
# keep only the data-generating conditions (first 7 columns)
##Errors at:
for(i in 1:length(adaelnet75.final)) {
  if(is.null(adaelnet75.final[[i]]$error)) {
    adaelnet75.results[i , ] <- adaelnet75.final[[i]]$info
  } else {
    cat("error at i = " , i , "\n")
    adaelnet75.results[i , 1:7] <- half.data[[i]]$conditions
    adaelnet75.errors <- c(adaelnet75.errors , i)
  }
}
# Quick summaries: mean false-positive rate, false-negative rate, and mean
# prediction error (dropping infinite MPE values)
mean(adaelnet75.results[ , "fpr"] , na.rm = T)
mean(adaelnet75.results[ , "fnr"] , na.rm = T)
mean(adaelnet75.results[!is.infinite(adaelnet75.results[ , "mpe"]) , "mpe"] , na.rm = T)
# Save the combined results (error-index save left disabled as in original)
saveRDS(adaelnet75.results , "/Users/Matt Multach/Dropbox/USC_Grad2/Courses/Dissertation/Dissertation_Git/Data_Storage/MainResults_Storage/adaelnet75_resultDF_500.RData")
#saveRDS(adaelnet75.errors , "/Users/Matt Multach/Dropbox/USC_Grad2/Courses/Dissertation/Dissertation_Git/Data_Storage/Error_Storage/adaelnet75_errorindices_500.RData")
|
cc6afafb1012b2b92df37cdb76f36d4e79369649
|
0ae69401a429092c5a35afe32878e49791e2d782
|
/trinker-lexicon-4c5e22b/man/profanity_banned.Rd
|
6ed7cdf0467e0d412a99db83d7dd2b1a18a50585
|
[] |
no_license
|
pratyushaj/abusive-language-online
|
8e9156d6296726f726f51bead5b429af7257176c
|
4fc4afb1d524c8125e34f12b4abb09f81dacd50d
|
refs/heads/master
| 2020-05-09T20:37:29.914920
| 2019-06-10T19:06:30
| 2019-06-10T19:06:30
| 181,413,619
| 3
| 0
| null | 2019-06-05T17:13:22
| 2019-04-15T04:45:06
|
Jupyter Notebook
|
UTF-8
|
R
| false
| true
| 838
|
rd
|
profanity_banned.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/profanity_banned.R
\docType{data}
\name{profanity_banned}
\alias{profanity_banned}
\title{bannedwordlist.com's List of Profane Words}
\format{A character vector with 77 elements}
\usage{
data(profanity_banned)
}
\description{
A dataset containing a character vector of profane words from bannedwordlist.com.
}
\section{Disclaimer}{
From the original author: "These lists are free to download. You may use them for
any purpose you wish and may copy, modify and distribute them freely. The
swear words lists are provided "as-is" without any warranty or guarantee
whatsoever. Don't blame me when the users of your forum, blog or community
find more creative ways of offending people."
}
\references{
\url{http://www.bannedwordlist.com}
}
\keyword{datasets}
|
cb44cedaf0e205281b99c3c9b416494c6d99b5a6
|
7815bb69f7e5d07aec5d853be6d705bee6c444a6
|
/Assignment3/best.R
|
040724ef3a9d79f8cdbd8b130e439cd3b9c8bf2a
|
[] |
no_license
|
fiamen/RProgramming
|
082c7cde4f9c6367ac2df6ef0a3ebf7bdbf46883
|
0ecc60edc099325a73812b1ca57558f96980f3c4
|
refs/heads/master
| 2021-01-18T18:25:51.464176
| 2014-11-17T02:10:01
| 2014-11-17T02:10:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,281
|
r
|
best.R
|
## Find the hospital in a state with the lowest 30-day death rate for an
## outcome ("heart attack", "heart failure" or "pneumonia"). Reads
## "outcome-of-care-measures.csv" from the working directory and stops with
## "invalid state" / "invalid outcome" on unknown inputs.
best <- function(state, outcome) {
  ## Read outcome-of-care data, keeping every column as character
  measures <- read.csv("outcome-of-care-measures.csv", colClasses = "character")
  ## Check that state is valid
  if (!(state %in% unique(measures$State))) {
    stop("invalid state")
  }
  ## Check that outcome is valid, remembering which one was requested
  outcomes <- c("heart attack", "heart failure", "pneumonia")
  outcome_idx <- match(outcome, outcomes)
  if (is.na(outcome_idx)) {
    stop("invalid outcome")
  }
  ## Keep hospital name plus the three 30-day death-rate columns for this state
  narrowed <- measures[measures$State == state, c(2, 13, 19, 25)]
  ## Column holding the requested outcome (column 1 is the hospital name)
  col <- outcome_idx + 1
  ## Sort ascending by that rate and return the first hospital's name
  pick_first <- function(col) {
    narrowed[, col] <- as.numeric(narrowed[, col])
    ranked <- narrowed[order(narrowed[, col], decreasing = FALSE), ]
    ranked[1, 1]
  }
  ## Suppress the coercion warnings from non-numeric rate entries
  suppressWarnings(pick_first(col))
}
|
f0e615083df4fe144dc800c1e1251518699b1d11
|
2eae755d5619934c814a2aec3e8ff01a69ee727f
|
/07/ReactionTimeGibbs.R
|
4b97b31f04662162ed73e151ff76235b7b6de509
|
[] |
no_license
|
tjwhalenUVA/664-Homework
|
8535877e0f2400ae3544888d52a5f052f2f8144d
|
2cb524132d0906d89a65aec3f5d5562d889d6e5c
|
refs/heads/master
| 2021-05-02T00:34:02.722399
| 2018-06-08T17:30:24
| 2018-06-08T17:30:24
| 120,946,506
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,476
|
r
|
ReactionTimeGibbs.R
|
# Reaction time example from Unit 5 and 6
# Reaction time data (log reaction times) for first non-schizophrenic subject
reaction.times=c(5.743, 5.606, 5.858, 5.656, 5.591, 5.793, 5.697, 5.875, 5.677, 5.73,
5.69, 5.919, 5.981, 5.996, 5.635, 5.799, 5.537, 5.642, 5.858, 5.793,
5.805, 5.73, 5.677, 5.553, 5.829, 5.489, 5.724, 5.793, 5.684, 5.606)
require(coda) # load the coda package
x = reaction.times
xbar = mean(x)
n = length(x)
#####
# First consider conjugate model of Unit 5
# Assume non-informative prior distribution
# Normal-Gamma with mu0=0, k0=0, alpha0=-1/2, beta0=infinity
# Posterior hyperparameters mu1, k1, alpha1, beta1
mu1 <- xbar
k1 <- n
alpha1 <- -1/2 + n/2
beta1 <- 1/(0.5*sum((x-xbar)^2))
spread1 <- sqrt(1/(k1*alpha1*beta1))
thetaVals <- 5.64+(0:100)/500
stdVals <- (thetaVals - mu1)/spread1
thetaMargDens <- dt(stdVals,df=2*alpha1)/spread1
normDens <- dnorm(thetaVals,mu1,spread1)
dens <- cbind(thetaMargDens,normDens)
matplot(thetaVals,dens,type=c("l","l"),col=c("red","blue"),xlab="Theta",ylab="Probability Density")
legend(5.76,15,c("Unknown SD (t)","Known SD (Normal)"),col=c("red","blue"),lty=c(1,2))
#Set simulation sample size
numSim <- 10000
# Simulate directly from the posterior distribution
rhoDirect <- rgamma(numSim,shape=alpha1,scale=beta1)
sigmaDirect <- 1/sqrt(rhoDirect)
thetaDirect <- rnorm(numSim,mean=mu1,sd=sigmaDirect/sqrt(k1))
####
# Now use Gibbs sampling for semi-conjugate distribution
thetaGibbs<-xbar #Initial guess for mean
sigmaGibbs<-sd(x) #Initial guess for stdev
rhoGibbs<-1/sigmaGibbs[1]^2 # Initial guess for precision
for (k in 2:numSim) {
thetaGibbs[k]<- # note posterior mean is mu1 because k is zero
rnorm(1,mean=mu1,sd=sigmaGibbs[k-1]/sqrt(k1))
alphaG<-alpha1 # This is unncecessary because alpha1 does not change
betaG<-1/(0.5*sum((x-thetaGibbs[k])^2))
rhoGibbs[k]<-rgamma(1,shape=alphaG,scale=betaG)
sigmaGibbs[k]<-1/sqrt(rhoGibbs[k])
}
#Plot theoretical and Monte Carlo density functions
plot(density(thetaDirect),col="darkgreen",lty=2,main="",xlab="Theta")
densityGibbs<-density(thetaGibbs)
lines(densityGibbs$x,densityGibbs$y,col="blue",lty=3)
lines(thetaVals,thetaMargDens,col="red")
legend(5.76,15,c("Direct MC KD","Gibbs KD","Theoretical t"),col=c("darkgreen","blue","red"),lty=c(2,3,1))
#Calculate effective sample size
effectiveSize(thetaDirect)
effectiveSize(thetaGibbs)
effectiveSize(sigmaDirect)
effectiveSize(sigmaGibbs)
|
8df7a69d6345cda5dcb7446dbaaf15b668295864
|
52e900e1cd7820ee4f9d298c7b1eee2fb975461e
|
/plot1.R
|
a802e4a55bec2699dc7893fc2c3d7afa6663223c
|
[] |
no_license
|
cscarvalho/ExData_Plotting1
|
38a1aab0806d699e2c2237a914af8b7fcc1a5dd8
|
1bd81811e5089001d5c258775ae5612831bc2a88
|
refs/heads/master
| 2021-01-18T06:44:07.817790
| 2015-12-13T21:32:37
| 2015-12-13T21:32:37
| 47,933,561
| 0
| 0
| null | 2015-12-13T19:41:27
| 2015-12-13T19:41:27
| null |
UTF-8
|
R
| false
| false
| 355
|
r
|
plot1.R
|
##plot1.R
## Histogram of Global Active Power for 1-2 Feb 2007, saved as plot1.png.
datafile="household_power_consumption.txt"
# Semicolon-separated file; keep strings as character
data=read.table(datafile,sep=";",header=TRUE,stringsAsFactors=FALSE,dec=".")
#head(data)
# Keep only the two target dates (dd/mm/yyyy strings in the raw file)
subdata=data[data$Date %in% c("1/2/2007","2/2/2007"),]
x=as.numeric(subdata$Global_active_power)
# Render the histogram straight to a PNG device
png(file="plot1.png")
hist(x,main="Global Active Power",xlab="Global Active Power (kilowatts)",col="red")
dev.off()
|
6a66a10604576d0d43ae867e6184e4ff67080ad7
|
4e929f4a92a2533e713b87adb06f773748814126
|
/R/RProjects/HITHATStats/R/dl20.R
|
4bdc1ee222259f873a4720b6522e21ee600aa953
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
jlthomps/EflowStats
|
f4fe56f17cb675bcc1d618bc838003c2f2e9f81b
|
016c9cb65a2f13a041af3eb87debb4f83793238a
|
refs/heads/master
| 2021-01-01T05:33:44.189671
| 2013-12-04T23:30:21
| 2013-12-04T23:30:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 895
|
r
|
dl20.R
|
#' Function to return the DL20 hydrologic indicator statistic for a given data frame
#'
#' This function accepts a data frame that contains a column named "discharge" and
#' calculates the number of zero-flow months for the entire record
#'
#' @param qfiletempf data frame containing a "discharge" column containing daily flow values
#'   (plus "month_val" and "year_val" columns used for grouping)
#' @return dl20 numeric containing the number of zero-flow months for the given data frame
#'   (the string 'NA' when the record has no zero-flow months, preserving historical behaviour)
#' @export
#' @examples
#' load_data<-paste(system.file(package="HITHATStats"),"/data/obs_data.csv",sep="")
#' qfiletempf<-read.csv(load_data)
#' dl20(qfiletempf)
dl20 <- function(qfiletempf) {
  # Total discharge for every (month, year) pair in the record
  sumbymonyr <- aggregate(qfiletempf$discharge,list(qfiletempf$month_val,qfiletempf$year_val),FUN=sum,na.rm=TRUE)
  if (min(sumbymonyr$x)==0) {
    # BUG FIX: subset() on the numeric column returns a vector, and
    # nrow(<vector>) is NULL, so the original always returned NULL here.
    # Count the zero-flow months with length() instead.
    zeromon <- subset(sumbymonyr$x,sumbymonyr$x==0)
    dl20 <- length(zeromon)
  }
  else {
    dl20 <- 'NA'
  }
  return(dl20)
}
|
4831f85c47595d545121b7ff5a590908d66a8b71
|
a40171fbebb7daff29f1c0ea023e5bf17f025365
|
/R/Query.R
|
f8c65a6e9ff64b364b3c062e84e2fef31fd89d1a
|
[] |
no_license
|
SimonPBR/reviewR_prep
|
d4e7d7abb79f03aa1481c8247e65b98f485ccf25
|
fc3e007c0832e702cb942bd98fad44f23d49f3a3
|
refs/heads/master
| 2020-09-29T17:45:00.357010
| 2019-12-11T09:55:25
| 2019-12-11T09:55:25
| 227,086,421
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 455
|
r
|
Query.R
|
# Return the `year` column for the movie(s) whose title matches `title`.
# The argument is captured unevaluated (tidy-eval) and unquoted inside
# filter(); relies on a `movies` data frame being available in scope.
getYear <- function(title) {
  title <- enquo(title)
  matched <- movies %>%
    filter(title == !!title)
  return(matched$year)
}
# function(data, metric = title) {
# # Convert vor NSE
# metric <- enquo(metric)
# # Open data
# movies %>%
# # Filter for the title specified in function -> outputs vector of title
# filter(title == !!metric) -> mYear
# # Return ONLY year comlumn of vector
# return(mYear$year)
# }
|
43a92ea6aea307070abf778d4032edf315d1ea98
|
1f8bdf4a8638978a38c25293848057ea40b3854f
|
/R/report.r
|
b20befbd350e2c49921bc2254af3463e8da8c784
|
[] |
no_license
|
jcal3/rj
|
b8bff846c14983c92504fed452a908cd2515bc28
|
3b51a477f139c19f3dd6fd1b8d533e713720719a
|
refs/heads/master
| 2021-01-16T19:32:52.314482
| 2013-06-04T20:59:06
| 2013-06-04T20:59:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,028
|
r
|
report.r
|
#' Generate a status report.
#'
#' @param articles list of articles to generate report for. Defaults to
#' all active reports in \file{Submissions/}.
#' @return A data frame of class \code{c("report", "data.frame")} with one
#'   row per article, sorted by last-status date then editor.
#' @export
report <- function(articles = active_articles()) {
  # One summary row per article (see report_line), bound together
  rpt <- do.call("rbind", lapply(articles, report_line))
  # Sort by last-status-change date, then by editor abbreviation
  rpt <- rpt[order(rpt$date, rpt$ed), ]
  # Order the status factor levels pipeline-first / final-last
  rpt$status <- factor(rpt$status, order_status(rpt$status))
  structure(rpt, class = c("report", "data.frame"))
}
# Return the most recent status entry of an article
# (the final element of x$status).
last_status <- function(x) {
  stopifnot(is.article(x))
  n_statuses <- length(x$status)
  x$status[[n_statuses]]
}
# Collapse an article's latest status into a summary category:
# "complete" once a final status is reached, otherwise flag a missing
# editor or reviewers, else pass the raw status through unchanged.
summary_status <- function(x) {
  stopifnot(is.article(x))
  status <- last_status(x)$status
  if (status %in% final_status) {
    "complete"
  } else if (empty(x$editor)) {
    "needs editor"
  } else if (empty(x$reviewers)) {
    "needs reviewers"
  } else {
    status
  }
}
# Build the one-row data frame summarising an article for report().
report_line <- function(x) {
  stopifnot(is.article(x))
  sstatus <- summary_status(x)
  # NOTE(review): `status` is assigned but not referenced below
  status <- last_status(x)
  last_date <- last_status(x)$date
  # Days since the last status change, compared against the two deadline
  # thresholds for this status; one star per deadline exceeded
  days_taken <- difftime(Sys.Date(), last_date, "days")
  stars <- sum(days_taken > deadlines(sstatus))
  data.frame(
    status = sstatus,
    ed = editor_abbr(x$editor),
    id = format(x$id),
    title = str_trunc(x$title, 34),
    date = last_date,
    stars = str_dup("*", stars),
    stringsAsFactors = FALSE
  )
}
#' @S3method print report
# Pretty-printer for report objects: first every article grouped by status,
# then only "actionable" articles (assigned an editor, not yet accepted/
# online/complete) grouped by editor. `out[-1]` drops the data.frame
# header row from the captured output.
print.report <- function(x, ...) {
  cat("BY STATUS:\n")
  parts <- split(x, x$status)
  for (nm in names(parts)) {
    part <- parts[[nm]]
    # Capitalise the section name and pad with dashes to 60 characters
    str_sub(nm, 1, 1) <- toupper(str_sub(nm, 1, 1))
    cat(str_pad(nm, 60, "right", "-"), "\n")
    out <- capture.output(print.data.frame(part[, -1], row.names = FALSE,
      right = FALSE))
    cat(paste(out[-1], collapse = "\n"), "\n\n")
  }
  cat("BY EDITOR:\n")
  # subset() evaluates `ed`/`status` inside x (non-standard evaluation)
  actionable <- subset(x, ed != "" &
    !(status %in% c("accepted", "online", "complete")))
  parts <- split(actionable, actionable$ed)
  for (nm in names(parts)) {
    part <- parts[[nm]]
    str_sub(nm, 1, 1) <- toupper(str_sub(nm, 1, 1))
    cat(str_pad(nm, 60, "right", "-"), "\n")
    out <- capture.output(print.data.frame(part[, c("id", "status", "date", "stars")], row.names = FALSE,
      right = FALSE))
    cat(paste(out[-1], collapse = "\n"), "\n\n")
  }
}
# Order unique status values pipeline-style: pre-review statuses first,
# post-acceptance statuses last, everything else (in input order) between.
order_status <- function(x) {
  x <- unique(x)
  early_stages <- c("needs editor", "needs reviewers", "out for review")
  late_stages <- c("accepted", "copy edited", "online", "proofed", "complete")
  first <- intersect(early_stages, x)
  last <- intersect(late_stages, x)
  middle <- setdiff(x, c(first, last))
  c(first, middle, last)
}
# Takes a summary status as input and returns its two warning deadlines
# (in days). Final statuses never become overdue (infinite deadlines);
# a few statuses have special thresholds, the rest default to 4 and 6 weeks.
deadlines <- function(sstatus) {
  if (sstatus %in% final_status) {
    return(c(Inf, Inf))
  }
  overrides <- list(
    "needs editor" = c(7L, 14L),
    "needs reviewers" = c(7L, 14L),
    "submitted" = c(3L, 7L),
    "proofed" = c(7L, 14L),
    "major revision" = c(60L, 90L)
  )
  deadline <- overrides[[sstatus]]
  if (is.null(deadline)) {
    deadline <- c(4L, 6L) * 7L
  }
  deadline
}
# Abbreviate an editor's name to uppercase initials ("" when unset).
editor_abbr <- function(x) {
  if (empty(x)) {
    return("")
  }
  words <- str_split(x, " ")[[1]]
  initials <- str_sub(words, 1, 1)
  toupper(str_c(initials, collapse = ""))
}
|
bed8bbc390cf4256d9c59aedab0066840a23cb2b
|
5bf5969c93eed2a9484f9aa22f90d7946572be8f
|
/graficar.R
|
f0a63009a3f5a952fcc6ad0e84b09422b5bed2fd
|
[] |
no_license
|
SergioMateosSanz/Rexamples
|
891ec1b6ecf4dd3bf2d4080838ee2c3126177243
|
85a2fa59312eb0b79422f42e4c47044757405618
|
refs/heads/main
| 2023-04-26T01:55:02.257440
| 2021-05-27T18:15:59
| 2021-05-27T18:15:59
| 330,391,609
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,246
|
r
|
graficar.R
|
# Different ways of plotting in R
# 1. Plotting with base graphics (the traditional way)
year <- c('2010', '2011', '2012', '2013', '2014', '2015', '2016', '2017', '2018')
disney <- c(11, 13, 11, 8, 12, 11, 12, 8, 10)
# Basic scatter plot
plot(x = year,
     y = disney)
# Customising the plot
plot(x = year,
     y = disney,
     main = 'disney',
     xlab = 'year',
     ylab = 'films',
     col = 'cornflowerblue',
     pch = 16,
     panel.first = grid())
# Other basic plotting functions are 'barplot()', 'hist()' and 'pie()'
# (bar chart, histogram and pie chart)
# 2. Plotting with ggplot2
# Load the ggplot2 package
library(ggplot2)
# Build a data frame
peliculas <- data.frame(year,
                        disney)
# Plot using ggplot
ggplot(data = peliculas,
       mapping = aes(x = year,
                     y = disney)) +
  geom_point() +
  labs(title = 'disney')
# Gallery with plotting code: https://www.r-graph-gallery.com/
# Book "R Graphics Cookbook": https://r-graphics.org/
# Other plotting packages
# Flexdashboard - for dashboards
# RGL - for 3D graphics
# Plotly - for interactive graphics
# Leaflet - for maps
# RColorBrewer - for advanced colour handling
8d7f955f68e027699216630c1735f1402d9ff4a3
|
3d0787d24620c700303ecdc4453325b6235c0e5d
|
/02-decision_loop/break_next.R
|
c5a5a75d1e4e4eaf5c3e3cd3e466cf8a5002acf4
|
[] |
no_license
|
lincolnbrito/r-examples
|
2244e0160944f65c276a58c9e97b5a4ead8923b4
|
ab75c88576026ee839befb0f9acccf52ec6f2e4e
|
refs/heads/master
| 2020-03-08T10:55:10.518422
| 2018-04-05T03:21:00
| 2018-04-05T03:21:00
| 128,084,946
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 158
|
r
|
break_next.R
|
# Demonstrates break (exit the loop entirely) versus next (skip ahead
# to the following iteration).
x <- 1:5
for (v in x) {
  if (v == 4) break  # stop the loop: prints 1, 2, 3
  print(v)
}
for (v in x) {
  if (v == 3) next  # skip this value: prints 1, 2, 4, 5
  print(v)
}
|
04d004abb765f503fdb8fea554c3f74e924d6509
|
28bf4e2873739174b39181ebb6a52ab1b685f8f2
|
/src/archive/junk.R
|
4dc9c10b9806cb2d78ea8d2ff858d0d7bbe72d35
|
[] |
no_license
|
camroach87/capture-recapture-code
|
95ec4d9e584ca7a7af34c52b2d338bec464de463
|
e1e00b1c65e3c1fc379dc67d08f86e8a5380ef46
|
refs/heads/master
| 2021-01-22T14:20:17.610587
| 2014-06-04T12:09:53
| 2014-06-04T12:09:53
| 17,207,384
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,845
|
r
|
junk.R
|
# Archive scratchpad of dated exploratory snippets. These rely on functions
# (CR_RobustDesign, mkOpenSimMtrx, mkCloseSimMtrx, TestRMark, ChaoEst,
# JollySeber, Calc*) and objects (data, output, years) defined elsewhere;
# not intended to run top-to-bottom.
#### 23/9/2013 ####
CR_RobustDesign(10,sim="Y", N.0=4000, p=0.02, nsampOcc=200, pBirth=0.03)
#### 22/7/2013 ####
mkOpenSimMtrx(N.0=5000, t=14, p=0.05, phi=0.9,pBirth=0.25, pImmigration=0.25)
table(rowSums(output[[1]][,1:8]))
CR_RobustDesign(1, sim="Y", N.0=5000, p=0.02, phi=0.9,pBirth=0.25, pImmigration=0.25)
#### 19/7/2013 ####
#Comparing Chao estimator to CJS
mtrxCapt <- mkCloseSimMtrx(1000,15,0.01)
p <- TestRMark(mtrxCapt)
colSums(mtrxCapt)/p
mean(colSums(mtrxCapt)/p)
ChaoEst(mtrxCapt)
#### 16/7/2013 ####
# output capture matrix to a text file
# Need to source CR_RobustDesign.R
# Collapse multiple captures per occasion to presence/absence
mtrxCapt[mtrxCapt>1]<-1
write.table(as.matrix(mtrxCapt),"Programs/Data/MarkTest1.inp",col.names = F, row.names = F, sep="", quote=FALSE)
#Format for RMARK
library(RMark)
# One concatenated capture-history string per animal, with " 1;" suffix
mtrxCapt_f <- apply(format(mtrxCapt), 1, paste, collapse="")
mtrxCapt_f <- paste(mtrxCapt_f, " 1;")
write.table(as.matrix(mtrxCapt_f),"Programs/Data/MarkTest1.inp",col.names = F, row.names = F, sep="", quote=FALSE)
# Read by RMark
mtrxCapt_f <- convert.inp("Programs/Data/MarkTest1.inp")
mark(mtrxCapt_f)
#### 9/7/2013 ####
# Testing CR_RobustDesign.R
bla <- CR_RobustDesign(3,"Y", pCapture=0.03, pBirth=0.2)
plot(1:11, bla[[1]][,1], type="l", col="green", ylim=c(0,5000))
lines(1:14, bla[[2]])
############################### 24//2012 ###############################
#adult fish (total length >= 250) vs juveniles, analysed separately
data1 <- data[data$totallength>=250,]
JollySeber(data1)
data2 <- data[data$totallength<250,]
JollySeber(data2)
#########################################################################
############################### 24/9/2012 ###############################
#########################################################################
#Jolly Seber estimator computed from the per-occasion summary statistics
n <- CalcTotal(data)
m <- CalcMarked(data)
R <- CalcReleased(data)
Z <- CalcZ(data)
r <- CalcRecapt(data)
M <- m + R*Z/r
N <- n*M[1:14]/m
plot(N ~ years, type="l", col="green")
# Petersen-Lincoln estimator: N_i = n_i * n_{i-1} / m_i.
# FIX: the original line read `n[i]n[i-1]` (missing `*`), a parse error;
# the intended formula multiplies consecutive sample sizes.
Npl <- c()
for (i in 2:13) Npl[i] <- n[i] * n[i-1] / m[i]
#########################################################################
############################### 23/9/2012 ###############################
#########################################################################
# 23/9/2012 old version of CalcDistinct - not functioning correctly
# 23/9/2012 old version of CalcDistinct - not functioning correctly.
# Kept for reference only: intended to count, per year, the fish IDs not
# captured in any earlier year, with the final slot holding the running
# total. Do not use; see the ERROR note below.
CalcDistinct <- function(data) {
  years <- sort(unique(data$year))
  minYear <- min(years)
  maxYear <- max(years)
  curYear <- minYear
  k <- maxYear - minYear
  captFish <- NA
  fishYear <- list(NA)
  M <- NA
  # Fish IDs captured in each year (index 1 = minYear)
  for (j in 1:(k+1)) {
    fishYear[[j]] <- data$idfish[data$year == j + minYear - 1]
  }
  # calculate distinct fish captured each year
  for (j in 1:(k+1)) {
    if (j != 1) {
      ### ERROR HERE
      M[j] <- sum(!(fishYear[[j]] %in% captFish))
      ###not handling first distinct occasion correctly
      # NOTE(review): M[1] is forced to 0, so year 1's captures are never
      # counted as distinct; also the final line overwrites M[k+1] with
      # the running total, discarding the last year's own count.
    } else {
      M[1] <- 0
    }
    if (j == 1) {
      captFish <- fishYear[[1]]
    } else {
      captFish <- append(captFish, fishYear[[j]])
    }
  }
  M[k+1] <- sum(M)
  return(M)
}
#########################################################################
############################### OLD #####################################
#########################################################################
#plot(table(D)) date values are spaced sequentially rather than in their correct time position.
# thought - maybe just add a new column to existing data frame with the year each fish was caught - will then be able to easily sort
# the below program would be useful if captures did not occur yearly, but since they do there is little reason to do this (although it would be fun)
GetYearCounts <- function(tabD) {
dStart <- as.Date("1999/1/1")
dEnd <- as.Date("2012/1/1")
dCurrent <- startD
dNext <- seq(dCurrent, by = "1 year", length = 2)[2]
}
|
d72c84a25f55f316f723c116c71fd9a83caa2b29
|
2287e839ddf6bde1773161ac723e7aeda0baedea
|
/tests/test-mfdb_sql.R
|
17d5da3fdf3988dd03f113b3417a963d33217638
|
[] |
no_license
|
gadget-framework/mfdb
|
3e95a0d2331b310328f096bff87319c28ce6f0b3
|
1a5d866ddf9e82a178b63f34f269b9e171c70c58
|
refs/heads/6.x
| 2023-04-19T00:14:08.733554
| 2021-03-17T12:12:30
| 2021-03-17T12:15:03
| 19,570,112
| 2
| 3
| null | 2021-03-17T09:16:27
| 2014-05-08T10:48:44
|
R
|
UTF-8
|
R
| false
| false
| 10,313
|
r
|
test-mfdb_sql.R
|
library(mfdb)
library(unittest, quietly = TRUE)
helpers <- c('utils/helpers.R', 'tests/utils/helpers.R') ; source(helpers[file.exists(helpers)])
logging::logReset() # Don't let logging messages sneak into test output
ok_group("sql_quote", {
sql_quote <- mfdb:::sql_quote
ok(cmp_error(sql_quote(c()), "empty"), "Empty vector results in error")
ok(sql_quote("") == "''")
ok(sql_quote("3") == "'3'")
ok(sql_quote(4) == "4")
ok(sql_quote("Greengrocer's") == "'Greengrocer''s'")
ok(sql_quote("3", always_bracket = TRUE) == "('3')")
ok(sql_quote(c(1,2), always_bracket = TRUE) == "(1,2)")
ok(sql_quote(c(1,2), brackets = "[]") == "[1,2]")
ok(sql_quote(c(1, 2, 3)) == "(1,2,3)")
ok(sql_quote(c("a", "bee's", "c")) == "('a','bee''s','c')")
ok(sql_quote(c(1, NA, 3)) == "(1,NULL,3)")
ok(sql_quote(1, always_quote = TRUE) == "'1'")
ok(sql_quote(1:5, always_quote = TRUE) == "('1','2','3','4','5')")
})
ok_group("sql_create_index", {
ci <- mfdb:::sql_create_index
ok(cmp(ci("tbl", "col"), "CREATE INDEX ON tbl (col)"))
ok(cmp(ci("tbl", c("A", "B")), "CREATE INDEX ON tbl (A,B)"))
})
ok_group("mfdb_insert", {
mfdb_insert <- function (table_name, data_in, returning = "", extra = c()) {
return(capture.output({
out <- mfdb:::mfdb_insert(fake_mdb(), table_name, data_in, returning, extra)
cat("Returned:", out, "\n")
}))
}
ok(cmp(mfdb_insert("moo", list(moo_id = 8, oink = "a", baa = 78)), c(
"INSERT INTO moo (moo_id,oink,baa) VALUES (8,'a',78)",
"Returned: 1 ")),
"Insert single row")
ok(cmp(mfdb_insert("moo", data.frame(moo_id = c(1,2,3), oink = c("x","y","z"), bah = 43:45)), c(
"INSERT INTO moo (moo_id,oink,bah) VALUES (1,'x',43),(2,'y',44),(3,'z',45)",
"Returned: 3 ")),
"Insert a data frame in one batch")
ok(cmp(mfdb_insert("moo", data.frame(moo_id = 1:2005, oink = 1:2005)), c(
paste0("INSERT INTO moo (moo_id,oink) VALUES ", paste0(vapply(1:999, function (i) { paste0("(", i, ",", i, ")")}, ""), collapse = ",")),
paste0("INSERT INTO moo (moo_id,oink) VALUES ", paste0(vapply(1000:1999, function (i) { paste0("(", i, ",", i, ")")}, ""), collapse = ",")),
paste0("INSERT INTO moo (moo_id,oink) VALUES ", paste0(vapply(2000:2005, function (i) { paste0("(", i, ",", i, ")")}, ""), collapse = ",")),
"Returned: 2005 ")),
"Insert multiple batches, inserts for each batch gets summed")
ok(cmp(mfdb_insert("moo", data.frame(moo_id = 1:2005, oink = 1:2005), returning = 'moo_id'), c(
paste0("INSERT INTO moo (moo_id,oink) VALUES ", paste0(vapply(1:999, function (i) { paste0("(", i, ",", i, ")")}, ""), collapse = ","), " RETURNING moo_id"),
paste0("INSERT INTO moo (moo_id,oink) VALUES ", paste0(vapply(1000:1999, function (i) { paste0("(", i, ",", i, ")")}, ""), collapse = ","), " RETURNING moo_id"),
paste0("INSERT INTO moo (moo_id,oink) VALUES ", paste0(vapply(2000:2005, function (i) { paste0("(", i, ",", i, ")")}, ""), collapse = ","), " RETURNING moo_id"),
"Returned: 999 1000 6 ")),
"Insert multiple batches, get back a vector (this is the closest we get to returning)")
ok(cmp(mfdb_insert("moo", list(moo_id = 8, oink = "x"), returning = 'moo_id'), c(
"INSERT INTO moo (moo_id,oink) VALUES (8,'x') RETURNING moo_id",
"Returned: 1 ")), # TODO: This needs to actually test RETURNING
"Insert single row, returning something")
ok(cmp(mfdb_insert("moo", list(moo_id = 8, oink = "a"), extra = c("aardvark" = 99)), c(
"INSERT INTO moo (moo_id,oink,aardvark) VALUES (8,'a',99)",
"Returned: 1 ")),
"Insert single row, with extra data")
ok(cmp(mfdb_insert("moo", data.frame(moo_id = c(1,2,3), oink = c("x","y","z")), extra = c("aardvark" = 99)), c(
"INSERT INTO moo (moo_id,oink,aardvark) VALUES (1,'x',99),(2,'y',99),(3,'z',99)",
"Returned: 3 ")),
"Insert a data frame in one batch")
})
ok_group("mfdb_update", {
mfdb_update <- function (table_name, data_in, returning = "", extra = c(), where = c()) {
return(capture.output({
out <- mfdb:::mfdb_update(fake_mdb(), table_name, data_in, returning, extra, where)
cat("Returned:", out, "\n")
}))
}
ok(cmp(mfdb_update("moo", list(moo_id = 8, oink = "a", baa = 78)), c(
"UPDATE moo SET oink='a',baa=78 WHERE moo_id = 8",
"Returned: 1 ")),
"update single row")
ok(cmp(mfdb_update("moo", data.frame(moo_id = c(1,2,3), oink = c("x","y","z"), baa = 78)), c(
"UPDATE moo SET oink='x',baa=78 WHERE moo_id = 1",
"UPDATE moo SET oink='y',baa=78 WHERE moo_id = 2",
"UPDATE moo SET oink='z',baa=78 WHERE moo_id = 3",
"Returned: 3 ")),
"update multiple rows")
ok(cmp(mfdb_update("moo", list(moo_id = 8, oink = "a", baa = 78), extra = list(badger = 'mo'), where = list(case_study_id = 34)), c(
"UPDATE moo SET oink='a',baa=78,badger='mo' WHERE moo_id = 8 AND case_study_id=34",
"Returned: 1 ")),
"update single row, with extras and where")
})
ok_group("mfdb_disable_constraints", {
default_constraint_list <- function(table_name) data.frame(
name = rep(c("const1", "const2", "const3"), length(table_name)),
table_name = rep(table_name, each = 3),
definition = rep(c("a", "b", "c"), length(table_name)),
stringsAsFactors = FALSE)
disable_constraints <- function(table_name, code_block, am_owner = 1, constraint_list = default_constraint_list(table_name)) {
mdb <- fake_mdb()
mdb$ret_rows <- list(
"tableowner = current_user" = data.frame(count = am_owner),
"SELECT relname" = constraint_list
)
out <- capture.output(tryCatch({
out <- mfdb:::mfdb_disable_constraints(mdb, table_name, code_block)
cat("Returned:", out, "\n")
}, error = function (e) e$message))
if (am_owner == 0) {
ok(grepl("SELECT.*current_user", out[[1]]), "First query selects ownership")
return(out[2:length(out)])
}
ok(grepl("SELECT.*current_user", out[[1]]), "First query selects ownership")
ok(grepl("SELECT.*pg_get_constraintdef", out[[2]]), "Second query selects constraints")
return(out[3:length(out)])
}
ok(cmp(disable_constraints("tbl1", cat("executing code block\n")), c(
"ALTER TABLE fake_schema.tbl1 DROP CONSTRAINT const3",
"ALTER TABLE fake_schema.tbl1 DROP CONSTRAINT const2",
"ALTER TABLE fake_schema.tbl1 DROP CONSTRAINT const1",
"executing code block",
"ALTER TABLE fake_schema.tbl1 ADD CONSTRAINT const1 a",
"ALTER TABLE fake_schema.tbl1 ADD CONSTRAINT const2 b",
"ALTER TABLE fake_schema.tbl1 ADD CONSTRAINT const3 c",
"Returned: ")), "Removed and replaced constraints when successful")
ok(cmp(disable_constraints("tbl1", stop("Oh noes")), c(
"ALTER TABLE fake_schema.tbl1 DROP CONSTRAINT const3",
"ALTER TABLE fake_schema.tbl1 DROP CONSTRAINT const2",
"ALTER TABLE fake_schema.tbl1 DROP CONSTRAINT const1",
"ALTER TABLE fake_schema.tbl1 ADD CONSTRAINT const1 a",
"ALTER TABLE fake_schema.tbl1 ADD CONSTRAINT const2 b",
"ALTER TABLE fake_schema.tbl1 ADD CONSTRAINT const3 c",
"[1] \"Oh noes\"")), "Removed and replaced constraints when something went wrong")
ok(cmp(disable_constraints(c("tbl1", "tbl2"), stop("Oh noes")), c(
"ALTER TABLE fake_schema.tbl2 DROP CONSTRAINT const3",
"ALTER TABLE fake_schema.tbl2 DROP CONSTRAINT const2",
"ALTER TABLE fake_schema.tbl2 DROP CONSTRAINT const1",
"ALTER TABLE fake_schema.tbl1 DROP CONSTRAINT const3",
"ALTER TABLE fake_schema.tbl1 DROP CONSTRAINT const2",
"ALTER TABLE fake_schema.tbl1 DROP CONSTRAINT const1",
"ALTER TABLE fake_schema.tbl1 ADD CONSTRAINT const1 a",
"ALTER TABLE fake_schema.tbl1 ADD CONSTRAINT const2 b",
"ALTER TABLE fake_schema.tbl1 ADD CONSTRAINT const3 c",
"ALTER TABLE fake_schema.tbl2 ADD CONSTRAINT const1 a",
"ALTER TABLE fake_schema.tbl2 ADD CONSTRAINT const2 b",
"ALTER TABLE fake_schema.tbl2 ADD CONSTRAINT const3 c",
"[1] \"Oh noes\"")), "Removed and replaced constraints when something went wrong")
ok(cmp(disable_constraints("tbl1", cat("executing code block\n"), constraint_list = data.frame()), c(
"executing code block",
"Returned: ")), "Still works when no constraints exist")
out <- disable_constraints("tbl1", cat("executing code block\n"), am_owner = 0)
ok(cmp(disable_constraints("tbl1", cat("executing code block\n"), am_owner = 0), c(
"executing code block",
"Returned: ")), "Still executed code block")
})
ok_group("mfdb_table_exists", {
table_exists <- function(table_name, ret) {
mdb <- fake_mdb()
mdb$ret_rows <- ret
out <- capture.output(tryCatch({
out <- mfdb:::mfdb_table_exists(mdb, table_name)
cat("Returned:", out, "\n")
}, error = function (e) e$message))
return(out)
}
ok(cmp(table_exists("carol", ret = data.frame(count = 1)), c(
"SELECT COUNT(*) FROM information_schema.tables WHERE (table_schema IN ('fake_schema') OR table_schema = (SELECT nspname FROM pg_namespace WHERE oid = pg_my_temp_schema())) AND table_name IN ('carol')",
"Returned: TRUE ",
NULL)), "SQL looks good")
ok(cmp(table_exists("carol", ret = data.frame(count = 0)), c(
"SELECT COUNT(*) FROM information_schema.tables WHERE (table_schema IN ('fake_schema') OR table_schema = (SELECT nspname FROM pg_namespace WHERE oid = pg_my_temp_schema())) AND table_name IN ('carol')",
"Returned: FALSE ",
NULL)), "Can alter return value")
ok(cmp(table_exists(c("frank", "carol"), ret = data.frame(count = c(0, 1))), c(
"SELECT COUNT(*) FROM information_schema.tables WHERE (table_schema IN ('fake_schema') OR table_schema = (SELECT nspname FROM pg_namespace WHERE oid = pg_my_temp_schema())) AND table_name IN ('frank','carol')",
"Returned: FALSE TRUE ",
NULL)), "Vectorises")
})
|
ec13e20dc983f03ceaea5c66605b1a6ffcc83efd
|
87220c1f9a5cd789272fb950d47887a3a15ef0f5
|
/pySensorbase/client/client.R
|
fbd55f49cd9f7b2da8a62fc4ce7b60ec896faa73
|
[] |
no_license
|
nesl/splt
|
1f25c631536e0d639d5bf2acd6739c34a5bb35a0
|
4de9adc58ad38bd1e42545102c150403d2da72b1
|
refs/heads/master
| 2021-05-16T03:08:10.781402
| 2017-11-17T15:11:45
| 2017-11-17T15:11:45
| 12,226,748
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,656
|
r
|
client.R
|
client = function(rfidFile, pmFile, userID) {
# This is the client to make sense of the read data
# RFID file format
# Timestamp, Appliance ID, Tag ID
# PM file format
# Timestamp, Appliance ID, Watt
# Usage time
USAGE_TIME = 60
total_power = 0
# Read the data from the file
rfid_data = read.table(rfidFile, head = TRUE, sep = ',')
pm_data = read.table(pmFile, head = TRUE, sep = ',')
# Find all tag readings of this user
for(i in 1:length(rfid_data$tag_id)) {
if(userID == rfid_data$tag_id[i]) {
print('user used appliance')
print(rfid_data$app_id[i])
print('at time')
print(rfid_data$timestamp[i])
print('the amount of power used is')
# Find the power usage of the appliance
for(j in 1:length(pm_data$timestamp)) {
if((rfid_data$timestamp[i] == pm_data$timestamp[j])
&& (rfid_data$app_id[i] == pm_data$app_id[j])){
# idle_power is the minimum power seen in the 10 seconds around usage
idle_power = min(pm_data$watt[(j - 5) : (j + 5)])
integ = 0
# the appliance will be in use for usage_time or end of data stream
for(k in j:min(j + USAGE_TIME, length(pm_data$timestamp) - j)) {
# add to integral if greater than idle power
if(pm_data$watt[k] > idle_power)
integ = integ + pm_data$watt[k]
}
# Add to the total usage of the user
total_power = total_power + (integ / min(j + USAGE_TIME, length(pm_data$timestamp) - j))
}
}
}
}
# return the total power usage
print('total power = ')
total_power
}
|
0eeb7e7a8650cef707670482fe5bbf224fa2c0f8
|
3dbb408ab830a572260dd9c8f755d7ee00cdf89c
|
/day02/part1.R
|
f8b88ae2930dcf7c534251e78368a477476a2d95
|
[] |
no_license
|
ryanbthomas/adventofcode2020
|
32bf46be3b91479ccab69ae67d6cc95dbb2d6da6
|
2dfd227538c08e6d6fdf30c25b8ac5b6b72574c2
|
refs/heads/main
| 2023-04-09T08:50:51.176833
| 2021-04-23T19:36:58
| 2021-04-23T19:36:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 645
|
r
|
part1.R
|
input_file <- "day2/input/day2_real"
passwd <-readLines(input_file)
library(stringr)
library(purrr)
policy_passwd <- str_split(passwd, pattern = ":")
policies <- map_chr(policy_passwd, pluck, 1)
passwd <- map_chr(policy_passwd, pluck, 2)
chr <- str_split(policies, pattern = " ")
rng <- map_chr(chr, pluck, 1) %>%
str_split(pattern = "-") %>%
map(as.integer)
ltr <- map_chr(chr, pluck, 2)
cnts <- str_count(passwd, pattern = ltr)
valid_pwds <- rep(0L, length(rng))
for( i in seq_along(rng)) {
valid_pwds[i] <- as.integer(cnts[i] >= rng[[i]][1] && cnts[i] <= rng[[i]][2])
}
print(sum(valid_pwds))
|
1b7af165b1fe91011cd02641058b072c7a0efb25
|
2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0
|
/fuzzedpackages/CLVTools/R/pnbd_dyncov_LL_Bi.R
|
a53a975ec3573748a006b2865349f2f9bb688bb8
|
[] |
no_license
|
akhikolla/testpackages
|
62ccaeed866e2194652b65e7360987b3b20df7e7
|
01259c3543febc89955ea5b79f3a08d3afe57e95
|
refs/heads/master
| 2023-02-18T03:50:28.288006
| 2021-01-18T13:23:32
| 2021-01-18T13:23:32
| 329,981,898
| 7
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,769
|
r
|
pnbd_dyncov_LL_Bi.R
|
.pnbd_dyncov_LL_Bi <- function(data.work.trans.aux, cbs.t.x, i){
t.x <- Aji <- adj.Walk1 <- d <- Id <- Aki <- Num.Walk <- adj.Max.Walk <- delta <- NULL
# because there are only AuxTrans there is exactly one single row for each customer
# -> there is no need for by=Id
#Also replacing Walk_i with Max.Walk was ommited by changing >=/< relations in Aki
results <- data.table(Id = data.work.trans.aux$Id, Aji=0, Aki=0)
data.work.trans.aux[, t.x:=cbs.t.x]
##Aji Part ------------------------------------------------------------------
if(i == 1 || i == 2)
{
#Aji only consists of Aj1
#no sum by needed as only AuxTrans are used
#No need to remove NAs as it will be removed when summing with Aki
results[, Aji:= data.work.trans.aux[, adj.Walk1 * d]]
}else{
middle.walks <- paste( "c(", paste0("adj.Walk", 2:(i-1), collapse = ","), ")")
results[, Aji:= data.work.trans.aux[, sum(adj.Walk1*d, eval(parse(text=middle.walks)), na.rm=T), by=Id]$V1]
}
# Aki Part ------------------------------------------------------------------
if(i==1){
# omit delta part
results[, Aki:= data.work.trans.aux[, adj.Walk1 * (-cbs.t.x - d)] ]
}else{
# include delta part
ind.num.se.i <- data.work.trans.aux[,.I[Num.Walk <= i]]
ind.num.g.i <- data.work.trans.aux[,.I[Num.Walk > i]]
results[ind.num.se.i, Aki:= data.work.trans.aux[ind.num.se.i, adj.Max.Walk
* (-cbs.t.x[ind.num.se.i] - d - delta*(Num.Walk-2))] ]
results[ind.num.g.i, Aki:= data.work.trans.aux[ind.num.g.i, get(paste0("adj.Walk", i))
* (-cbs.t.x[ind.num.g.i] - d - delta*(i-2))]]
}
return(results[, sum(Aki, Aji, na.rm=T), by=Id]$V1)
}
|
9e15ebaf5a8a438d362240e87bf034c837aa72b1
|
f834196d68e850df95f24bec0ed38ccf686b2e17
|
/man/anno_barplot.rd
|
8589c7229816ee8850c0d1b071133a0c7818e8b3
|
[] |
no_license
|
kassambara/ComplexHeatmap
|
141bfc3fb0a22619697d7a418c5eb262c5ddac96
|
ff9ffa810ca9f16a253861d97301062962dbe38b
|
refs/heads/master
| 2020-12-03T05:14:27.135170
| 2015-07-13T21:25:54
| 2015-07-13T21:25:54
| 39,065,677
| 1
| 2
| null | 2015-07-14T09:20:58
| 2015-07-14T09:20:58
| null |
UTF-8
|
R
| false
| false
| 882
|
rd
|
anno_barplot.rd
|
\name{anno_barplot}
\alias{anno_barplot}
\title{
Using barplot as annotation
}
\description{
Using barplot as annotation
}
\usage{
anno_barplot(x, which = c("column", "row"),
gp = gpar(fill = "#CCCCCC"), axis = FALSE, axis_side = NULL,
axis_gp = gpar(fontsize = 8), ...)}
\arguments{
\item{x}{a vector of values.}
\item{which}{is the annotation a column annotation or a row annotation?}
\item{gp}{graphic parameters.}
\item{axis}{whether add axis}
\item{axis_side}{value in "left", "right", "bottom" and "top"}
\item{axis_gp}{graphic parameters for axis}
\item{...}{for future use.}
}
\value{
A graphic function which can be set in \code{\link{HeatmapAnnotation}} constructor method.
}
\author{
Zuguang Gu <z.gu@dkfz.de>
}
\examples{
f = anno_barplot(rnorm(10))
grid.newpage(); f(1:10)
f = anno_barplot(rnorm(10), which = "row")
grid.newpage(); f(1:10)}
|
f7f32967fce750fc6cab577cc66f5dbe378f4b3a
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/FSAdata/R/WalleyeEL.R
|
563a704ec70c5c4e1ce6b4369781332a87bf1347
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,426
|
r
|
WalleyeEL.R
|
#' @title Stock and recruitment data for Walleye from Escanaba Lake, WI, 1958-1992.
#'
#' @description Abundance of age-0 and age-5 and older Walleye (\emph{Sander vitreus}), abundance of adult Yellow Perch (\emph{Perca flavescens}), and coefficient of variation of May temperatures for Escanaba Lake, WI, 1958-1992.
#'
#' @name WalleyeEL
#'
#' @docType data
#'
#' @format A data frame of 39 observations on the following 5 variables:
#' \describe{
#' \item{yrclass}{Year-class of the data}
#' \item{age0}{Abundance of age-0 Walleye (recruits)}
#' \item{age5}{Abundance of age-5 and older Walleye (stock)}
#' \item{maycv}{Coefficient of variation of May temperatures in birth year}
#' \item{yep}{Abundance of adult (larger than 152.4 mm) Yellow Perch}
#' }
#'
#' @section Topic(s):
#' \itemize{
#' \item Stock-Recruit
#' \item Recruitment
#' }
#'
#' @concept 'Stock-Recruit' Recruitment
#'
#' @source Hansen, M. J., M. A. Bozek, J. R. Newby, S. P. Newman, and M. D. Staggs. 1998. Factors affecting recruitment of walleyes in Escanaba Lake, Wisconsin, 1958-1995. North American Journal of Fisheries Management 18:764-774.
#'
#' @keywords datasets
#'
#' @examples
#' data(WalleyeEL)
#' str(WalleyeEL)
#' head(WalleyeEL)
#' op <- par(mfrow=c(1,2),pch=19)
#' plot(age0~yrclass,data=WalleyeEL,type="l")
#' plot(age0~age5,data=WalleyeEL)
#' par(op)
#'
NULL
|
27a49832cfef19d5bab1c186dd140b6b747c4808
|
a96e99a689291c49dd86d377f074ebadb2437fe9
|
/man/vno.Rd
|
705de40aaaa4b9c076c6b66380fe1e8ff7436ba4
|
[] |
no_license
|
cran/GPIC
|
9d6d0322fe75d055607569f381f8d4cdde87a293
|
f8d8ddaee313328db92cf88004f888042cbe850e
|
refs/heads/master
| 2023-03-18T05:29:13.277515
| 2021-03-01T08:00:13
| 2021-03-01T08:00:13
| 343,411,693
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 609
|
rd
|
vno.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/vno-data.R
\docType{data}
\name{vno}
\alias{vno}
\title{Results of Vietnamese National Olympiads 2010-2020}
\format{
A data frame with 24151 rows and 5 variables:
\describe{
\item{ID}{student ID}
\item{Year}{year of award}
\item{Team}{administrative contest team that delegated the student}
\item{Subject}{test subject}
\item{Prize}{award achieved}
}
}
\source{
\doi{10.5281/zenodo.3764691}
}
\usage{
vno
}
\description{
A dataset containing the information of
more than 24,000 awarded students over 11 years.
}
\keyword{datasets}
|
d5473a285dfe0aa5230139ea54c0b3d9f1e1e8f5
|
bf2d49ee7650586b57f846faf3b8915dece85cc4
|
/best.r
|
e662e6cf572fcc1ae8316f11d4c33c9d66e45455
|
[] |
no_license
|
SnowCrash35/ProgrammingAssignment3
|
16adfeca86c2572b87799715edd3c9b62b5c0868
|
df9b455fca8e6fcf989b37123d6bd09acc88df3a
|
refs/heads/master
| 2020-06-27T05:07:35.273886
| 2017-07-13T23:46:27
| 2017-07-13T23:46:27
| 97,048,929
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,671
|
r
|
best.r
|
best <- function(state, outcome)
{
# read outcome data from file
# ===========================
# Column 2: Hospital Name
# Column 7: State
# Column 11: 30-day death rate for heart attack
# Column 17: 30-day death rate for heart failure
# Column 23: 30-day death rate for Pneumonia
#print("reading data from file...")
data <- read.csv("outcome-of-care-measures.csv", colClasses = "character")
# check that input arguments are valid
# ======================================
states <- unique(data$State)
if( !(state %in% states) )
{
stop("invalid state")
}
if(outcome == "heart attack")
{ f <- 11 }
else if(outcome == "heart failure")
{ f <- 17 }
else if(outcome == "pneumonia")
{ f <- 23 }
else
{
stop("invalid outcome")
}
# return hospital name in that state with lowest 30-day death rate
# ================================================================
# remove columns we are not intersted with
z <- data[data$State == state, c(2, 7, f)]
# coerce death rate data to numeric type
z[, 3] <- as.numeric(z[, 3])
# omit NA values
z <- na.omit(z)
# find the MIN death rate and return the respective Hospital Name
z <- z[ z[,3] == min(z[,3]), ]
z$Hospital.Name
}
testbest <- function()
{
print(best("TX", "heart attack"))
print(best("TX", "heart failure"))
print(best("MD", "heart attack"))
print(best("MD", "pneumonia"))
print(best("BB", "heart attack"))
print(best("NY", "hert attack"))
}
|
c6e903ff2b11166efd2cd2aabdfae3290d1e1c9f
|
2492c8dee590ff43db2189ddca8c68ac32d49bdf
|
/TSExperiment/R/boxtest.R
|
9b4e407abc0c2b769364a2ad947a926690518c29
|
[
"MIT"
] |
permissive
|
dfreestone/TSLibrary
|
1848fd0f7cc601bee816dd377742b0901ab3045f
|
5b864d23e2c26da1745fc039b5c0358dbda7e69a
|
refs/heads/master
| 2023-03-08T12:07:22.163911
| 2020-04-08T21:21:52
| 2020-04-08T21:21:52
| 83,917,970
| 0
| 0
|
MIT
| 2022-12-12T10:27:35
| 2017-03-04T19:10:41
|
HTML
|
UTF-8
|
R
| false
| false
| 13,298
|
r
|
boxtest.R
|
# boxtest
# perform standard box tests and email the results
#
# Author(s) : David Freestone (freestoned@wpunj.edu)
# Date : 2017-05-28
#
# This code is covered under the MIT license
# Copyright (c) , David M. Freestone
# All rights reserved.
#
# -------------------------------------------------------------------- #
# Statistics from h experiment (19,501,302 data points over 220 days) #
# -------------------------------------------------------------------- #
#
# Amount of food:
# |mean |sd |median |iqr
# |2.87 |1.16 |3 |1.02
#
# Left detection latency
#
# |mean |sd |median |iqr
# |30.35 |694.47 |0.47 |0.43
#
# Right detection latency
#
# |mean |sd |median |iqr
# |98.81 |1549.66 |0.51 |0.2
#
# Number of blocked deliveries
# |mean |sd |median |iqr
# |12.54 |37.49 |2 |10
#
# TODO(David): Duration of blocked pellets
#
# The number of discrepencies between the on and offs
# |mean |sd |median |iqr | max | min
# |0.18 |0.50 |0 |0 | 3 | -1
# TODO(David): Break this up into functions so we can ouput sections
# to the TSBrowser
#' Perform a boxtest
#'
#' @return NULL
#' @importFrom magrittr %>%
#' @export
#' @examples
Boxtest <- function(){
needs(tidyverse, ggplot2, TSLib)
dropbox = DropBoxPaths()$LocalActiveExperimentPath
figure_path = file.path(dropbox, "output")
output_filename = file.path(dropbox, "output", "output.txt")
# Thresholds for the boxtest
# TODO(David): Move this to the caller or to a file?
anchor_event = "Off_Daytime"
anchor_time = c(20, 0)
# Values chosen based on the "h" experiment (see above)
min_food_per_day = 2.8 # grams
median_latency_tolerance = 0.25 # proportion
num_blocked_tolerance = 12 # count (mean of "h" experiment because median is 2)
maximum_time_without_data = 6 # hours
maximum_number_of_on_off_discrepencies = 1 # count
start_time = Sys.time()
sink(file = output_filename)
cat("---------------------\n")
cat("Box checks started at", format(start_time), "\n")
cat("---------------------\n")
sink()
# ---------------- #
# Read Data #
# ---------------- #
# DEBUG with known sessions (in case there aren't 0.999 files)
# files = "/Users/dmf025/Dropbox/lab/experiments/active/k_time_vs_magnitude/data/mpc/*.042"
# file = "/Users/dmf025/Dropbox/lab/experiments/system/mouse_eventcodes.csv"
# data = Sys.glob(files) %>%
# mpc_load_files() %>%
# mpc_tidy(file=file)
data = ReadActiveExperimentFiles()
recent_date = max(data$date)
# ---------------- #
# Analysis #
# ---------------- #
## Summary information about the day ##
summary = data %>%
group_by(subject, date) %>%
summarize(time = max(time)/3600,
nDaytime = sum(event=="On_Daytime", na.rm=TRUE),
nNighttime = sum(event=="Off_Daytime", na.rm=TRUE))
## Summary information about the feeding periods ##
summary_feedingperiods = data %>%
group_by(subject, date) %>%
mutate(feeding = feeding_periods(event)) %>%
filter(feeding>0) %>%
mutate(feeding = cumsum(feeding != lag(feeding, default=0))) %>%
group_by(subject, date, feeding) %>%
summarize(hasVariables = any(event=="Variable")) %>%
group_by(subject, date) %>%
summarize(nFeedingPeriods = length(unique(feeding)),
AllFeedingPeriodsHaveVariables = nFeedingPeriods == sum(hasVariables))
# Summary information about the amount of food
summary_foodamount = data %>%
group_by(subject, date) %>%
summarize(food = sum(event %in% c("On_Left_Feeder", "On_Right_Feeder")),
amount = 0.02 * food)
## Summary information about the detection latencies ##
df = data %>%
group_by(subject, date) %>%
mutate(left = trialdef(event, c("On_Left_Feeder", "On_Left_Pellet"), fromfirst=TRUE),
right = trialdef(event, c("On_Right_Feeder", "On_Right_Pellet"), fromfirst=TRUE))
left_latencies = df %>%
group_by(subject, date, left) %>%
mutate(time = time - time[1]) %>%
slice(n()) %>%
filter(left>0) %>%
arrange(subject, date, time) %>%
group_by(subject) %>%
mutate(p = (1:n()) / n(),
mdn = median(time))
right_latencies = df %>%
group_by(subject, date, right) %>%
mutate(time = time - time[1]) %>%
slice(n()) %>%
filter(right>0) %>%
arrange(subject, date, time) %>%
group_by(subject) %>%
mutate(p = (1:n()) / n(),
mdn = median(time))
df1 = left_latencies %>%
group_by(subject, date) %>%
summarize(left_n = n(),
left_mdn = mdn[1])
df2 = right_latencies %>%
group_by(subject, date) %>%
summarize(right_n = n(),
right_mdn = mdn[1])
summary_latencies = left_join(df1, df2, by=c("subject", "date"))
rm(df, df1, df2)
## Summary information about the blocked deliveries ##
blocked_deliveries = data %>%
group_by(subject, date) %>%
mutate(left = trialdef(event, c("On_Left_Pellet", "On_Left_Feeder", "Off_Left_Pellet")),
right = trialdef(event, c("On_Right_Pellet", "On_Right_Feeder", "Off_Right_Pellet"))) %>%
filter(event %in% c("On_Left_Feeder", "On_Right_Feeder"))
summary_blocked = blocked_deliveries %>%
group_by(subject, date) %>%
summarize(amount = sum(left>0) + sum(right>0))
## Get the actual wall time of the last event ##
summary_lastevent = data %>%
group_by(subject, date) %>%
mutate(timeofday = time_of_day(date, time, event, anchor_time, anchor_event)) %>%
slice(n())
## Diagnostics for on-off pairs ##
summary_discrepencies= data %>%
group_by(subject, date) %>%
do(diagnostics(.$event))
# -------------------------- #
# Output checks to text file #
# -------------------------- #
sink(file = output_filename, append=TRUE)
cat(sprintf("[%s]: All event times are less than 24 hours\n",
ifelse(all(summary$time<24), "OK", "FAILED")))
cat(sprintf("[%s]: Only 1 daytime per day\n",
ifelse(all(summary$nDaytime==1), "OK", "FAILED")))
cat(sprintf("[%s]: Only 1 nighttime per day\n",
ifelse(all(summary$nNighttime==1), "OK", "FAILED")))
cat(sprintf("[%s]: Variable found per feeding phase\n",
ifelse(all(summary_feedingperiods$AllFeedingPeriodsHaveVariables), "OK", "FAILED")))
cat(sprintf("[%s]: All Ons and Offs are consistent (within %d).\n",
ifelse(all(abs(summary_discrepencies$difference)<=maximum_number_of_on_off_discrepencies),
"OK", "FAILED"), maximum_number_of_on_off_discrepencies))
cat(sprintf("[%s]: All animals received more than %2.1f grams of food\n",
ifelse(all(summary_foodamount$amount>min_food_per_day), "OK", "FAILED"),
min_food_per_day))
cat(sprintf("[%s]: Food detection latencies are normal (within %1.2f from 0.5)\n",
ifelse(all(abs(summary_latencies$left_mdn - 0.5)<median_latency_tolerance
& abs(summary_latencies$right_mdn - 0.5)<median_latency_tolerance),
"OK", "FAILED"), median_latency_tolerance))
cat(sprintf("[%s]: Blocked food deliveries are normal (under %d)\n",
ifelse(all(summary_blocked$amount <= num_blocked_tolerance), "OK", "FAILED"),
num_blocked_tolerance))
cat(sprintf("[%s]: Last event was more recent than %s hours\n",
ifelse(all(difftime(force_tz(start_time, "UTC"), summary_lastevent$timeofday, units="hours") < maximum_time_without_data),
"OK", "FAILED"), maximum_time_without_data))
sink()
all_tests_passed = all(all(summary$time<24),
all(summary$nDaytime==1),
all(summary$nNighttime==1),
all(summary_feedingperiods$AllFeedingPeriodsHaveVariables),
all(summary$nNighttime==1), # TODO(David): Fix this one
all(summary_foodamount$amount>min_food_per_day),
all(abs(summary_latencies$left_mdn - 0.5)<median_latency_tolerance & abs(summary_latencies$right_mdn - 0.5)<median_latency_tolerance),
all(difftime(force_tz(start_time, "UTC"), summary_lastevent$timeofday, units="hours") < maximum_time_without_data),
all(abs(summary_discrepencies$difference)<=maximum_number_of_on_off_discrepencies))
# ---------------- #
# Figures #
# ---------------- #
## Food amounts ##
ggplot(summary_foodamount) +
labs(x="subject", y="food amount", title=paste0("date: ", recent_date)) +
theme(text = element_text(size=18)) +
geom_bar(aes(x=subject, y=amount), stat="identity") +
geom_hline(yintercept=3, size=1.2, linetype="dashed") +
ggsave(file.path(figure_path, "food_amounts.pdf"),
dpi=600, height=6, width=6, units="in", device=cairo_pdf)
feeding_periods = data %>%
group_by(subject, date) %>%
filter(event %in% c("On_FeedingPeriod1", "On_FeedingPeriod2",
"Off_FeedingPeriod1", "Off_FeedingPeriod2"))
data %>%
group_by(subject, date) %>%
filter(event %in% c("On_Left_Feeder", "On_Right_Feeder")) %>%
mutate(amount = 0.02 * (1:n())) %>%
ggplot() +
theme(text = element_text(size=18)) +
labs(x="time (in hours)", y="food amount", title=paste0("date: ", recent_date)) +
coord_cartesian(xlim=c(0, 24)) +
geom_vline(aes(xintercept=time/3600), size=0.5, linetype="dashed", data=filter(feeding_periods, startsWith(as.character(event), "On_"))) +
geom_vline(aes(xintercept=time/3600), size=0.5, linetype="dashed", color="#ff7ca3", data=filter(feeding_periods, startsWith(as.character(event), "Off_"))) +
geom_hline(yintercept=min_food_per_day, size=0.75, color="gray70") +
geom_line(aes(x=time/3600, y=amount), size=1.2, color="#70a6ff") +
facet_wrap(~subject) +
ggsave(file.path(figure_path, "food_amounts_cumulative.pdf"),
dpi=600, height=9, width=9, units="in", device=cairo_pdf)
## Blocked deliveries ##
ggplot(summary_blocked) +
labs(x="subject", y="blocked deliveries", title=paste0("date: ", recent_date)) +
theme(text = element_text(size=18)) +
geom_hline(yintercept=num_blocked_tolerance, size=0.75, color="gray70") +
geom_bar(aes(x=subject, y=amount), stat="identity") +
ggsave(file.path(figure_path, "blocked_deliveries.pdf"),
dpi=600, height=6, width=6, units="in", device=cairo_pdf)
df = blocked_deliveries %>%
group_by(subject, date) %>%
mutate(left = cumsum(left>0),
right = cumsum(right>0))
ggplot(df) +
theme(text = element_text(size=18)) +
labs(x="time (in hours)", y="blocked deliveries", title=paste0("date: ", df$date[1])) +
coord_cartesian(xlim=c(0, 24)) +
geom_vline(aes(xintercept=time/3600), size=0.5, linetype="dashed", data=filter(feeding_periods, startsWith(as.character(event), "On_"))) +
geom_vline(aes(xintercept=time/3600), size=0.5, linetype="dashed", color="#ff7ca3", data=filter(feeding_periods, startsWith(as.character(event), "Off_"))) +
geom_line(aes(x=time/3600, y=left), size=1.2, color="#84c3ff", data=filter(df, left>0)) +
geom_line(aes(x=time/3600, y=right), size=1.2, color="#ff7ff6", data=filter(df, right>0)) +
facet_wrap(~subject) +
ggsave(file.path(figure_path, "blocked_deliveries_cumulative.pdf"),
dpi=600, height=9, width=9, units="in", device=cairo_pdf)
rm(df)
## Detection latencies ##
ggplot(summary_latencies) +
theme(text = element_text(size=18)) +
labs(x="detection latency", y="cumulative fraction", title=paste0("date: ", recent_date)) +
coord_cartesian(ylim=c(0, 1)) +
geom_hline(yintercept=0.5, size=0.5, linetype="dashed") +
geom_jitter(aes(x=subject, y=time, group=subject), shape=19, alpha=1/2, color="#84c3ff", width=0.1, data=left_latencies) +
geom_jitter(aes(x=subject, y=time, group=subject), shape=19, alpha=1/2, color="#ff7ff6", width=0.1, data=right_latencies) +
geom_point(aes(x=subject, y=mdn, group=subject), size=4, shape=19, alpha=1/2, color="#41a1fc", data=left_latencies) +
geom_point(aes(x=subject, y=mdn, group=subject), size=4, shape=19, alpha=1/2, color="#ff42f1", data=right_latencies) +
ggsave(file.path(figure_path, "detection_latencies.pdf"),
dpi=600, height=9, width=9, units="in", device=cairo_pdf)
# ---------------- #
# Clean up #
# ---------------- #
sink(file = output_filename, append=TRUE)
cat("\nBoxtest figures written to disk.\n")
cat(sprintf("Box checks finished at %s [%s seconds]\n", format(Sys.time()),
round(Sys.time() - start_time, digits=3)))
sink()
SendMail(sprintf("Boxtest results: %s", ifelse(all_tests_passed, "OK.", "FAILED.")),
body=sprintf(paste(readLines(output_filename, encoding="UTF-8"), collapse="\n")))
# TODO(David): Get the attachements to work
# attachments=c(file.path(figure_path, "detection_latencies.pdf"),
# file.path(figure_path, "blocked_deliveries.pdf"),
# file.path(figure_path, "food_amounts.pdf")))
}
|
bcdbbf5b2430f0544ef057fa4ef95adce8fcb782
|
f2d3a834eb614c444e4c4d2f863577e804d9fb70
|
/man/scale_01.Rd
|
9b3d062f9575daa35bd9a26e09f4a17651061a96
|
[] |
no_license
|
David-Hervas/clickR
|
150669cc67575659258e2bb44f429544e52e809c
|
cb738e505375376d91ac37eb01813ac3fb0e1432
|
refs/heads/master
| 2023-08-14T05:06:15.095067
| 2023-08-07T17:01:53
| 2023-08-07T17:01:53
| 90,495,146
| 3
| 3
| null | null | null | null |
UTF-8
|
R
| false
| true
| 283
|
rd
|
scale_01.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/descriptive.R
\name{scale_01}
\alias{scale_01}
\title{Scales data between 0 and 1}
\usage{
scale_01(x)
}
\arguments{
\item{x}{A numeric variable}
}
\value{
Scaled data
}
\description{
Escale data to 0-1
}
|
25c4caf73cbcf7069e997c4bf7b30c79c941c954
|
13895420920703501ab66c28a3927089a2de042e
|
/R/ICLUST.diagram.R
|
d0b9cd5d3f60f59f1dacfb77e1386366d2487a76
|
[] |
no_license
|
cran/psych
|
3349b3d562221bb8284c45a3cdd239f54c0348a7
|
ee72f0cc2aa7c85a844e3ef63c8629096f22c35d
|
refs/heads/master
| 2023-07-06T08:33:13.414758
| 2023-06-21T15:50:02
| 2023-06-21T15:50:02
| 17,698,795
| 43
| 42
| null | 2023-06-29T05:31:57
| 2014-03-13T05:54:20
|
R
|
UTF-8
|
R
| false
| false
| 15,658
|
r
|
ICLUST.diagram.R
|
#modified 6/6/20 to vectorize the labels and rectangles.
#modified 8/19/22 to make labels characters (which they are normally)
#Draw the hierarchical cluster structure produced by ICLUST as a path diagram:
#one rectangle per observed variable, one ellipse per cluster (annotated with
#its alpha and beta coefficients), and arrows labeled with the correlations
#joining them.
#Strategy: first GATHER all geometry (rect.list / arrow.list / cluster.list)
#via the recursive down() helper without drawing, then draw everything in a
#few vectorized calls -- much faster than element-by-element drawing.
#Arguments of note:
#  ic        - object returned by ICLUST (uses ic$results, ic$loadings,
#              ic$size, ic$clusters)
#  labels    - optional variable labels (defaults to rownames(ic$loadings))
#  min.size  - clusters at or below this size get a smaller ellipse
#  bottomup  - arrow direction: variables->clusters (TRUE) or reverse
#  plot      - if FALSE, geometry is computed but nothing is drawn
#Invisibly returns the variable labels in plotted (vertical) order.
#NOTE(review): relies on psych-internal helpers dia.rect, dia.cluster,
#dia.cluster1, dia.arrow, nchar2numeric and on psychTools::dfOrder.
"iclust.diagram" <-
function(ic,labels=NULL,short=FALSE,digits=2,cex=NULL,min.size=NULL,e.size=1,colors=c("black","blue"), main="ICLUST diagram",cluster.names = NULL,marg=c(.5,.5,1.5,.5),plot=TRUE,bottomup=TRUE) {
old.par<- par(mar=marg) #give the window some narrower margins
on.exit(par(old.par)) #set them back
clusters <- ic$results #the main table from ICLUST
num <- nrow(clusters)
num.var <- num+1
#scale text down as the number of variables grows (cap at 1)
if(is.null(cex)) cex <- min(16/num.var,1)
if (is.null(labels)) {
var.labels <- rownames(ic$loadings)} else {var.labels=labels}
if (short) {var.labels <- paste("V",1:num.var,sep="")}
if(is.null(var.labels)) {var.labels <- paste("V",1:num.var,sep="")}
var.labels <- as.character(var.labels) #added 8/19/22
#substitute the user labels for the "V<k>" placeholders in the results table
fixed <- fix.names(ic,var.labels)
clusters <- fixed$ic$results
max.len <- max(nchar((var.labels)))
if(is.null(cluster.names)) cluster.names <- rownames(clusters) #added Sept 2, 2012
names(cluster.names) <- rownames(clusters)
length.labels <- max(max.len* .15 * cex,.25*cex)
##
nc <- length(ic$size)
nvar <- sum(ic$size)
last <- dim(clusters)[1]
max.size <- max(ic$size)
#choose x/y plotting limits so long labels fit to the left of the rectangles
#limx <- c(-length.labels,nvar+2) #for long names and not many variables this is ugly
if(nvar < 12) {limx <- c(-max.len*.08 * cex,nvar+2)} else {limx <- c(-length.labels,nvar+2)} #for long names and not many variables this is ugly
limy <- c(0,nvar+1)
if(nvar < 12) e.size <- e.size * .7 #this is a kludge to make small problems look better
if(is.null(min.size)) min.size <- .1 * nvar
#open the (empty) plotting frame; strwidth needs an open device, hence the split
if(plot) {plot(0,type="n",xlim=limx,ylim=limy,frame.plot=FALSE,axes=FALSE,ylab="",xlab="",main=main)
new.max.len <- max(strwidth(var.labels,units="user"))} else {new.max.len =10}
if (new.max.len > max.len) {limx <- c(-new.max.len/2,nvar+2)
if(plot) plot(0,type="n",xlim=limx,ylim=limy,frame.plot=FALSE,axes=FALSE,ylab="",xlab="",main=main)}
top <- num.var
done <- 0
#accumulators filled by down(); drawn all at once below
rect.list <- list()
arrow.list <- list()
cluster.list <- list()
if (nc==1) {head <- num
size <- num.var
y.loc <- clusters[head,"size2"]
v.loc <- down(clusters,head,size,y.loc,old.head= NULL,old.loc=NULL,min.size=min.size,e.size=e.size,digits=digits,cex=cex,limx=limx,limy=limy,colors=colors,cluster.names=cluster.names,rect.list=rect.list,arrow.list=arrow.list,cluster.list=cluster.list,bottomup=bottomup)
rect.list <- c(rect.list$rect.list,v.loc$rect.list)
cluster.list <- v.loc$cluster.list
arrow.list <- v.loc$arrow.list } else {
#the multiple cluster case
for(clust in 1:nc) {
#size <- ic$size[clust]
size <- sum(abs(ic$clusters[,clust]))
#cluster columns are named "C<k>"; anything else is an unclustered variable
if (substr(colnames(ic$clusters)[clust],1,1)=="C") {
#head <- which(rownames(clusters)==names(ic$size[clust]))
head <- which(rownames(clusters)==colnames(ic$clusters)[clust])
cluster <- clusters[head,]
y.loc <- clusters[head,"size2"] + done
v.loc <- down(clusters,head,size,y.loc,old.head= NULL,old.loc=NULL,min.size=min.size,e.size=e.size,digits=digits,cex=cex,limx=limx,limy=limy,colors=colors,cluster.names = cluster.names,rect.list=rect.list,arrow.list=arrow.list,cluster.list=cluster.list,bottomup=bottomup)
rect.list <- v.loc$rect.list
cluster.list <- v.loc$cluster.list
arrow.list <- v.loc$arrow.list
} else {v.name <- names(which(ic$clusters[,clust] ==1)) #the case of a non-clustered variable
v.loc <- dia.rect(0,done+.5,v.name,xlim=limx,ylim=limy,cex=cex,draw=FALSE)
rect.list <- c(rect.list,v.loc,v.name)
}
done <- done + size
}
}
#we have gathered the variables, the clusters and the arrows, now show them
#each rectangle contributes 12 fields (anchor points + center + radius + label)
rect.mat <- matrix(unlist(rect.list),ncol=12,byrow=TRUE)
rect.df <- as.data.frame(rect.mat,stringsAsFactors=FALSE)
colnames(rect.df ) <- c("left","y","right","right.y","topx","topy", "xbott","botty","centerx","centery","radius","lab")
if(plot) {
#draw all variable labels and boxes in two vectorized calls
text(as.numeric(rect.df$centerx),as.numeric(rect.df$centery),rect.df$lab,cex=cex)
rect(as.numeric(rect.df$left),as.numeric(rect.df$botty),as.numeric(rect.df$right),as.numeric(rect.df$topy))
#clusters: 15 fields per entry; column 13 ("lab") stays character
cluster.mat <- matrix(unlist(cluster.list),ncol=15,byrow=TRUE)
cluster.df <- data.frame(cluster.mat,stringsAsFactors=FALSE)
#nchar2numeric: psych-internal coercion of character columns back to numeric
cluster.df[c(1:12,14:15)] <- nchar2numeric(cluster.df[c(1:12,14:15)])
colnames(cluster.df ) <- c("left","yl","right","yr","topx","topy","xbott","botty","centerx","centery","link","radius","lab","alpha","beta")
rownames(cluster.df) <- cluster.df$lab
dia.cluster1(cluster.df,cex=cex,e.size=e.size, digits=digits)
#arrows: 21 fields per entry; column 20 (color) stays character
arrow.mat <- matrix(unlist(arrow.list),ncol=21,byrow=TRUE)
arrow.df <- data.frame(arrow.mat,stringsAsFactors=FALSE)
arrow.df[c(1:19,21)] <- nchar2numeric(arrow.df[c(1:19,21)])
tv <- arrow.df
#arrow labels, then the two half-arrows on either side of each label
text(tv[,1],tv[,2],tv[,3],cex=tv[,5])
arrows(x0=tv[,6],y0=tv[,7],x1=tv[,8],y1=tv[,9],length=tv[1,10],angle=tv[1,11],code=1,col=tv[,20],lty=tv[,21])
arrows(x0=tv[,13],y0=tv[,14],x1=tv[,15],y1=tv[,16],length=tv[1,17],angle=tv[1,18],code=2,col=tv[,20],lty=tv[,21])
} #end of plot
#report the variables in their plotted vertical order
sorted.order <- psychTools::dfOrder(data.frame(y=as.numeric(rect.df[,"y"]), lab= rect.df[,"lab"]),ascending=TRUE)[,"lab"]
invisible(sorted.order)
} #end of iclust.diagram
## Replace "V<k>" placeholder variable names in the first two columns of
## ic$results with the user-supplied labels var.labels[k].  Entries that are
## not of the form "V<positive integer>" (e.g. cluster names such as "C3")
## are left unchanged.  Also tracks the length of the longest substituted
## label, which the caller uses to size plot margins.
## Returns list(ic = updated ic, max.len = longest label length seen).
fix.names <- function(ic, var.labels) {
  name.cols <- ic$results[, c(1:2)]
  longest <- 0
  for (row in seq_len(dim(name.cols)[1])) {
    for (col in 1:2) {
      idx <- sub("V", "", name.cols[row, col])
      suppressWarnings(idx <- as.numeric(idx))       # non-numeric -> NA, quietly
      if (!is.na(idx) & (idx < 1)) idx <- NA         # "V0" or negative: not a variable index
      if (!is.na(idx)) {
        name.cols[row, col] <- var.labels[idx]
        if (longest < nchar(var.labels[idx])) longest <- nchar(var.labels[idx])
      }
    }
  }
  ic$results[, c(1:2)] <- name.cols
  return(list(ic = ic, max.len = longest))
}
"dia.cluster" <-
function(x, y = NULL, cluster, link=NA, digits=2,cex = cex,e.size=.6,xlim=c(0,1),ylim=c(0,1),small=FALSE,cluster.names,draw=FALSE) {
if(draw) {
if(!small){
text(x,y, (cluster.names[rownames(cluster)]),pos=3,cex=cex)
text(x,y, substitute(list(alpha) == list(a),list(a=round(cluster[1,"alpha"],digits))),cex=cex)
text(x,y, substitute(list(beta) == list(b), list(b=round(cluster[1,"beta"],digits))),cex=cex,pos=1)
xs <- dia.ellipse1(x,y,xlim=xlim,ylim=ylim,e.size=e.size,draw=draw )} else { text(x,y, (cluster.names[rownames(cluster)]),cex=cex)
xs <- dia.ellipse1(x,y,xlim=xlim,ylim=ylim,e.size=e.size *.75) }
}
#just save the information for later drawing
if (!draw) {
xs <- dia.ellipse1(x,y,xlim=xlim,ylim=ylim,e.size=e.size,draw=FALSE )
if(small) {clust.info <- list(cluster= rownames(cluster),alpha=NA, beta = NA)} else {
clust.info <- list(cluster= rownames(cluster),alpha=round(cluster[1,"alpha"],digits),
beta = round(cluster[1,"beta"],digits))
}
}
vert <- cex*.3
left <- c(x-xs,y)
right <- c(x+xs,y)
top <- c(x,y+xs)
bottom <- c(x,y-xs)
center <- c(x,y)
dia.cluster <- list(left=left,right=right,top=top,bottom=bottom,center=center,link=link,radius=xs , clust.info)
}
#June 27, 2020 revised to allow for faster drawing
#this draws all the clusters at once
#still a little slow, but better than before
# By putting NA at the end of every unit.circle, we can draw multiple circles rapidly
#modified 8/21/22 to include "beta"
#Batch-draw every cluster ellipse and its annotations from the cluster.df
#table assembled by iclust.diagram.  Rows with a non-NA alpha ("big"
#clusters) get name + alpha + beta labels and a full-size ellipse; rows with
#NA alpha ("small" clusters) get only a name and a 75%-size ellipse.
#All ellipses are drawn with a single lines() call: the NA appended to the
#unit circle breaks the polyline between successive ellipses.
"dia.cluster1" <-
function(cluster.df,digits=2,cex = cex,e.size=.6,xlim=c(0,1),ylim=c(0,1)) {
#split annotated ("big") from unannotated ("small") clusters on NA alpha
big <- cluster.df[!is.na(cluster.df[,"alpha"]),]
x.big <- big[,"centerx"]
y.big <- big[,"centery"]
small <- cluster.df[is.na(cluster.df[,"alpha"]),]
x <- cluster.df[,"centerx"]
y <- cluster.df[,"centery"]
text(x.big,y.big, rownames(big),pos=3,cex=cex) #all clusters have names
#these next two lines just report the first values
temp.alpha <- substitute(list(alpha) == "")
temp.beta <- substitute(list(beta) == "")
text(x.big,y.big,temp.alpha,cex=cex,adj=1) #this shows the symbol alpha to the left
text(x.big,y.big,temp.beta, cex=cex,adj=c(1,1.5)) #below and to the left
#numeric values placed to the right of the symbols
text(x.big,y.big,round(big[,"alpha"],digits),offset=1,adj=0,cex=cex)
# text(x.big,y.big,round(big[,"beta"],digits),cex=cex,pos=1)
#text(x.big,y.big,round(big[,"beta"],digits),cex=cex,pos=1)
text(x.big,y.big,round(big[,"beta"],digits),cex=cex,adj=c(0,1.7))
#text(x.big,y.big, substitute(list(alpha) == list(a),list(a=round(big[,"alpha"],digits))),cex=cex)
#text(x.big,y.big, substitute(list(beta) == list(b), list(b=round(big[,"beta"],digits))),cex=cex,pos=1)
# temp.n <- NROW(big)
# for(i in 1:temp.n) {text(x.big[i],y.big[i],substitute(list(alpha) == list(a),list(a=round(big[i,"alpha"],digits))),cex=cex)
# text(x.big[i],y.big[i],substitute(list(beta) == list(a),list(a=round(big[i,"beta"],digits))),cex=cex,pos=1) }
#do the geometric work just once
#trailing NA breaks the polyline so one lines() call draws many ellipses
segments = 51
angles <- c((0:segments) * 2 * pi/segments,NA)
unit.circle <- cbind(cos(angles), sin(angles))
#this will break
xrange = (xlim[2] - xlim[1])
yrange = (ylim[2] - ylim[1])
xs <- e.size * xrange
#store the values for drawing
ellipsex <- rep(x.big,each=(segments + 2)) + unit.circle[,1] * xs
ellipsey <- rep(y.big,each=(segments + 2)) + unit.circle[,2] *xs
lines(ellipsex,ellipsey)
if(NROW(small)>0) {
x.small <- small[,"centerx"]
y.small <- small[,"centery"]
text(x.small,y.small, rownames(small),cex=cex)
nc <- NROW(small)
#small clusters get a 75%-size ellipse
ellipsex <- rep(x.small,each=(segments + 2)) + unit.circle[,1] *xs * .75
ellipsey <- rep(y.small,each=(segments + 2)) + unit.circle[,2] *xs * .75
lines(ellipsex,ellipsey)
}
}
#down is a recursive function that draws the complete cluster structure
#Walks the ICLUST tree from cluster `head`, accumulating geometry rather
#than drawing: each visited cluster appends to cluster.list, each leaf
#variable to rect.list, and each connecting arrow (labeled with r1/r2, the
#correlations of the two children with the cluster) to arrow.list.
#Arrow color/linetype encode the sign of the correlation via `colors`;
#`bottomup` flips arrow direction (child->parent vs parent->child).
#size1==1 / size2==1 mark a leaf variable child; otherwise recurse into the
#named child cluster.  Returns (invisibly) the three accumulator lists.
"down" <-
function(clusters,head,x,y,sign.clust=1,old.head = NULL,old.loc=NULL,digits,cex,limx,limy,min.size=1,e.size=.6,color.lines=TRUE,colors=c("black","blue"),cluster.names,rect.list,arrow.list,cluster.list,bottomup) {
a.loc <- NULL
shift <- 2
size <- clusters[head,"size"]
cluster <- clusters[head,]
if(is.null(old.loc)) {link <- NA} else {link <- old.head} #remember the cluster that spawned this cluster
#clusters at or below min.size are recorded as "small" (reduced ellipse)
if(size > min.size) {c.loc <- dia.cluster(head+shift,y,cluster,link=link,digits=digits,cex=cex,e.size=e.size,cluster.names=cluster.names)
cluster.list <- c(cluster.list,c.loc) } else {c.loc <- dia.cluster(head+2,y,cluster,link=link,digits=digits,cex=cex,e.size=e.size*.6,small=TRUE,cluster.names=cluster.names)
cluster.list <- c(cluster.list,c.loc) }
if(!is.null(old.loc)) {
#link this cluster back to its parent; choose r1 or r2 by relative position
if(old.loc$top[2] < c.loc$top[2]) {labels <- round(clusters[c.loc$link,"r1"],digits) } else { labels <- round(clusters[c.loc$link,"r2"],digits)}
sign.clust <- sign(labels)
#negative correlations get the second color and a dashed line type
if(old.loc$left[1] < c.loc$right[1]) {
if(old.loc$left[2] < c.loc$right[2]) {
sign.clust <- sign(labels)
if(bottomup) {a.loc <- dia.arrow(c.loc,old.loc,labels=labels,cex=cex,col=colors[((sign.clust < 0)+1)],lty=(sign.clust < 0)+1,draw=FALSE)} else {
a.loc <- dia.arrow(old.loc,c.loc,labels=labels,cex=cex,col=colors[((sign.clust < 0)+1)],lty=(sign.clust < 0)+1,draw=FALSE)}} else {
if(bottomup) { a.loc <- dia.arrow(c.loc,old.loc,labels=labels,cex=cex,col=colors[((sign.clust <0)+1)],lty=((sign.clust)<0)+1,draw=FALSE)} else {
a.loc <- dia.arrow(old.loc,c.loc,labels=labels,cex=cex,col=colors[((sign.clust <0)+1)],lty=((sign.clust)<0)+1,draw=FALSE)}}} else {
if(bottomup){a.loc <- dia.arrow(c.loc,old.loc,labels=labels,cex=cex,col=colors[((sign(labels)<0)+1)],lty=((sign(labels)<0)+1),draw=FALSE) } else {
a.loc <- dia.arrow(old.loc,c.loc,labels=labels,cex=cex,col=colors[((sign(labels)<0)+1)],lty=((sign(labels)<0)+1),draw=FALSE)}}}
size1 <- clusters[head,"size1"]
size2 <- clusters[head,"size2"]
arrow.list <- c(arrow.list,a.loc)
#first child: a single variable (leaf) vs a sub-cluster (recurse)
if(size1==1) {
v.loc <- dia.rect(0,y+.5,clusters[head,1],xlim=limx,ylim=limy,cex=cex,draw=FALSE)
rect.list <- c(rect.list,v.loc,clusters[head,1])
#sign.clust <- sign.clust *sign(cluster["r1"])
sign.clust <- sign(cluster["r1"])
if(bottomup) {a.loc <- dia.arrow(v.loc,c.loc,round(cluster["r1"],digits),cex=cex,col=colors[((sign.clust)<0) +1],lty=((sign.clust) <0)+ 1,draw=FALSE)} else {
a.loc <- dia.arrow(c.loc,v.loc,round(cluster["r1"],digits),cex=cex,col=colors[((sign.clust)<0) +1],lty=((sign.clust) <0)+ 1,draw=FALSE) }
arrow.list <- c(arrow.list,a.loc) } else {
head1 <- which(rownames(clusters)== clusters[head,1])
cluster <- clusters[head1,] #get ready to go down the tree
y.shift <- clusters[head1,"size2"]
v.loc <- down(clusters,head1,x,y+y.shift,sign.clust,old.head=head,old.loc = c.loc,min.size=min.size,e.size=e.size,digits=digits,cex=cex,limx=limx,limy=limy,colors=colors,cluster.names=cluster.names,rect.list=rect.list,arrow.list=arrow.list,cluster.list=cluster.list,bottomup=bottomup)
rect.list <- v.loc$rect.list
cluster.list <- v.loc$cluster.list
arrow.list <- v.loc$arrow.list
}
#second child: same leaf-vs-recurse split, shifted downward
if(size2==1) {
v.loc <- dia.rect(0,y-.5,clusters[head,2],xlim=limx,ylim=limy,cex=cex,draw=FALSE)
rect.list <- c(rect.list,v.loc,clusters[head,2])
sign.clust <- sign(clusters[head,"r2"])
#sign.clust <- sign(clusters[head,"r2"])
if(bottomup) {a.loc <- dia.arrow(v.loc,c.loc,labels = round(clusters[head,"r2"],digits),cex=cex,col=colors[((sign.clust)<0) +1],lty=((sign.clust)<0) + 1, draw=FALSE) } else {
a.loc <- dia.arrow(c.loc,v.loc,labels = round(clusters[head,"r2"],digits),cex=cex,col=colors[((sign.clust)<0) +1],lty=((sign.clust)<0) + 1, draw=FALSE)}
arrow.list <- c(arrow.list,a.loc)
} else {
old.head <- head
head <- which(rownames(clusters)== clusters[head,2])
cluster <- clusters[head,]
y.shift <- clusters[head,"size1"]
v.loc <- down(clusters,head,x,y-y.shift,sign.clust,old.head=old.head,old.loc = c.loc,min.size=min.size,e.size=e.size,digits=digits,cex=cex,limx=limx,limy=limy,colors=colors,cluster.names=cluster.names,rect.list=rect.list,arrow.list=arrow.list,cluster.list=cluster.list,bottomup=bottomup)
rect.list <- v.loc$rect.list
cluster.list <- v.loc$cluster.list
arrow.list <- v.loc$arrow.list
}
invisible(list(rect.list=rect.list,arrow.list=arrow.list,cluster.list=cluster.list)) }
|
825e2d2535c0e93ad482bdb82c5344915bb3fde9
|
87927a36c2b4d2f5c528724d57c801766302cae2
|
/wsjchart1.R
|
9d8d68f3c0a3f8eedb9a41d8eb294e79fdbb3237
|
[] |
no_license
|
econdataus/ipums
|
d79c25519f81b6a023b31d3b5e9747b1b38e8f39
|
1db144c9f38c8456cd08a3269755fde9d117614b
|
refs/heads/master
| 2021-01-10T21:35:40.009565
| 2015-04-19T18:24:36
| 2015-04-19T18:24:36
| 31,491,924
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,881
|
r
|
wsjchart1.R
|
dd <- read.csv("wsjstem1.csv")
labyears <- "1990-2010"
source("stem1labels0.R")
with(dd, plot(native_coll_wkwage_change ~ immig_stem_change, xlim=c(-2,4), col=col, cex=0.6,
ylab="Percentage change in real native college-graduate wages",
xlab="Percentage change in foreign STEM workers"))
with(dd, text(native_coll_wkwage_change ~ immig_stem_change, labels=label, cex=0.6, pos=pos, offset=0.2, col=col))
title(main=paste("Native College Wages vs. Foreign Stem Workers,", labyears))
abline(h=100*47280/35941-100, col="blue", lty=3) # percent change in real per-capita GDP, 1990-2010
grid()
legend("topleft", inset=0, cex=0.5, lty=c(3), col=c("blue"), horiz=FALSE, c("change in real per-capita GDP, 1990-2010"))
legend("bottomright", inset=0, cex=0.5, pch=c(1,1), col=c("red","deepskyblue"), horiz=FALSE,
c("highest influx according to article", "no data in 1990 5% IPUMS Census"))
readline("Press enter to continue, escape to exit")
x11()
dd <- read.csv("wsjstem3.csv")
labyears <- "1990-2010"
source("stem1labels0.R")
with(dd, plot(native_coll_wkwage_change ~ immig_stem_change, xlim=c(-2,4), col=col, cex=0.6,
ylab="Percentage change in real native college-graduate wages",
xlab="Percentage change in foreign STEM workers"))
with(dd, text(native_coll_wkwage_change ~ immig_stem_change, labels=label, cex=0.6, pos=pos, offset=0.2, col=col))
title(main=paste("Native College Wages vs. Foreign Stem Workers,", labyears))
abline(h=100*47280/35941-100, col="blue", lty=3) # percent change in real per-capita GDP, 1990-2010
grid()
legend("topleft", inset=0, cex=0.5, lty=c(3), col=c("blue"), horiz=FALSE, c("change in real per-capita GDP, 1990-2010"))
legend("bottomright", inset=0, cex=0.5, pch=c(1,1), col=c("red","deepskyblue"), horiz=FALSE,
c("highest influx according to article", "no data in 1990 5% IPUMS Census"))
readline("Press enter to continue, escape to exit")
|
3f38dfa38ee90bf1abbcb0c87f7fc7937cbf3ff2
|
1c5620ebfe6b2ce9e9a1b6332ac229e041d433c8
|
/oldman/annotateTrans.Rd
|
cca6cce273d32a225f279908606625f0b634c9b5
|
[] |
no_license
|
lawremi/VariantTools
|
33a752d2a68ca0212c79f3849f8081733da33072
|
fdcc2e645f034667e89cfe230b84034efb942d3b
|
refs/heads/master
| 2021-06-03T12:12:14.520204
| 2020-04-10T19:58:01
| 2020-04-10T20:00:16
| 101,421,367
| 1
| 1
| null | 2018-01-30T17:36:21
| 2017-08-25T16:18:06
|
R
|
UTF-8
|
R
| false
| false
| 1,731
|
rd
|
annotateTrans.Rd
|
\name{annotateTrans}
\alias{annotateTrans}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
%% ~~function to do ... ~~
A function to annotate a transcript grl with variant information.
}
\description{
%% ~~ A concise (1-5 lines) description of what the function does. ~~
Given a txbd, coverage RLE and set of functional variants, this function
will return a GRangesList of transcripts annotated with additional
metadata which includes the fraction of the transcript that is callable
defined by the function isCallable and the number of functional variants
that fall in the transcript.
}
\usage{
annotateTrans(txdb, cov, anno_gr, cores)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{txdb}{
%% ~~Describe \code{txdb} here~~
a txdb object
}
\item{cov}{
%% ~~Describe \code{cov} here~~
A coverage RLE as generated by the getCov function
}
\item{anno_gr}{
%% ~~Describe \code{anno_gr} here~~
a GRanges object with variants annotated for transcript occurrence and
consequence. The transcript IDs are assumed to be RefSeq IDs.
}
\item{cores}{
%% ~~Describe \code{cores} here~~
Number of cores to be used in the parallel aspects of the
code. Setting cores to 1 will run on a single core.
}
}
\value{
%% ~Describe the value returned
%% If it is a LIST, use
%% \item{comp1 }{Description of 'comp1'}
%% \item{comp2 }{Description of 'comp2'}
%% ...
Returns a GRangesList of the transcripts with metadata columns
added for the fraction of the cds or exon region that is considered
callable and the number of protein altering mutations found in the total
cds regions.
}
\author{
%% ~~who you are~~
Jeremiah Degenhardt
}
\keyword{internal}
|
6971df31a30fecac33f6f41d8f81a1b017b6d350
|
5d25d2b58e5b8e3fda7d1f4f9564d6e3662f1a87
|
/man/ctSFTM.Rd
|
9a514a3b0f484f2421b4e13eed76d5321ac91bc2
|
[] |
no_license
|
shuyang1987/contTimeCausal
|
4bd1f9c2045b6c401eb4b2fbb0ae21a19688d559
|
00bf39ffeb270db86c77edafbbdf727d1e9c8c25
|
refs/heads/master
| 2021-06-08T07:48:50.036125
| 2021-05-06T16:28:11
| 2021-05-06T16:28:11
| 170,953,343
| 6
| 2
| null | null | null | null |
UTF-8
|
R
| false
| true
| 6,145
|
rd
|
ctSFTM.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ctSFTM.R
\name{ctSFTM}
\alias{ctSFTM}
\title{Continuous-time Structural Failure Time Model (ctSFTM)}
\usage{
ctSFTM(V, deltaV, U, deltaD, Lti, Ltd4Vtime, Ltd4Utime)
}
\arguments{
\item{V}{the time to treatment discontinuation or failure or censoring (n x 1)}
\item{deltaV}{the binary indicator of treatment discontinuation at time V (n x 1)}
\item{U}{the time to failure or censoring (n x 1)}
\item{deltaD}{the binary indicator of failure at time U (n x 1)}
\item{Lti}{1-dimensional baseline covariate (n x 1)}
\item{Ltd4Vtime}{a matrix consisting of the time-dependent covariate (n x ltimeV)
ltimeV is the length of uniquely observed treatment discontinuation times (called V times)
one row represents one individual's time-dependent covariates
columns represent ordered V times}
\item{Ltd4Utime}{a matrix consisting of the time-dependent covariate (n x ltimeU)
ltimeU is the length of uniquely observed failure times (called U times)
one row represents one individual's time-dependent covariates
columns represent ordered U times}
}
\value{
\code{est}: estimate of the SFTM parameter
}
\description{
The function estimates the effect of treatment effect for a survival outcome under a SFTM
with time-varying treatment and confounding in the presence of dependent censoring.
}
\details{
The SFTM assumes that the potential failure time \code{U} had the individual never received treatment and the observed failure time \code{T} follow
\deqn{U ~ \int_0^T e^{\psi A_u}d u, }
where \code{~} means "has the same distribution as", and \eqn{A_u} is the treatment indicator at time \eqn{u}.
We assume that the individual continuously received treatment until time \eqn{V}.
The observed failure time can be censored assuming the censoring time is independent of the failure time given the treatment and covariate history (the so-called ignorable censoring).
The current function provides a template to handle one-dimensional baseline covariate and one-dimensional time-dependent covariate;
extension to handling multiple baseline and time-dependent covariates is possible.
Variance estimate should be implemented by delete-one-group jackknifing and recalling ctSFTM.
}
\examples{
library("survival")
library("MASS")
library("zoo")
set.seed(seed=11)
n=1000
## generate time-indept covariate
Lti<-rbinom(n,1,0.55)
Lti<-Lti-mean(Lti)
## generate time-dept covariate
Sigma<-matrix(0,3,3)
for(i in 1:3){
for(j in 1:3){
Sigma[i,j]<-0.7^(abs(i-j))
}
}
## Ltd represents the values of covariate at times t1=0, t2=5, and t3=10.
## We assume that the time-dependent variable remains constant between measurements.
Ltdtemp<-mvrnorm(n = n, rep(0,3), Sigma)
Ltd<-Ltdtemp
t<-c(0,5,10,100)
colnames(Ltd)<-paste("t",1:3,sep="")
## generate time-to-events
## D =time to death if never stop treatment (time-indep Cox)
## V =time to discontinuity (time-dep Cox)
## avoiding the same time points for V and U
## generate D according to an exp distribution
D<-rexp(n=n,0.2)
Ltd<-Ltdtemp+ matrix((D-20)/5,n,3,byrow=FALSE)
colnames(Ltd)<-paste("t",1:3,sep="")
## generate V according to a time-dept Cox using Bender et al (2005)
lambdaV <- 0.15; betaV <- c(0.15,0.15)
v <- runif(n=n)
temp1 <- (- log(1-v) / (lambdaV * exp(cbind(Lti,Ltd[,1]) \%*\% betaV)))
v <- runif(n=n)
temp2 <- (- log(1-v) / (lambdaV * exp(cbind(Lti,Ltd[,2]) \%*\% betaV)))
v <- runif(n=n)
temp3 <- (- log(1-v) / (lambdaV * exp(cbind(Lti,Ltd[,3]) \%*\% betaV)))
id1<-(temp1 < t[2])
id2<-(temp2 < (t[3]-t[2]))
id3<-(temp3 < (t[4]-t[3]))
V2<- id1*temp1 + (1-id1)*id2*(temp2+t[2]) + (1-id1)*(1-id2)*(temp3+t[3])
## generate Tv according to a SFTM
psi<- 0
true<-exp(psi)
id1<-D<=V2
T.temp11<-D*exp(-psi[1])
id11<-T.temp11<=V2
id12<-T.temp11>V2
T.temp12<-D + V2-exp(psi[1])*V2
id2<-D>V2
T.temp2<-D + V2-exp(psi[1])*V2
Tv<-id11*T.temp11+id12*T.temp12
## generate censoring according to time-dept Cox
## nu=time to censoring
lambdaC <- 0.025; betaC <- c(0.15,0.15)
v <- runif(n=n)
temp3 <- (- log(1-v) / (lambdaC * exp(cbind(Lti,1) \%*\% betaC)))
v <- runif(n=n)
temp4 <- (- log(1-v) / (lambdaC * exp(cbind(Lti,0) \%*\% betaC)))
id3<-(temp3 < V2)
nu<- id3*temp3 + (1-id3)*(V2+temp4)
check1<-sort( c(V2, apply(cbind(Tv,nu),1,min)))
check2<-c(check1,9999)-c(0,check1)
if(min(check2)<10^-6){
print("Please re-generate the data in order to avoid the same time points for V and U")
}
U<-apply( cbind(Tv,nu) ,1,min)
deltaD <- ( U<nu )
deltaV<-(V2<U)&(V2<nu)
V<-apply(cbind(V2,U,nu),1,min)
## time-dependent covariate
## Ltd4Vtime is a n x ltimeV matrix consisting of the time-dependent cov
## each row represents one individual
## columns represent ordered V times (the realized treatment discontinuation times)
data1<-list(time=V,status=deltaV)
fit<-coxph(Surv(time, status) ~ . , data1)
ss<-survfit(fit)
obsV.times<-ss$time
ltime<-length(obsV.times)
id1<- (obsV.times < t[2])
id2<-((obsV.times < t[3])&(obsV.times > t[2]))
id3<- (obsV.times > t[3])
Ltd4Vtime<-matrix(NA,nrow=n,ncol=ltime)
Ltd4Vtime[,which(id1==1)]<-Ltd[,1]
Ltd4Vtime[,which(id2==1)]<-Ltd[,2]
Ltd4Vtime[,which(id3==1)]<-Ltd[,3]
## Ltd4Utime is a n x ltimeU matrix consisting of the time-dependent cov
## each row represents one individual
## columns represent ordered U times (the realized event times)
data2<-list(time=U,status=1-deltaD)
fit<-coxph(Surv(time, status) ~ . , data2)
ss<-survfit(fit)
obsU.times<-ss$time[ss$n.event==1]
ltimeU<-length(obsU.times)
id1<- (obsU.times < t[2])
id2<-((obsU.times < t[3])&(obsU.times > t[2]))
id3<- (obsU.times > t[3])
Ltd4Utime<-matrix(NA,nrow=n,ncol=ltimeU)
Ltd4Utime[,which(id1==1)]<-Ltd[,1]
Ltd4Utime[,which(id2==1)]<-Ltd[,2]
Ltd4Utime[,which(id3==1)]<-Ltd[,3]
true
contTimeCausal::ctSFTM(V,deltaV,U,deltaD,Lti,Ltd4Vtime,Ltd4Utime)$est
}
\references{
Yang, S., K. Pieper, and F. Cools. (2019) Semiparametric estimation of structural failure time model in continuous-time processes.
\url{https://arxiv.org/abs/1808.06408}
}
\seealso{
\code{\link{ctCoxMSM}}
}
|
dbb41fe4354d994eca3266d96fa28fd6b9585342
|
295a2ae5946d7481ac067822577ec9ad3f09c6cb
|
/man/installing.rcasc.Rd
|
850aec364f5f10bf9be2c463c228775943de6c01
|
[] |
no_license
|
kendomaniac/BCsctutorial
|
c61b134581b66736d5a352cc88860883638775d0
|
c121631ed7f89e1f97f2af42daa4b0090d007a02
|
refs/heads/main
| 2023-02-15T07:13:31.640633
| 2021-01-11T08:16:25
| 2021-01-11T08:16:25
| 322,218,433
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 496
|
rd
|
installing.rcasc.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/installing.rcasc.R
\name{installing.rcasc}
\alias{installing.rcasc}
\title{A function to prepare the environment for the SCA tutorial.}
\usage{
installing.rcasc()
}
\description{
This function checks that Docker is installed and downloads the required Docker containers locally
}
\examples{
\dontrun{
installing.rcasc()
}
}
\author{
Raffaele Calogero, raffaele.calogero [at] unito [dot] it, University of Torino, Italy
}
|
2f587adbd42cc1172b18978a34f805a0873793a6
|
a21b4d4c4ca56f0accab4924df346d896186be39
|
/cachematrix.R
|
8647a730cc750db5e8e26849e92e181f64798289
|
[] |
no_license
|
RajatAst/ProgrammingAssignment2
|
fdd2732737ff5571deb1686e70fd52dd619a6020
|
477324b833c26a04d6e1b08207e59bd4b8c5a448
|
refs/heads/master
| 2022-11-23T05:17:23.326618
| 2020-07-29T12:51:07
| 2020-07-29T12:51:07
| 283,414,300
| 0
| 0
| null | 2020-07-29T06:10:12
| 2020-07-29T06:10:11
| null |
UTF-8
|
R
| false
| false
| 1,437
|
r
|
cachematrix.R
|
## Create a "matrix" wrapper that can cache its inverse.
## Returns a list of four accessor closures sharing one environment:
##   set(y)          -- replace the stored matrix and drop any cached inverse
##   get()           -- return the stored matrix
##   setinverse(inv) -- store a computed inverse
##   getinverse()    -- return the cached inverse, or NULL if not yet set
makeCacheMatrix <- function(x = matrix()) {
  cached.inverse <- NULL
  list(
    set = function(y) {
      ## A new matrix invalidates the previously cached inverse.
      x <<- y
      cached.inverse <<- NULL
    },
    get = function() x,
    setinverse = function(inverse) cached.inverse <<- inverse,
    getinverse = function() cached.inverse
  )
}
## Return the inverse of the special "matrix" object `x` produced by
## makeCacheMatrix().  The inverse is computed at most once: on a cache
## miss it is calculated with solve() and stored back into `x`; on a hit
## the cached copy is returned (with a status message) without recomputing.
cacheSolve <- function(x, ...) {
  cached <- x$getinverse()
  if (is.null(cached)) {
    ## Cache miss: compute the inverse from the stored matrix and save it.
    inverse <- solve(x$get(), ...)
    x$setinverse(inverse)
    return(inverse)
  }
  message("No null in i. Retrieving cached data")
  cached
}
|
3db4830f1aa5d29cfc7ce368006ae4823cb8668c
|
a560269290749e10466b1a29584f06a2b8385a47
|
/Notebooks/r/sociopath00/titanic-with-socio/titanic-with-socio.R
|
f97fb7010ae679ce50cb9bb5517077a523c217e2
|
[] |
no_license
|
nischalshrestha/automatic_wat_discovery
|
c71befad1aa358ae876d5494a67b0f4aa1266f23
|
982e700d8e4698a501afffd6c3a2f35346c34f95
|
refs/heads/master
| 2022-04-07T12:40:24.376871
| 2020-03-15T22:27:39
| 2020-03-15T22:27:39
| 208,379,586
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,318
|
r
|
titanic-with-socio.R
|
# Kaggle Titanic survival prediction with a random forest.
# Side effects: reads ../input/train.csv and ../input/test.csv, draws a
# boxplot, and writes Titanic_socio.csv to the working directory.
# The environment is defined by the kaggle/rstats docker image: https://github.com/kaggle/docker-rstats
library(ggplot2) # Data visualization
library(readr) # CSV file I/O, e.g. the read_csv function
library(randomForest)
# List the available input files (Kaggle convention).
system("ls ../input")
rfNews()

# Read both splits.  Use TRUE/FALSE rather than the reassignable shorthands
# T/F, which can silently change meaning if T or F is ever rebound.
train <- read.table("../input/train.csv", header = TRUE, sep = ",", stringsAsFactors = FALSE)
test <- read.table("../input/test.csv", header = TRUE, sep = ",", stringsAsFactors = FALSE)

# Tag rows so the combined frame can be split back after imputation; the
# test split has no known outcome.
train$isTrain <- TRUE
test$isTrain <- FALSE
test$Survived <- NA
full <- rbind(train, test)
str(full)

# Encode categorical predictors (Pclass is ordinal: 1st > 2nd > 3rd).
full$Pclass <- as.ordered(full$Pclass)
full$Sex <- as.factor(full$Sex)
full$Embarked <- as.factor(full$Embarked)
str(full)

# ---- Impute missing Age with a linear model fit on rows with observed Age ----
ageTrain <- full[is.na(full$Age), ]   # rows whose Age must be imputed
ageV <- full[!is.na(full$Age), ]      # rows with observed Age (model data)
age.Formula <- as.formula("Age ~ Pclass + Sex + SibSp + Parch + Fare")
age.Model <- lm(age.Formula, ageV)
age.Predict <- round(predict(age.Model, ageTrain))
ageTrain$Age <- age.Predict
tail(ageTrain)
full <- rbind(ageTrain, ageV)
full <- full[order(full$PassengerId), ]   # restore original row order
tail(full)

# ---- Impute the missing Fare ----
boxplot(train$Fare)
outBound <- boxplot.stats(train$Fare)$stats[5]   # upper whisker: exclude fare outliers from the fit
Fare.t <- full[full$Fare <= outBound, ]
fare.formula <- as.formula("Fare ~ Pclass + Age + Sex + SibSp + Parch")
fare.Model <- lm(fare.formula, Fare.t)
# NOTE(review): row 1044 is hard-coded as the single NA-Fare row of this
# particular data set; a more general form would index is.na(full$Fare).
fare.Pred <- predict(fare.Model, full[1044, ])
full[1044, ]$Fare <- fare.Pred
full[1040:1045, ]

# ---- Fit the random forest on the training rows and predict the test rows ----
train <- full[full$isTrain == TRUE, ]
test <- full[full$isTrain == FALSE, ]
train$Survived <- as.factor(train$Survived)   # factor outcome => classification forest
fml <- as.formula("Survived ~ Pclass + Sex + Age + SibSp + Parch + Embarked + Fare")
titanic.model <- randomForest(fml, train, ntree = 500, mtry = 3, nodesize = 0.01 * nrow(train))
Survived <- predict(titanic.model, test)

# Assemble and write the Kaggle submission file.
PassengerId <- test$PassengerId
op <- as.data.frame(PassengerId)
op$Survived <- Survived
write.csv(op, file = "Titanic_socio.csv", row.names = FALSE)
|
078a6539c292cdc61e5913175872a94c417d48c0
|
680f31307651d40672fca41c08df08be053d478b
|
/man/maxGripFinder.Rd
|
f46c980ea883cca5ecd92c4c1cb74c011f563611
|
[] |
no_license
|
jonkeane/mocapGrip
|
ad30226f8a5fd486cc8c11f652e536762bb18136
|
ffeaafc8071a2e2d94dcf1518e61e56ba647a76f
|
refs/heads/master
| 2020-12-24T20:52:13.258443
| 2016-06-11T21:59:14
| 2016-06-11T21:59:14
| 56,876,628
| 3
| 0
| null | 2016-05-27T17:23:35
| 2016-04-22T18:22:16
|
HTML
|
UTF-8
|
R
| false
| true
| 1,008
|
rd
|
maxGripFinder.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/processingFunctions.R
\name{maxGripFinder}
\alias{maxGripFinder}
\title{Processing function for finding maximum grips}
\usage{
maxGripFinder(data, percOcclusion = 0.05)
}
\arguments{
\item{data}{the data to process.}
\item{percOcclusion}{the percentage of occlusion that is acceptable (this is the upper bound, in percent). Default: \code{0.05}, i.e. only trials with less than 5\% occlusion are processed.}
}
\value{
a dataframe with the data that has been processed, one line per observation
}
\description{
\code{maxGripFinder()} takes the period and extracts the maximum value of grip for the whole period.
This function should only be used in \code{\link{processDataSet}}, because that has the code to group all
of the data into the right groups based on subject, session, trial, condition, period, etc. This should be
included in the processing section of \link{modelMetadata} called \code{processFunction}
}
|
d2ab6a0fd7376a8ef0f5a074340a80ce19d01ad7
|
1ed87c596958af5205fe6efe481d97f456e1fae6
|
/Assignments/midterm/wendy.R
|
c4ba36de924cc47aca909f6dce96520ef0e5c595
|
[] |
no_license
|
aaronxhill/dataviz14f
|
1530a3d16803c3e49d0f940dde687da6ebe3b6f5
|
290187d53b1e88bcf255c23dd2ba8e3af7294ea2
|
refs/heads/master
| 2020-03-30T19:02:34.451718
| 2014-11-15T00:33:58
| 2014-11-15T00:33:58
| 23,426,895
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 882
|
r
|
wendy.R
|
# Midterm assignment: jittered plot of Titanic survival by class and gender.
# The original script used ggplot2 and grid functions without loading either
# package; load them explicitly so the script runs standalone.
library(ggplot2)
library(grid)

setwd("C:/Users/Wendy/Desktop/Data Visualization/Assignment 1")
titanic <- read.csv("titanic.text")

# Recode passenger class (1/2/3) as a labelled factor.
is.numeric(titanic$Pclass)
titanic$Pclass.f <- factor(titanic$Pclass, labels = c("First Class", "Second Class", "Third Class"))
is.factor(titanic$Pclass.f)
summary(titanic$Pclass.f)

# Recode the survival flag (0/1) as a labelled factor.
is.numeric(titanic$Survived)
titanic$Survived.f <- factor(titanic$Survived, labels = c("False", "True"))
is.factor(titanic$Survived.f)
summary(titanic$Survived.f)

# Jittered scatter of class vs gender, coloured by survival outcome.
ggplot(titanic) +
  geom_jitter(aes(x = Sex, y = Pclass.f, color = factor(Survived.f))) +
  ggtitle("Titanic Disaster Survival Rates by Class & Gender") + labs(color = "Survived") +
  scale_color_discrete(labels = c("Deceased", "Survived")) +
  labs(x = "Gender", y = "Class")

# BUG FIX: the original line had a stray ')' directly after the string
# ( grid.text("...") , x = .93, ... ) which made the whole file a parse error.
grid.text("Wendy Brisita_Midterm Assignment", x = .93, y = .02,
          gp = gpar(fontsize = 11, family = "mono"))
|
abb1b12a3825a5416e6a830c1000cb07074353dc
|
27beeb71964e1064586bc9390ea5a21af4e4fe3f
|
/Functions.R
|
505de70bb33990e5db95ef93f4ba39beabb1de3f
|
[] |
no_license
|
Zaphiroth/ntm_docker
|
a9e849aa74e3e78a4e26f95c99b0ec8f96ec0bf2
|
ba66278df818c0cf34e2399a6a006c6cb2f71c11
|
refs/heads/master
| 2020-05-16T17:23:32.481792
| 2019-05-17T09:25:42
| 2019-05-17T09:25:42
| 183,192,573
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 34,591
|
r
|
Functions.R
|
##------------------------------------------------------------------------------
##-- Get previous data
##------------------------------------------------------------------------------
# Pull the previous phase's ("p_") results for a proposal out of MongoDB:
#   * p_hospital_sales_info: one row per hospital/product with prior sales,
#     share, offer attractiveness, customer relationship and potential
#   * p_rep_ability_info: one row per representative with prior ability scores
# Connection settings are read from options()$mongodb (db, host). Documents
# are fetched one ObjectId at a time and accumulated with bind_rows().
get_p_data <- function(proposal_id, p_sales_report_id, personnel_assessment_id) {
  ## p_sales ----
  # Resolve the sales report, then each referenced hospital-level report.
  db_sales_report <- mongo(collection = "SalesReport", db = options()$mongodb$db, url = options()$mongodb$host)
  sales_report_info <- db_sales_report$find(query = paste0('{"_id": {"$oid": "', p_sales_report_id, '"}}'))
  hospital_sales_report_ids <- sales_report_info$`hospital-sales-report-ids`[[1]]
  db_hospital_sales_report <- mongo(collection = "HospitalSalesReport", db = options()$mongodb$db, url = options()$mongodb$host)
  p_hospital_sales_report_info <- data.frame()
  for (i in hospital_sales_report_ids) {
    info <- db_hospital_sales_report$find(query = paste0('{"_id": {"$oid": "', i, '"}}'))
    p_hospital_sales_report_info <- bind_rows(p_hospital_sales_report_info, info)
  }
  ## product ----
  # Map goods-config -> product id and life-cycle stage.
  # NOTE(review): goods_config_id is assumed to be a single id here (the find
  # below is not looped) -- confirm reports never span multiple goods configs.
  goods_config_id <- p_hospital_sales_report_info$`goods-config-id`[!duplicated(p_hospital_sales_report_info$`goods-config-id`)]
  db_goods <- mongo(collection = "GoodsConfig", db = options()$mongodb$db, url = options()$mongodb$host)
  goods_info <- db_goods$find(query = paste0('{"_id": {"$oid": "', goods_config_id, '"}}'), fields = '{}')
  db_product <- mongo(collection = "ProductConfig", db = options()$mongodb$db, url = options()$mongodb$host)
  product_info <- db_product$find(query = paste0('{"_id": {"$oid": "', goods_info$`goods-id`, '"}}'), fields = '{}')
  product <- goods_info %>%
    left_join(product_info, by = c("goods-id" = "_id")) %>%
    select(`_id`, `product-id`, `life-cycle`)
  ## hospital ----
  # Map dest-config -> hospital id (two hops: DestConfig, then HospitalConfig).
  dest_config_ids <- p_hospital_sales_report_info$`dest-config-id`
  db_dest <- mongo(collection = "DestConfig", db = options()$mongodb$db, url = options()$mongodb$host)
  dest_info <- data.frame()
  for (i in dest_config_ids) {
    info <- db_dest$find(query = paste0('{"_id": {"$oid": "', i, '"}}'), fields = '{}')
    dest_info <- bind_rows(dest_info, info)
  }
  db_hospital <- mongo(collection = "HospitalConfig", db = options()$mongodb$db, url = options()$mongodb$host)
  dest_ids <- dest_info$`dest-id`
  hospital_info <- data.frame()
  for (i in dest_ids) {
    info <- db_hospital$find(query = paste0('{"_id": {"$oid": "', i, '"}}'), fields = '{}')
    hospital_info <- bind_rows(hospital_info, info)
  }
  hospital <- dest_info %>%
    left_join(hospital_info, by = c("dest-id" = "_id")) %>%
    select(`_id`, `hospital-id`)
  ## p_intermedia ----
  # Initial-phase metrics keyed by hospital id (hosp_size, share, potential...).
  db_intermedia <- mongo(collection = "Intermedia", db = options()$mongodb$db, url = options()$mongodb$host)
  p_intermedia_info <- db_intermedia$find(query = paste0('{"proposal-id": "', proposal_id, '"}'))
  p_intermedia <- p_intermedia_info$initial_phase[[1]]
  # Join everything onto the hospital sales rows and rename to the snake_case
  # "p_" column names used by the downstream calculation functions.
  p_hospital_sales_info <- p_hospital_sales_report_info %>%
    left_join(product, by = c("goods-config-id" = "_id")) %>%
    left_join(hospital, by = c("dest-config-id" = "_id")) %>%
    left_join(p_intermedia, by = c("hospital-id" = "hosp_id")) %>%
    select(`hospital-id`, `hosp_size`, `product-id`, `life-cycle`,
           `sales`, `share`, `offer_attractiveness`, `customer_relationship`, `potential`)
  colnames(p_hospital_sales_info) <- c("hosp_id", "hosp_size", "prod_id", "life_cycle",
                                       "p_sales", "p_market_share", "p_offer_attractiveness", "p_customer_relationship", "p_potential")
  ## p_rep ----
  # Previous-phase ability scores, one document per representative.
  db_personnel_assessment <- mongo(collection = "PersonnelAssessment", db = options()$mongodb$db, url = options()$mongodb$host)
  personnel_assessment_info <- db_personnel_assessment$find(query = paste0('{"_id": {"$oid": "', personnel_assessment_id, '"}}'))
  rep_ability_ids <- personnel_assessment_info$`representative-ability-ids`[[1]]
  db_rep_ability <- mongo(collection = "RepresentativeAbility", db = options()$mongodb$db, url = options()$mongodb$host)
  p_rep_ability_info <- data.frame()
  for (i in rep_ability_ids) {
    info <- db_rep_ability$find(query = paste0('{"_id": {"$oid": "', i, '"}}'))
    p_rep_ability_info <- bind_rows(p_rep_ability_info, info)
  }
  p_rep_ability_info1 <- p_rep_ability_info %>%
    select(`representative-id`, `product-knowledge`, `sales-ability`,
           `regional-management-ability`, `job-enthusiasm`, `behavior-validity`)
  colnames(p_rep_ability_info1) <- c("rep_id", "p_product_knowledge", "p_sales_skills", "p_territory_management_ability",
                                     "p_work_motivation", "p_behavior_efficiency")
  ## output ----
  output <- list(p_hospital_sales_info = p_hospital_sales_info,
                 p_rep_ability_info = p_rep_ability_info1)
  return(output)
}
##------------------------------------------------------------------------------
##-- Get input data
##------------------------------------------------------------------------------
# Load the current phase's user inputs from MongoDB and normalize them into
# three data frames: business_input (per hospital/rep/goods row), rep_input
# (per representative) and manager_input (a single row of manager time splits).
# NOTE(review): `proposal_id` and `phase` are read from the calling
# environment (globals), not passed as arguments -- confirm they are defined
# before this function is called.
get_input_data <- function(input_id) {
  ## paper_input ----
  # The Paperinput document holds id lists for the three input kinds.
  db_input <- mongo(collection = "Paperinput", db = options()$mongodb$db, url = options()$mongodb$host)
  input_info <- db_input$find(query = paste0('{"_id": {"$oid": "', input_id, '"}}'))
  business_input_ids <- input_info$`business-input-ids`[[1]]
  rep_input_ids <- input_info$`representative-input-ids`[[1]]
  manager_input_id <- input_info$`manager-input-ids`[[1]]
  ## total_budget ----
  # The manager resource (resource-type == 0) of this proposal/phase's
  # scenario carries the total budget used to normalize per-hospital budgets.
  db_scenario <- mongo(collection = "Scenario", db = options()$mongodb$db, url = options()$mongodb$host)
  scenario_info <- db_scenario$find(query = paste0('{"proposal-id": "', proposal_id, '", "phase": ', format(phase, nsmall = 1), '}'), fields = '{}')
  scenario_id <- scenario_info$`_id`
  db_resource <- mongo(collection = "ResourceConfig", db = options()$mongodb$db, url = options()$mongodb$host)
  resource_info <- db_resource$find(query = paste0('{"scenario-id": "', scenario_id, '"}'), fields = '{}')
  manager_config_id <- resource_info$`resource-id`[which(resource_info$`resource-type` == 0)]
  db_manager <- mongo(collection = "ManagerConfig", db = options()$mongodb$db, url = options()$mongodb$host)
  manager_info <- db_manager$find(query = paste0('{"_id": {"$oid": "', manager_config_id, '"}}'))
  total_budget <- manager_info$`total-budgets`
  ## business_input ----
  db_business_input <- mongo(collection = "Businessinput", db = options()$mongodb$db, url = options()$mongodb$host)
  business_input_info <- data.frame()
  for (i in business_input_ids) {
    info <- db_business_input$find(query = paste0('{"_id": {"$oid": "', i, '"}}'))
    business_input_info <- bind_rows(business_input_info, info)
  }
  # NOTE(review): resource_config_ids is computed but never used below.
  resource_config_ids <- business_input_info$`resource-config-id`[!duplicated(business_input_info$`resource-config-id`)]
  goods_config_id <- business_input_info$`goods-config-id`[!duplicated(business_input_info$`goods-config-id`)]
  dest_config_ids <- business_input_info$`dest-config-id`[!duplicated(business_input_info$`dest-config-id`)]
  # representative
  # Keep only representative resources (resource-type == 1).
  resource_info <- resource_info %>%
    filter(`resource-type` == 1)
  db_rep <- mongo(collection = "RepresentativeConfig", db = options()$mongodb$db, url = options()$mongodb$host)
  resource_ids <- resource_info$`resource-id`
  rep_info <- data.frame()
  for (i in resource_ids) {
    info <- db_rep$find(query = paste0('{"_id": {"$oid": "', i, '"}}'), fields = '{}')
    rep_info <- bind_rows(rep_info, info)
  }
  # NOTE(review): `representative` is built here but not referenced again;
  # the joins below use resource_info/rep_info directly.
  representative <- resource_info %>%
    left_join(rep_info, by = c("resource-id" = "_id")) %>%
    select(`_id`, `representative-id`)
  # product
  # Map goods-config -> product id (single goods config assumed; not looped).
  db_goods <- mongo(collection = "GoodsConfig", db = options()$mongodb$db, url = options()$mongodb$host)
  goods_info <- db_goods$find(query = paste0('{"_id": {"$oid": "', goods_config_id, '"}}'), fields = '{}')
  db_product <- mongo(collection = "ProductConfig", db = options()$mongodb$db, url = options()$mongodb$host)
  product_info <- db_product$find(query = paste0('{"_id": {"$oid": "', goods_info$`goods-id`, '"}}'), fields = '{}')
  product <- goods_info %>%
    left_join(product_info, by = c("goods-id" = "_id")) %>%
    select(`_id`, `product-id`)
  # hospital
  # Map dest-config -> hospital id (DestConfig, then HospitalConfig).
  db_dest <- mongo(collection = "DestConfig", db = options()$mongodb$db, url = options()$mongodb$host)
  dest_info <- data.frame()
  for (i in dest_config_ids) {
    info <- db_dest$find(query = paste0('{"_id": {"$oid": "', i, '"}}'), fields = '{}')
    dest_info <- bind_rows(dest_info, info)
  }
  db_hospital <- mongo(collection = "HospitalConfig", db = options()$mongodb$db, url = options()$mongodb$host)
  dest_ids <- dest_info$`dest-id`
  hospital_info <- data.frame()
  for (i in dest_ids) {
    info <- db_hospital$find(query = paste0('{"_id": {"$oid": "', i, '"}}'), fields = '{}')
    hospital_info <- bind_rows(hospital_info, info)
  }
  hospital <- dest_info %>%
    left_join(hospital_info, by = c("dest-id" = "_id")) %>%
    select(`_id`, `hospital-id`)
  # Assemble the per-row business input and rename to snake_case.
  business_input <- business_input_info %>%
    left_join(hospital, by = c("dest-config-id" = "_id")) %>%
    left_join(resource_info, by = c("resource-config-id" = "_id")) %>%
    left_join(rep_info, by = c("resource-id" = "_id")) %>%
    left_join(product, by = c("goods-config-id" = "_id")) %>%
    mutate(total_budget = total_budget) %>%
    select(`dest-config-id`, `hospital-id`, `resource-config-id`, `representative-id`, `goods-config-id`, `product-id`,
           `sales-target`, `budget`, `meeting-places`, `visit-time`, `total_budget`)
  colnames(business_input) <- c("dest_id", "hosp_id", "resource_id", "rep_id", "goods_id", "prod_id",
                                "quota", "budget", "meeting_attendance", "call_time_factor", "total_budget")
  ## rep_input ----
  # Per-representative training/coaching inputs, joined onto the rep roster.
  db_rep_input <- mongo(collection = "Representativeinput", db = options()$mongodb$db, url = options()$mongodb$host)
  rep_input_info <- data.frame()
  for (i in rep_input_ids) {
    info <- db_rep_input$find(query = paste0('{"_id": {"$oid": "', i, '"}}'))
    rep_input_info <- bind_rows(rep_input_info, info)
  }
  rep_input <- resource_info %>%
    left_join(rep_info, by = c("resource-id" = "_id")) %>%
    select(`_id`, `representative-id`) %>%
    left_join(rep_input_info, by = c("_id" = "resource-config-id")) %>%
    select(`_id`, `representative-id`, `product-knowledge-training`, `sales-ability-training`, `region-training`,
           `performance-training`, `vocational-development`, `ability-coach`, `assist-access-time`)
  colnames(rep_input) <- c("resource_id", "rep_id", "product_knowledge_training", "sales_skills_training", "territory_management_training",
                           "performance_review", "career_development_guide", "one_on_one_coaching", "field_work")
  ## manager_input ----
  # Single-row manager time allocation.
  db_manager_input <- mongo(collection = "Managerinput", db = options()$mongodb$db, url = options()$mongodb$host)
  manager_input_info <- db_manager_input$find(query = paste0('{"_id": {"$oid": "', manager_input_id, '"}}'))
  manager_input <- select(manager_input_info,
                          `strategy-analysis-time`, `admin-work-time`, `client-management-time`, `kpi-analysis-time`, `team-meeting-time`)
  colnames(manager_input) <- c("business_strategy_planning", "admin_work", "kol_management", "employee_kpi_and_compliance_check", "team_meeting")
  ## output ----
  output <- list(business_input = business_input,
                 rep_input = rep_input,
                 manager_input = manager_input)
  return(output)
}
##------------------------------------------------------------------------------
##-- Generate data to use
##------------------------------------------------------------------------------
# Join the three input tables with the previous phase's data into the single
# wide table consumed by get_results(). Behavior-identical rewrite of the
# original single pipe chain, using named intermediates.
get_data2use <- function(p_data, input_data) {
  # Attach per-rep inputs to the per-hospital business rows.
  combined <- left_join(input_data$business_input, input_data$rep_input,
                        by = c("resource_id", "rep_id"))
  # Replicate the single manager-input row onto every business row.
  # NOTE(review): rep(1, each = 10) hard-codes 10 rows -- assumes exactly ten
  # business rows; confirm.
  combined <- bind_cols(combined, input_data$manager_input[rep(1, each = 10), ])
  # Attach previous-phase hospital metrics and representative abilities.
  combined <- left_join(combined, p_data$p_hospital_sales_info,
                        by = c("hosp_id", "prod_id"))
  combined <- left_join(combined, p_data$p_rep_ability_info, by = c("rep_id"))
  # Return the columns in the canonical order expected downstream.
  select(combined,
         `dest_id`, `hosp_id`, `hosp_size`, `p_sales`, `p_market_share`, `p_offer_attractiveness`, `p_customer_relationship`, `p_potential`,
         `resource_id`, `rep_id`, `p_territory_management_ability`, `p_sales_skills`, `p_product_knowledge`, `p_behavior_efficiency`, `p_work_motivation`,
         `goods_id`, `prod_id`, `life_cycle`, `quota`, `budget`, `meeting_attendance`, `call_time_factor`, `total_budget`,
         `field_work`, `one_on_one_coaching`, `team_meeting`, `business_strategy_planning`, `admin_work`, `employee_kpi_and_compliance_check`, `kol_management`,
         `territory_management_training`, `sales_skills_training`, `product_knowledge_training`, `performance_review`, `career_development_guide`)
}
##------------------------------------------------------------------------------
##-- Get curves and weightages
##------------------------------------------------------------------------------
# Fetch one named field (`type`, e.g. "curves" or "weightages") from the
# Intermedia document matching `uuid`, and unwrap the mongolite result into a
# plain named list (each element reduced to its first/inner value).
get_intermedia <- function(uuid, type) {
  db_intermedia <- mongo(collection = "Intermedia", db = options()$mongodb$db, url = options()$mongodb$host)
  # Projection returns only the requested field; [[1]] drops the wrapper.
  intermedia <- db_intermedia$find(query = paste0('{"uuid": "', uuid, '"}'), fields = paste0('{"_id": 0, "', type, '": 1}'))[[1]]
  intermedia <- as.list(intermedia)
  # mongolite nests each sub-document in a one-element list; flatten them.
  for (i in names(intermedia)) {
    intermedia[[i]] <- intermedia[[i]][[1]]
  }
  return(intermedia)
}
##------------------------------------------------------------------------------
##-- Curves computation
##------------------------------------------------------------------------------
# Evaluate a piecewise-linear curve at `input`.
#
# `curves` is a named list; `curves[[curve]]` is a data.frame whose first two
# columns are x and y sample points (y must be named "y" and be column 2).
# Values outside the sampled x-range are clamped to the boundary y. Inside the
# range, the result is a linear interpolation between the sample point nearest
# to `input` and the nearest of the remaining points.
#
# Fixes vs the original: the nearest-point index is computed once instead of
# twice; scalar `ifelse()` replaced with if/else; and when the two bracketing
# points share the same x (duplicate x in the table) the original divided by
# zero and returned NaN -- we now return that shared point's y.
curve_func <- function(curve, curves, input) {
  curve_data <- curves[[curve]]
  # Clamp below / above the sampled range.
  if (input < min(curve_data$x)) {
    return(curve_data[which.min(curve_data$x), 2])
  }
  if (input > max(curve_data$x)) {
    return(curve_data[which.max(curve_data$x), 2])
  }
  # Nearest sample point, then the nearest among the remaining points.
  nearest <- which.min(abs(input - curve_data$x))
  left <- curve_data[nearest, ]
  rest <- curve_data[-nearest, ]
  right <- rest[which.min(abs(input - rest$x)), ]
  # Degenerate bracket: both points at the same x -> no slope to interpolate.
  if (left$x == right$x) {
    return(left$y)
  }
  # Linear interpolation; the branch only orders (left, right) by x.
  if (left$x <= right$x) {
    (1 - (input - left$x) / (right$x - left$x)) * left$y +
      (1 - (right$x - input) / (right$x - left$x)) * right$y
  } else {
    (1 - (input - right$x) / (left$x - right$x)) * right$y +
      (1 - (left$x - input) / (left$x - right$x)) * left$y
  }
}
##------------------------------------------------------------------------------
##-- Calculation function
##------------------------------------------------------------------------------
# Core simulation step. Starting from the wide input table built by
# get_data2use(), derive representative abilities, deployment quality, offer
# attractiveness and finally market share / sales per hospital-product row.
# `curves` is a named list of data.frames(x, y) evaluated via curve_func();
# `weightages` is a named list of weight sets (weightage02..weightage11).
# Returns the input table with all intermediate and final columns appended.
get_results <- function(dat, curves, weightages) {
  # Express each row's budget as a percentage of the total budget.
  dat <- dat %>%
    mutate(budget = budget / total_budget * 100)
  # general ability
  # Trainings lift each prior score toward the 10-point ceiling; the weighted
  # sum (x10) gives a 0-100 general ability score.
  dat01 <- dat %>%
    mutate(work_motivation = p_work_motivation + (10 - p_work_motivation) * 0.15 * (performance_review + career_development_guide),
           territory_management_ability = p_territory_management_ability + (10 - p_territory_management_ability) * 0.3 * territory_management_training,
           sales_skills = p_sales_skills + (10 - p_sales_skills) * 0.3 * sales_skills_training,
           product_knowledge = p_product_knowledge + (10 - p_product_knowledge) * 0.3 * product_knowledge_training,
           behavior_efficiency_factor = sapply(one_on_one_coaching, function(x) {curve_func("curve09", curves, x)}),
           behavior_efficiency = p_behavior_efficiency + (10 - p_behavior_efficiency) * behavior_efficiency_factor,
           general_ability = (territory_management_ability * weightages[["weightage02"]]$territory_management_ability +
                                sales_skills * weightages[["weightage02"]]$sales_skills +
                                product_knowledge * weightages[["weightage02"]]$product_knowledge +
                                behavior_efficiency * weightages[["weightage02"]]$behavior_efficiency +
                                work_motivation * weightages[["weightage02"]]$work_motivation) * 10)
  # rep ability efficiency
  # Quota sanity: quotas far from last period's sales (<0.5x or >2x) are
  # penalized (0.8) before being mapped through curve14.
  dat02 <- dat01 %>%
    mutate(quota_restriction_factor = ifelse(quota / p_sales < 0.5 | quota / p_sales > 2,
                                             0.8,
                                             ifelse(quota / p_sales >= 0.5 & quota / p_sales <= 2,
                                                    1,
                                                    0)),
           quota_restriction_factor = sapply(quota_restriction_factor, function(x) {curve_func("curve14", curves, x)}),
           rep_ability_efficiency = general_ability * weightages[["weightage03"]]$general_ability +
             call_time_factor * weightages[["weightage03"]]$call_time_factor +
             quota_restriction_factor * weightages[["weightage03"]]$quota_restriction_factor)
  # field work factor
  dat03 <- dat02 %>%
    mutate(field_work_factor = sapply(field_work, function(x) {curve_func("curve16", curves, x)}))
  # deployment quality
  # Each manager time allocation is scored by its own curve, then combined.
  dat04 <- dat03 %>%
    mutate(business_strategy_planning_factor = sapply(business_strategy_planning, function(x) {curve_func("curve18", curves, x)}),
           admin_work_factor = sapply(admin_work, function(x) {curve_func("curve19", curves, x)}),
           employee_kpi_and_compliance_check_factor = sapply(employee_kpi_and_compliance_check, function(x) {curve_func("curve20", curves, x)}),
           team_meeting_factor = sapply(team_meeting, function(x) {curve_func("curve21", curves, x)}),
           kol_management_factor = sapply(kol_management, function(x) {curve_func("curve22", curves, x)}),
           deployment_quality = business_strategy_planning_factor * weightages[["weightage04"]]$business_strategy_planning_factor +
             admin_work_factor * weightages[["weightage04"]]$admin_work_factor +
             employee_kpi_and_compliance_check_factor * weightages[["weightage04"]]$employee_kpi_and_compliance_check_factor +
             team_meeting_factor * weightages[["weightage04"]]$team_meeting_factor +
             kol_management_factor * weightages[["weightage04"]]$kol_management_factor)
  # sales performance
  dat05 <- dat04 %>%
    mutate(sales_performance = rep_ability_efficiency * weightages[["weightage05"]]$rep_ability_efficiency +
             field_work_factor * weightages[["weightage05"]]$field_work_factor +
             deployment_quality * weightages[["weightage05"]]$deployment_quality)
  # customer relationship
  # Budget and meeting attendance are scored with hospital-size-specific
  # curves (hosp_size 1/2/3 -> curves 02/03/04 and 05/06/07).
  dat06 <- dat05 %>%
    mutate(budget_factor = ifelse(hosp_size == 1,
                                  sapply(budget, function(x) {curve_func("curve02", curves, x)}),
                                  ifelse(hosp_size == 2,
                                         sapply(budget, function(x) {curve_func("curve03", curves, x)}),
                                         ifelse(hosp_size == 3,
                                                sapply(budget, function(x) {curve_func("curve04", curves, x)}),
                                                0))),
           meeting_attendance_factor = ifelse(hosp_size == 1,
                                              sapply(meeting_attendance, function(x) {curve_func("curve05", curves, x)}),
                                              ifelse(hosp_size == 2,
                                                     sapply(meeting_attendance, function(x) {curve_func("curve06", curves, x)}),
                                                     ifelse(hosp_size == 3,
                                                            sapply(meeting_attendance, function(x) {curve_func("curve07", curves, x)}),
                                                            0))),
           customer_relationship_factor = budget_factor * weightages[["weightage06"]]$budget_factor +
             meeting_attendance_factor * weightages[["weightage06"]]$meeting_attendance_factor,
           customer_relationship = p_customer_relationship + (100 - p_customer_relationship) * customer_relationship_factor)
  # current oa
  dat07 <- dat06 %>%
    mutate(current_oa = sales_performance * weightages[["weightage07"]]$sales_performance +
             customer_relationship * weightages[["weightage07"]]$customer_relationship)
  # offer attractiveness
  # Blend current score with the previous phase's, weighted by product
  # life-cycle stage: "导入期" = introduction phase, "成熟期" = maturity phase.
  dat08 <- dat07 %>%
    mutate(offer_attractiveness = ifelse(life_cycle == "导入期",
                                         current_oa * weightages[["weightage10"]]$current_oa +
                                           p_offer_attractiveness * weightages[["weightage10"]]$p_offer_attractiveness,
                                         ifelse(life_cycle == "成熟期",
                                                current_oa * weightages[["weightage11"]]$current_oa +
                                                  p_offer_attractiveness * weightages[["weightage11"]]$p_offer_attractiveness,
                                                0)))
  # market share, sales
  # curve28 maps attractiveness to a share percentage; sales are quarterly
  # (annual potential * share / 4). quota_rate guards against a zero quota.
  dat09 <- dat08 %>%
    mutate(potential = p_potential,
           market_share = sapply(offer_attractiveness, function(x) {curve_func("curve28", curves, x)}),
           market_share = round(market_share / 100, 2),
           sales = round(potential * market_share / 4, 2),
           quota_rate = ifelse(quota == 0,
                               0,
                               round(sales / quota, 2)))
  return(dat09)
}
##------------------------------------------------------------------------------
##-- Update representative information
##------------------------------------------------------------------------------
# Roll hospital-level results up to one row per representative and update the
# ability scores for the next phase. Hitting 90-120% of quota boosts work
# motivation toward the 10-point ceiling; all scores are rounded to 1 decimal.
# Columns are renamed to the hyphenated Mongo schema on return.
get_rep_ability <- function(results) {
  rep_ability <- results %>%
    # Ability scores are constant within a rep, so grouping by them keeps one
    # row per rep while letting us sum the sales figures.
    group_by(rep_id, product_knowledge, sales_skills, territory_management_ability, work_motivation, behavior_efficiency) %>%
    summarise(potential = sum(potential),
              sales = sum(sales),
              quota = sum(quota)) %>%
    ungroup() %>%
    # Quota achievement in [0.9, 1.2] boosts motivation; otherwise unchanged.
    mutate(work_motivation = ifelse(sales / quota >= 0.9 & sales / quota <= 1.2,
                                    work_motivation + (10 - work_motivation) * 0.2,
                                    ifelse(sales / quota < 0.9 | sales / quota > 1.2,
                                           work_motivation,
                                           0))) %>%
    mutate(product_knowledge = round(product_knowledge, 1),
           sales_skills = round(sales_skills, 1),
           territory_management_ability = round(territory_management_ability, 1),
           work_motivation = round(work_motivation, 1),
           behavior_efficiency = round(behavior_efficiency, 1)) %>%
    select(`rep_id`, `product_knowledge`, `sales_skills`, `territory_management_ability`, `work_motivation`, `behavior_efficiency`)
  colnames(rep_ability) <- c("representative-id", "product-knowledge", "sales-ability", "regional-management-ability", "job-enthusiasm", "behavior-validity")
  return(rep_ability)
}
# Update behavioral KPIs (coverage and call frequencies) from the previous
# KPI table and the updated rep abilities. Reps are bucketed by
# behavior-validity (class1) and job-enthusiasm (class2); class1 drives the
# coverage shift and the base frequency ranges, class2 then nudges the
# frequencies up or down. Random draws (sample) make results non-deterministic.
#
# BUG FIX: the class2 top bucket used `job-enthusiasm` < 10, so a rep with a
# score of exactly 10 fell through to class 0 and had all frequencies zeroed.
# The parallel class1 definition uses <= 10; class2 now matches it.
get_action_kpi <- function(p_action_kpi, rep_ability) {
  action_kpi <- p_action_kpi %>%
    left_join(rep_ability, by = c("representative-id")) %>%
    # class1: behavior-validity bucket 1..4 ([0,3), [3,6), [6,8), [8,10]).
    mutate(class1 = ifelse(`behavior-validity` >= 0 & `behavior-validity` < 3,
                           1,
                           ifelse(`behavior-validity` >= 3 & `behavior-validity` < 6,
                                  2,
                                  ifelse(`behavior-validity` >= 6 & `behavior-validity` < 8,
                                         3,
                                         ifelse(`behavior-validity` >= 8 & `behavior-validity` <= 10,
                                                4,
                                                0))))) %>%
    # Coverage shifts by a random amount whose sign/size depends on class1.
    mutate(`target-coverage` = ifelse(class1 == 1,
                                      sapply(`target-coverage`, function(x) {x - sample(5:10, 1)}),
                                      ifelse(class1 == 2,
                                             sapply(`target-coverage`, function(x) {x - sample(0:5, 1)}),
                                             ifelse(class1 == 3,
                                                    sapply(`target-coverage`, function(x) {x + sample(0:5, 1)}),
                                                    ifelse(class1 == 4,
                                                           sapply(`target-coverage`, function(x) {x + sample(5:10, 1)}),
                                                           0))))) %>%
    # class2: job-enthusiasm bucket 1..4 ([0,3), [3,6), [6,8), [8,10]).
    mutate(class2 = ifelse(`job-enthusiasm` >= 0 & `job-enthusiasm` < 3,
                           1,
                           ifelse(`job-enthusiasm` >= 3 & `job-enthusiasm` < 6,
                                  2,
                                  ifelse(`job-enthusiasm` >= 6 & `job-enthusiasm` < 8,
                                         3,
                                         ifelse(`job-enthusiasm` >= 8 & `job-enthusiasm` <= 10,
                                                4,
                                                0))))) %>%
    # Base call frequencies drawn from class1-specific ranges.
    mutate(`high-level-frequency` = ifelse(class1 == 1,
                                           sapply(`high-level-frequency`, function(x) {sample(13:14, 1)}),
                                           ifelse(class1 == 2,
                                                  sapply(`high-level-frequency`, function(x) {sample(14:15, 1)}),
                                                  ifelse(class1 == 3,
                                                         sapply(`high-level-frequency`, function(x) {sample(16:18, 1)}),
                                                         ifelse(class1 == 4,
                                                                sapply(`high-level-frequency`, function(x) {sample(19:22, 1)}),
                                                                0)))),
           `middle-level-frequency` = ifelse(class1 == 1,
                                             sapply(`middle-level-frequency`, function(x) {sample(13:14, 1)}),
                                             ifelse(class1 == 2,
                                                    sapply(`middle-level-frequency`, function(x) {sample(13:14, 1)}),
                                                    ifelse(class1 == 3,
                                                           sapply(`middle-level-frequency`, function(x) {sample(12:13, 1)}),
                                                           ifelse(class1 == 4,
                                                                  sapply(`middle-level-frequency`, function(x) {sample(12:13, 1)}),
                                                                  0)))),
           `low-level-frequency` = ifelse(class1 == 1,
                                          sapply(`low-level-frequency`, function(x) {sample(13:14, 1)}),
                                          ifelse(class1 == 2,
                                                 sapply(`low-level-frequency`, function(x) {sample(12:13, 1)}),
                                                 ifelse(class1 == 3,
                                                        sapply(`low-level-frequency`, function(x) {sample(12:13, 1)}),
                                                        ifelse(class1 == 4,
                                                               sapply(`low-level-frequency`, function(x) {sample(11:12, 1)}),
                                                               0))))) %>%
    # class2 then nudges each frequency up or down.
    mutate(`high-level-frequency` = ifelse(class2 == 1,
                                           sapply(`high-level-frequency`, function(x) {x - sample(1:2, 1)}),
                                           ifelse(class2 == 2,
                                                  sapply(`high-level-frequency`, function(x) {x - sample(0:1, 1)}),
                                                  ifelse(class2 == 3,
                                                         sapply(`high-level-frequency`, function(x) {x + sample(0:1, 1)}),
                                                         ifelse(class2 == 4,
                                                                `high-level-frequency` + 1,
                                                                0)))),
           `middle-level-frequency` = ifelse(class2 == 1,
                                             `middle-level-frequency` - 2,
                                             ifelse(class2 == 2,
                                                    `middle-level-frequency` - 1,
                                                    ifelse(class2 == 3,
                                                           sapply(`middle-level-frequency`, function(x) {x + sample(0:1, 1)}),
                                                           ifelse(class2 == 4,
                                                                  `middle-level-frequency` + 1,
                                                                  0)))),
           `low-level-frequency` = ifelse(class2 == 1,
                                          `low-level-frequency` - 2,
                                          ifelse(class2 == 2,
                                                 `low-level-frequency` - 1,
                                                 ifelse(class2 == 3,
                                                        sapply(`low-level-frequency`, function(x) {x + sample(0:1, 1)}),
                                                        ifelse(class2 == 4,
                                                               `low-level-frequency` + 1,
                                                               0))))) %>%
    select(`representative-id`, `target-number`, `target-coverage`, `high-level-frequency`, `middle-level-frequency`, `low-level-frequency`)
  return(action_kpi)
}
##------------------------------------------------------------------------------
##-- Generate reports
##------------------------------------------------------------------------------
# Build the per-hospital report rows: add period-over-period growth and rename
# the columns to the hyphenated field names used by the Mongo report schema.
get_hosp_report <- function(results) {
  # Growth relative to the previous phase's sales, rounded to 2 decimals.
  report <- mutate(results, growth = round(sales / p_sales - 1, 2))
  report <- select(report,
                   `dest_id`, `resource_id`, `goods_id`, `potential`, `sales`,
                   `quota`, `market_share`, `quota_rate`, `growth`)
  colnames(report) <- c("dest-config-id", "resource-config-id", "goods-config-id",
                        "potential", "sales", "sales-quota", "share",
                        "quota-achievement", "sales-growth")
  report
}
# Build the per-representative report: aggregate hospital-level results to one
# row per (representative, product) and recompute share, quota achievement and
# growth at that level. Columns are renamed to the hyphenated Mongo schema.
get_rep_report <- function(results) {
  rep_report <- results %>%
    select(`resource_id`, `goods_id`, `potential`, `p_sales`, `sales`, `quota`) %>%
    group_by(resource_id, goods_id) %>%
    summarise(potential = sum(potential),
              p_sales = sum(p_sales),
              sales = sum(sales),
              quota = sum(quota)) %>%
    ungroup() %>%
    # The * 4 mirrors get_results, where sales = potential * share / 4
    # (quarterly sales vs annual potential -- NOTE(review): confirm).
    mutate(market_share = round(sales / potential * 4, 2),
           quota_rate = ifelse(quota == 0,
                               0,
                               round(sales / quota, 2)),
           growth = round(sales / p_sales - 1, 2)) %>%
    select(`resource_id`, `goods_id`, `potential`, `sales`, `quota`, `market_share`, `quota_rate`, `growth`)
  colnames(rep_report) <- c("resource-config-id", "goods-config-id", "potential", "sales", "sales-quota", "share", "quota-achievement", "sales-growth")
  return(rep_report)
}
# Build the product-level report. The simulated product's numbers come from
# aggregating `results`; two competitor products (rows 2-3 of the previous
# product sales report) get synthetic numbers: the remaining market share
# (drawn as 50-55% total, randomly split 60-75 / 40-25) scaled to sales via
# the simulated product's implied potential. Non-deterministic via sample().
get_prod_report <- function(results, p_sales_report_id) {
  # Simulated product: aggregate all hospital rows.
  prod1_report <- results %>%
    select(`goods_id`, `potential`, `p_sales`, `sales`, `quota`) %>%
    group_by(goods_id) %>%
    summarise(potential = sum(potential),
              p_sales = sum(p_sales),
              sales = sum(sales),
              quota = sum(quota)) %>%
    ungroup() %>%
    # Same share convention as get_rep_report (sales are quarterly).
    mutate(market_share = round(sales / potential * 4, 2),
           quota_rate = ifelse(quota == 0,
                               0,
                               round(sales / quota, 2)),
           growth = round(sales / p_sales - 1, 2)) %>%
    select(`goods_id`, `sales`, `quota`, `market_share`, `quota_rate`, `growth`)
  # Previous phase's product reports (first three, sorted by goods id) supply
  # the competitor goods ids and their prior sales for the growth figures.
  db_sales_report <- mongo(collection = "SalesReport", db = options()$mongodb$db, url = options()$mongodb$host)
  sales_report_info <- db_sales_report$find(query = paste0('{"_id": {"$oid": "', p_sales_report_id, '"}}'))
  product_sales_report_ids <- head(sales_report_info$`product-sales-report-ids`[[1]], 3)
  db_product_sales_report <- mongo(collection = "ProductSalesReport", db = options()$mongodb$db, url = options()$mongodb$host)
  p_product_sales_report_info <- data.frame()
  for (i in product_sales_report_ids) {
    info <- db_product_sales_report$find(query = paste0('{"_id": {"$oid": "', i, '"}}'), fields = '{}')
    p_product_sales_report_info <- bind_rows(p_product_sales_report_info, info)
  }
  p_product_sales_report_info <- arrange(p_product_sales_report_info, `goods-config-id`)
  # Split the remaining market share between the two competitors.
  market_share1 <- sample(50:55, 1)/100 - prod1_report$market_share
  market_share2 <- market_share1 * sample(60:75, 1)/100
  market_share3 <- market_share1 - market_share2
  # Implied market potential from the simulated product's own numbers.
  potential <- prod1_report$sales / prod1_report$market_share
  prod2_report <- tibble(goods_id = p_product_sales_report_info$`goods-config-id`[2:3],
                         market_share = c(market_share2, market_share3)) %>%
    mutate(sales = round(potential * market_share, 2),
           # quota: sales rounded to the nearest 100k (round to -5 digits).
           quota = round(sales, -5),
           quota_rate = ifelse(quota == 0,
                               0,
                               round(sales / quota, 2)),
           growth = round(sales / p_product_sales_report_info$sales[2:3] - 1, 2),
           market_share = round(market_share, 2))
  prod_report <- bind_rows(prod1_report, prod2_report)
  colnames(prod_report) <- c("goods-config-id", "sales", "sales-quota", "share", "quota-achievement", "sales-growth")
  return(prod_report)
}
|
d1bd085d13dcda7a230c3a8498b97e5828ed8add
|
6c4464440bf42df3df8eb947b3a2798476dfac78
|
/PBSmodelling/man/resetGraph.Rd
|
62b01df3536f29ef9daa9faff7bde4c6732ed508
|
[] |
no_license
|
pbs-software/pbs-modelling
|
ad59ca19ced6536d2e44ff705e36a787341f60d7
|
44b14f20af33d5dee51401bad2ff3dce2dfd3cea
|
refs/heads/master
| 2023-01-11T16:18:06.846368
| 2023-01-06T22:45:05
| 2023-01-06T22:45:05
| 37,491,656
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,076
|
rd
|
resetGraph.Rd
|
\name{resetGraph}
\alias{resetGraph}
\title{Reset par Values for a Plot}
\description{
Reset \code{par()} to default values to ensure that a new plot
utilizes a full figure region. This function helps manage the device
surface, especially after previous plotting has altered it.
}
\usage{resetGraph(reset.mf=TRUE)}
\arguments{
\item{reset.mf}{if \code{TRUE} reset the multi-frame status; otherwise
preserve \code{mfrow}, \code{mfcol}, and \code{mfg}}
}
\details{
This function resets \code{par()} to its default values.
If \code{reset.mf=TRUE}, it also clears the graphics device with
\code{frame()}. Otherwise, the values of \code{mfrow}, \code{mfcol},
and \code{mfg} are preserved, and graphics continues as usual in
the current plot. Use \code{resetGraph} only before a high level
command that would routinely advance to a new frame.
}
\value{
invisible return of the reset value \code{par()}
}
\author{
Jon T. Schnute, Pacific Biological Station, Fisheries and Oceans Canada, Nanaimo BC
}
\keyword{device}
|
59ed252e089c16cde05601837df50c68d9f73e07
|
2db8d6baaf70d7254c7cd9b1d2098f2c61580c96
|
/DataSetOne/Occupancy_Tuesday.R
|
78a777bbc13d4a3f770e81a2c1c08b70d6d2207a
|
[] |
no_license
|
ShaneColeman/Big_Data_Occupancy
|
9bc1095bdfc682a772594fffcf2d1188298f1b1c
|
bfdc95d283cdd033f21f51228579df82684d48a2
|
refs/heads/master
| 2021-01-01T05:09:26.926197
| 2016-05-02T22:10:59
| 2016-05-02T22:10:59
| 56,057,718
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,032
|
r
|
Occupancy_Tuesday.R
|
# Occupancy_Tuesday.R
# Exploratory analysis of Tuesday's room-occupancy measurements: frequency
# counts, min/max ranges, summaries, histograms, occupancy boxplots, and a
# classification tree.
#
# NOTE(review): this script reads `occupancyTuesday` from the global
# environment and assumes plyr (count), rpart, rattle, rpart.plot and
# RColorBrewer are already attached -- see the commented library() calls
# here and before the decision-tree section.
#library(plyr)
# Colour vector recycled across histogram bars (equivalent to 1:7).
colourHist <- c(1:3,4:7)
# Frequency tables for each measured attribute (plyr::count); built for
# interactive inspection and not used further below.
TuesdayTemperature <- count(occupancyTuesday,"Temperature")
TuesdayHumidity <- count(occupancyTuesday,"Humidity")
TuesdayLight <- count(occupancyTuesday,"Light")
TuesdayCO2 <- count(occupancyTuesday,"CO2")
TuesdayHumidityRatio <- count(occupancyTuesday,"HumidityRatio")
# Pull each measurement column into its own vector; occupancy becomes a
# factor so the boxplots split into occupied/unoccupied groups.
temperatureTuesday <- occupancyTuesday$Temperature
humidityTuesday <- occupancyTuesday$Humidity
lightTuesday <- occupancyTuesday$Light
co2Tuesday <- occupancyTuesday$CO2
humidityRatioTuesday <- occupancyTuesday$HumidityRatio
occupancyTuesdayValue <- as.factor(occupancyTuesday$Occupancy)
# Minimum / maximum of each attribute (printed to the console).
#Temperature
min(temperatureTuesday)
max(temperatureTuesday)
#Humidity
min(humidityTuesday)
max(humidityTuesday)
#Light
min(lightTuesday)
max(lightTuesday)
#CO2
min(co2Tuesday)
max(co2Tuesday)
#Humidity Ratio
min(humidityRatioTuesday)
max(humidityRatioTuesday)
# Console summaries (quartiles/mean) for each attribute.
summary(temperatureTuesday)
summary(humidityTuesday)
summary(lightTuesday)
summary(co2Tuesday)
summary(humidityRatioTuesday)
# Histograms: distribution of each attribute over Tuesday.
hist(temperatureTuesday, main = "Tuesday's Temperature Range", xlab = "Temperature",
     ylab = "Frequency", col = colourHist, breaks = 5)
hist(humidityTuesday, main = "Tuesday's Humidity Range", xlab = "Humidity",
     ylab = "Frequency", col = colourHist, breaks = 5)
hist(lightTuesday, main = "Tuesday's Light Range", xlab = "Light",
     ylab = "Frequency", col = colourHist, breaks = 5)
hist(co2Tuesday, main = "Tuesday's CO2 Range", xlab = "CO2",
     ylab = "Frequency", col = colourHist, breaks = 5)
hist(humidityRatioTuesday, main = "Tuesday's Humidity Ratio Range", xlab = "Humidity",
     ylab = "Frequency", col = colourHist, breaks = 5)
# Boxplots: each attribute split by occupancy state (factor levels come from
# the Occupancy column -- presumably 0/1; confirm against the data's coding).
boxplot(temperatureTuesday ~ occupancyTuesdayValue, main = "Temperature due to Occupancy: \nTuesday",
        xlab = "Occupied", ylab = "Temperature", cex = 0, col = c("green","yellow"))
boxplot(humidityTuesday ~ occupancyTuesdayValue, main = "Humidity due to Occupancy: \nTuesday",
        xlab = "Occupied", ylab = "Humidity", cex = 0, col = c("green","yellow"))
boxplot(lightTuesday ~ occupancyTuesdayValue, main = "Light due to Occupancy: \nTuesday",
        xlab = "Occupied", ylab = "Light",cex = 0, col = c("green","yellow"))
boxplot(co2Tuesday ~ occupancyTuesdayValue, main = "CO2 due to Occupancy: \nTuesday" ,
        xlab = "Occupied", ylab = "CO2", cex = 0, col = c("green","yellow"))
boxplot(humidityRatioTuesday ~ occupancyTuesdayValue, main = "Humidity Ratio due to Occupancy: \nTuesday",
        xlab = "Occupied", ylab = "Humidity Ratio",cex = 0, col = c("green","yellow"))
# Classification tree predicting Occupancy from the time and sensor columns.
#library(rpart)
#library(rattle)
#library(rpart.plot)
#library(RColorBrewer)
tuesdayDecisionTree <- rpart(Occupancy ~ Day + TimePeriod + TimeOfDay + Temperature + Humidity +
                           Light + CO2 + HumidityRatio, data = occupancyTuesday, method = "class")
# Plain (prp) and styled (fancyRpartPlot) renderings of the fitted tree.
prp(tuesdayDecisionTree)
fancyRpartPlot(tuesdayDecisionTree)
|
e90477500ad68830c3b11dd72b5aac0e8fdddd94
|
2d34708b03cdf802018f17d0ba150df6772b6897
|
/googleanalyticsv3.auto/man/management.filters.delete.Rd
|
27c25dd1086e45022b096e78160493d39701897a
|
[
"MIT"
] |
permissive
|
GVersteeg/autoGoogleAPI
|
8b3dda19fae2f012e11b3a18a330a4d0da474921
|
f4850822230ef2f5552c9a5f42e397d9ae027a18
|
refs/heads/master
| 2020-09-28T20:20:58.023495
| 2017-03-05T19:50:39
| 2017-03-05T19:50:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 871
|
rd
|
management.filters.delete.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/analytics_functions.R
\name{management.filters.delete}
\alias{management.filters.delete}
\title{Delete a filter.}
\usage{
management.filters.delete(accountId, filterId)
}
\arguments{
\item{accountId}{Account ID to delete the filter for}
\item{filterId}{ID of the filter to be deleted}
}
\description{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
}
\details{
Authentication scopes used by this function are:
\itemize{
\item https://www.googleapis.com/auth/analytics.edit
}
Set \code{options(googleAuthR.scopes.selected = c(https://www.googleapis.com/auth/analytics.edit)}
Then run \code{googleAuthR::gar_auth()} to authenticate.
See \code{\link[googleAuthR]{gar_auth}} for details.
}
\seealso{
\href{https://developers.google.com/analytics/}{Google Documentation}
}
|
1efb25ac55c0cc72f726ec2785fc78f57f631101
|
fa6bf6d629ce6a6524526d4c8ec0c37600106780
|
/src/utitlities/bar.R
|
0a59210b54df8b04016501c932a20b28aaf31be4
|
[] |
no_license
|
whyshu/TWITTER_FAKE_NEWS_DETECTION
|
2ab17327f626452219d56e38e0c91781394aae15
|
837cef195bc8ad4b4d7f4ff021880de4625dc351
|
refs/heads/master
| 2021-05-08T20:28:53.096443
| 2018-01-31T00:29:40
| 2018-01-31T00:29:40
| 119,612,431
| 0
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 664
|
r
|
bar.R
|
# Demo barplots: a 2x2 classifier-accuracy matrix drawn twice on a 2x2 grid,
# plus an unrelated example barplot with a manually drawn y-axis.

# Counts of correctly / incorrectly classified messages per class.
correct <- c(1000, 1500)
wrong <- c(50, 75)
m <- c(correct, wrong)

# 2x2 summary matrix: rows = outcome, columns = message class.
mat <- matrix(m, nrow = 2, ncol = 2, byrow = TRUE,
              dimnames = list(c("Correct", "Wrong"), c("Spam", "NonSpam")))

# Lay out a 2x2 grid of plots on the current device.
par(mfrow = c(2, 2))

# Stacked accuracy barplot, drawn twice (one per grid cell).
barplot(mat, main = "Accuracy of classifier using training data",
        xlab = "Classes", col = c("green", "red"), legend = rownames(mat))
barplot(mat, main = "Accuracy of classifier using training data",
        xlab = "Classes", col = c("green", "red"), legend = rownames(mat))

# Second demo: labelled bars with a fixed 0-100 scale and explicit axis ticks.
B <- c(3, 2, 25, 37, 22, 34, 19)
barplot(B, main = "MY NEW BARPLOT", ylim = c(0, 100), col = rainbow(20),
        xlab = "LETTERS", ylab = "MY Y VALUES",
        names.arg = c("A", "B", "C", "D", "E", "F", "G"))
axis(2, at = seq(0, 100, 10))
|
50f4a07c78b66a75764372c7de05e35fb2289ca7
|
748fb3a6f9b194bdaad36813b090e56dd4f40582
|
/Week 3 2020/Passwords.R
|
3a9c28f24076a343ef182e29bf46d04be1b4d98c
|
[] |
no_license
|
Jazzalchemist/TidyTuesday
|
768fc0b2895bbddb121e4a44eb500bdcda0802fa
|
3ca4115502c4e5453a78edd207fc816263b18c4b
|
refs/heads/master
| 2022-08-31T02:08:33.291198
| 2022-08-09T05:13:33
| 2022-08-09T05:13:33
| 175,743,720
| 31
| 11
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,140
|
r
|
Passwords.R
|
## Tidy Tuesday Week 3 2020 - Passwords
## Data Source: Knowledge is Beautiful
## Draws a horizontal bar chart of password counts per category, highlighting
## the "name" category and annotating it with that category's mean strength.

# Load packages
library(tidyverse)
library(extrafont)
library(ggfittext)

# Import data (requires network access)
passwords <- readr::read_csv('https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2020/2020-01-14/passwords.csv')

# Inspect data
head(passwords)

# Wrangle data: one row per category with its password count, ordered so the
# most common category plots at the top after coord_flip().
# NOTE(review): na.omit() ignores its `category` argument and drops rows with
# an NA in *any* column; tidyr::drop_na(category) would target only that
# column -- confirm which behaviour is intended before changing it.
passwords_filtered <- passwords %>%
  na.omit(category) %>%
  select(category) %>%
  group_by(category) %>%
  tally() %>%
  arrange(-n) %>%
  mutate(category = fct_reorder(category, n))

# Mean password strength for the "name" category (used in the annotation below)
passwords_mean <- passwords %>%
  group_by(category) %>%
  summarise(mean = mean(strength)) %>%
  filter(category == "name")

# Set theme
my_font <- 'Century Gothic'
my_background <- 'gray95'
my_textcolour <- "gray17"
my_axiscolour <- "black"
my_theme <- theme(text = element_text(family = my_font),
                  rect = element_rect(fill = my_background),
                  plot.background = element_rect(fill = my_background, color = NA),
                  plot.title = element_text(face = 'bold', size = 18, colour = my_textcolour),
                  plot.subtitle = element_text(size = 15, colour = my_textcolour),
                  plot.caption = element_text(size = 11, colour = my_textcolour),
                  panel.background = element_rect(fill = my_background, color = NA),
                  panel.border = element_blank(),
                  panel.grid.major.y = element_blank(),
                  panel.grid.major.x = element_blank(),
                  panel.grid.minor.x = element_blank(),
                  axis.title.y = element_text(face = 'bold', size = 13, colour= my_axiscolour),
                  axis.title.x = element_blank(),
                  axis.text.y = element_text(size = 12, colour= my_axiscolour),
                  axis.text.x = element_blank(),
                  axis.ticks.y = element_blank(),
                  axis.ticks.x = element_line(colour = "black", size = 0.5),
                  axis.line.x = element_blank(),
                  legend.position="none")
theme_set(theme_light() + my_theme)

# Plot chart: grey bars with the "name" category highlighted, plus an arrow
# and a two-part annotation reporting its mean password strength.
passwords_filtered %>%
  mutate(highlight_flag = ifelse(category == "name",T,F)) %>%
  ggplot(aes(category, n, fill = highlight_flag)) +
  scale_fill_manual(values = c('#8A8D91', '#734B5E')) +
  geom_col() +
  geom_bar_text(place = "right", contrast = TRUE) +
  coord_flip() +
  geom_curve(aes(x = 7.5, y = 167, xend = 9.3, yend = 181),
             arrow = arrow(length = unit(0.3, "cm")), size = 0.4,
             color = "grey20", curvature = 0.3) +
  annotate("text", x = 7.5, y = 85, size = 4, hjust = 0, color = my_textcolour, family = my_font,
           label = "The average password strength is ") +
  # Use the computed mean (previously hard-coded as 7.2, leaving
  # `passwords_mean` unused) so the annotation stays in sync with the data.
  annotate("text", x = 6.5, y = 85, size = 10, hjust = -2, color = '#734B5E', family = my_font,
           label = round(passwords_mean$mean, 1)) +
  labs(title = "People Really Like Using Names as Passwords!",
       subtitle = "Based on number of passwords per category",
       caption = "\nVisualisation: @JaredBraggins | Data Source: Information is Beautiful")

ggsave('Passwords.png', type = "cairo")
|
b95f550304e5a3b26189a70a0c6b188e97b1276e
|
62715380a7dddcce8b5bc2c84c6c5d4a32b10d4f
|
/Baidu_Geocoding.R
|
bba8b5f60130e95da51f7c8b9f110e5e8d8fd3c5
|
[] |
no_license
|
iandmozart/CN_Addr_Geocoding
|
a90161c5f444699d0bdedda3c177cb480da4c2be
|
d1f9af8d7f2156dd70d1017ea610356de4c1a8f9
|
refs/heads/master
| 2020-08-25T02:27:42.289295
| 2013-08-21T12:59:25
| 2013-08-21T12:59:25
| 216,948,535
| 1
| 0
| null | 2019-10-23T02:14:24
| 2019-10-23T02:14:24
| null |
UTF-8
|
R
| false
| false
| 4,893
|
r
|
Baidu_Geocoding.R
|
#Baidu Maps Geocoding Service documentation: http://developer.baidu.com/map/webservice-geocoding.htm
#Please read through the Google_Geocoding.R firstly,
#Baidu GeoCode sample: http://api.map.baidu.com/geocoder?address=地址&output=输出格式类型&key=用户密钥&city=城市名
#Baidu ReverseGeoCode sample: http://api.map.baidu.com/geocoder?location=纬度,经度&output=输出格式类型&key=用户密钥
#Baidu user name: 蝶舞凄扬
#Baidu Password: xxxx=xxxx or xxxxxxxx
#Baidu Key: 9ce566a6dd950173e625cd335c8b23a4
#Another Baidu Key: 37492c0ee6f924cb5e934fa08c6b1676
setwd("F:/Khaki/CVD&RespiratoryDisease/track/baidu")
# install.packages("RJSONIO")
# install.packages("RCurl")
library(RJSONIO)
library(RCurl)
#This following function is a multiple gsub() originally in library(qdap). I copied it from
#http://stackoverflow.com/questions/15253954/replace-multiple-arguments-with-gsub
# Vectorised gsub(): apply several pattern -> replacement substitutions to x,
# in order. Stands in for qdap::mgsub; adapted from
# http://stackoverflow.com/questions/15253954/replace-multiple-arguments-with-gsub
#
# Args:
#   pattern:     character vector of regular expressions.
#   replacement: character vector of replacements, same length as `pattern`.
#   x:           character vector to transform.
#   ...:         passed on to gsub() (e.g. fixed = TRUE).
#
# Returns x with all substitutions applied. Substitutions are sequential, so
# a later pattern can match text produced by an earlier replacement.
mgsub <- function(pattern, replacement, x, ...) {
  if (length(pattern) != length(replacement)) {
    stop("pattern and replacement do not have the same length.")
  }
  result <- x
  # seq_along() instead of 1:length(pattern): with zero-length input the
  # original 1:0 sequence indexed pattern[NA] and errored; now the loop body
  # simply never runs and x is returned unchanged.
  for (i in seq_along(pattern)) {
    result <- gsub(pattern[i], replacement[i], result, ...)
  }
  result
}
#http://api.map.baidu.com/geocoder?address=地址&output=输出格式类型&key=用户密钥&city=城市名
# key <- 9ce566a6dd950173e625cd335c8b23a4
# Build (and percent-encode) a Baidu geocoder request URL.
#
# Args:
#   address:     address string to geocode.
#   return.call: response format requested from the API (default "json").
#   key:         Baidu API key.
#   city:        city used to scope the lookup (defaults to Beijing).
#
# Returns the encoded request URL as a single string.
construct.geocode.url <- function(address, return.call = "json",
                                  key = "9ce566a6dd950173e625cd335c8b23a4",
                                  city = "北京市") {
  query <- paste0("http://api.map.baidu.com/geocoder",
                  "?address=", address,
                  "&output=", return.call,
                  "&key=", key,
                  "&city=", city)
  URLencode(query)
}
# Geocode a single address via the Baidu geocoder API.
#
# Args:
#   address: address string to geocode (optionally pre-cleaned with mgsub()).
#   verbose: if TRUE, echo the address being queried.
#
# Returns c(lat, lng) as character (names carried over from the JSON fields),
# or c(NA, NA) when the API does not answer with status "OK".
# Requires RJSONIO (fromJSON) to be loaded, as done at the top of this script.
bGeoCode <- function(address, verbose = FALSE) {
  if (verbose) cat(address, "\n")  # was "/n", which printed a literal "/n" instead of a newline
  connectStr <- construct.geocode.url(address)
  con <- url(connectStr)
  # Close the connection even if the download or JSON parse fails; the
  # original only closed it on the success path.
  on.exit(close(con), add = TRUE)
  data.json <- fromJSON(paste(readLines(con), collapse = ""))
  # Flatten the nested JSON list into a named character vector.
  data.json <- unlist(data.json)
  if (data.json["status"] == "OK") {
    # Re-encode coordinates from UTF-8 to GB2312 to match the encoding used
    # by the rest of this pipeline.
    lng <- iconv(data.json["result.location.lng"], "UTF-8", "gb2312")
    lat <- iconv(data.json["result.location.lat"], "UTF-8", "gb2312")
    c(lat, lng)
  } else {
    c(NA, NA)
  }
}
# Response schema (translated from the Baidu API docs):
# {status: 'string status constant', one of:
#   //OK               success
#   INVILID_KEY        invalid API key (misspelling is Baidu's own)
#   INVALID_PARAMETERS invalid parameters; returned on argument errors.
#   result: {
#     location: {
#       lat: latitude (numeric),
#       lng: longitude (numeric)
#     },
#     precise: whether the match is exact (1 = exact lookup, 0 = inexact),
#     confidence: confidence score,
#     level: address granularity level
#   },
# }
#Reverse Geocoding
# http://api.map.baidu.com/geocoder?location=纬度,经度&output=输出格式类型&key=用户密钥
# Reverse-geocode a latitude/longitude pair via the Baidu geocoder API.
#
# Args:
#   latlng: vector c(latitude, longitude); collapsed to "lat,lng" for the
#           request URL.
#
# Returns the formatted address re-encoded to GB2312, or NA when the API does
# not answer with status "OK".
# Requires RJSONIO (fromJSON) to be loaded, as done at the top of this script
# (consistent with bGeoCode; the per-call library("RJSONIO") was redundant).
bReverseGeoCode <- function(latlng) {
  # Collapse to "lat,lng" and encode any embedded spaces for the URL.
  latlngStr <- gsub(' ', '%20', paste(latlng, collapse = ","))
  connectStr <- paste("http://api.map.baidu.com/geocoder?location=", latlngStr,
                      "&output=json&key=9ce566a6dd950173e625cd335c8b23a4",
                      sep = "")
  con <- url(connectStr)
  # Close the connection even if the download or JSON parse fails; the
  # original only closed it on the success path.
  on.exit(close(con), add = TRUE)
  data.json <- fromJSON(paste(readLines(con), collapse = ""))
  # Flatten the received JSON into a named character vector.
  data.json <- unlist(data.json)
  if (data.json["status"] == "OK") {
    iconv(data.json["result.formatted_address"], "UTF-8", "gb2312")
  } else {
    NA
  }
}
# ---- Batch forward geocoding ----
# Address book to geocode, and the list of street-office suffixes to strip
# from each address before querying Baidu.
book <- read.csv("db2_Addr_Final.csv")
# book <- book[11000,]
jiedao <- read.csv("Jiedao_ToDel.csv")
# Row window processed by this run (the full file is split across runs; see
# the comment below for the planned windows).
Begin <- 50001
End <- 100000
#1:5000;50001:100000;100001:dim(book)[1]
head(book)
head(jiedao)
# Accumulator for key/lat/lng triples; the seed row of NAs is dropped after
# the loop via coor[-1,].
# NOTE(review): growing `coor` with rbind() inside the loop is O(n^2); a
# preallocated matrix (or a list + do.call(rbind, ...)) would be much faster
# for a 50k-row window.
coor <- rep(NA,3)
for (i in Begin:End) {
# NOTE(review): library(RCurl) is re-run on every iteration; loading it once
# before the loop would suffice.
library(RCurl)
ad <- book$Baddress[i]
blank <- rep("",dim(jiedao)[1])
# Strip every suffix listed in jiedao from the address before geocoding.
ad <- mgsub(jiedao[,1],blank,ad)
# (disabled alternatives: strip one literal suffix, or use the Google coder)
# ad <- as.character(gsub("街道办事处","",ad))
# result<- gGeoCode(iconv(ad,"gb2312","UTF-8"))
result<- bGeoCode(ad)
# Prepend the record key so results can be merged back onto `book` later.
result <- c(book[i,"key"],result)
coor <- rbind(coor,result)
}
# Drop the NA seed row, label the columns, and persist this window's results.
coor <- as.data.frame(coor[-1,])
colnames(coor) <- c("key","result.location.lat","result.location.lng")
rownames(coor) <- 1:dim(coor)[1]
write.csv(coor,"coor2.csv")
#####################################################################
#####################################################################
# ---- Batch reverse geocoding of the coordinates obtained above ----
# Same accumulate-with-rbind pattern (and the same performance caveat).
addr <- rep(NA,2)
for (i in Begin:End) {
library(RCurl)
# `I` indexes into `coor`, which starts at row 1 for this window.
I <- i - Begin + 1
latlng <- paste(coor[I,"result.location.lat"],coor[I,"result.location.lng"],sep=",")
result <- bReverseGeoCode(latlng)
result <- c(coor[I,1],result)
addr <- rbind(addr,result)
}
# Drop the NA seed row, label the columns, and persist.
addr <- as.data.frame(addr[-1,])
colnames(addr) <- c("key","result.formatted_address")
rownames(addr) <- 1:dim(addr)[1]
write.csv(addr,"addr2.csv")
#####################################################################
#####################################################################
# Join the reverse-geocoded addresses and coordinates back onto the address
# book by record key and write the combined output.
continue <- merge(book,addr,by="key",incomparables = NA)
final <- merge(continue,coor,by="key",incomparables=NA)
write.csv(final,"final.csv")
|
bbb13aa4ac848325bb5ce9ae0d2167344ba3e380
|
ef3315aa25c746c84a6357bd46fb3a67fc0b7b41
|
/man/zbind.Rd
|
30e9950e1445db5fbf361da71b97b7da8caf6add
|
[] |
no_license
|
jweile/yogitools
|
4bbdba6f2ade9a2ef38c1fac14be9c883e159e39
|
35265272206d2e3daed1b8b0cc11da4c17166d49
|
refs/heads/master
| 2023-05-10T21:36:48.707325
| 2023-05-03T21:29:22
| 2023-05-03T21:29:22
| 120,494,457
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 372
|
rd
|
zbind.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/yogitools.R
\name{zbind}
\alias{zbind}
\title{3D-bind matrices}
\usage{
zbind(...)
}
\arguments{
\item{...}{Any number of matrices of the same size}
}
\value{
A 3D array of the bound matrices
}
\description{
Binds matrices of the same size together into a 3D array, analogously
to cbind and rbind.
}
|
bf6530900bc44fdcc2bcfa49c2216f92378e96ac
|
2161e2c9b1463f3f0b8d27a9447c136e5e08d2b9
|
/R/barplotRichness.R
|
6b5e522646ac4be0a50ffae480dff333b81645b4
|
[] |
no_license
|
NCRN/NCRNbirds
|
14a258e8182849bb0434eb4368fa291105d56a7c
|
5a512b736d674d9308c27667e7a99b142aebfcef
|
refs/heads/master
| 2023-08-16T13:00:26.367713
| 2023-07-11T15:54:50
| 2023-07-11T15:54:50
| 32,335,489
| 5
| 12
| null | 2023-08-17T15:09:47
| 2015-03-16T15:44:44
|
R
|
UTF-8
|
R
| false
| false
| 6,993
|
r
|
barplotRichness.R
|
#' @include NCRNbirds_Class_def.R
#'
#' @title barplotRichness
#'
#' @importFrom dplyr case_when mutate
#' @importFrom ggplot2 aes coord_flip element_blank element_rect element_text geom_bar ggplot labs scale_fill_manual scale_x_discrete theme
#' @importFrom magrittr %>%
#' @importFrom RColorBrewer brewer.pal
#' @importFrom rlang !! sym
#'
#' @description Makes a barplot of species richness
#'
#' @param object An \code{NCRNbirds} object a \code{list} of such objects, or a \code{data.frame} like that produced by \code{\link{birdRichness}}.
#' @param byPark Defaults to \code{FALSE}. If \code{TRUE} the barplot will have one bar for each park.
#' @param byYear Defaults to \code{FALSE}. If \code{TRUE} the barplot will have one bar for each year.
#' @param byPoint Defaults to \code{FALSE}. If \code{TRUE} the barplot will have one bar for each point.
#' @param byGuild Defaults to \code{FALSE}. If \code{TRUE} the barplot will be stacked by Guild.
#' @param includeNA Defaults to \code{TRUE}. If \code{byGuild = T} and \code{includeNA=F} then species which have a response guild of \code{NA}
#' will not be included in the bar chart.
#' @param colors One or more colors for the bars.
#' @param palette Color pallete for the background of the graph. Defaults to "BuGn" (blue green) but will accept any RColorBrewer palette
#' @param plot_title Optional, A title for the plot.
#' @param scale If \code{TRUE} will include a scale to the right of the barplot.
#' @param labels If \code{TRUE} will label species richness values on the barplot. Labels are never shown when plotting by guild.
#' @param output Either "total" (the default) or "list". Only used when \code{object} is a \code{list}
#' @param plot Logical. Return plot \code{TRUE} (default) or data.frame \code{FALSE}.
#' @param ... Additional arguments passed to \code{\link{birdRichness}}
#' @details This function produces a barplot of species richness by Park, Year or Point. It does this by using the
#' output of the \code{\link{birdRichness}} function. The data is then passed on to ggplot2 for graphing. The user can choose to have \code{byPark=T},
#' \code{byYear=T} or \code{byPoint=T}, but should only pick one of the three. These can be combined with a guild category using \code{byGuild}. The
#' \code{includeNA} argument indicates if bird species that are not categorized for a particular guild should be included.
#'
#' Colors:
#'
#' Colors of the bars are controlled by either the \code{colors} or \code{palette} arguments. If the barplot is not by guild then the color indicated by
#' \code{colors} will be used. If \code{byGuild=T} then by default the \code{palette} argument will be used. However, the \code{palette} argument
#' can be overridden simply by giving \code{colors} a vector of colors with one color for each guild category shown in the barplot. If
#' \code{includeNA=T} then the first color will correspond to the NA value.
#'
#'
#' @export
# S4 generic for barplotRichness(); dispatch is on the class of `object`
# (NCRNbirds, list, or data.frame -- see the setMethod() calls that follow).
# NOTE(review): defaults use the T/F shorthands, which can be shadowed by
# user variables; TRUE/FALSE would be safer.
setGeneric(name="barplotRichness",function(object, byPark=F, byYear=F, byPoint=F, byGuild=F, includeNA=T, colors="dark green", palette="BuGn",
plot_title=NA, scale=T, labels=T,output="total", plot=TRUE, ...){standardGeneric("barplotRichness")}, signature="object")
# list method: for a list of NCRNbirds objects, either pool richness across
# all of them (output = "total") into a single barplot, or map over the list
# (output = "list") to produce one plot per object.
setMethod(f="barplotRichness", signature=c(object="list"),
function(object, byPark, byYear, byPoint, byGuild, includeNA, colors, palette, plot_title, scale, labels, output, plot, ...) {
switch(output,
# Pooled richness across the whole list; drawing is delegated to the
# data.frame method.
total={
barplotdata=birdRichness(object,byPark=byPark, byYear=byYear, byPoint=byPoint, byGuild = byGuild, output="total",...)
return(barplotRichness(object=barplotdata, byPark=byPark, byYear=byYear, byPoint=byPoint,byGuild = byGuild, includeNA = includeNA,
colors=colors, palette=palette, plot_title=plot_title, scale=scale, labels=labels, plot = plot))
},
# One barplot per element (purrr::map), recursing into the NCRNbirds method.
list={
return(map(object,barplotRichness,byPark=byPark, byYear=byYear, byPoint=byPoint, byGuild = byGuild, includeNA = includeNA, colors=colors,
palette=palette, plot_title=plot_title, scale=scale, labels=labels, plot=plot, ...))
}
)
})
# NCRNbirds method: compute richness for a single park object with
# birdRichness(), then delegate the drawing to the data.frame method.
setMethod(f="barplotRichness", signature=c(object="NCRNbirds"),
function(object, byPark, byYear, byPoint, byGuild, includeNA, colors, palette, plot_title, scale, labels, plot, ...){
barplotdata<-birdRichness(object, byPark=byPark, byYear=byYear, byPoint=byPoint, byGuild=byGuild, ...)
barplotRichness(object=barplotdata, byPark=byPark, byYear=byYear, byPoint=byPoint, byGuild = byGuild, includeNA = includeNA, colors=colors,
palette=palette, plot_title = plot_title, scale=scale, labels=labels, plot=plot)
})
# data.frame method: does the actual drawing. Expects the summary produced by
# birdRichness() (a Richness column plus ParkName/Year/Point_Name and
# optionally Guild). Returns the prepared data when plot = FALSE, otherwise a
# ggplot object.
# NOTE(review): the `scale` argument is accepted but never used in this method.
setMethod(f="barplotRichness", signature=c(object="data.frame"),
function(object, byPark, byYear, byPoint, byGuild, includeNA, colors, palette, plot_title, scale, labels, plot){
# Guild levels observed in the data; NA sentinel when not plotting by guild.
guildLevels<-if(byGuild) unique(object$Guild) else NA
# Coerce the relevant grouping column to a factor; when plotting by guild,
# optionally drop unassigned species or relabel them "No Guild" (which
# becomes the first factor level).
object<-object %>%
{if(byPark) mutate(., ParkName=factor(ParkName)) else .} %>%
{if(byYear) mutate(., Year=factor(Year)) else .} %>%
{if(byPoint) mutate(., Point_Name=factor(Point_Name)) else .} %>%
{if(!includeNA & byGuild) filter(.,!is.na( Guild ) ) else .} %>%
{if(includeNA & byGuild) mutate(.,Guild=ifelse(is.na(Guild), "No Guild", Guild)) else .} %>%
{if(byGuild) mutate(., Guild=factor(Guild, levels=c("No Guild", guildLevels[!is.na(guildLevels)]))) else . }
# Bar colours: one brewer colour per guild (or a single colour), unless the
# caller supplied exactly the right number of colours via `colors`.
richnessColors<-brewer.pal( if(!byGuild) 1 else length(guildLevels), palette)
richnessColors<-if(length(colors)==length(richnessColors)) colors else richnessColors
if(!plot) return(object)
# Which column becomes the x axis depends on the by* flag that is set.
xcol<-sym(case_when(byPark~"ParkName", byYear~"Year", byPoint~"Point_Name"))
ggplot(data=object,aes(x=!!xcol, y=Richness, fill= if(byGuild) Guild else richnessColors)) +
# Stacked bars when colouring by guild; a single-fill bar otherwise.
{if(byGuild) geom_bar(position="stack", stat="identity") } +
{if(!byGuild) geom_bar(stat="identity", fill=richnessColors)}+
theme(panel.background = element_rect(fill="transparent"),
panel.border= element_rect(color="black", fill=NA),
axis.text.x = element_text(size=8),
axis.text.y = element_text(size=8),
axis.title.x = element_text(size=12),
axis.title.y = element_text(size=12)) +
theme(legend.background = element_rect(fill="white",
size=0.5, linetype="solid",
colour ="black"),legend.position=c(0.8,0.90)) +
# NOTE(review): levels() of the one-column data.frame returned by select()
# is NULL, so this limits= call likely has no effect; perhaps
# levels(object[[as_string(xcol)]]) was intended -- verify before changing.
scale_x_discrete(limits=rev(levels(object %>% select(!!xcol)))) +
scale_fill_manual(values=richnessColors) +
labs(x=element_blank(),y="Species Richness") +
# Optional white value labels inside the bars (suppressed for guild plots).
{if(labels & !byGuild) geom_text(aes(x=!!xcol, y=Richness, label=Richness),
hjust = 1.3, color="white", size = 3.5, fontface="bold",inherit.aes = TRUE)} +
{if(!is.na(plot_title)) ggtitle(plot_title)} +
# Year plots are flipped to horizontal bars.
{if(byYear) coord_flip()} +
{if(byGuild) labs(fill=element_blank())}
})
|
bcf441d6ddb9f570d134c08cee3aa8658d38fa03
|
d8873812872be794f291a120940ccaa298238b6c
|
/tests/testthat/test-schema.R
|
e89963ba32120e2f6929c7a0399583a390428788
|
[] |
no_license
|
cran/mdbr
|
6dcdb582d80a5210535a9b7bd9bc523512af3c3e
|
3afb6a8332a514b2bd5e79864fdf49422e0b54ec
|
refs/heads/master
| 2023-01-06T06:27:15.750095
| 2020-11-09T08:30:02
| 2020-11-09T08:30:02
| 311,429,395
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 488
|
r
|
test-schema.R
|
# Tests for mdb_schema(): return class, the condense option, and argument
# validation. All tests use the bundled example database and are skipped on
# CRAN.
library(testthat)
library(mdbr)

test_that("schema returns col spec", {
  skip_on_cran()
  spec <- mdb_schema(mdb_example(), "Flights")
  expect_s3_class(spec, "col_spec")
})

test_that("schema can be condensed", {
  skip_on_cran()
  full_spec <- mdb_schema(mdb_example(), "Flights")
  condensed_spec <- mdb_schema(mdb_example(), "Flights", condense = TRUE)
  expect_gt(length(full_spec$cols), length(condensed_spec$cols))
})

test_that("schema errors without table", {
  skip_on_cran()
  expect_error(mdb_schema(mdb_example()))
})
|
0a6e7fdbe341f831e357d45f9104c5139001328c
|
660b7765755e497ddc066f808dc7992fc905fc89
|
/script/train.R
|
74c7bd06ab0291a4e2aed8b9089e106ec357080a
|
[] |
no_license
|
pykler/predmachlearn-006
|
797070f2eae4db2146c90b3b17d495f938c372d0
|
36c60cfb18c49e5c9a528e3dae0052f8dc53d092
|
refs/heads/master
| 2021-01-22T17:22:05.614216
| 2014-10-20T14:15:22
| 2014-10-20T14:25:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 384
|
r
|
train.R
|
#!/usr/bin/env Rscript --vanilla
# Train one caret model (algorithm named on the command line) on the
# no-aggregate-features variant of the PML training data.

cli_args <- commandArgs(TRUE)
algorithm <- cli_args[1]
if (is.na(algorithm)) {
  stop('No training algorithm supplied')
}

library(caret)

# Project layout: the .Rdata file defines `pml_train` (and helpers expect
# `project_dir` to be set).
project_root <- '~/projects/predmachlearn/project'
load(paste0(project_root, '/data/pml_training_noagg.Rdata'))
project_dir <- project_root

print('NoAgg approach')
print(paste('Training', cli_args))
pml.train.any(pml_train, method = algorithm, "_noagg")
|
b4da3869b1ead2dd9195c34d298d6be14c6e3bcd
|
91437aa75903254a42f677a6c61e0b16975789ac
|
/plot4.R
|
8adacdfddcb381842732396162d11a985cdc12a7
|
[] |
no_license
|
pgirish/ExData_Plotting1
|
56c166c8c9b4ebc80877ba8bdaadd24a0a4671d5
|
de7cc089fc82fd819339423ec3fac6f1f2b2b8c3
|
refs/heads/master
| 2021-01-16T20:44:27.431683
| 2016-02-17T23:28:10
| 2016-02-17T23:28:10
| 51,870,353
| 0
| 0
| null | 2016-02-16T21:08:39
| 2016-02-16T21:08:37
| null |
UTF-8
|
R
| false
| false
| 1,575
|
r
|
plot4.R
|
# plot4.R -- four-panel summary of household power consumption for
# 2007-02-01 and 2007-02-02, written to plot4.png.

# Load the full data set, then keep only the two target days.
power_data <- read.table("household_power_consumption.txt",
                         header = TRUE, sep = ";", stringsAsFactors = FALSE)
two_days <- power_data[power_data$Date %in% c("1/2/2007", "2/2/2007"), ]

# Combine the date and time columns into POSIX timestamps ('d/m/Y H:M:S').
datetime <- strptime(paste(two_days$Date, two_days$Time, sep = " "),
                     "%d/%m/%Y %H:%M:%S")

# Coerce the measurement columns of interest to numeric.
globalActivePower <- as.numeric(two_days$Global_active_power)
subMetering1 <- as.numeric(two_days$Sub_metering_1)
subMetering2 <- as.numeric(two_days$Sub_metering_2)
subMetering3 <- as.numeric(two_days$Sub_metering_3)
voltage <- as.numeric(two_days$Voltage)
globalReactivePower <- as.numeric(two_days$Global_reactive_power)

# Draw the four panels column-wise into a 480x480 PNG.
png("plot4.png", width = 480, height = 480)
par(mfcol = c(2, 2))
plot(datetime, globalActivePower, type = "l", xlab = "",
     ylab = "Global Active Power")
plot(datetime, subMetering1, type = "l", ylab = "Energy Sub metering", xlab = "")
lines(datetime, subMetering2, type = "l", col = "red")
lines(datetime, subMetering3, type = "l", col = "blue")
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       lty = 1, lwd = 1.5, col = c("black", "red", "blue"))
plot(datetime, voltage, type = "l", xlab = "datetime", ylab = "Voltage")
plot(datetime, globalReactivePower, type = "l", xlab = "datetime",
     ylab = "Global_reactive_power")
dev.off()
|
f07f8f5671bdfcf1d97829d00b76f2cf9cf6b3a9
|
2b471d85b488a05aff1c19dd2ea8870daa11ed9a
|
/R/package-sparsepp.R
|
8a369e9f2e935a66be3dc48e8a7a93de09956166
|
[
"BSD-3-Clause"
] |
permissive
|
dselivanov/r-sparsepp
|
8501ecc6c6cdf60a5de8f124503b0d34d750dd25
|
fe71f4f99eff216b4115fd554b7eb407bd59c4c0
|
refs/heads/master
| 2021-09-23T12:04:33.371257
| 2018-09-22T12:35:51
| 2018-09-22T12:35:51
| 77,609,339
| 8
| 3
| null | 2018-09-22T12:35:52
| 2016-12-29T12:03:32
|
R
|
UTF-8
|
R
| false
| false
| 1,746
|
r
|
package-sparsepp.R
|
#' sparsepp
#'
#' \code{sparsepp} provides bindings to the
#' \href{https://github.com/greg7mdp/sparsepp}{sparsepp} - fast, memory efficient hash map for C++.
#' \code{sparsepp} is an open source C++ library derived from Google's
#' excellent sparsehash implementation, but considerably outperforming it - \url{https://github.com/greg7mdp/sparsepp/blob/master/bench.md}.
#' It aims to achieve the following objectives:
#' \itemize{
#' \item A drop-in alternative for unordered_map and unordered_set.
#' \item Extremely low memory usage (typically about one byte overhead per entry).
#' \item Very efficient, typically faster than your compiler's unordered map/set or Boost's.
#' \item C++11 support (if supported by compiler).
#' \item Single header implementation - just copy sparsepp.h to your project and include it.
#' \item Tested on Windows (vs2010-2015, g++), linux (g++, clang++) and MacOS (clang++).
#' }
#' @examples
#' \dontrun{
#' library(Rcpp)
#' code = "
#' // [[Rcpp::plugins(cpp11)]]
#' #include <Rcpp.h>
#' using namespace std;
#' using namespace Rcpp;
#' // drop-in replacement for unordered_map
#' //#include <unordered_map>
#' #include <sparsepp/spp.h>
#' //[[Rcpp::depends(sparsepp)]]
#' using spp::sparse_hash_map;
#' // @export
#' // [[Rcpp::export]]
#' IntegerVector word_count(CharacterVector v) {
#' //unordered_map<string, int> smap;
#' sparse_hash_map<string, int> smap;
#' for(auto x: v) {
#' smap[as<string>(x)] ++;
#' }
#' IntegerVector res(smap.size());
#' int i = 0;
#' for(auto s:smap) {
#' res[i]=s.second;
#' i++;
#' }
#' return(res);
#' }"
#' f = tempfile(, fileext = ".cpp")
#' writeLines(code, f)
#' sourceCpp(f)
#' unlink(f)
#' word_count(sample(letters, 100, T))
#'}
"_PACKAGE"
|
1708c0f1edd947fde7e30145a81c9c4e17777b6d
|
a03da6a1edc7b1a1cf4b0829f5ece771f584df95
|
/R/EmpRule.R
|
b4d2b8a6312d98daf34f97f3b45a932bb9e53a3b
|
[] |
no_license
|
homerhanumat/tigerstats
|
4fbcc3609f46f6046a033d17165f7838dbd77e1a
|
17067f7e5ec6b6cf712b628a4dbf5131c691ae22
|
refs/heads/master
| 2021-07-06T06:24:07.716196
| 2020-09-22T15:24:01
| 2020-09-22T15:24:01
| 15,921,287
| 14
| 7
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,932
|
r
|
EmpRule.R
|
#' @title Empirical Rule
#' @description An app to investigate how the Empirical Rule applies to symmetric data and skewed data. The user can select
#' if they want to view a histogram of symmetric data or skewed data. Vertical bars are also plotted to signify
#' one, two, and three standard deviations from the mean. Summary data is output to the console giving the proportion of the histogram that falls within one, two,
#' and three standard deviations of the mean.
#'
#' @rdname EmpRule
#' @usage EmpRule()
#' @return Graphical and numerical output
#' @export
#' @author Rebekah Robinson \email{rebekah_robinson@@georgetowncollege.edu}
#' @examples
#' \dontrun{
#' if (require(manipulate)) EmpRule()
#' }
EmpRule <- function ()
{
    ## manipulate() is only available inside RStudio; fail gracefully elsewhere.
    if (!("manipulate" %in% installed.packages())) {
        return(cat(paste0("You must be on R Studio with package manipulate installed\n",
            "in order to run this function.")))
    }
    ## Random values from a two-parameter Pareto(alpha, theta) distribution,
    ## sampled by inverting the CDF.  (Defined once here; the original code
    ## re-defined the same function a second time inside the "Super-Skewy"
    ## branch.)
    rpareto <- function(n, alpha, theta) {
        theta * ((1 - runif(n))^(-1/alpha) - 1)
    }
    ## Density of the Pareto(alpha, theta) distribution.
    dpareto <- function(x, alpha, theta) {
        alpha * theta^alpha/(x + theta)^(alpha + 1)
    }
    ## Proportion of `data` strictly within k sample SDs of the sample mean,
    ## for k = 1, 2, 3.  Replaces three copy-pasted element-wise tally loops
    ## with vectorized sums; the strict inequalities match the original loops.
    sdProps <- function(data, xbar, s) {
        n <- length(data)
        vapply(1:3, function(k) {
            sum(data > xbar - k * s & data < xbar + k * s)/n
        }, numeric(1))
    }
    ## Draw the sample mean (dot) and the +/- 1, 2, 3 SD reference lines.
    drawBars <- function(xbar, s) {
        abline(v = xbar - s, col = "red")
        abline(v = xbar + s, col = "red")
        points(xbar, 0, pch = 20)
        abline(v = xbar - 2 * s, col = "blue")
        abline(v = xbar + 2 * s, col = "blue")
        abline(v = xbar - 3 * s, col = "green")
        abline(v = xbar + 3 * s, col = "green")
    }
    ## One-row results matrix printed to the console on every refresh.
    results = matrix(0, 1, 3, dimnames = list("Proportion",
        c("One SD", "Two SD", "Three SD")))
    manipulate(n = slider(5, 1000, initial = 50, label = "Sample Size n"),
        type = picker("Symmetric", "Skewed", "Super-Skewy", label = "Target Data Shape"),
        showpop = checkbox(FALSE, "Show Population Density Curve"),
        {
            if (type == "Symmetric") {
                ## Normal(20, 2) target.
                mu = 20
                stdev = 2
                data = rnorm(n, mean = mu, sd = stdev)
                xbar = mean(data)
                s = sd(data)
                hist(data, freq = FALSE, col = "lightblue",
                  xlim = c(mu - 5 * stdev, mu + 5 * stdev), ylim = c(0, 0.35),
                  main = "Empirical Rule with Target Symmetric")
                drawBars(xbar, s)
                if (showpop) curve(dnorm(x, mean = mu, sd = stdev),
                  col = "red", add = TRUE, n = 1001)
            }
            if (type == "Skewed") {
                ## Beta(1.5, 5) target (moderately right-skewed).
                alpha = 1.5
                beta = 5
                data = rbeta(n, alpha, beta)
                mu = 1/(1 + (beta/alpha))
                stdev = sqrt((alpha * beta)/((alpha + beta)^2 *
                  (alpha + beta + 1)))
                xbar = mean(data)
                s = sd(data)
                ## finer binning once the sample gets large
                breaks.skew = ifelse(n < 200, "Sturges", "Scott")
                hist(data, freq = FALSE, col = "lightblue", breaks = breaks.skew,
                  xlim = c(mu - 3 * stdev, mu + 5 * stdev), ylim = c(0, 5),
                  main = "Empirical Rule with Target Skewed")
                if (showpop) curve(dbeta(x, shape1 = alpha, shape2 = beta),
                  xlim = c(0, 1), col = "red", add = TRUE, n = 1001)
                drawBars(xbar, s)
            }
            if (type == "Super-Skewy") {
                ## Pareto(3, 100) target (heavy right tail).
                p.alpha = 3
                p.theta <- 100
                data = rpareto(n, p.alpha, p.theta)
                mean.par = p.theta/(p.alpha - 1)
                sd.par = mean.par * sqrt(p.alpha/(p.alpha - 2))
                xbar = mean(data)
                s = sd(data)
                xmin = mean.par - 3 * sd.par
                xmax = mean.par + 10 * sd.par
                ymax = 1.3 * p.alpha/p.theta
                hist(data, freq = FALSE, col = "lightblue", breaks = "FD",
                  xlim = c(xmin, xmax), ylim = c(0, ymax),
                  main = "Empirical Rule with Target Super-Skewy")
                if (showpop) curve(dpareto(x, alpha = p.alpha, theta = p.theta),
                  xlim = c(0, xmax), col = "red", add = TRUE, n = 1001)
                drawBars(xbar, s)
            }
            ## Exactly one branch ran above, so data/xbar/s are defined here.
            results[1, ] = round(sdProps(data, xbar, s), 4)
            print(results)
        })
}
# if(getRversion() >= "2.15.1") utils::globalVariables(c("type","showpop"))
|
0d1db9167b98e0eb07abe07f1bad4a4937f7b4fb
|
bb5cc42782e2751e7df4c1f8330402df8611bf5c
|
/scripts/analyze_discussion.R
|
ee3515b2e7c7b1d154561f438964b0576a29702a
|
[] |
no_license
|
femeunier/LianaRemoval
|
5365ef7d9e050ae74eff6211f014f732910dc01b
|
8a818ab8618e05bc4bf837525f8a18e1c5c385c7
|
refs/heads/main
| 2023-01-28T05:33:26.582573
| 2020-12-08T10:04:56
| 2020-12-08T10:04:56
| 319,260,643
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,977
|
r
|
analyze_discussion.R
|
# NOTE(review): clearing the workspace at script start is kept for
# compatibility with the original batch workflow.
rm(list = ls())

library(ggplot2)
library(dplyr)
library(albedo)
library(reshape2)
library(tidyr)

# Post-process paired control/removal ensemble runs: for every ensemble
# member that produced both scenarios, pull a handful of monthly outputs
# and stack them into one long data frame, saved as an RDS for plotting.

# load("/home/femeunier/Documents/projects/LianaRemoval/outputs/removal.RData")
# control <- datum
# removal <- datum

N = 100                      # number of ensemble members
init = 99000011369           # run-id offset; member i lives in directory init + i
directory <- "/data/gent/vo/000/gvo00074/pecan/output/other_runs/removal/out"

year.min = 2011
year.max = 2021

Vars <- c("nep","gpp","npp","het.resp","plant.resp")

# Extract the columns of interest from one loaded `datum` object.
# Soil-water layers 15 (top) and 13 (sub); tree seed biomass is summed
# over PFTs 2, 3 and 4 at size class 12.
extract.vars <- function(datum, i) {
  data.frame(time = datum$year + (datum$month - 1)/12,
             run = i,
             topsoil.wc = datum$emean$soil.water[,15],
             subsoil.wc = datum$emean$soil.water[,13],
             par.gnd = datum$emean$par.gnd,
             tree.seeds = apply(datum$szpft$bseeds[,12,c(2,3,4)],1,sum))
}

# Accumulate per-run results in a preallocated list and bind once at the
# end; the original grew a data frame with bind_rows() inside the loop,
# which copies the whole accumulator on every iteration.
op.list <- vector("list", N)

for (i in seq_len(N)){
  print(i)
  local.dir <- file.path(directory,paste0(init+i))
  if (file.exists(file.path(local.dir,"control.RData")) & file.exists(file.path(local.dir,"removal.RData"))){
    load(file.path(local.dir,"control.RData"))   # loads `datum`
    control <- datum
    load(file.path(local.dir,"removal.RData"))   # overwrites `datum`
    removal <- datum

    op.list[[i]] <- bind_rows(extract.vars(control, i) %>% mutate(type = "control"),
                              extract.vars(removal, i) %>% mutate(type = "removal"))
  }
}

all.OP <- bind_rows(op.list)   # NULL slots (missing runs) are dropped

saveRDS(object = all.OP,file = file.path(".","OP.posterior.runs.RDS"))

# scp /home/femeunier/Documents/projects/LianaRemoval/scripts/analyze_discussion.R hpc:/data/gent/vo/000/gvo00074/felicien/R
|
cfc9e82ac958c082e8c80e378e4c83c349e84ab8
|
ea0bc1dfa2a9d499a05eaf0995064c15ba37fa24
|
/man/randomWalkByMatrixInv-matrix-method.Rd
|
5d18409a4fdabab286eb2a6b5a4a230a6090b65f
|
[] |
no_license
|
sqjin/netSmooth
|
7fde4b241294d97f635ea9c8bccc784c5817b811
|
136384b31fe51e56acbb86400b33527489c273b2
|
refs/heads/master
| 2020-11-27T12:28:45.467858
| 2020-01-15T07:22:11
| 2020-01-15T07:22:11
| 229,440,928
| 0
| 0
| null | 2019-12-21T14:43:33
| 2019-12-21T14:43:33
| null |
UTF-8
|
R
| false
| true
| 884
|
rd
|
randomWalkByMatrixInv-matrix-method.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/randomWalkByMatrixInv.R
\name{randomWalkByMatrixInv,matrix-method}
\alias{randomWalkByMatrixInv,matrix-method}
\title{Smooth data on graph by computing the closed-form steady state
distribution of the random walk with restarts process.}
\usage{
\S4method{randomWalkByMatrixInv}{matrix}(
f0,
adjMatrix,
alpha,
normalizeAdjMatrix = c("rows", "columns")
)
}
\arguments{
\item{f0}{initial data matrix [NxM]}
\item{adjMatrix}{adjacency matrix of graph to network smooth on
will be column-normalized.}
\item{alpha}{smoothing coefficient (1 - restart probability of
random walk)}

\item{normalizeAdjMatrix}{whether to normalize the adjacency matrix
by \code{"rows"} (default) or \code{"columns"}}
}
\value{
network-smoothed gene expression
}
\description{
The closed-form solution is given by
f_{ss} = (1 - alpha) * (I - alpha * A)^{-1} * f_0
and is computed by matrix inversion in this function.
}
\keyword{internal}
|
42d934a148d471dd8706df88ba2359bb125c6d23
|
a7cf5209b264a8879c25ce652c030d9d308601f7
|
/R/text-functions.R
|
198c221bc636debe7d3e6e69b6b4db20bd23170d
|
[] |
no_license
|
cran/scraEP
|
cfb79b4ae2be47a988073b96af70ba93c8623c5c
|
782a58f9b26cba4bfd6ec323cc9f69eebe68f817
|
refs/heads/master
| 2021-07-14T17:08:02.369469
| 2021-06-23T06:00:02
| 2021-06-23T06:00:02
| 110,838,033
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,011
|
r
|
text-functions.R
|
## Function to remove all accents from a character string
## Strip accents/diacritics from a character vector, returning plain ASCII.
unaccent <- function(text) {
    marks <- "['`^~\"]"
    ## Free-standing accent characters become spaces first ...
    out <- gsub(marks, " ", text)
    ## ... then transliterate everything to its closest ASCII equivalent.
    out <- iconv(out, to = "ASCII//TRANSLIT//IGNORE")
    ## Finally drop any stray marks produced by the transliteration itself.
    gsub(marks, "", out)
}
## Compare two string vectors for common elements.
## Compare two string vectors for common elements.
##
## Returns a list with:
##   matchTable    - 2x2 table of membership of the pooled unique values
##   matchOneInTwo - table of how often each element of text1 occurs in text2
##   matchTwoInOne - table of how often each element of text2 occurs in text1
##   tabOneInTwo   - table of text1 %in% text2
##   tabTwoInOne   - table of text2 %in% text1
##   oneNotInTwo   - elements of text1 absent from text2
##   twoNotInOne   - elements of text2 absent from text1
strcomp <- function(text1, text2) {
    text1 <- as.character(text1)
    text2 <- as.character(text2)
    ## vapply() instead of sapply(): guarantees an integer vector even for
    ## zero-length input, so table() never sees a surprise return type.
    matchOneInTwo <- table(vapply(text1, function(x) sum(text2 == x), integer(1)))
    matchTwoInOne <- table(vapply(text2, function(x) sum(text1 == x), integer(1)))
    tabOneInTwo <- table(text1 %in% text2)
    tabTwoInOne <- table(text2 %in% text1)
    oneNotInTwo <- text1[! text1 %in% text2]
    twoNotInOne <- text2[! text2 %in% text1]
    concat <- unique(c(text1, text2))
    tabAll <- table(x1= concat %in% text1, x2= concat %in% text2)
    list(matchTable = tabAll,
         matchOneInTwo = matchOneInTwo, matchTwoInOne = matchTwoInOne,
         tabOneInTwo = tabOneInTwo, tabTwoInOne = tabTwoInOne,
         oneNotInTwo = oneNotInTwo, twoNotInOne = twoNotInOne)
}
|
b371041c57dfbb103e73685a6fca8de9f104afbf
|
9a1277a635b73c72472ae40442994d6c301ca1b4
|
/docs/articles/nifti_basics.R
|
d7069dbbaf180561244d0335f897121794583911
|
[] |
no_license
|
muschellij2/neurobase
|
eaf8632de4659cd857bb5a864bf3a60f83333a89
|
375101bab5a546bd8c8a092c21190b48b36f9a13
|
refs/heads/master
| 2022-10-25T16:00:24.322516
| 2022-10-23T16:07:05
| 2022-10-23T16:07:05
| 68,750,968
| 5
| 4
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,484
|
r
|
nifti_basics.R
|
## Code extracted (knitr::purl) from the neurobase "nifti_basics" vignette.
## Demonstrates building/inspecting `nifti` objects, arithmetic on them,
## downloading the Eve template, and the various orthographic viewers.

## ---- setup: libraries and knitr options ----
library(neurobase)
library(oro.nifti)
library(methods)
library(ggplot2)
library(httr)
library(reshape2)
knitr::opts_chunk$set(
  echo = TRUE, comment = "")

## ---- install oro.nifti if missing (chunk not evaluated when knitting) ----
# packages = installed.packages()
# packages = packages[, "Package"]
# if (!"oro.nifti" %in% packages) {
# install.packages("oro.nifti")
# ### development version
# # devtools::install_github("bjw34032/oro.nifti")
# }

## ---- build a nifti object from a random 10x10x10 array ----
library(oro.nifti)
set.seed(20161007)
dims = rep(10, 3)
arr = array(rnorm(10*10*10), dim = dims)
nim = oro.nifti::nifti(arr)
print(nim)
print(class(nim))
oro.nifti::is.nifti(nim)

## ---- three equivalent ways to read an S4 slot ----
nim@cal_max
cal_max(nim)
slot(nim, "cal_max")

## ---- raw voxel data via the .Data slot ----
data = slot(nim, ".Data")
class(data)

## ---- same data via the img_data() accessor ----
data = oro.nifti::img_data(nim)
class(data)
dim(data)

## ---- slicing drops dimensions by default ----
slice = data[,,3]
class(slice)

## ---- drop = FALSE keeps the array class ----
slice = data[,,3, drop = FALSE]
class(slice)

## ---- all slots of a nifti object ----
slotNames(nim)

## ---- comparisons return nifti objects of 0/1 ----
above_zero = nim > 0
class(above_zero)
img_data(above_zero)[1]

## ---- combined logical operations ----
class(nim > 0 & nim < 2)

## ---- arithmetic also preserves the nifti class ----
class(nim * 2)
class(nim + (nim / 4))
class(nim * nim)
class(nim^2)

## ---- sum of a logical image = number of voxels above zero ----
sum(above_zero)

## ---- mean of a logical image = proportion of voxels above zero ----
mean(above_zero)

## ---- other summaries ----
min(nim)
max(nim)
range(nim)
class(abs(nim))

## ---- download the Eve template files (T1, T2, skull-stripped T1) ----
## NOTE(review): requires network access; files come from the
## jfortin1/EveTemplate GitHub repository.
eve_types = c("T1", "T2", "T1_Brain")
eve_stubs = paste0("JHU_MNI_SS_", eve_types, ".nii.gz")
url = "https://raw.githubusercontent.com/"
paths = paste(c("jfortin1",
  "EveTemplate",
  "master",
  # "raw",
  "inst",
  "extdata"),
  collapse = "/")
paths = paste(paths, eve_stubs, sep = "/")
path = paths[1]
eve_fnames = sapply(paths, function(path) {
  tmp = tempfile(fileext = ".nii.gz")
  req <- httr::GET(url,
    path = path,
    httr::write_disk(path = tmp),
    httr::progress())
  httr::stop_for_status(req)
  return(tmp)
})
names(eve_fnames) = eve_types
## Convenience reader over the downloaded temp files.
readEve = function(what = c("T1", "T2", "Brain")) {
  what = match.arg(what)
  if (what == "Brain") {
    what = "T1_Brain"
  }
  fname = eve_fnames[what]
  readnii(fname)
}

## ---- read the skull-stripped template ----
eve = readEve(what = "Brain")

## ---- oro.nifti orthographic view ----
oro.nifti::orthographic(eve)

## ---- neurobase ortho2 view ----
neurobase::ortho2(eve)

## ---- ortho2 without orientation labels ----
neurobase::ortho2(eve, add.orient = FALSE)

## ---- overlay: top-decile mask, oro.nifti version ----
orthographic(eve, y = eve > quantile(eve, 0.9))

## ---- overlay: same mask with ortho2 ----
ortho2(eve, y = eve > quantile(eve, 0.9))

## ---- inject an artificial outlier voxel ----
eve2 = eve
eve2[which.max(eve)] = eve2[which.max(eve)] * 5

## ---- outlier washes out the display ----
ortho2(eve2)

## ---- robust_window clamps extreme intensities ----
ortho2(robust_window(eve2))

## ---- equivalent manual intensity limits ----
ortho2(eve2, zlim = quantile(eve2, probs = c(0, 0.999)))

## ---- read the full (non-stripped) T1 ----
eve_full = readEve(what = "T1")

## ---- side-by-side full vs. skull-stripped ----
double_ortho(eve_full, eve)
|
0b92c07c5d790ee6acd018c4bc9d0243df72e83a
|
416b6b1b4678cbe68302af528fe95c366d9e23df
|
/man/dot_plot_profiles_fun.Rd
|
f042d0449f8c6a0d0f052af2c0cf26cff580e02e
|
[
"MIT"
] |
permissive
|
GenomicsNX/SPOTlight
|
c9a9e7171a0891d5b686c50067c4553a553a91d5
|
1f364a965ab275ac54a711e28c67554db25d547e
|
refs/heads/master
| 2023-05-01T13:12:05.810905
| 2021-05-26T09:02:57
| 2021-05-26T09:02:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 938
|
rd
|
dot_plot_profiles_fun.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dot_plot_profiles_fun.R
\name{dot_plot_profiles_fun}
\alias{dot_plot_profiles_fun}
\title{This function takes in the H coefficient matrix object from an NMF object and returns plots to visualize the topic profiles between and within cell types}
\usage{
dot_plot_profiles_fun(h, train_cell_clust)
}
\arguments{
\item{h}{Object of class matrix; H coefficient matrix from NMF model.}
\item{train_cell_clust}{Object of class vector with cluster of the cells used to train the model.}
}
\value{
This function returns a list where the first element is a plot with the topic profiles of all the cell types and the 2nd element is a plot with the consensus topic profile per spot.
}
\description{
This function takes in the H coefficient matrix object from an NMF object and returns plots to visualize the topic profiles between and within cell types
}
\examples{
}
|
48acf50e89c86930c7405bf8dd75dac080f441d3
|
10a489c8b3d0b174a54c0aa9ae1452ff2cd36a0a
|
/paperFigs.r
|
15a0225159da79381d675116d795f74cc67a7afd
|
[] |
no_license
|
jfelectron/Statiscal-analysis-of-large-biologial-datasets
|
2d9832eb7bbd330d25917d335bb999ee6a79a3d6
|
3f6f28d28742460b3637eafce1c2ec32338a5915
|
refs/heads/master
| 2020-12-24T18:50:36.480693
| 2013-03-09T23:44:24
| 2013-03-09T23:44:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 20,901
|
r
|
paperFigs.r
|
# Figure-generation script for the LGM2 clonal noise paper.
# Relies on a local helper (flowset2ggplot) and local data paths.
library(ggplot2)
library(scales)
library(flowCore)
library(gridExtra)
library(reshape)
library(plyr)
library(grImport)
library(Hmisc)
library(corrplot)
source("/Users/jonathan/Documents/flowset2ggplot.r")
# generates Figure 1
# Figure 1a: clonal workflow schematic, traced from a PostScript drawing.
# `color` toggles the color vs. grayscale version of every Figure 1 panel.
color<-TRUE
if(color) {
PostScriptTrace("Clonal_workflow_color.ps")
workflow<-readPicture("Clonal_workflow_color.ps.xml")
}else {
PostScriptTrace("Clonal_workflow_grayscale.ps")
workflow<-readPicture("Clonal_workflow_grayscale.ps.xml")
}
F1a<-pictureGrob(workflow);
#Figure 1b Bulk Gating
LGbulk_GFP<-read.table("/Users/jonathan/Documents/JF_SD_paperFigs/Data/LGbulk.txt")
NJ_GFP<-read.table("/Users/jonathan/Documents/JF_SD_paperFigs/Data/NJ.txt")
LGBulk<-cbind(name=rep("LGM2 Polyclonal",length(LGbulk_GFP)),rename(LGbulk_GFP,c(V1="GFP")))
NJ<-cbind(name=rep("Autofluorescence",length(NJ_GFP)),rename(NJ_GFP,c(V1="GFP")))
#Shifts RFU top 10^0-10^4 rather than 10^-1-10^3, doesn't really matter
#Bulk_Auto<-as.data.frame(rbind(LGBulk,NJ))
#Bulk_Auto$GFP<-10^(log10(Bulk_Auto$GFP+1));
F1b<-ggplot(Bulk_Auto,aes(x=GFP,fill=name))+geom_density(alpha=0.4)
if(color) F1b<-F1b+scale_fill_manual(values=c("green","grey")) else F1b<-F1b+scale_fill_grey()
F1b<-F1b+scale_x_log10(name="GFP RFU",breaks=trans_breaks("log10",function(x) 10^x),labels=trans_format("log10",math_format(10^.x)))
F1b<-F1b+theme_bw(16)+theme(legend.position="bottom")+guides(fill=guide_legend(title=NULL))
F1b<-F1b+theme(panel.grid.minor=element_blank(), panel.grid.major=element_blank())
gateMin<-2
gateMax<-95
gateVert<-0.9
F1b<-F1b+geom_segment(aes(x=gateMin,y=gateVert,xend=gateMax,yend=gateVert))+geom_segment(aes(x=gateMin-0.05,xend=gateMin-0.05,y=gateVert-0.1,yend=gateVert+0.1))+geom_segment(aes(x=gateMax+0.05,xend=gateMax+0.05,y=gateVert-0.1,yend=gateVert+0.1))
F1b<-F1b+annotate("text", x = 12.5, y = 1, label = "Unbiased Gate")
#Figure 1C sorted clones
# Build a flowCore "transform" object mapping digitized channel numbers to
# relative fluorescence units: x -> 10^((x / r) + min), i.e. `r` channels
# per decade starting at 10^min.
# NOTE(review): depends on flowCore's S4 "transform" class being loaded.
rfuTransform<-function(transformId,r=256,min=0){
  # the .Data slot carries the actual channel -> RFU function
  t=new("transform",.Data=function(x) 10^((x/r)+min))
  t@transformationId = transformId
  t
}
# Figure 1c: GFP distributions for all sorted clones, stacked as density
# ribbons ordered by mean GFP.
channelTrans<-rfuTransform("256 per decade",min=-1) # transform channel numbers to 10^-1-10^3
plates<-list.dirs("/Users/jonathan/Documents/JF_SD_paperFigs/Data/LGM2_allClones_10kreads/")
# plates[[1]] is the parent directory itself, so reading starts at [[2]]
LGM2.Clones<-read.flowSet(path=plates[[2]],dataset=2,transformation=FALSE)
for (i in 3:length(plates)) {
  LGM2.Clones<-rbind2(LGM2.Clones,read.flowSet(path=plates[[i]],dataset=2,transformation=FALSE))
}
colnames(LGM2.Clones)[1:3]<-c("FSC","SSC","GFP")
# gate on the main scatter population, then drop boundary GFP events
LGM2.Clones.gated<-Subset(LGM2.Clones,norm2Filter("SSC","FSC"))
LGM2.Clones.filtered=Subset(LGM2.Clones.gated,boundaryFilter(x=c("GFP"),side="both"))
LGM2.Clones.RFU<-transform(LGM2.Clones.filtered,`GFP`=channelTrans(`GFP`))
LGM2.Clones.df<-flowset2ggplot(LGM2.Clones.RFU[,c(1:3)])
# order clones (factor levels) by mean GFP so the ridge plot is sorted
cloneMeans<-rename(ddply(LGM2.Clones.df, c("name"), function(df)mean(df$GFP)),c(V1="GFP"))
cloneMeans<-cloneMeans[with(cloneMeans, order(GFP)), ]
desired_order<-cloneMeans$name
LGM2.Clones.df$name <- factor( as.character(LGM2.Clones.df$name), levels=desired_order )
LGM2.Clones.df <- LGM2.Clones.df[order(LGM2.Clones.df$name),]
F1c<-ggplot(LGM2.Clones.df,aes(y=name,x=GFP))+ stat_density(aes(fill=..density..), geom="tile", position="identity")
F1c<-F1c+scale_x_log10(name="GFP RFU",breaks=trans_breaks("log10",function(x) 10^x),labels=trans_format("log10",math_format(10^.x)))
# NOTE(review): the next line is a no-op (self-assignment), kept as-is
F1c<-F1c
F1c<-F1c+theme_bw(18)+theme(axis.text.y = element_blank())+scale_y_discrete(breaks=NULL)+
  ylab("227 LGM2 Clones")+theme(panel.grid.minor=element_blank(), panel.grid.major=element_blank())
pdf("Figure1.pdf",width=10,height=10,colormodel="cmyk")
grid.arrange(F1a,arrangeGrob(F1b,F1c,ncol=2),ncol=1)
dev.off()
#generate Figure 2
# Fano factor: variance-to-mean ratio of a numeric vector.
fano <- function(x) {
  var(x) / mean(x)
}
# Coefficient of variation: standard deviation over the mean.
cv <- function(x) {
  s <- sqrt(var(x))
  s / mean(x)
}
# Figure 2: mean-variance and mean-CV scaling of GFP, for all clones
# (top row) and for the FISH-selected subset (bottom row).
LGM2.Stats<-read.table("/Users/jonathan/Documents/JF_SD_paperFigs/Data/LGM2_NSA_withErrors_11202012.csv",sep=",",header=TRUE)
# clone names are stored as "prefix_NAME"; keep the part after "_"
FISHnames<-as.matrix(LGM2.Stats$Clone)
FISHnames<-apply(FISHnames,1,function(x){strsplit(as.character(x),"_")[[1]][[2]]})
LGM2.Stats$Clone<-FISHnames
LGM2.SelectedClones.df<-subset(LGM2.Clones.df,name %in% FISHnames)
# per-clone mean, variance and CV of GFP
cloneMoments.all<-rename(ddply(LGM2.Clones.df, c("name"), function(df)c(mean(df$GFP),var(df$GFP),cv(df$GFP))),c(V1="Mean",V2="Var",V3="CV"))
cloneMoments.FISH<-rename(ddply(LGM2.SelectedClones.df, c("name"), function(df)c(mean(df$GFP),var(df$GFP),cv(df$GFP))),c(V1="Mean",V2="Var",V3="CV"))
pointSize<-1.3
# reference scaling curves overlaid on the log-log plots
poissonVar<-function(x){x}
scalingVarAll<-function(x){2*x-0.92}
scalingVarSubset<-function(x){2*x-0.73}
telegraphScaling<-function(x){sqrt(x)}
telegraphMean<-apply(as.matrix(cloneMoments.all$Var),1,telegraphScaling)
cloneMoments.all<-cbind(cloneMoments.all,telegraphMean)
F2a<-ggplot(cloneMoments.all,aes(x=log10(Mean),y=log10(Var)))+geom_point(size=pointSize)+geom_smooth(method=lm)+ylab("Log10(GFP Variance)")+xlab("Log10(<GFP>)")+stat_function(fun=poissonVar,colour="black",linetype="dashed")+stat_function(fun=scalingVarAll,colour="green",linetype="dashed")+theme_grey(14)
F2b<-ggplot(cloneMoments.all,aes(x=log10(Mean),y=log10(CV)))+geom_point(size=pointSize)+geom_smooth(method=lm)+ylab("Log10(GFP CV)")+xlab("Log10(<GFP>)")+theme_grey(14)
F2c<-ggplot(cloneMoments.FISH,aes(x=log10(Mean),y=log10(Var)))+geom_point(size=pointSize)+geom_smooth(method=lm)+ylab("Log10(GFP Variance)")+xlab("Log10(<GFP>)")+stat_function(fun=poissonVar,colour="black",linetype="dashed")+stat_function(fun=scalingVarSubset,colour="green",linetype="dashed")+theme_grey(14)
F2d<-ggplot(cloneMoments.FISH,aes(x=log10(Mean),y=log10(CV)))+geom_point(size=pointSize)+geom_smooth(method=lm)+ylab("Log10(GFP CV)")+xlab("Log10(<GFP>)")+theme_grey(14)
pdf("Figure2_allClones.pdf",width=10,height=10,colormodel="cmyk")
grid.arrange(F2a,F2b,F2c,F2d,ncol=2,nrow=2)
dev.off();
#Figure 3 subpanel: RNA histogram and RNA-vs-GFP scatter for clone BC6.
BC6data<-read.csv("/Users/jonathan/Documents/JF_SD_paperFigs/Data/LGM2_GFP_FISH_csv/BC6.csv")
scatter<-ggplot(BC6data,aes(x=RNA,y=GFP))+geom_point()
# BUG FIX: `red` was an unquoted, undefined symbol and made ggplot error
# out at render time; quote it so the constant "red" is mapped (matching
# aes(colour="red") usage elsewhere in this script).
RNAhist<-ggplot(BC6data)+geom_histogram(aes(RNA,colour="red"))
pdf("Figure3x.pdf",width=10,height=10,colormodel="cmyk")
grid.arrange(RNAhist,scatter,nrow=2)
dev.off()
#Figure 4: RNA moment scaling and RNA-vs-GFP moment correspondence,
# with measurement-error bars from the *.Min/*.Max columns.
cloneMoments.FISH$name<-as.character(droplevels(cloneMoments.FISH)$name)
# align row order of the flow-derived moments with the stats table
cloneMoments.FISH<-arrange(cloneMoments.FISH,name)
LGM2.Stats<-arrange(LGM2.Stats,Clone)
F4b<-ggplot(LGM2.Stats,aes(x=log10(RNA.Mean),y=log10(RNA.Var)))+geom_point()+geom_smooth(method=lm)+ylab("Log10(RNA Variance)")+xlab("Log10(<RNA>)")+theme_grey(16)+geom_errorbarh(aes(xmin=log10(RNA.Mean.Min),xmax=log10(RNA.Mean.Max),height=0.02))+geom_errorbar(aes(ymin=log10(RNA.Var.Min),ymax=log10(RNA.Var.Max),width=0.02))
F4c<-ggplot(LGM2.Stats,aes(x=log10(RNA.Mean),y=log10(RNA.CV)))+geom_point()+geom_smooth(method=lm)+ylab("Log10(RNA CV)")+xlab("Log10(<RNA>)")+theme_grey(16)+geom_errorbarh(aes(xmin=log10(RNA.Mean.Min),xmax=log10(RNA.Mean.Max),width=0.02))+geom_errorbarh(aes(xmin=log10(RNA.Mean.Min),xmax=log10(RNA.Mean.Max),height=0.02))+geom_errorbar(aes(ymin=log10(RNA.CV.Min),ymax=log10(RNA.CV.Max)), height=0.02)
F4d<-ggplot(LGM2.Stats,aes(x=log10(RNA.Mean),y=log10(GFP.Mean)))+geom_point()+geom_smooth(method=lm)+ylab("Log10(<GFP>)")+xlab("Log10(<RNA>)")+theme_grey(16)+geom_errorbarh(aes(xmin=log10(RNA.Mean.Min),xmax=log10(RNA.Mean.Max),height=0.02))
F4e<-ggplot(LGM2.Stats,aes(x=log10(RNA.Var),y=log10(GFP.Var)))+geom_point()+geom_smooth(method=lm)+ylab("Log10(GFP Variance)")+xlab("Log10(RNA Variance)")+theme_grey(16)+geom_errorbarh(aes(xmin=log10(RNA.Var.Min),xmax=log10(RNA.Var.Max),height=0.02))
pdf("Figure4bcde.pdf",width=10,height=10,colormodel="cmyk")
grid.arrange(F4b,F4c,F4d,F4e,nrow=2,ncol=2)
dev.off();
# Outlier-removed variants of the stats table used by later figures.
LGM2.Stats.outliersRemoved<-subset(LGM2.Stats,!Clone %in% c("ID3","CA2"))
LGM2.Stats.noID3<-subset(LGM2.Stats,!Clone %in% c("ID3"))
LGM2.Stats.noCA2<-subset(LGM2.Stats,!Clone %in% c("CA2"))
LGM2.Stats.outliersRemovedB<-subset(LGM2.Stats,!Clone %in% c("BC6","CA2"))
##Figure 5: burst size and normalized on-rate vs. RNA mean/CV.
# ktd = RNA degradation rate estimate with its min/max bounds.
ktd<-0.34
ktdMin<-0.26
ktdMax<-0.43
F5a<-ggplot(LGM2.Stats.noID3,aes(x=B,y=RNA.Mean))+geom_point()+geom_smooth(method=lm)+ylab("<RNA>")+xlab("Burst Size")+theme_grey(18)+geom_errorbar(aes(ymin=RNA.Mean.Min,ymax=RNA.Mean.Max,width=0.5))+geom_errorbarh(aes(xmin=B.min,xmax=B.max,height=0.5))
F5b<-ggplot(LGM2.Stats.noID3,aes(y=RNA.Mean,x=ka/ktd))+geom_point()+geom_smooth(method=lm)+ylab("<RNA>")+xlab("Normalized On rate")+theme_grey(18)+geom_errorbar(aes(ymin=RNA.Mean.Min,ymax=RNA.Mean.Max,width=0.05))+geom_errorbarh(aes(xmin=ka.Min/ktd,xmax=ka.Max/ktd,height=0.5))
F5c<-ggplot(LGM2.Stats.noID3,aes(y=RNA.CV,x=B))+geom_point()+geom_smooth(method=lm)+ylab("RNA CV")+xlab("Burst Size")+theme_grey(18)+geom_errorbar(aes(ymin=RNA.CV.Min,ymax=RNA.CV.Max,width=0.5))+geom_errorbarh(aes(xmin=B.min,xmax=B.max,height=0.02))
F5d<-ggplot(LGM2.Stats.noID3,aes(y=RNA.CV,x=ka/ktd))+geom_point()+geom_smooth(method=lm)+ylab("RNA CV")+xlab("Normalized On Rate")+theme_grey(18)+geom_errorbar(aes(ymin=RNA.CV.Min,ymax=RNA.CV.Max,width=0.2))+geom_errorbarh(aes(xmin=ka.Min/ktd,xmax=ka.Max/ktd,height=0.02))
pdf("Figure5.pdf",width=10,height=10,colormodel="cmyk")
grid.arrange(F5a,F5b,F5c,F5d,ncol=2,nrow=2)
dev.off();
##Figure 7: chromatin inaccessibility vs. kinetic rates.
# NOTE(review): LGM2.Stats.NSA (used in F7b/F7c) is not defined anywhere in
# this script -- presumably created interactively or elsewhere; verify.
F7a<-ggplot(LGM2.Stats.noCA2,aes(x=Nuc1,y=ka/ktd))+geom_point()+geom_smooth(method=lm)+xlab("Nuc-1 Chromatin Inaccessibility")+ylab("Normalized On Rate")+theme_grey(18)+geom_errorbarh(aes(xmin=Nuc1-Nuc1.Error,xmax=Nuc1+Nuc1.Error,height=0.05))+geom_errorbar(aes(ymin=ka.Min/ktd,ymax=ka.Max/ktd,width=0.02))
F7b<-ggplot(LGM2.Stats.NSA,aes(x=log(HSS),y=kr/ktd))+geom_point()+geom_smooth(method=lm)+xlab("Log(HSS Chromatin Inaccessibility)")+ylab("Normalized Off Rate")+theme_grey(16)+ylim(-10,40)+labs(title="Exponential Fit")
F7c<-ggplot(LGM2.Stats.NSA,aes(x=log(HSS),y=kr/ktd))+geom_point()+geom_smooth()+xlab("Log(HSS Chromatin Inaccessibility)")+ylab("Normalized Off Rate")+theme_grey(16)+ylim(-10,40)+labs(title="LOESS")
pdf("Figure7.pdf",width=6,height=3,colormodel="cmyk")
grid.arrange(F7a,F7b,F7c,nrow=1)
dev.off()
#Supp Figure- FSC vs. GFP: per-clone correlation of forward scatter with GFP.
LGM2.Clones.Channels.df<-flowset2ggplot(LGM2.Clones.filtered[,c(1:3)])
LGM2.FSCvGFP<-ddply(LGM2.Clones.Channels.df, c("name"), function(df)(c(Pearson.r=rcorr(df$GFP,df$FSC,type=("pearson"))$r[[1,2]],Spearman.r=rcorr(df$GFP,df$FSC,type=("spearman"))$r[[1,2]],Pearson.p=rcorr(df$GFP,df$FSC,type=("pearson"))$P[[1,2]],Spearman.p=rcorr(df$GFP,df$FSC,type=("pearson"))$P[[1,2]],r.squared=summary(lm(df$GFP~df$FSC))$r.squared)))
FSCGFP_pearsonR2r<-ggplot(LGM2.FSCvGFP,aes(x=Pearson.r,y=r.squared))+geom_point(aes(colour=name))+geom_rug()+guides(colour=FALSE)
FSCGFP_pearsonR2p<-ggplot(LGM2.FSCvGFP,aes(x=Pearson.p,y=r.squared))+geom_point(aes(colour=name))+geom_rug()+guides(colour=FALSE)
FSCGFP_spearmanR2r<-ggplot(LGM2.FSCvGFP,aes(x=Spearman.r,y=r.squared))+geom_point(aes(colour=name))+geom_rug()+guides(colour=FALSE)
FSCGFP_spearmanR2p<-ggplot(LGM2.FSCvGFP,aes(x=Spearman.p,y=r.squared))+geom_point(aes(colour=name))+geom_rug()+guides(colour=FALSE)
pdf("AllClones_FSCvGFP_uncorrelated.pdf",width=10,height=10,colormodel="cmyk")
grid.arrange(FSCGFP_pearsonR2r,FSCGFP_spearmanR2r,FSCGFP_pearsonR2p,FSCGFP_spearmanR2p, nrow=2,ncol=2)
dev.off()
pdf("SelectedClones_FSCvGFP.pdf",width=10,height=10,colormodel="cmyk")
ggplot(LGM2.SelectedClones.df,aes(y=log10(GFP),x=log10(FSC)))+geom_point(shape=1)+facet_wrap(~name,ncol=4)+geom_smooth(method=lm,aes(colour="red"))
dev.off()
pdf("FISHclones_gating.pdf",width=10,height=10,colormodel="cmyk")
print(xyplot(`SSC` ~ `FSC`,LGM2.Clones[which(sampleNames(LGM2.Clones) %in% FISHnames)],filter=norm2Filter("SSC","FSC")))
dev.off()
#Supp Figure-RNA Degradation: log-linear decay fits for LGM2 and B-Actin.
# NOTE(review): summarySE() is not defined or loaded in this script -- it is
# typically sourced from the R-cookbook helper; confirm. `dfc` is unused below.
RNAdeg<-read.csv("/Users/jonathan/Documents/JF_SD_paperFigs/Data/RNA_degradation.csv")
dfc <- summarySE(RNAdeg, measurevar="GFP", groupvars=c("Time","Condition"))
# row-range subsets select (treatment, control) blocks of the long table
LGM2degmodel<-lm(log(RNAdeg[16:24,]$GFP)~RNAdeg[16:24,]$Time)
BactModel<-lm(log(RNAdeg[25:36,]$GFP)~RNAdeg[25:36,]$Time)
LGM2cntrlModel<-lm(log(RNAdeg[1:12,]$GFP)~RNAdeg[1:12,]$Time)
BActcntrlModel<-lm(log(RNAdeg[37:51,]$GFP)~RNAdeg[37:51,]$Time)
LGM2deg<- ggplot(RNAdeg[1:24,], aes(x=Time, y=log(GFP), colour=Condition))+geom_point()+stat_smooth(data=RNAdeg[16:24,],method=lm)+stat_smooth(data=RNAdeg[1:12,],method=lm)+theme_grey(16)+theme(legend.position="bottom")+ylab("ln(LGM2 RNA) Arbitrary Units")
BActdeg<- ggplot(RNAdeg[25:51,], aes(x=Time, y=log(GFP), colour=Condition))+geom_point()+stat_smooth(data=RNAdeg[25:36,],method=lm)+stat_smooth(data=RNAdeg[37:51,],method=lm)+theme_grey(16)+theme(legend.position="bottom")+ylab("ln(B-Actin RNA) Arbitrary Units")
pdf("RNA_degradation.pdf",width=10,height=10,colormodel="cmyk")
grid.arrange(LGM2deg,BActdeg,ncol=1)
dev.off()
#Supp Figure-Spearman Matrix: pairwise Spearman correlations of the key
# statistics, masked at p < 0.025.
LGM2.Stats.reduced<-data.frame(LGM2.Stats.outliersRemoved$RNA.Mean,LGM2.Stats.outliersRemoved$RNA.Var,LGM2.Stats.outliersRemoved$RNA.CV,LGM2.Stats.outliersRemoved$GFP.Mean,LGM2.Stats.outliersRemoved$GFP.Var,LGM2.Stats.outliersRemoved$GFP.CV,LGM2.Stats.outliersRemoved$ka,LGM2.Stats.outliersRemoved$B,LGM2.Stats.outliersRemoved$Nuc0,LGM2.Stats.outliersRemoved$HSS,LGM2.Stats.outliersRemoved$Nuc1)
colnames(LGM2.Stats.reduced)<-c("RNA.Mean","RNA.Var","RNA.CV","GFP.Mean","GFP.Var","GFP.CV","ka","B","Nuc0","HSS","Nuc1")
pdf("Spearman_visualization_p025.pdf",width=10,height=10,colormodel="cmyk")
corrplot.mixed(rcorr(as.matrix(LGM2.Stats.reduced),type=c("spearman"))$r,lower=c("number"),upper=c("circle"),p.mat=rcorr(as.matrix(LGM2.Stats.reduced),type=c("spearman"))$P,tl.pos=c("lt"),sig.level=0.025,insig=c("pch"),diag=c("n"))
dev.off()
#supp Figure Cell Size: pooled per-clone cell-size distributions with
# normal-density overlays and a clone-wise boxplot.
files<-list.files(path="../Data/CellSize/")
cellSizes<-read.csv(paste0("../Data/CellSize/",files[1]))
cellSizes<-cbind(Clone=as.matrix(rep(strsplit(files[1],".csv")[1]),dim(cellSizes)[1]),cellSizes)
for (i in 2:length(files)){
  cellSize<-read.csv(paste0("../Data/CellSize/",files[i]))
  cellSizes<-rbind(cbind(Clone=as.matrix(rep(strsplit(files[i],".csv")[1]),dim(cellSize)[1]),cellSize),cellSizes)
}
# BUG FIX: was `as.character(cellSize$Clone)` -- that is the single-file data
# frame from the last loop iteration, whose Clone column has the wrong length
# for the pooled table; convert the pooled column instead.
cellSizes$Clone<-as.character(cellSizes$Clone)
CA2<-subset(cellSizes,Clone %in% c("CA2"))
IC4<-subset(cellSizes,Clone %in% c("IC4"))
IB4<-subset(cellSizes,Clone %in% c("IB4"))
AD1<-subset(cellSizes,Clone %in% c("AD1"))
EC5<-subset(cellSizes,Clone %in% c("EC5"))
# histogram + fitted normal curve for each highlighted clone
CA2plot<-ggplot(CA2,aes(x=CellSize))+geom_histogram(aes(y=..density..))+stat_function(fun=dnorm, args=list(mean=mean(CA2$CellSize),sd=sd(CA2$CellSize)),aes(colour="red"))
AD1plot<-ggplot(AD1,aes(x=CellSize))+geom_histogram(aes(y=..density..))+stat_function(fun=dnorm, args=list(mean=mean(AD1$CellSize),sd=sd(AD1$CellSize)),aes(colour="red"))
IC4plot<-ggplot(IC4,aes(x=CellSize))+geom_histogram(aes(y=..density..))+stat_function(fun=dnorm, args=list(mean=mean(IC4$CellSize),sd=sd(IC4$CellSize)),aes(colour="red"))
IB4plot<-ggplot(IB4,aes(x=CellSize))+geom_histogram(aes(y=..density..))+stat_function(fun=dnorm, args=list(mean=mean(IB4$CellSize),sd=sd(IB4$CellSize)),aes(colour="red"))
EC5plot<-ggplot(EC5,aes(x=CellSize))+geom_histogram(aes(y=..density..))+stat_function(fun=dnorm, args=list(mean=mean(EC5$CellSize),sd=sd(EC5$CellSize)),aes(colour="red"))
cellSize.boxplot<-ggplot(cellSizes,aes(factor(Clone),CellSize))+geom_boxplot()
pdf("CellSize_wNorm.pdf",width=10,height=10,colormodel="cmyk")
grid.arrange(cellSize.boxplot,arrangeGrob(AD1plot,CA2plot,IC4plot,EC5plot,nrow=2,ncol=2),nrow=2)
dev.off()
#Supp Figure FISH intensity
FISHIntensity<-rename(read.csv("/Users/jonathan/Documents/JF_SD_paperFigs/Data/FISHIntensity.csv",header=FALSE),c(V1="Intensity"))
pdf("FISH_IntensitywNorm.pdf",width=5,height=5,colormodel="cmyk")
ggplot(FISHIntensity,aes(x=Intensity))+geom_histogram(aes(y=..density..),binwidth=3)+stat_function(fun=dnorm, args=list(mean=mean(FISHIntensity$Intensity),sd=sd(FISHIntensity$Intensity)),aes(colour="red"))+theme(legend.position="none")+xlab("Intensity Arbitrary Units")
dev.off()
#Supp Figure ka v. B
pdf("ka_B_regression.pdf",width=5,height=5,colormodel="cmyk")
ggplot(LGM2.Stats.noID3,aes(y=B,x=ka/ktd))+geom_point()+geom_smooth(method=lm)+ylab("Burst Size")+xlab("Normalized On rate")+theme_grey(16)+geom_errorbar(aes(ymin=B.min,ymax=B.max,width=0.2))+geom_errorbarh(aes(xmin=ka.Min/ktd,xmax=ka.Max/ktd,height=0.2))
dev.off()
#Supp Figure Nuc0,HS vs. ka
Nuc0ka<-ggplot(LGM2.Stats.noCA2,aes(x=Nuc0,y=ka/ktd))+geom_point()+geom_smooth(method=lm)+xlab("Nuc0 Chromatin Inaccessibility")+ylab("Normalized On Rate")+theme_grey(16)+geom_errorbarh(aes(xmin=Nuc0-Nuc0.Error,xmax=Nuc0+Nuc0.Error,height=0.05))+geom_errorbar(aes(ymin=ka.Min/ktd,ymax=ka.Max/ktd,width=0.02))
HSSka<-ggplot(LGM2.Stats.noCA2,aes(x=HSS,y=ka/ktd))+geom_point()+geom_smooth(method=lm)+xlab("HSS Chromatin Inaccessibility")+ylab("Normalized On Rate")+theme_grey(16)+geom_errorbarh(aes(xmin=HSS-HSS.Error,xmax=HSS+HSS.Error,height=0.05))+geom_errorbar(aes(ymin=ka.Min/ktd,ymax=ka.Max/ktd,width=0.02))
pdf("Nuc0HSS_ka.pdf",width=8,height=4,colormodel="cmyk")
grid.arrange(Nuc0ka,HSSka,ncol=2)
dev.off()
#Regression Models...this should be refactored by creating a mixed data frame and then feeding vector of regression pairs to
#some function that generates all this in a nice compact fashion.... this is a copy paste hell
# Each pair below fits a (mostly log-log) linear regression plus the matching
# Spearman rank correlation for one variable pair. The individual model
# objects are kept as globals because they may be referenced elsewhere.
MuVarModel<-lm(log10(cloneMoments.all$Var)~log10(cloneMoments.all$Mean))
MuVarTgraphModel<-lm(log10(cloneMoments.all$Var)~log10(cloneMoments.all$telegraphMean))
MuVarCorr<-rcorr(log10(cloneMoments.all$Var),log10(cloneMoments.all$Mean),type=c("spearman"))
MuCVModel<-lm(log10(cloneMoments.all$CV)~log10(cloneMoments.all$Mean))
MuCVCorr<-rcorr(log10(cloneMoments.all$CV),log10(cloneMoments.all$Mean),type=c("spearman"))
SubsetMuVarModel<-lm(log10(cloneMoments.FISH$Var)~log10(cloneMoments.FISH$Mean))
SubsetMuVarCorr<-rcorr(log10(cloneMoments.FISH$Var),log10(cloneMoments.FISH$Mean),type=c("spearman"))
SubsetMuCVModel<-lm(log10(cloneMoments.FISH$CV)~log10(cloneMoments.FISH$Mean))
SubsetMuCVCorr<-rcorr(log10(cloneMoments.FISH$CV),log10(cloneMoments.FISH$Mean),type=c("spearman"))
RNAMuCVModel<-lm(log10(LGM2.Stats$RNA.CV)~log10(LGM2.Stats$RNA.Mean))
RNAMuCVCorr<-rcorr(log10(LGM2.Stats$RNA.CV),log10(LGM2.Stats$RNA.Mean),type=c("spearman"))
RNAMuVarModel<-lm(log10(LGM2.Stats$RNA.Var)~log10(LGM2.Stats$RNA.Mean))
RNAMuVarCorr<-rcorr(log10(LGM2.Stats$RNA.Var),log10(LGM2.Stats$RNA.Mean),type=c("spearman"))
RNAMuGFPMuModel<-lm(log10(LGM2.Stats$GFP.Mean)~log10(LGM2.Stats$RNA.Mean))
RNAMuGFPMuCorr<-rcorr(log10(LGM2.Stats$GFP.Mean),log10(LGM2.Stats$RNA.Mean),type=c("spearman"))
RNAVarGFPVarModel<-lm(log10(LGM2.Stats$GFP.Var)~log10(LGM2.Stats$RNA.Var))
RNAVarGFPVarCorr<-rcorr(log10(LGM2.Stats$GFP.Var),log10(LGM2.Stats$RNA.Var),type=c("spearman"))
MuBModel<-lm(LGM2.Stats.noID3$RNA.Mean~LGM2.Stats.noID3$B)
MuBCorr<-rcorr(LGM2.Stats.noID3$RNA.Mean,LGM2.Stats.noID3$B,type=c("spearman"))
MukaModel<-lm(LGM2.Stats.noID3$RNA.Mean~LGM2.Stats.noID3$ka)
MukaCorr<-rcorr(LGM2.Stats.noID3$RNA.Mean,LGM2.Stats.noID3$ka,type=c("spearman"))
CVBModel<-lm(LGM2.Stats.noID3$RNA.CV~LGM2.Stats.noID3$B)
CVBCorr<-rcorr(LGM2.Stats.noID3$RNA.CV,LGM2.Stats.noID3$B,type=c("spearman"))
CVkaModel<-lm(LGM2.Stats.noID3$RNA.CV~LGM2.Stats.noID3$ka)
CVkaCorr<-rcorr(LGM2.Stats.noID3$RNA.CV,LGM2.Stats.noID3$ka,type=c("spearman"))
Nuc1kaModel<-lm(LGM2.Stats.noCA2$ka~LGM2.Stats.noCA2$Nuc1)
Nuc1kaCorr<-rcorr(LGM2.Stats.noCA2$Nuc1,LGM2.Stats.noCA2$ka,type=c("spearman"))
Nuc1CVModel<-lm(LGM2.Stats.noCA2$RNA.CV~LGM2.Stats.noCA2$Nuc1)
Nuc1CVCorr<-rcorr(LGM2.Stats.noCA2$RNA.CV,LGM2.Stats.noCA2$Nuc1,type=c("spearman"))
HSSkaModel<-lm(LGM2.Stats.noCA2$ka~LGM2.Stats.noCA2$HSS)
HSSkaCorr<-rcorr(LGM2.Stats.noCA2$HSS,LGM2.Stats.noCA2$ka,type=c("spearman"))
Nuc0kaModel<-lm(LGM2.Stats.noCA2$ka~LGM2.Stats.noCA2$Nuc0)
Nuc0kaCorr<-rcorr(LGM2.Stats.noCA2$Nuc0,LGM2.Stats.noCA2$ka,type=c("spearman"))
kaBModel<-lm(LGM2.Stats.noID3$B~LGM2.Stats.noID3$ka)
kaBCorr<-rcorr(LGM2.Stats.noID3$B,LGM2.Stats.noID3$ka,type=c("spearman"))
# Parallel lists of models and their correlations (order must match pairwise).
models<-list(MuVarModel,MuCVModel,SubsetMuVarModel,SubsetMuCVModel,RNAMuCVModel,RNAMuVarModel,RNAMuGFPMuModel,RNAVarGFPVarModel,MuBModel,MukaModel,CVBModel,CVkaModel,Nuc1kaModel,Nuc1CVModel,kaBModel)
corrs<-list(MuVarCorr,MuCVCorr,SubsetMuVarCorr,SubsetMuCVCorr,RNAMuCVCorr,RNAMuVarCorr,RNAMuGFPMuCorr,RNAVarGFPVarCorr,MuBCorr,MukaCorr,CVBCorr,CVkaCorr,Nuc1kaCorr,Nuc1CVCorr,kaBCorr)
# One-row summary per model: formula terms, coefficients, CI half-width, fit
# statistics; and per correlation: Spearman rho and its p-value.
modelStats<-lapply(models,function(x){data.frame(explanatory=as.character(x$call$formula)[[3]],response=as.character(x$call$formula)[[2]],intercept=x$coefficients[[1]],slope=x$coefficients[[2]],slope95CI=x$coefficients[[2]]-confint(x)[[2,1]],r.squared=summary(x)$r.squared,regression.p=summary(x)$coefficients[[2,4]])})
corrStats<-lapply(corrs,function(x){data.frame(Spearman.r=x$r[[1,2]],corelation.p=x$P[[1,2]])})
corrStats.df <- corrStats[[1]]
# FIX: build the combined summary table in one pass instead of growing it with
# rbind inside a loop (quadratic copying; the old `2:length(modelStats)` loop
# also broke when only one model was present).
modelStats.df <- do.call(rbind, Map(cbind, modelStats, corrStats))
|
78e6320138ec1ed3f27dd26ef95968ff58255574
|
7cdefcb3fcc0fde97aa607043ffd55bab8c3d21d
|
/project_code.R
|
21ebff320b3bf55ebf4137e778affdc632d707f8
|
[] |
no_license
|
rahulb99/covid19
|
f3d3f8f2fe92ad556db43edfa7e4ae8a8dbb1183
|
e555667c21f5f8862869ec68f589be101764de11
|
refs/heads/master
| 2022-12-24T17:55:28.206450
| 2022-12-11T09:22:31
| 2022-12-11T09:22:31
| 261,263,752
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 27,269
|
r
|
project_code.R
|
# ---- Setup: fetch the daily-updated coronavirus dataset ---------------------
# NOTE(review): installing packages from inside a script is a side effect;
# prefer running these two lines manually once.
install.packages("devtools")
devtools::install_github("covid19r/coronavirus") # coronavirus dataset - updates every 24 hours
library(coronavirus)
data("coronavirus")
View(coronavirus)  # interactive viewer only; no effect in batch mode
coronavirus = subset(coronavirus, select = -c(Province.State)) # removes the Province.State column from the data
#EDA
library(tidyr)
library(dplyr)  # FIX: select/group_by/summarise below need dplyr, which was only attached later
# Per-country totals of each case type, widened to one column per type and
# sorted by confirmed count (descending).
summary_df <- coronavirus %>%
  select(country = Country.Region, type, cases) %>%
  group_by(country, type) %>%
  summarise(total_cases = sum(cases)) %>%
  pivot_wider(names_from = type,
              values_from = total_cases) %>%
  arrange(-confirmed)
summary_df %>% head(10)
# NOTE(review): abs() flips the sign of negative corrections rather than
# removing them; kept as-is to preserve downstream results.
coronavirus$cases <- abs(coronavirus$cases) # remove negative cases
coronavirus.cases <- dplyr::filter(coronavirus, grepl("confirmed", type)) # focus on confirmed cases for now
coronavirus.cases = subset(coronavirus.cases, select = -c(type))
## cleaned coronavirus confirmed cases dataset - coronavirus.cases
coronavirus.deaths <- dplyr::filter(coronavirus, grepl("death", type))
# BUG FIX: this previously read subset(coronavirus.cases, ...), which (a) made
# the "deaths" table a copy of the confirmed-cases table and (b) errored
# because `type` had already been dropped from coronavirus.cases.
coronavirus.deaths = subset(coronavirus.deaths, select = -c(type))
coronavirus.recovered <- dplyr::filter(coronavirus, grepl("recovered", type))
coronavirus.recovered = subset(coronavirus.recovered, select = -c(type))
# EDA
library(dplyr)
# Per-country totals for each case type, sorted descending.
summary_confirmed <- coronavirus.cases %>% group_by(`Country.Region`) %>%
  summarise(cases = sum(cases)) %>%
  arrange(-cases)
summary_deaths <- coronavirus.deaths %>% group_by(`Country.Region`) %>%
  summarise(cases = sum(cases)) %>%
  arrange(-cases)
summary_recovered <- coronavirus.recovered %>% group_by(`Country.Region`) %>%
  summarise(cases = sum(cases)) %>%
  arrange(-cases)
# BUG FIX: the second element previously reused summary_recovered, so the
# "deaths" bar actually displayed the recovered total.
counts <- c(sum(summary_confirmed$cases), sum(summary_deaths$cases), sum(summary_recovered$cases))
barplot(counts, main="Total counts", names.arg = c("confirmed","deaths", "recovered"))
library(dplyr)
# Daily confirmed totals per country.
summary_df <- coronavirus.cases %>% group_by(Country.Region, date) %>%
  summarise(total_cases = sum(cases)) %>%
  arrange(-total_cases)
summary_df %>% head(10)
# Top 5 countries by case count on the latest available date.
top <- summary_df %>% filter(date == max(date)) %>% arrange(desc(total_cases))
top <- top_n(ungroup(top), 5, total_cases)
library(ggplot2)
# BUG FIX: the original chain was missing the `+` operators between ggplot
# layers (and before ylab), which is a syntax error; the plot never rendered.
summary_df %>% filter(`Country.Region` %in% top$`Country.Region`) %>%
  group_by(date, total_cases) %>%
  ggplot(aes(x = as.POSIXct.Date(date), y = total_cases, color = `Country.Region`)) +
  geom_point(size = .5) +
  geom_line(alpha = 0.5) +
  scale_color_viridis_d(option = "A") +
  scale_x_datetime() +
  labs(title="Total COVID-19 Confirmed Case Count By Date") +
  xlab("Date") +
  ylab("Cases")
# Total confirmed cases per country (all dates pooled), largest first.
summary_df <- coronavirus.cases %>%
  group_by(Country.Region) %>%
  summarise(total_cases = sum(cases)) %>%
  arrange(-total_cases)
# Lollipop chart of the ten countries with the most confirmed cases.
summary_df %>%
  head(10) %>%
  ggplot(aes(x = reorder(`Country.Region`, total_cases), y = total_cases)) +
  geom_segment(aes(x = reorder(`Country.Region`, total_cases),
                   xend = reorder(`Country.Region`, total_cases),
                   y = 0, yend = total_cases)) +
  geom_point(size = 5, color = "red", fill = alpha("pink", 0.3),
             alpha = 0.7, shape = 21, stroke = 2) +
  coord_flip() +
  labs(x = "Country", y = "Count",
       title = "Top 10 Countries Confirmed Case Count")
# Per-date worldwide totals for each case type.
# NOTE(review): sum(cases) per date is the *daily* total, not a cumulative
# count, despite the axis/title wording below -- confirm intent.
summary_confirmed <- coronavirus.cases %>% group_by(date) %>%
summarise(total_cases = sum(cases)) %>%
arrange(-total_cases)
summary_deaths <- coronavirus.deaths %>% group_by(date) %>%
summarise(total_cases = sum(cases)) %>%
arrange(-total_cases)
summary_recovered <- coronavirus.recovered %>% group_by(date) %>%
summarise(total_cases = sum(cases)) %>%
arrange(-total_cases)
# Base-graphics overlay: confirmed (red), deaths (black), recovered (green).
plot(summary_confirmed$date, summary_confirmed$total_cases, xlab = "Timeline", ylab = "Cumulative count", col = "red")
points(summary_deaths$date, summary_deaths$total_cases, col = "black")
points(summary_recovered$date, summary_recovered$total_cases, col = "green")
title(main = "Timeline of cumulative count of infected (red), dead(black) and recovereds (green)")
# Train test split
set.seed(100)
# 80/20 random split of the confirmed-cases rows.
train_indices = sample(seq_len(nrow(coronavirus.cases)), size = floor(0.8*nrow(coronavirus.cases)))
train_cases = coronavirus.cases[train_indices,]
test_cases = coronavirus.cases[-train_indices,]
## Regression trees
#feature engineering
# log1p-transform the target so the tree fits multiplicative growth.
train_cases$log_ConfirmedCases <- log(train_cases$cases + 1)
# BUG FIX: Day must be measured from a single origin for train AND test.
# Previously test Day counted from min(test_cases$date), which can shift the
# test features relative to the model's training scale.
day_origin <- min(train_cases$date)
train_cases$Day <- as.integer(train_cases$date - day_origin)
summary(train_cases$Day)
test_cases$Day <- as.integer(test_cases$date - day_origin)
summary(test_cases$Day)
##
library(caret)
# 5-fold cross-validated grid search over the CART complexity parameter.
num_folds <- trainControl(method = "cv", number = 5)
parameter_grid <- expand.grid(.cp = seq(0, 0.01, 0.001))
grid_search_1 <- train(
  # BUG FIX: the formula was missing its `+` operators
  # ("Country.Region Lat Long Day"), which is a syntax error.
  log_ConfirmedCases ~ Country.Region + Lat + Long + Day,
  data = train_cases,
  method = "rpart", # CART algorithm
  trControl = num_folds,
  tuneGrid = parameter_grid
)
print(grid_search_1)
library(rpart)
# Fit an unpruned regression tree (cp = 0) on the log-transformed target.
tree_1 <- rpart(
log_ConfirmedCases ~ Country.Region + Lat + Long + Day,
data = train_cases,
cp = 0
)
# Variable-importance table, most important first (printed, not stored).
data.frame(
variable = names(tree_1$variable.importance),
importance = tree_1$variable.importance,
stringsAsFactors = FALSE,
row.names = NULL
) %>%
arrange(desc(importance)) # variable importance
# Back-transform tree predictions to the case scale (inverse of log1p).
train_cases$Pred_1 <- exp(predict(tree_1, newdata = train_cases)) - 1
summary(train_cases$Pred_1)
# Root mean squared log error on the training set.
RMSLE_1 <- sqrt(mean((log(train_cases$Pred_1 + 1) - log(train_cases$cases + 1))^2))
test_cases$predictedCases <- exp(predict(tree_1, newdata = test_cases)) - 1
# Observed (green) vs fitted (red) over time.
plot(train_cases$date, train_cases$cases, xlab="Timeline",ylab="cases", col="green")
points(train_cases$date, train_cases$Pred_1, col="red")
### poisson
# Poisson GLM alternative using log(Day) as a growth-curve term.
# NOTE(review): log(Day) is -Inf for Day == 0 rows -- verify the origin row is
# excluded or handled upstream.
glm_1 <- glm(
cases ~ log(Day) + Country.Region + Lat + Long,
data = train_cases,
family = "poisson"
)
train_cases$Pred_2 <- predict(glm_1, newdata = train_cases, type = "response")
# RMSLE for the Poisson model, comparable to RMSLE_1.
RMSLE_2 <- sqrt(mean((log(train_cases$Pred_2 + 1) - log(train_cases$cases + 1))^2))
RMSLE_2
#### Polynomial regression
# Kaggle COVID-19 forecasting (week 4) train/test files.
# NOTE(review): user-specific absolute paths; will break elsewhere.
train <- read.csv("~/Downloads/covid19-global-forecasting-week-4/train.csv")
View(train)
test <- read.csv("~/Downloads/covid19-global-forecasting-week-4/test.csv")
View(test)
df <- train
# Unique key per region: "Country-Province" (province may be empty: "Italy-").
df$Area <- paste(df$Country_Region, df$Province_State, sep = "-")
# Quick NA audit per column (printed only).
colSums(is.na(df))
library(stringr)
# Worldwide daily totals for January-March 2020.
# FIX: collect one row per day in a list and bind once, instead of growing
# `total` with rbind inside the loop (quadratic copying).
day_rows <- list()
#iterate through month and day
for(month in 1:3) {
  for(day in 1:31) {
    # e.g. "2020-01-05"; impossible dates (2020-02-30, ...) match no rows,
    # give zero totals, and are dropped by the ConfirmedCases != 0 check.
    reqDate <- paste("2020-0", month, "-", str_pad(day, 2, pad = "0"), sep = "")
    iter <- as.data.frame(colSums(df[as.character(df$Date) == reqDate,
                                     c("ConfirmedCases", "Fatalities")]))
    iter2 <- data.frame(Num = (month - 1) * 31 + day,
                        Month = month, Day = day, ConfirmedCases = iter[1, ],
                        Fatalities = iter[2, ])
    if(iter[1, ] != 0) day_rows[[length(day_rows) + 1]] <- iter2
  }
}
total <- if (length(day_rows) > 0) do.call(rbind, day_rows) else data.frame()
#create plot of cummulative confirmed cases
# Dual-axis base plot: confirmed cases (left axis, solid blue) overlaid with
# fatalities (right axis, dashed red) via par(new = TRUE).
plot(total$Num, total$ConfirmedCases,
type = "l", col = "blue",
xlab = "Day of (since January 22, 2020)",
ylab = "Number of Person",
main = "Cummulative Worldwide Confirmed Cases and Fatalities of Covid-19")
par(new = TRUE)
#create plot of cummulative
# Second series drawn on the same device with its own (hidden) axes.
plot(total$Num, total$Fatalities,
type = "l", lty = 2, col = "red", xaxt = "n", yaxt = "n", xlab = "", ylab = "")
axis(side = 4)
legend("topleft", inset = .05,
legend = c("Confirmed Cases [Left axis]", "Fatalities [Right axis]"),
col = c("blue", "red"), bg = "gray",
lty = c(1, 2))
df2 <- list()                    # per-area data frames, keyed by Area string
confirmed.cases.model <- list()  # per-area polynomial fits for cases
fatalities.model <- list()       # per-area polynomial fits for fatalities
#extract Area information
areadata <- as.data.frame(table(df$Area))
# FIX: row(areadata) returns an index *matrix* (one column per data-frame
# column); a plain row counter was intended.
areadata$Num <- seq_len(nrow(areadata))
for(area in areadata$Var1) {
  #get per area data
  buffer <- df[df$Area == area, ]
  rownames(buffer) <- NULL
  # Day index from row position -- assumes rows are date-ordered within each
  # area (TODO confirm against the source file ordering).
  buffer$Day <- as.numeric(rownames(buffer))
  df2[[area]] <- buffer
  #create models
  #note: polynomial degree was chosen on a trial basis
  confirmed.cases.model[[area]] <- lm(ConfirmedCases ~ Day + I(Day^2) + I(Day^3) + I(Day^5), df2[[area]])
  fatalities.model[[area]] <- lm(Fatalities ~ Day + I(Day^2) + I(Day^3) + I(Day^5), df2[[area]])
}
area <- "Italy-"
#retrieve the data
data <- as.data.frame(df2[[area]])
#create plot
# Four overlaid base plots (observed/fitted cases on the left axis,
# observed/fitted fatalities on the hidden right axis).
plot(data$Day, data$ConfirmedCases,
type = "l", lty = 2,
col = "blue",
ylim = c(0, max(data$ConfirmedCases)),
xlab = "Day of (Since January 22, 2020)", ylab = "Number of People",
main = paste("Covid-19 Confirmed Cases in", area))
par(new = TRUE)
plot(data$Day, fitted(confirmed.cases.model[[area]]),
type = "l", lty = 3,
ylim = c(0, max(data$ConfirmedCases)),
col = "red",
xlab = "", ylab = "")
par(new = TRUE)
plot(data$Day, data$Fatalities,
type = "l", lty = 3,
col = "green",
ylim = c(0, max(data$Fatalities)),
xlab = "", ylab = "", xaxt = "n", yaxt = "n")
par(new = TRUE)
plot(data$Day, fitted(fatalities.model[[area]]),
type = "l", lty = 4,
ylim = c(0, max(data$Fatalities)),
col = "black",
xlab = "", ylab = "", xaxt = "n", yaxt = "n")
par(new = TRUE)
axis(side = 4)
# NOTE(review): legend lty = c(2, 3, 4, 5) does not match the drawn line types
# (2, 3, 3, 4) -- verify which linetypes are intended.
legend("topleft", inset = .05,
legend = c("Confirmed Cases [Left Figure]",
"Estimated Cases (based on Model) [Left Figure]",
"Confirmed Fatality [Right Figure]",
"Estimated Fatality (based on Model) [Right Figure]"),
col = c("blue", "red", "green", "black"), bg = "gray",
lty = c(2, 3, 4, 5),
cex = 0.75)
# Last observed day index. NOTE(review): taken from the most recently assigned
# `data` (Italy above); areas with fewer rows will index past their data and
# yield NA -- confirm all areas share the same date range.
day <- max(data$Day)
# Compare observed vs model-estimated values at the final day for every area.
# FIX: collect rows in a list and bind once instead of growing `accuracy`
# with rbind inside the loop (quadratic copying).
accuracy_rows <- list()
for(area in areadata$Var1) {
  data <- df2[[area]]
  accuracy_rows[[length(accuracy_rows) + 1]] <- data.frame(Area = area,
      ConfirmedCases = data$ConfirmedCases[day],
      EstimatedCases = round(predict(confirmed.cases.model[[area]],
          newdata = data.frame(Day = day))),
      ConfirmedFatalities = data$Fatalities[day],
      EstimatedFatalities = round(predict(fatalities.model[[area]],
          newdata = data.frame(Day = day)))
  )
}
accuracy <- do.call(rbind, accuracy_rows)
#calculate accuracy for cases and fatalities; confirmed vs estimation
accuracy$AccuracyCases <- 1 - (abs(accuracy$ConfirmedCases - accuracy$EstimatedCases) / accuracy$ConfirmedCases)
accuracy$AccuracyFatalities <- 1 - (abs(accuracy$ConfirmedFatalities - accuracy$EstimatedFatalities) / accuracy$ConfirmedFatalities)
# 0/0 gives NaN: treat "0 estimated, 0 actual" as perfectly accurate.
accuracy[is.nan(accuracy$AccuracyFatalities), "AccuracyFatalities"] <- 1
#print the result
accuracy
print(paste("Worldwide Accuracy of Cases: ", mean(accuracy$AccuracyCases)))
print(paste("Worldwide Accuracy of Fatalities: ", mean(accuracy$AccuracyFatalities)))
# Day index for the test set, measured from the dataset origin date.
test$Day <- as.integer(as.Date(test$Date) - as.Date("2020-01-22"))
test$Area <- paste(test$Country_Region, test$Province_State, sep = "-")
# Predict cases/fatalities for every forecast row.
# FIX: iterate by row and bind once, instead of filtering by ForecastId and
# growing `final` with rbind (quadratic copying; assumes ForecastId is unique
# per row, as the original per-id filter also did -- TODO confirm).
final_rows <- vector("list", nrow(test))
for (i in seq_len(nrow(test))) {
  pred <- test[i, ]
  pred.cases <- predict(confirmed.cases.model[[pred$Area]],
                        newdata = data.frame(Day = pred$Day))
  pred.fatality <- predict(fatalities.model[[pred$Area]],
                           newdata = data.frame(Day = pred$Day))
  final_rows[[i]] <- data.frame(ForecastId = pred$ForecastId,
                                ConfirmedCases = pred.cases,
                                Fatalities = pred.fatality)
}
final <- do.call(rbind, final_rows)
# Polynomial extrapolation can go negative; clamp predictions at zero.
final$ConfirmedCases <- pmax(final$ConfirmedCases, 0)
final$Fatalities <- pmax(final$Fatalities, 0)
###
# Reshape the coronavirus data to one row per (place, date) with confirmed /
# death / recovered columns.
coronavirus <- coronavirus %>% pivot_wider(names_from = type, values_from = cases, values_fill = list(cases = 0))
coronavirus$Day <- as.integer(coronavirus$date - min(coronavirus$date))
# NOTE(review): Province.State was dropped near the top of this script, so
# this paste sees NULL and yields "Country-" -- and the subset() below that
# drops Province.State again would error. Verify the intended data flow.
coronavirus$Area <- paste(coronavirus$Country.Region, coronavirus$Province.State, sep = "-")
# colSums(is.na(coronavirus))
# coronavirus[is.na(coronavirus)] <- 0
coronavirus$death <- abs(coronavirus$death)
coronavirus$confirmed <- abs(coronavirus$confirmed)
coronavirus$recovered <- abs(coronavirus$recovered)
italy <- coronavirus %>% filter(Country.Region == 'Italy')
italy <- subset(italy, select = -c(Province.State))
data <- italy
# Observed R (removed) = recovered + dead; observed I = confirmed - removed.
data$realR <- data$recovered + data$death
data$realI <- data$confirmed - data$realR
library(DEoptim)
# Fit SIR parameters by differential evolution.
# NOTE(review): `lifespan`, `lower`, `upper` and `solveSIR` are not defined
# anywhere in this file -- presumably sourced elsewhere; confirm.
evo <- DEoptim(lifespan, data, lower=lower, upper=upper, control=list(itermax=500, trace=100))
summary(evo)
plot(evo, plot.type="bestvalit", type="l")
best <- evo$optim$bestmem
# solve SIR for this best parameters, for 120 days
m <- solveSIR(best, 120)
# assign a date to t
m$date <- as.Date(m$t, origin=as.Date("2020-01-22"))
# Fitted S (blue), I (red), R (green), cumulative I (black) plus observed data.
plot(m$date, m$X1, type="l", col="blue", main="Fitted SIR Model for Italy", xlab="t", ylab="cases")
lines(m$date, m$X2, type="l", col="red")
lines(m$date, m$X3, type="l", col="green")
lines(m$date, m$X2 + m$X3, type="l", col="black")
points(data$date, data$confirmed, pch=4)
points(data$date, data$realR, pch=1)
legend("left", bty="n",
legend=c("Fitted S", "Fitted I", "Fitted R", "Fitted I cumulative", "Confirmed Cases", "Recovered + Death"),
col=c("blue", "red", "green", "black", "black", "black"),
lty=c(1, 1, 1, 1, 0, 0), pch=c(-1, -1, -1, -1, 4, 1))
### public health interventions
# Worldwide daily totals per case type for the intervention-timeline figures.
by_date <- coronavirus %>% group_by(date) %>% summarise(total_confirmed=sum(confirmed),total_death=sum(death),total_recovered=sum(recovered))
confirmed_plot<- ggplot(by_date, aes(x=date)) +
geom_line(aes(y = total_confirmed),color = 'red')
combined_plot<- ggplot(by_date, aes(x=date)) +
geom_line(aes(y = total_confirmed), color = "darkred") +
geom_line(aes(y = total_death), color="steelblue", linetype="twodash")
death_plot <- ggplot(by_date, aes(x=date))+
geom_line(aes(y = total_death), color="steelblue", linetype="twodash")
# NOTE(review): installing inside a script is a side effect, and `grid` ships
# with base R -- these two installs are unnecessary.
install.packages("grid")
install.packages("gridExtra")
library(grid)
library(gridExtra)
grid.arrange(combined_plot, death_plot, nrow = 1)
# Key WHO announcement dates, annotated on the confirmed-cases plot below.
who_events <- tribble(
~ date, ~ event,
"2020-01-30", "Global health\nemergency declared",
"2020-03-11", "Pandemic\ndeclared",
"2020-02-13", "China reporting\nchange"
) %>%
mutate(date = as.Date(date))
confirmed_plot +
geom_vline(data = who_events, aes( xintercept = date),linetype = "dashed") +
geom_text(data = who_events,y = 1e5, aes(x = date, label = event))+theme_minimal()
# Total pop of 1k
control <- control.icm(type = "SIR", nsteps = 100, nsims = 10)
init <- init.icm(s.num = 997, i.num = 3, r.num = 0)
# exposure-to-infection rate of 10 times per day
# 0.05% chance of infection in exposure to infection.That is probably realistic,
# given that many exposures are likely to be minimal,
# such as touching surfaces contaminated with virus,
# and only a few will be high-risk, such as being directly coughed or sneezed on by an infected individual
# recovery rate of 0.05 - this will have a mean of about 20 days but median of 14
# recovery for each individual is determined by draws from a binomial distribution
# with mean set to the recovery rate
# a global crude death rate is 7/1000 per annum.Daily death rate then is (7/365)/1000
# Set the arrival rate at 50% higher than the death rate to account for births and immigration
param <- param.icm(inf.prob = 0.05, act.rate = 10, rec.rate = 1/20,
a.rate = (10.5/365)/1000, ds.rate = (7/365)/1000, di.rate = (14/365)/1000,
dr.rate = (7/365)/1000)
# Run the simulation
sim <- icm(param, init, control)
sim
plot(sim)
# Plot the incidence - total new cases per day
plot(sim, y = "si.flow", mean.col = "red", qnts.col = "red", main = "New cases per day")
# This peak at two weeks and its corresponding R0 value of 2.2 is consistent with
# current WHO estimates for COVID-19
# Despite the model being overly simplistic
# lets explore the effects of various public health interventions
# First - social distancing, to account for this in our model, we can reduce the average number
# of exposures per day(acts). We will explore the value sof 10,5 and 2 mean exposures per day.
# Second - hygiene measures (masks, hand washing, no face touching)
# we will model these interventions by adjusting the probability of infection (at each occasion of exposure).
# values - 0.05, 0.025, 0.01
# Run one SIR individual-contact simulation and summarise it.
#
#   inf_prob        probability of infection at each exposure
#   act_rate        mean exposures (acts) per day
#   pop_size        total simulated population (default 1000)
#   i_num           initially infected count
#   n_steps/n_sims  days to simulate / simulations averaged
#   si_mean/si_sd   serial interval mean/sd used for the MLE R0 estimate
#
# Returns a data frame of mean incidence per time step with the parameter
# values, total/max case summaries, and an MLE R0 estimate (NA if no cases).
run_sir_sim <- function(inf_prob, act_rate, pop_size = 1000,
    i_num = 3, n_steps = 365, n_sims = 10, si_mean = 7.5, si_sd = 3.4) {
    # set up simulation parameters (demography as in the baseline model above)
    param <- param.icm(inf.prob = inf_prob, act.rate = act_rate,
        rec.rate = 1/20, a.rate = (10.5/365)/1000, ds.rate = (7/365)/1000,
        di.rate = (14/365)/1000, dr.rate = (7/365)/1000)
    init <- init.icm(s.num = pop_size - i_num, i.num = i_num,
        r.num = 0)
    control <- control.icm(type = "SIR", nsteps = n_steps, nsims = n_sims)
    # run the simulation
    sim <- icm(param, init, control)
    # collect the relevant results in a data frame
    # NOTE(review): sum(si.flow) has no na.rm; total_cases is NA if any step's
    # incidence is NA -- confirm that is acceptable for the plot labels.
    incidence_rates <- as.data.frame(sim, out = "mean") %>% select(time,
        si.flow, i.num) %>% mutate(act_rate = act_rate, inf_prob = inf_prob,
        total_cases = sum(si.flow), max_prev = max(i.num, na.rm = TRUE))
    # FIX: the old filter(time <= 300, act.rate == act_rate, inf.prob == inf_prob)
    # referenced `act.rate`/`inf.prob`, which are NOT columns of
    # incidence_rates -- dplyr silently fell back to the caller's *global* loop
    # variables, so the function only worked when called from that one loop.
    # incidence_rates holds a single parameter combination, so filtering on
    # time alone is equivalent and self-contained.
    local_case_dates <- incidence_rates %>% filter(time <= 300) %>%
        select(time, si.flow) %>% uncount(si.flow) %>% pull(time)
    if (length(local_case_dates) > 0) {
        local_cases <- local_case_dates %>% incidence(.)
        # find the incidence peak from the incidence object
        peaky_blinder <- find_peak(local_cases)
        # recreate the incidence object using data only up to the peak
        local_growth_phase_case_dates <- incidence_rates %>%
            filter(time <= peaky_blinder) %>% select(time, si.flow) %>%
            uncount(si.flow) %>% pull(time)
        local_growth_phase_cases <- local_growth_phase_case_dates %>%
            incidence(., last_date = peaky_blinder)
        # get a MLE estimate of the basic reproduction number, R0
        res <- get_R(local_growth_phase_cases, si_mean = si_mean,
            si_sd = si_sd)
        # add that as a column to the data frame of results
        incidence_rates <- incidence_rates %>% mutate(mle_R0 = res$R_ml)
    } else {
        # can't calculate R0 - set to NA
        incidence_rates <- incidence_rates %>% mutate(mle_R0 = NA)
    }
    # return the data frame
    return(incidence_rates)
} # end function definition
# set up an empty data frame to which to append results from
# each simulation
sims_incidence_rates <- tibble(time = integer(0), si.flow = numeric(0),
i.num = numeric(0), act_rate = numeric(0), inf_prob = numeric(0),
total_cases = numeric(0), max_prev = numeric(0), mle_R0 = numeric(0))
# the parameters to step through
act.rates <- c(10, 5, 2)
inf.probs <- c(0.05, 0.025, 0.01)
# loop through the parameter space (3 x 3 grid of intervention settings).
# NOTE(review): as written in this file, run_sir_sim's internal filter looks
# up the loop variables act.rate/inf.prob from this scope -- the function
# depends on being called from exactly this loop; verify before reusing it.
for (act.rate in act.rates) {
for (inf.prob in inf.probs) {
sims_incidence_rates <- sims_incidence_rates %>% bind_rows(run_sir_sim(inf.prob,
act.rate))
}
}
## Create plots ##
# create facet columns as descending ordered factors
# NOTE(review): funs() and mutate_at() are deprecated dplyr APIs; consider
# across()/list() when this script is next touched.
sims_incidence_rates <- sims_incidence_rates %>% mutate(act_rate_facet_label = paste(act_rate,
"exposures per day"), inf_prob_facet_label = paste("Probability of infection\nat each exposure:",
inf_prob)) %>% arrange(desc(act_rate)) %>% mutate_at(vars(act_rate_facet_label),
funs(factor(., levels = unique(.)))) %>% arrange(desc(inf_prob)) %>%
mutate_at(vars(inf_prob_facet_label), funs(factor(., levels = unique(.)))) %>%
arrange(desc(act_rate), desc(inf_prob), time)
# add annotation text for each facet
# One "R0 = x / % infected" label per parameter combination; the 1000 divisor
# matches the simulated population size.
sims_incidence_rates_facet_annotations <- sims_incidence_rates %>%
mutate(label = paste("R0 =", format(mle_R0, digits = 3),
"\n", round(100 * total_cases/1000, digits = 0), "% of population infected")) %>%
select(inf_prob_facet_label, act_rate_facet_label, label) %>%
distinct()
# 3x3 faceted incidence curves with per-facet R0/attack-rate annotations.
sims_incidence_rates %>% filter(time <= 365) %>% ggplot(aes(x = time,
y = si.flow)) + geom_line(colour = "blue", size = 1.5) + theme_linedraw()+
facet_grid(inf_prob_facet_label ~ act_rate_facet_label) +
geom_text(data = sims_incidence_rates_facet_annotations,
mapping = aes(x = 50, y = 0.8 * max(sims_incidence_rates$si.flow,
na.rm = TRUE), label = label), parse = FALSE, hjust = 0,
vjust = 0, size = 3) + labs(x = "Days since start of epidemic",
y = "New cases per day", title = "Modelling of new cases of COVID-19 per day: incidence rate",
subtitle = paste("with varying levels of social mixing (exposures per day)",
"and probabilities of infection at each exposure")) +
theme(legend.position = "top", strip.text = element_text(size = 14))
## Build on top of the existing model architecture ##
# function to set-up and run the baseline simulations
# Configure and run one SEIQHRF (Susceptible-Exposed-Infected-Quarantined-
# Hospitalised-Recovered-Fatality) individual-contact simulation.
# All arguments are forwarded to EpiModel's control.icm/init.icm/param.icm;
# the *.FUN defaults (infection.seiqhrf.icm, etc.) presumably come from the
# SEIQHRF extension of EpiModel sourced elsewhere -- verify they are loaded.
# NOTE(review): `recovery.FUN = progress.seiqhrf.icm` pairs the "recovery"
# slot with a "progress" function -- confirm this is the extension's intended
# wiring and not a typo.
# NOTE(review): several parameters (fat.rand, quar.rand, hosp.rand,
# disch.rand) are accepted but never passed to control.icm below -- verify.
# NOTE(review): the name `simulate` masks stats::simulate.
# Returns a list(sim = the icm simulation object, df = its data-frame view).
simulate <- function(# control.icm params
type = "SEIQHRF",
nsteps = 366,
nsims = 8,
ncores = 4,
prog.rand = FALSE,
rec.rand = FALSE,
fat.rand = TRUE,
quar.rand = FALSE,
hosp.rand = FALSE,
disch.rand = TRUE,
infection.FUN = infection.seiqhrf.icm,
recovery.FUN = progress.seiqhrf.icm,
departures.FUN = departures.seiqhrf.icm,
arrivals.FUN = arrivals.icm,
get_prev.FUN = get_prev.seiqhrf.icm,
# init.icm params
s.num = 9997,
e.num=0,
i.num = 3,
q.num=0,
h.num=0,
r.num = 0,
f.num = 0,
# param.icm params
inf.prob.e = 0.02,
act.rate.e = 10,
inf.prob.i = 0.05,
act.rate.i = 10,
inf.prob.q = 0.02,
act.rate.q = 2.5,
quar.rate = 1/30,
hosp.rate = 1/100,
disch.rate = 1/15,
prog.rate = 1/10,
prog.dist.scale = 5,
prog.dist.shape = 1.5,
rec.rate = 1/20,
rec.dist.scale = 35,
rec.dist.shape = 1.5,
fat.rate.base = 1/50,
hosp.cap = 40,
fat.rate.overcap = 1/25,
fat.tcoeff = 0.5,
vital = TRUE,
a.rate = (10.5/365)/1000,
a.prop.e = 0.01,
a.prop.i = 0.001,
a.prop.q = 0.01,
ds.rate = (7/365)/1000,
de.rate = (7/365)/1000,
di.rate = (7/365)/1000,
dq.rate = (7/365)/1000,
dh.rate = (20/365)/1000,
dr.rate = (7/365)/1000,
out="mean"
) {
# Simulation control settings (steps, replicate count, module functions).
control <- control.icm(type = type,
nsteps = nsteps,
nsims = nsims,
ncores = ncores,
prog.rand = prog.rand,
rec.rand = rec.rand,
infection.FUN = infection.FUN,
recovery.FUN = recovery.FUN,
arrivals.FUN = arrivals.FUN,
departures.FUN = departures.FUN,
get_prev.FUN = get_prev.FUN)
# Initial compartment sizes.
init <- init.icm(s.num = s.num,
e.num = e.num,
i.num = i.num,
q.num = q.num,
h.num = h.num,
r.num = r.num,
f.num = f.num)
# Epidemiological / demographic rate parameters.
param <- param.icm(inf.prob.e = inf.prob.e,
act.rate.e = act.rate.e,
inf.prob.i = inf.prob.i,
act.rate.i = act.rate.i,
inf.prob.q = inf.prob.q,
act.rate.q = act.rate.q,
quar.rate = quar.rate,
hosp.rate = hosp.rate,
disch.rate = disch.rate,
prog.rate = prog.rate,
prog.dist.scale = prog.dist.scale,
prog.dist.shape = prog.dist.shape,
rec.rate = rec.rate,
rec.dist.scale = rec.dist.scale,
rec.dist.shape = rec.dist.shape,
fat.rate.base = fat.rate.base,
hosp.cap = hosp.cap,
fat.rate.overcap = fat.rate.overcap,
fat.tcoeff = fat.tcoeff,
vital = vital,
a.rate = a.rate,
a.prop.e = a.prop.e,
a.prop.i = a.prop.i,
a.prop.q = a.prop.q,
ds.rate = ds.rate,
de.rate = de.rate,
di.rate = di.rate,
dq.rate = dq.rate,
dh.rate = dh.rate,
dr.rate = dr.rate)
# Run and return both the raw simulation and its summary data frame.
sim <- icm.seiqhrf(param, init, control)
sim_df <- as.data.frame(sim, out=out)
return(list(sim=sim, df=sim_df))
}
# NOTE(review): installing inside a script is a side effect; prefer installing
# ggridges manually once.
install.packages("ggridges")
library(ggridges)
# create one column for both intervention parameters
sims_incidence_rates <- sims_incidence_rates %>% mutate(intervention_level_label = paste(act_rate,
"exp/day,", inf_prob * 100, "% inf risk/exp")) %>% arrange(max_prev,
time) %>% mutate_at(vars(intervention_level_label), funs(factor(.,
levels = unique(.), ordered = TRUE)))
# Ridgeline of prevalent cases per intervention level, ordered by the maximum
# prevalence reached (largest outbreak at the bottom of the ordering).
sims_incidence_rates %>% filter(time <= 365) %>% ggplot(aes(x = time,
y = intervention_level_label, height = i.num, fill = intervention_level_label)) +
geom_density_ridges(stat = "identity", show.legend = FALSE) +
labs(x = "Days since start of epidemic", y = "Prevalent (current number of active) cases",
title = "Modelling of COVID-19 transmission in 1,000 simulated people",
subtitle = paste("with varying levels of social mixing (exposures per day)",
"and risk of infection at each exposure,\n", "ordered by descending maximum number of prevalent cases per day")) +
theme_minimal() + theme(legend.position = "top", strip.text = element_text(size = 12)) +
scale_fill_brewer(type = "seq", palette = "Blues")
|
2d88320fe8e2ed57228a856af33f707e1089d7db
|
f2bb233babbf25ab5373f543e3416dc3d5f11b51
|
/examples/simulation_study/Simulate_Growth.R
|
67b7d91663c5146b76380624254b35b7215d41da
|
[
"MIT"
] |
permissive
|
quantifish/TagGrowth
|
0170febd1697cda65887b2921c119f5e4cc9dc42
|
cd57fd83902b976348f513262e533bf63603db1b
|
refs/heads/master
| 2021-01-17T14:47:25.920221
| 2018-06-30T20:51:38
| 2018-06-30T20:51:38
| 23,371,721
| 0
| 0
| null | 2015-05-12T23:22:34
| 2014-08-27T00:42:42
|
R
|
UTF-8
|
R
| false
| false
| 4,459
|
r
|
Simulate_Growth.R
|
#=================================================================================
# SIMULATION DESIGN
#=================================================================================
# Make sure R is clean
# NOTE(review): rm(list=ls()) in a script wipes the caller's workspace and is
# widely discouraged; run the script in a fresh session instead.
rm(list=ls())
#=================================================================================
# USER SPECIFICATIONS
# =================================================================================
# Directory to save simulations to (save as .RData files), these are also the
# scenarios
#scenarios <- c("v0/","v1/","v2/","v3/")
#xx <- expand.grid(simulator = c("none","k","z","kz"), estimator = c("none","k","z","kz"))
#scenarios <- paste0(xx[,1], "_", xx[,2], "/")
scenarios <- c("sim_none/","sim_k/","sim_z/","sim_kz/")
Ndesign <- 200 # The number of simulations we will do
set.seed(15) # A random number seed
power <- c(50, 100, 250, 500) # Power analysis
Nareas <- 1 # The number of areas in our simulation
#=================================================================================
# SET UP SIMULATION
#=================================================================================
# Import library and load data (for resampling)
# NOTE(review): prefer library() over require() for mandatory dependencies
# (require() returns FALSE instead of erroring when the package is missing).
require(TagGrowth)
source("Growth_Model.R")
data(toothfish)
# Von Bertalanffy growth parameters; the annual and weekly blocks below are
# only scratch values -- both are overwritten by the vectors further down.
# Annual
t0 = 0.021
k = 0.090
Linf = 180.20
age = 1:20
#plot(age, Linf*(1 - exp(-k*(age - t0))))
# Weekly
t0 = -0.256*52
k = 0.093/52
Linf = 169.07
age = 1:20*52
#plot(age, Linf*(1 - exp(-k*(age - t0))))
#=================================================================================
# SPECIFY PARAMETERS FOR SIMULATION
#=================================================================================
# These are the par values from the Dunn et al. 2008 paper
# (element 1 = females, element 2 = males for the vectors below)
t0 <- c(0.021, -0.256)
k <- c(0.090, 0.093)
Linf <- c(180.20, 169.07)
cv <- 0.102
psi <- 0
L0 <- Linf * (1 - exp(k/52 * t0*52))
b <- k/52
gamma <- (b * Linf) / (b^psi)
# sd_obs is actually a cv
# females, males
L0 <- c(L0[1], L0[2]) # L0 = Linf*(1 - exp(k*t0))
bmean <- c(b[1], b[2])
gamma <- c(gamma[1], gamma[2]) # gamma = (b * Linf) / (k^psi)
psi <- c(0.0, 0.0)
sd_y <- c(0.0, 0.0)
#=================================================================================
# RUN THE SIMULATION MODEL
#=================================================================================
# For every scenario: pick its variance parameters, create its output folder,
# and save Ndesign simulated datasets (sim1.RData ... simN.RData).
for (Iscenario in scenarios)
{
# Identify which simulator/estimator we are using
#xx <- unlist(strsplit(Iscenario, split = "_"))
#csim <- xx[1]
#cest <- gsub("/", "", xx[2])
# Parameters specific to each scenario
# NOTE(review): this if-chain (repeated in the power loop below) could be one
# lookup table keyed by scenario; left as-is to avoid behavior changes.
if (Iscenario == "sim_none/")
{
sd_b <- c(0.0, 0.0)
sd_z <- c(0.0, 0.0)
sd_obs <- c(0.102, 0.102)
}
if (Iscenario == "sim_k/")
{
sd_b <- c(0.1, 0.2)
sd_z <- c(0.0, 0.0)
sd_obs <- c(0.05, 0.05)
}
if (Iscenario == "sim_z/")
{
sd_b <- c(0.0, 0.0)
sd_z <- c(0.3, 0.3)
sd_obs <- c(0.05, 0.05)
}
if (Iscenario == "sim_kz/")
{
sd_b <- c(0.1, 0.2)
sd_z <- c(0.3, 0.3)
sd_obs <- c(0.05, 0.05)
}
# Folder structure
directory <- gsub("/", "", Iscenario)
dir.create(directory)
# Collect up the parameters
# Row names of Pars come from these variable names (L0, bmean, sd_b, ...).
Pars <- rbind(L0, bmean, sd_b, gamma, psi, sd_obs, sd_z, sd_y)
colnames(Pars) <- c("female", "male")
# Do the simulation
Nindiv <- 315
for (Isim in 1:Ndesign)
{
ATR_sim <- GrowthModel(obs_err = TRUE, tvi_err = FALSE, Pars = Pars, Nindiv = Nindiv, data = toothfish)
sim <- list(Sim = ATR_sim, Parameters = Pars)
save(sim, file = paste0(Iscenario, "sim", Isim, ".RData"))
}
}
# Power analysis: re-run the kz scenario at several sample sizes (Nindiv).
Iscenario <- "sim_kz/"
# NOTE(review): the dir.create call is commented out, so the per-power
# subdirectories (e.g. sim_kz/50/) never exist and the save() below will fail.
#lapply(file.path(Iscenario, power), dir.create)
for (Ipow in power)
{
if (Iscenario == "sim_kz/")
{
sd_b <- c(0.1, 0.2)
sd_z <- c(0.3, 0.3)
sd_obs <- c(0.05, 0.05)
}
# Collect up the parameters
Pars <- rbind(L0, bmean, sd_b, gamma, psi, sd_obs, sd_z, sd_y)
colnames(Pars) <- c("female", "male")
Nindiv <- Ipow
for (Isim in 1:Ndesign)
{
# NOTE(review): ATR_mod is never defined in this file (the earlier loop
# passed data = toothfish instead) -- presumably created by Growth_Model.R;
# verify before running.
ATR_sim <- GrowthModel(obs_err = TRUE, tvi_err = FALSE, Pars = Pars, Nindiv = Nindiv, ATR_mod = ATR_mod)
sim <- list(Sim = ATR_sim, Parameters = Pars)
save(sim, file = paste0(Iscenario, Ipow, "/sim", Isim, ".RData"))
#plot_growth(ATR_sim, Isim)
}
}
# END
|
b195b398b9e06a453c04b23d12e35582c3ed0139
|
8eafdded307265eecb95adcbea7711d42113a9bb
|
/src/06_score_data.R
|
ecb987989b5340f6b2b907f4b3bba7a914e01b15
|
[] |
no_license
|
jvenzor23/ReboundNet
|
5144c819027cc2c1d69399468648fbeac5e82efa
|
3d7dfa9debebd740bf9297ff1a6ad6d99718db69
|
refs/heads/master
| 2022-12-07T12:49:12.283960
| 2020-09-02T21:49:39
| 2020-09-02T21:49:39
| 286,569,078
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 41,522
|
r
|
06_score_data.R
|
# This code examines the basketball pbp data
# Clean workspace
# NOTE(review): rm(list=ls()) wipes the calling session and the hard-coded
# setwd() ties the script to one machine -- acceptable for a personal
# analysis script, but flagged for portability.
rm(list=ls())
# Setting Working Directory
setwd("~/Desktop/Deep_Learning/nba-movement-data/rebounding_data/output/")
# Calling Necessary Libraries
# (dplyr and ggplot2 are already attached by tidyverse; the explicit calls
# below are redundant but harmless)
library(tidyverse)
library(dplyr)
library(ggplot2)
library(lubridate)
library(reticulate)
library(ggimage)
library(magick)
# Reading in the Data -----------------------------------------------------
# Per-shot rebounding files: one "*_rebounding_data" CSV per game.
setwd("~/Desktop/Deep_Learning/nba-movement-data/rebounding_data/landing_spots_full_output2/")
files = dir()
files = files[grep("_rebounding_data", files)]
length(unique(files))
# Read every file once, then bind in a single pass. The original grew
# data_tot with rbind() inside the loop, which is O(n^2) in total rows.
# (-X drops the unnamed row-index column written by pandas/write.csv.)
data_tot = do.call(rbind, lapply(files, function(f) read.csv(f) %>% select(-X)))
if (is.null(data_tot)) data_tot = data.frame()  # no matching files found
# players: one "*_player" CSV per game; player rows repeat across games.
scripts = dir()
scripts = scripts[grep("_player", scripts)]
# Bind all files once, then de-duplicate once. The original both grew the
# frame with rbind() in a loop (O(n^2)) and re-ran distinct() on the whole
# accumulated frame every iteration; the end result is identical.
players_info = do.call(rbind, lapply(scripts, read.csv))
if (is.null(players_info)) players_info = data.frame()  # no matching files
players_info = players_info %>%
  distinct() %>%
  select(-gameId, -X, -jersey) %>%
  distinct() %>%
  group_by(firstname, lastname, playerid) %>%
  # a player can appear with several listed positions; keep the first seen
  summarize(position = position[1])
# results
setwd("~/Desktop/Deep_Learning/nba-movement-data/rebounding_data/data_sets/")
# Model predictions: one row per shot, one rebound-probability column per
# player slot (columns starting with "p" -- presumably p1..p10; TODO confirm
# against the model export).
preds = read.csv("landing_isotropic_preds3.csv")
preds_long = preds %>%
  pivot_longer(cols = starts_with("p"),
               names_to = "player",
               values_to = "rebound_prob") %>%
  group_by(GAME_ID, GAME_EVENT_ID) %>%
  # player_num = positional slot of each probability within a shot; matched
  # later against the slot numbering rebuilt from the tracking data
  mutate(player_num = row_number())
# Reading in Landing Data
setwd("~/Desktop/Deep_Learning/nba-movement-data/rebounding_data/landing_spots_full_output2/")
files = dir()
files = files[grep("_landing_data", files)]
length(unique(files))
# Read once per file and bind in a single pass instead of growing
# landing_data_tot with rbind() inside the loop (O(n^2) in total rows).
landing_data_tot = do.call(rbind, lapply(files, function(f) read.csv(f) %>% select(-X)))
if (is.null(landing_data_tot)) landing_data_tot = data.frame()  # no files found
# Joining in Rebound Probabilites -----------------------------------------
# Reconstruct the player slot numbering used by the model so each tracking
# row can be matched to its predicted rebound probability:
#   slots 1-5  = defenders, ordered by player_id (dense_rank)
#   slots 6-9  = non-shooting offensive players (rank + 5)
#   slot  10   = the shooter
shot_full_data_w_probs = data_tot %>%
  arrange(GAME_ID, GAME_EVENT_ID, IsOnOffense, IsShooter, player_id) %>%
  filter(!IsBall) %>%
  group_by(GAME_ID, GAME_EVENT_ID, IsOnOffense, IsShooter) %>%
  mutate(player_num = dense_rank(player_id)) %>%
  mutate(player_num = case_when(IsShooter ~ as.integer(10),
                                IsOnOffense ~ player_num + as.integer(5),
                                TRUE ~ as.integer(player_num))) %>%
  # natural join on the shared columns (GAME_ID, GAME_EVENT_ID, player_num)
  inner_join(preds_long) %>%
  select(GAME_ID, GAME_EVENT_ID, rebound_prob, player_id)
# Attach each player's rebound probability back onto the full tracking rows
data_tot2 = data_tot %>%
  inner_join(shot_full_data_w_probs)
# Team-level probability that the DEFENSE secures the rebound: the sum of the
# five defenders' individual rebound probabilities on each play.
# (The original built an intermediate def_players column and renamed it via a
# mutate(); here it is computed directly -- same output.)
defRebProb = data_tot2 %>%
  filter(!IsBall) %>%
  group_by(GAME_ID, GAME_EVENT_ID) %>%
  summarize(defRebProbability = sum(rebound_prob * !IsOnOffense)) %>%
  select(GAME_ID, GAME_EVENT_ID, defRebProbability)
# Attach the team-level defensive rebound probability to every player row
data_tot3 = data_tot2 %>%
  inner_join(defRebProb)
# Plays on which none of the ten tracked players is flagged as the rebounder
# (e.g. the ball went out of bounds): identify and drop them.
no_rebound_plays = data_tot3 %>%
  group_by(GAME_ID, GAME_EVENT_ID) %>%
  summarize(has_rebounder = any(IsRebounder)) %>%
  filter(!has_rebounder) %>%
  select(GAME_ID, GAME_EVENT_ID)
# Keep only plays with an identified rebounder
data_tot3 = data_tot3 %>%
  anti_join(no_rebound_plays)
# Calibration sanity check: distribution of the single highest per-player
# probability on each play ("how decisive are the model's predictions?")
aggressiveness_check = data_tot3 %>%
  group_by(GAME_ID,GAME_EVENT_ID) %>%
  summarize(max_prob = max(rebound_prob)) %>%
  ungroup() %>%
  summarize(avg_max_prob = mean(max_prob),
            perc_over_95 = mean(max_prob >= .95),
            perc_over_90 = mean(max_prob >= .90),
            perc_over_50 = mean(max_prob >= .50),
            perc_under_30 = mean(max_prob <= .30))
# Aggregate Totals --------------------------------------------------------
# Per-player rebounding over expectation:
#   era  = actual rebound (0/1) minus modeled probability for that play
#   eREB = total expected rebounds; netReb = rebounds above expectation
summary_stats = data_tot3 %>%
  mutate(era = as.numeric(IsRebounder) - rebound_prob) %>%
  group_by(player_id) %>%
  summarize(n_games = length(unique(GAME_ID)),
            Plays = n(),
            REB = sum(IsRebounder, na.rm = TRUE),
            REB_perc = REB/Plays,
            eREB = sum(rebound_prob, na.rm = TRUE),
            netReb = sum(era, na.rm = TRUE),
            REB_added_per_play = mean(era, na.rm = TRUE),
            eREBperPlay = sum(rebound_prob, na.rm = TRUE)/Plays) %>%
  # qualification threshold + restrict to forwards/centers
  filter(Plays > 200) %>%
  arrange(desc(REB_added_per_play)) %>%
  inner_join(players_info, by = c("player_id" = "playerid")) %>%
  select(position, firstname, lastname, player_id, everything()) %>%
  filter(grepl('F', position)|grepl('C', position))
# Quick look: opportunity quality vs. rebounds added (the position filter is
# repeated here, so it is a no-op on summary_stats)
summary_stats %>%
  filter(grepl('F', position)|grepl('C', position)) %>%
  ggplot() +
  geom_point(aes(x = eREBperPlay, y = REB_added_per_play))
# Offense-only version of the aggregate table (OFF_ prefixed columns).
# NOTE(review): this and the defensive table below are near-copies of
# summary_stats; a shared helper would reduce duplication, but the column
# prefixes and slightly different filters make the chains non-identical.
summary_stats_off = data_tot3 %>%
  filter(IsOnOffense) %>%
  mutate(era = as.numeric(IsRebounder) - rebound_prob) %>%
  group_by(player_id) %>%
  summarize(OFF_Plays = n(),
            n_games = length(unique(GAME_ID)),
            OFF_REB = sum(IsRebounder, na.rm = TRUE),
            OFF_REB_perc = OFF_REB/OFF_Plays,
            OFF_eREB = sum(rebound_prob, na.rm = TRUE),
            OFF_netReb = sum(era, na.rm = TRUE),
            OFF_REB_added_per_play = mean(era, na.rm = TRUE),
            OFF_eREBperPlay = sum(rebound_prob, na.rm = TRUE)/n()) %>%
  filter(OFF_Plays > 200) %>%
  arrange(desc(OFF_eREBperPlay)) %>%
  inner_join(players_info, by = c("player_id" = "playerid")) %>%
  select(position, firstname, lastname, player_id, everything())
# Defense-only version (DEF_ prefixed; note: no n_games column here)
summary_stats_def = data_tot3 %>%
  filter(!IsOnOffense) %>%
  mutate(era = as.numeric(IsRebounder) - rebound_prob) %>%
  group_by(player_id) %>%
  summarize(DEF_Plays = n(),
            DEF_REB = sum(IsRebounder, na.rm = TRUE),
            DEF_REB_perc = DEF_REB/DEF_Plays,
            DEF_eREB = sum(rebound_prob, na.rm = TRUE),
            DEF_netReb = sum(era, na.rm = TRUE),
            DEF_REB_added_per_play = mean(era, na.rm = TRUE),
            DEF_eREBperPlay = sum(rebound_prob, na.rm = TRUE)/n()) %>%
  filter(DEF_Plays > 200) %>%
  arrange(desc(DEF_eREBperPlay)) %>%
  inner_join(players_info, by = c("player_id" = "playerid")) %>%
  select(position, firstname, lastname, player_id, everything())
# Players qualifying on both ends (natural join on the player columns)
summary_stats_both = summary_stats_off %>%
  inner_join(summary_stats_def)
# Offensive vs. defensive rebounds-added for frontcourt players
summary_stats_both %>%
  filter(OFF_Plays > 500, DEF_Plays > 500) %>%
  filter(grepl('F', position)|grepl('C', position)) %>%
  ggplot() +
  geom_point(aes(x = OFF_REB_added_per_play, y = DEF_REB_added_per_play))
# Offensive vs. defensive opportunity quality
summary_stats_both %>%
  filter(OFF_Plays > 500, DEF_Plays > 500) %>%
  filter(grepl('F', position)|grepl('C', position)) %>%
  ggplot() +
  geom_point(aes(x = OFF_eREBperPlay, y = DEF_eREBperPlay))
# Russel Westbrook Effect -------------------------------------------------
# Defensive plays bucketed by the team's defensive rebound probability
# (floored to 0.1-wide bins): does performance vary with how "easy" the
# team rebound is? NOTE(review): data_tot3 already carries defRebProbability,
# so the extra inner_join(defRebProb) appears redundant (it joins on the
# shared columns, including defRebProbability itself) -- verify.
summary_stats_westbrook = data_tot3 %>%
  filter(!IsOnOffense) %>%
  inner_join(defRebProb) %>%
  mutate(era = as.numeric(IsRebounder) - rebound_prob) %>%
  mutate(def_reb_team_prob = floor(defRebProbability*10)/10) %>%
  group_by(player_id, def_reb_team_prob) %>%
  summarize(Plays = n(),
            REB = sum(IsRebounder, na.rm = TRUE),
            REB_perc = REB/Plays,
            eREB = sum(rebound_prob),
            netReb = sum(era),
            REB_added_per_play = mean(era),
            eREBperPlay = sum(rebound_prob)/Plays) %>%
  inner_join(players_info, by = c("player_id" = "playerid")) %>%
  arrange(desc(def_reb_team_prob)) %>%
  select(position, firstname, lastname, player_id, everything()) %>%
  filter(firstname == "Russell")
# Rebound % Stats ---------------------------------------------------------
# For every play, rank the players by modeled rebound probability
# (rank 1 = most likely rebounder). The three tables below shared a
# triplicated 20-line pipeline in the original; it is factored into one
# ranked table plus two helpers. Ranks are computed on the full play BEFORE
# any offense/defense split, exactly as before, so "TopRebound" always means
# top among everyone on the floor.
ranked_plays = data_tot3 %>%
  filter(player_id != -1) %>%
  group_by(GAME_ID, GAME_EVENT_ID) %>%
  mutate(rebound_prob_rank = min_rank(desc(rebound_prob))) %>%
  ungroup()
# Per-player conversion rates when the model ranked them 1st / 2nd / 3rd
summarize_rank_stats = function(df) {
  df %>%
    group_by(player_id) %>%
    summarize(n_games = length(unique(GAME_ID)),
              Plays = n(),
              TopReboundPlays = sum(rebound_prob_rank == 1, na.rm = TRUE),
              TopReboundPlaysPerc = TopReboundPlays/Plays,
              TopReboundPerc = sum(as.numeric(IsRebounder)*as.numeric(rebound_prob_rank == 1), na.rm = TRUE)/sum(rebound_prob_rank == 1, na.rm = TRUE),
              SecondReboundPlays = sum(rebound_prob_rank == 2, na.rm = TRUE),
              SecondReboundPlaysPerc = SecondReboundPlays/Plays,
              SecondReboundPerc = sum(as.numeric(IsRebounder)*as.numeric(rebound_prob_rank == 2), na.rm = TRUE)/sum(rebound_prob_rank == 2, na.rm = TRUE),
              ThirdReboundPlays = sum(rebound_prob_rank == 3, na.rm = TRUE),
              ThirdReboundPlaysPerc = ThirdReboundPlays/Plays,
              ThirdReboundPerc = sum(as.numeric(IsRebounder)*as.numeric(rebound_prob_rank == 3), na.rm = TRUE)/sum(rebound_prob_rank == 3, na.rm = TRUE))
}
# Apply a qualification filter, join names/positions, keep forwards/centers
finalize_rank_stats = function(df, min_filter) {
  df %>%
    filter({{ min_filter }}) %>%
    arrange(desc(TopReboundPerc)) %>%
    inner_join(players_info, by = c("player_id" = "playerid")) %>%
    select(position, firstname, lastname, player_id, everything()) %>%
    filter(grepl('F', position)|grepl('C', position))
}
# All plays
summary_stats2 = ranked_plays %>%
  summarize_rank_stats() %>%
  finalize_rank_stats(Plays > 500)
# Offensive plays only (ranking unchanged -- computed before the split)
summary_stats2_off = ranked_plays %>%
  filter(IsOnOffense) %>%
  summarize_rank_stats() %>%
  finalize_rank_stats(TopReboundPlays > 30)
# Defensive plays only
summary_stats2_def = ranked_plays %>%
  filter(!IsOnOffense) %>%
  summarize_rank_stats() %>%
  finalize_rank_stats(Plays > 200)
# Summary Stats BEST ------------------------------------------------------
# REBOUNDER: Credited with 1 - (Team Rebound Prob)
# Non-Rebounders on Rebounder's Team: No Change
# Non-Rebounders on Rebounder's Opponent: Debited with Team Rebound Prob * Team Rebound % Share
# Zero Sum Crediting and Debting, leading to a TeamRebound_added_per_play metric
summary_stats3 = data_tot3 %>%
  filter(player_id != -1) %>%
  # teamRebProb: probability the player's OWN team gets the rebound;
  # teamReboundFlag: whether the player's own team actually got it
  mutate(teamRebProb = if_else(IsOnOffense, 1 - defRebProbability,
                               defRebProbability),
         teamRebShare = rebound_prob/teamRebProb,
         teamReboundFlag = if_else(IsOnOffense, !IsDefensiveRebound,
                                   IsDefensiveRebound)) %>%
  filter(!is.na(teamRebProb)) %>%
  group_by(GAME_ID, GAME_EVENT_ID, IsOnOffense) %>%
  mutate(playerTeamReboundProb = sum(rebound_prob)) %>%
  group_by(GAME_ID, GAME_EVENT_ID) %>%
  # NoPlayerRebound: rebound credited to no tracked player (team rebound);
  # possible here because player_id == -1 rows were filtered out above
  mutate(NoPlayerRebound = sum(IsRebounder) == 0) %>%
  ungroup() %>%
  # etra: expected team rebounds added, per the crediting scheme above; on
  # team rebounds the credit/debit is spread by each player's prob share
  mutate(etra = case_when(NoPlayerRebound & teamReboundFlag ~ (rebound_prob/playerTeamReboundProb)*(1-teamRebProb),
                          NoPlayerRebound & !teamReboundFlag ~ -(rebound_prob/playerTeamReboundProb)*teamRebProb,
                          IsRebounder ~ 1 - teamRebProb,
                          teamReboundFlag ~ 0,
                          TRUE ~ -teamRebProb*(rebound_prob/playerTeamReboundProb))) %>%
  mutate(era = as.numeric(IsRebounder) - rebound_prob) %>%
  group_by(player_id) %>%
  summarize(n_games = length(unique(GAME_ID)),
            Plays = n(),
            REB = sum(IsRebounder, na.rm = TRUE),
            REB_perc = REB/Plays,
            RebPer100 = REB_perc*100,
            eREB = sum(rebound_prob, na.rm = TRUE),
            netReb = sum(era, na.rm = TRUE),
            REB_added_per_play = mean(era, na.rm = TRUE),
            netPlayerRebPer100 = 100*REB_added_per_play,
            eREBperPlay = sum(rebound_prob, na.rm = TRUE)/Plays,
            netTeamReb = sum(etra, na.rm = TRUE),
            TEAM_REB_added_per_play = mean(etra, na.rm = TRUE),
            netTeamRebPer100 = TEAM_REB_added_per_play*100,
            # average positive credit per credited play
            TeamRebAddedPerReb = sum(if_else(etra > 0, etra, 0))/
              sum(if_else(etra > 0, 1, 0))) %>%
  arrange(desc(netTeamReb)) %>%
  left_join(players_info, by = c("player_id" = "playerid")) %>%
  select(position, firstname, lastname, player_id, everything())
# Example Play to Explain Metric
# Same etra construction as summary_stats3, but kept at play level so a
# single illustrative play can be pulled out (Kevin Love plays where the
# third-worst etra on the play is below -0.1).
summary_stats3ex = data_tot3 %>%
  filter(player_id != -1) %>%
  mutate(teamRebProb = if_else(IsOnOffense, 1 - defRebProbability,
                               defRebProbability),
         teamRebShare = rebound_prob/teamRebProb,
         teamReboundFlag = if_else(IsOnOffense, !IsDefensiveRebound,
                                   IsDefensiveRebound)) %>%
  group_by(GAME_ID, GAME_EVENT_ID, IsOnOffense) %>%
  mutate(playerTeamReboundProb = sum(rebound_prob)) %>%
  group_by(GAME_ID, GAME_EVENT_ID) %>%
  mutate(NoPlayerRebound = sum(IsRebounder) == 0) %>%
  ungroup() %>%
  mutate(etra = case_when(NoPlayerRebound & teamReboundFlag ~ (rebound_prob/playerTeamReboundProb)*(1-teamRebProb),
                          NoPlayerRebound & !teamReboundFlag ~ -(rebound_prob/playerTeamReboundProb)*teamRebProb,
                          IsRebounder ~ 1 - teamRebProb,
                          teamReboundFlag ~ 0,
                          TRUE ~ -teamRebProb*(rebound_prob/playerTeamReboundProb))) %>%
  mutate(era = as.numeric(IsRebounder) - rebound_prob) %>%
  left_join(players_info, by = c("player_id" = "playerid")) %>%
  ungroup() %>%
  group_by(GAME_ID, GAME_EVENT_ID) %>%
  mutate(etra_rank = row_number(etra)) %>%
  # rep(..., 10) broadcasts the play-level values to all 10 player rows;
  # (5.25, 25) is the basket location in tracking coordinates -- TODO confirm
  mutate(third_worst_etra = rep(etra[etra_rank == 3], 10),
         shot_distance = rep(sqrt((x_loc[IsShooter] - 5.25)^2 + (y_loc[IsShooter] - 25)^2), 10)) %>%
  filter(third_worst_etra < -.1) %>%
  filter(firstname == "Kevin", lastname == "Love") %>%
  arrange(desc(etra))
# GAME_ID == 21500424
# GAME_EVENT_ID == 11
# Player headshot lookup (name -> ESPN player id) used for image scatterplots
setwd("~/Desktop/Deep_Learning/nba-movement-data/rebounding_data/images/")
playersimages = read.csv("playerspictures.csv") %>%
  mutate(name = as.character(name))
# Interactive sanity check of one archived headshot (network fetch);
# image_crop result is displayed, not stored
test = image_read("https://web.archive.org/web/20161208080529im_/http://a.espncdn.com/combiner/i?img=/i/headshots/nba/players/full/2774.png")
print(test)
image_crop(test, "600x350+60")
# Qualifying frontcourt players (> 800 opportunities) with headshot URLs.
# espn_player_id 6485 is excluded -- presumably a missing/broken image; verify.
summary_stats3_qualifying = summary_stats3 %>%
  filter(grepl('F', position)|grepl('C', position)) %>%
  filter(Plays > 800) %>%
  mutate(fullname = paste(firstname, lastname)) %>%
  inner_join(playersimages, by = c("fullname" = "name")) %>%
  mutate(url_current = paste0("https://a.espncdn.com/combiner/i?img=/i/headshots/nba/players/full/",
                              as.character(espn_player_id),
                              ".png")) %>%
  # 2016 wayback-machine snapshot so headshots match the 2015-16 season
  mutate(url_old = paste0("https://web.archive.org/web/20161208080529im_/http://a.espncdn.com/combiner/i?img=/i/headshots/nba/players/full/",
                          as.character(espn_player_id),
                          ".png")) %>%
  filter(espn_player_id != 6485) %>%
  arrange(netTeamRebPer100)
stats_plt = summary_stats3_qualifying %>%
  mutate(url_old = as.factor(url_old)) %>%
  mutate(url_old = fct_reorder(url_old, desc(netTeamRebPer100))) %>%
  mutate(eREBper100Plays = eREBperPlay*100)
# Headshot scatter: non-highlighted players faded under a translucent white
# polygon; the seven named players drawn on top at full opacity
ggplot() +
  geom_image(data = stats_plt %>%
               filter(!(fullname %in% c('Doug McDermott',
                                        'Harrison Barnes',
                                        'Jae Crowder',
                                        'Marvin Williams',
                                        'Jeff Green',
                                        'LeBron James',
                                        'Kawhi Leonard'))),
             aes(x = eREBper100Plays, y = netTeamRebPer100,
                 image=url_old), size=.1, alpha = 0.1) +
  geom_polygon(data = data.frame(x = c(5, 5, 22, 22, 5),
                                 y = c(-2, 2.5, 2.5, -2, -2)),
               aes(x = x, y = y), fill = "white", alpha = 0.7) +
  geom_hline(aes(yintercept = 0), color = "black") +
  geom_vline(aes(xintercept = 12.5), color = "black") +
  geom_image(data = stats_plt %>%
               filter(fullname %in% c('Doug McDermott',
                                      'Harrison Barnes',
                                      'Jae Crowder',
                                      'Marvin Williams',
                                      'Jeff Green',
                                      'LeBron James',
                                      'Kawhi Leonard')),
             aes(x = eREBper100Plays, y = netTeamRebPer100,
                 image=url_old), size=.1) +
  theme(panel.background = element_rect(fill = 'white', colour = 'black'),
        panel.grid.major = element_line(colour = 'grey', linetype = "dashed")) +
  labs(x = "Expected Rebounds Per 100 Opportunities",
       y = "Team Rebounds Added Per 100 Opportunities",
       title = "Team Rebounds Added vs. Quality of Opportunity",
       subtitle = "Data From First 40 Games of 2015-2016 Season") +
  xlim(5, 22) +
  ylim(-2,2.5)
# Saves the plot just displayed above (ggsave defaults to last_plot())
ggsave("~/Desktop/Deep_Learning/nba-movement-data/rebounding_data/images/PlayersPlotCstEnv.png",
       height = 7, width = 8)
# Frontcourt players just under/around the qualification cut (200-900 plays)
summary_stats3_justmissedqualifying = summary_stats3 %>%
  filter(grepl('F', position)|grepl('C', position)) %>%
  filter(Plays < 900,
         Plays > 200) %>%
  mutate(fullname = paste(firstname, lastname)) %>%
  inner_join(playersimages, by = c("fullname" = "name")) %>%
  mutate(url_current = paste0("https://a.espncdn.com/combiner/i?img=/i/headshots/nba/players/full/",
                              as.character(espn_player_id),
                              ".png")) %>%
  mutate(url_old = paste0("https://web.archive.org/web/20161208080529im_/http://a.espncdn.com/combiner/i?img=/i/headshots/nba/players/full/",
                          as.character(espn_player_id),
                          ".png")) %>%
  filter(espn_player_id != 6485) %>%
  arrange(netTeamRebPer100)
# Headshot scatter for the near-qualifiers (not saved to disk)
summary_stats3_justmissedqualifying %>%
  mutate(url_old = as.factor(url_old)) %>%
  mutate(url_old = fct_reorder(url_old, desc(netTeamRebPer100))) %>%
  mutate(eREBper100Plays = eREBperPlay*100) %>%
  ggplot() +
  geom_point(aes(x = eREBper100Plays, y = netTeamRebPer100)) +
  geom_image(aes(x = eREBper100Plays, y = netTeamRebPer100,
                 image=url_old), size=.1) +
  geom_hline(aes(yintercept = 0), color = "black") +
  geom_vline(aes(xintercept = 12.5), color = "black") +
  theme(panel.background = element_rect(fill = 'white', colour = 'black'),
        panel.grid.major = element_line(colour = 'grey', linetype = "dashed")) +
  labs(x = "Expected Rebounds Per 100 Opportunities",
       y = "Team Rebounds Added Per 100 Opportunities",
       title = "Team Rebounds Added vs. Quality of Opportunity",
       subtitle = "Data From First 40 Games of 2015-2016 Season")
# Qualifying GUARDS (neither F nor C in position) with headshot URLs
summary_stats3_qualifyingGuards = summary_stats3 %>%
  filter(!grepl('F', position),
         !grepl('C', position)) %>%
  filter(Plays > 800) %>%
  mutate(fullname = paste(firstname, lastname)) %>%
  inner_join(playersimages, by = c("fullname" = "name")) %>%
  mutate(url_current = paste0("https://a.espncdn.com/combiner/i?img=/i/headshots/nba/players/full/",
                              as.character(espn_player_id),
                              ".png")) %>%
  mutate(url_old = paste0("https://web.archive.org/web/20161208080529im_/http://a.espncdn.com/combiner/i?img=/i/headshots/nba/players/full/",
                          as.character(espn_player_id),
                          ".png")) %>%
  filter(espn_player_id != 6485)
# Headshot scatter for guards
summary_stats3_qualifyingGuards %>%
  mutate(url_old = as.factor(url_old)) %>%
  mutate(url_old = fct_reorder(url_old, desc(netTeamRebPer100))) %>%
  mutate(eREBper100Plays = eREBperPlay*100) %>%
  ggplot() +
  geom_hline(aes(yintercept = 0), color = "black") +
  geom_vline(aes(xintercept = 6.5), color = "black") +
  geom_image(aes(x = eREBper100Plays, y = netTeamRebPer100,
                 image=url_old), size=.1) +
  theme(panel.background = element_rect(fill = 'white', colour = 'black'),
        panel.grid.major = element_line(colour = 'grey', linetype = "dashed")) +
  labs(x = "Expected Rebounds Per 100 Opportunities",
       y = "Team Rebounds Added Per 100 Opportunities",
       title = "Rebounds Added vs. Quality of Opportunity",
       subtitle = "Data From First 40 Games of 2015-2016 Season")
# BUG FIX: ggsave() was previously called BEFORE the guards plot was drawn;
# since ggsave() defaults to last_plot(), GuardsPlot.png was saved from the
# previously displayed plot. Moved after the plot so it saves the right one.
ggsave("~/Desktop/Deep_Learning/nba-movement-data/rebounding_data/images/GuardsPlot.png",
       height = 7, width = 8)
# Relationship between team-level and player-level rebounds added
summary_stats3_qualifying %>%
  ggplot() +
  geom_point(aes(x = netTeamRebPer100, y = netPlayerRebPer100)) +
  geom_smooth(aes(x = netTeamRebPer100, y = netPlayerRebPer100), method = "lm",
              se = FALSE)
# Players who grab rebounds their team would likely have secured anyway
ReboundStealers = summary_stats3_qualifying %>% filter(TEAM_REB_added_per_play < 0, REB_added_per_play > 0)
# Players who help the team rebound without personally collecting the board
UnsungHeroes = summary_stats3_qualifying %>% filter(TEAM_REB_added_per_play > 0, REB_added_per_play < 0)
# Offense vs. defense split of the team-rebounds-added metric, pivoted wide
# (column suffix _TRUE = offense, _FALSE = defense, from IsOnOffense)
summary_stats3disc = data_tot3 %>%
  filter(player_id != -1) %>%
  mutate(teamRebProb = if_else(IsOnOffense, 1 - defRebProbability,
                               defRebProbability),
         teamRebShare = rebound_prob/teamRebProb,
         teamReboundFlag = if_else(IsOnOffense, !IsDefensiveRebound,
                                   IsDefensiveRebound)) %>%
  filter(!is.na(teamRebProb)) %>%
  group_by(GAME_ID, GAME_EVENT_ID, IsOnOffense) %>%
  mutate(playerTeamReboundProb = sum(rebound_prob)) %>%
  group_by(GAME_ID, GAME_EVENT_ID) %>%
  mutate(NoPlayerRebound = sum(IsRebounder) == 0) %>%
  ungroup() %>%
  # same zero-sum crediting scheme as summary_stats3
  mutate(etra = case_when(NoPlayerRebound & teamReboundFlag ~ (rebound_prob/playerTeamReboundProb)*(1-teamRebProb),
                          NoPlayerRebound & !teamReboundFlag ~ -(rebound_prob/playerTeamReboundProb)*teamRebProb,
                          IsRebounder ~ 1 - teamRebProb,
                          teamReboundFlag ~ 0,
                          TRUE ~ -teamRebProb*(rebound_prob/playerTeamReboundProb))) %>%
  mutate(era = as.numeric(IsRebounder) - rebound_prob) %>%
  group_by(IsOnOffense, player_id) %>%
  summarize(n_games = length(unique(GAME_ID)),
            Plays = n(),
            REB = sum(IsRebounder, na.rm = TRUE),
            REB_perc = REB/Plays,
            RebPer100 = REB_perc*100,
            eREB = sum(rebound_prob, na.rm = TRUE),
            netReb = sum(era, na.rm = TRUE),
            REB_added_per_play = mean(era, na.rm = TRUE),
            netPlayerRebPer100 = 100*REB_added_per_play,
            eREBperPlay = sum(rebound_prob, na.rm = TRUE)/Plays,
            netTeamReb = sum(etra, na.rm = TRUE),
            TEAM_REB_added_per_play = mean(etra, na.rm = TRUE),
            netTeamRebPer100 = TEAM_REB_added_per_play*100,
            TeamRebAddedPerReb = sum(if_else(etra > 0, etra, 0))/
              sum(if_else(etra > 0, 1, 0))) %>%
  arrange(IsOnOffense,desc(netTeamReb)) %>%
  left_join(players_info, by = c("player_id" = "playerid")) %>%
  select(position, firstname, lastname, player_id, everything()) %>%
  filter(Plays > 400) %>%
  filter(grepl('F', position)|grepl('C', position)) %>%
  select(-Plays, -n_games) %>%
  pivot_wider(names_from = IsOnOffense,
              values_from = contains("REB")) %>%
  filter(!is.na(netTeamRebPer100_TRUE),
         !is.na(netTeamRebPer100_FALSE)) %>%
  select(position, firstname, lastname, netTeamRebPer100_TRUE, netTeamRebPer100_FALSE)
# Attach headshot URLs for plotting
summary_stats3discplt = summary_stats3disc %>%
  mutate(fullname = paste(firstname, lastname)) %>%
  inner_join(playersimages, by = c("fullname" = "name")) %>%
  mutate(url_current = paste0("https://a.espncdn.com/combiner/i?img=/i/headshots/nba/players/full/",
                              as.character(espn_player_id),
                              ".png")) %>%
  mutate(url_old = paste0("https://web.archive.org/web/20161208080529im_/http://a.espncdn.com/combiner/i?img=/i/headshots/nba/players/full/",
                          as.character(espn_player_id),
                          ".png")) %>%
  filter(espn_player_id != 6485) %>%
  arrange(netTeamRebPer100_TRUE)
# Offensive vs. defensive net team rebounds, headshot scatter
summary_stats3discplt %>%
  ggplot() +
  geom_point(aes(x = netTeamRebPer100_FALSE, y = netTeamRebPer100_TRUE)) +
  labs(x = "Defensive Net REB Per 100 Possessions",
       y = "Offensive Net REB Per 100 Possessions") +
  geom_image(aes(x = netTeamRebPer100_FALSE, y = netTeamRebPer100_TRUE,
                 image=url_old), size=.07) +
  geom_hline(aes(yintercept = 0), color = "black") +
  geom_vline(aes(xintercept = 0), color = "black") +
  theme(panel.background = element_rect(fill = 'white', colour = 'black'),
        panel.grid.major = element_line(colour = 'grey', linetype = "dashed"))
# Getting Team Level Stats ------------------------------------------------
setwd("~/Desktop/Deep_Learning/nba-movement-data/data/events/")
scripts = dir()
# NOTE(review): grep(".csv") treats "." as a regex wildcard; a stricter
# pattern would be "\\.csv$", but the original matching is preserved here.
scripts = scripts[grep(".csv", scripts)]
# Read each per-game event log once, drop empty files, and bind in a single
# pass (the original grew events_data with rbind() inside the loop -> O(n^2)).
event_chunks = lapply(scripts, function(s) {
  read.csv(s) %>%
    select(GAME_ID, starts_with("PLAYER1"))
})
event_chunks = event_chunks[vapply(event_chunks, nrow, integer(1)) > 0]
events_data = do.call(rbind, event_chunks)
if (is.null(events_data)) events_data = data.frame()  # no non-empty files
# Lookup table: team_id -> full team name and abbreviation, one row per team
team_mapping = events_data %>%
  filter(!is.na(PLAYER1_TEAM_ID)) %>%
  transmute(team_id = PLAYER1_TEAM_ID,
            TeamName = paste(PLAYER1_TEAM_CITY, PLAYER1_TEAM_NICKNAME),
            TeamAbbr = PLAYER1_TEAM_ABBREVIATION) %>%
  distinct()
# Get Team Winning Percentages --------------------------------------------
# Re-read the event logs (different columns than the team-mapping pass above)
setwd("~/Desktop/Deep_Learning/nba-movement-data/data/events/")
scripts = dir()
scripts = scripts[grep(".csv", scripts)]
# Bind once instead of growing events_data with rbind() inside the loop
# (O(n^2) in total rows); empty files are dropped before binding.
score_chunks = lapply(scripts, function(s) {
  read.csv(s) %>%
    select(GAME_ID, SCOREMARGIN, HOMEDESCRIPTION,
           VISITORDESCRIPTION, PLAYER1_TEAM_ID)
})
score_chunks = score_chunks[vapply(score_chunks, nrow, integer(1)) > 0]
events_data = do.call(rbind, score_chunks)
if (is.null(events_data)) events_data = data.frame()  # no non-empty files
library(modeest)
# Infer each game's home team: among events described only on the home side
# (excluding jump balls), take the most frequent team id.
# mlv(method='mfv') = most frequent value (sample mode), from modeest.
hometeam = events_data %>%
  group_by(GAME_ID) %>%
  filter(!grepl("Jump Ball", HOMEDESCRIPTION),
         !grepl("Jump Ball", VISITORDESCRIPTION),
         VISITORDESCRIPTION == "",
         HOMEDESCRIPTION != "",
         !is.na(PLAYER1_TEAM_ID)) %>%
  summarize(homeTeam = mlv(PLAYER1_TEAM_ID, method='mfv'))
# Same inference mirrored for the away team
awayteam = events_data %>%
  group_by(GAME_ID) %>%
  filter(!grepl("Jump Ball", HOMEDESCRIPTION),
         !grepl("Jump Ball", VISITORDESCRIPTION),
         HOMEDESCRIPTION == "",
         VISITORDESCRIPTION != "",
         !is.na(PLAYER1_TEAM_ID)) %>%
  summarize(awayTeam = mlv(PLAYER1_TEAM_ID, method='mfv'))
# Winner from the final recorded score margin (positive margin = home win).
# NOTE(review): SCOREMARGIN values like "TIE" would coerce to NA here and
# yield an NA winner -- TODO confirm the last margin row is always numeric.
winningTeam = events_data %>%
  filter(!is.na(SCOREMARGIN)) %>%
  group_by(GAME_ID) %>%
  filter(row_number() == n()) %>%
  ungroup() %>%
  rowwise() %>%
  mutate(winningTeam = if_else(as.integer(as.character(SCOREMARGIN)) > 0, "home", "away")) %>%
  select(GAME_ID, winningTeam)
# Per-team winning percentage over the games in this sample
teamsInGame = hometeam %>%
  inner_join(awayteam) %>%
  inner_join(winningTeam) %>%
  filter(homeTeam != awayTeam) %>%
  mutate(winningTeam = if_else(winningTeam == "away", awayTeam, homeTeam)) %>%
  pivot_longer(cols = c("homeTeam", "awayTeam"),
               names_to = "home_away",
               values_to = "team_id") %>%
  mutate(result = winningTeam == team_id) %>%
  ungroup() %>%
  group_by(team_id) %>%
  summarize(win_perc = mean(result)) %>%
  arrange(desc(win_perc))
# Team-level net rebounding: one row per (team, play) via distinct(), then
# expected vs. actual team rebounds aggregated per team
summary_stats4 = data_tot3 %>%
  filter(player_id != -1) %>%
  mutate(teamRebProb = if_else(IsOnOffense, 1 - defRebProbability,
                               defRebProbability),
         teamReboundFlag = if_else(IsOnOffense, !IsDefensiveRebound,
                                   IsDefensiveRebound)) %>%
  inner_join(team_mapping) %>%
  # collapse the 5 player rows per team-play into a single team-play row
  select(team_id, TeamName, TeamAbbr,
         GAME_ID, GAME_EVENT_ID, teamRebProb, teamReboundFlag) %>%
  distinct() %>%
  filter(!is.na(teamRebProb)) %>%
  group_by(team_id, TeamName, TeamAbbr) %>%
  summarize(Plays = n(),
            eREB = sum(teamRebProb, na.rm = TRUE),
            eREBperPlay = eREB/Plays,
            REB = sum(teamReboundFlag, na.rm = TRUE),
            netREB = REB - eREB,
            netREBperPlay = netREB/Plays,
            netREBper100 = 100*netREBperPlay) %>%
  arrange(desc(netREBperPlay))
# Team-level net rebounding split by offense/defense (pivoted wide: suffix
# _TRUE = offense, _FALSE = defense), plus a hard-coded logo URL per team
summary_stats4disc = data_tot3 %>%
  filter(player_id != -1) %>%
  mutate(teamRebProb = if_else(IsOnOffense, 1 - defRebProbability,
                               defRebProbability),
         teamReboundFlag = if_else(IsOnOffense, !IsDefensiveRebound,
                                   IsDefensiveRebound)) %>%
  inner_join(team_mapping) %>%
  select(team_id, TeamName, TeamAbbr,
         GAME_ID, GAME_EVENT_ID, teamRebProb, teamReboundFlag, IsOnOffense) %>%
  distinct() %>%
  filter(!is.na(teamRebProb)) %>%
  group_by(IsOnOffense, team_id, TeamName, TeamAbbr) %>%
  summarize(Plays = n(),
            eREB = sum(teamRebProb, na.rm = TRUE),
            eREBperPlay = eREB/Plays,
            REB = sum(teamReboundFlag, na.rm = TRUE),
            netREB = REB - eREB,
            netREBperPlay = netREB/Plays,
            netREBper100 = 100*netREBperPlay) %>%
  arrange(IsOnOffense, desc(netREBperPlay)) %>%
  select(-Plays) %>%
  pivot_wider(names_from = IsOnOffense,
              values_from = contains("REB")) %>%
  # static mapping from abbreviation to sportslogos.net image URL
  mutate(team_logo = case_when(TeamAbbr == "ATL" ~ "https://content.sportslogos.net/logos/6/220/full/9168_atlanta_hawks-primary-2016.png",
                               TeamAbbr == "BKN" ~ "https://content.sportslogos.net/logos/6/3786/full/345_brooklyn-nets-secondary-2013.png",
                               TeamAbbr == "BOS" ~ "https://content.sportslogos.net/logos/6/213/full/slhg02hbef3j1ov4lsnwyol5o.png",
                               TeamAbbr == "CHA" ~ "https://content.sportslogos.net/logos/6/5120/full/1926_charlotte__hornets_-primary-2015.png",
                               TeamAbbr == "CHI" ~ "https://content.sportslogos.net/logos/6/221/full/hj3gmh82w9hffmeh3fjm5h874.png",
                               TeamAbbr == "CLE" ~ "https://content.sportslogos.net/logos/6/222/full/e4701g88mmn7ehz2baynbs6e0.png",
                               TeamAbbr == "DAL" ~ "https://content.sportslogos.net/logos/6/228/full/ifk08eam05rwxr3yhol3whdcm.png",
                               TeamAbbr == "DEN" ~ "https://content.sportslogos.net/logos/6/229/full/8926_denver_nuggets-primary-2019.png",
                               TeamAbbr == "DET" ~ "https://content.sportslogos.net/logos/6/223/full/2164_detroit_pistons-primary-2018.png",
                               TeamAbbr == "GSW" ~ "https://content.sportslogos.net/logos/6/235/full/3152_golden_state_warriors-primary-2020.png",
                               TeamAbbr == "HOU" ~ "https://content.sportslogos.net/logos/6/230/full/6830_houston_rockets-primary-2020.png",
                               TeamAbbr == "IND" ~ "https://content.sportslogos.net/logos/6/224/full/4812_indiana_pacers-primary-2018.png",
                               TeamAbbr == "LAC" ~ "https://content.sportslogos.net/logos/6/236/full/5462_los_angeles_clippers-primary-2016.png",
                               TeamAbbr == "LAL" ~ "https://content.sportslogos.net/logos/6/237/full/uig7aiht8jnpl1szbi57zzlsh.png",
                               TeamAbbr == "MEM" ~ "https://content.sportslogos.net/logos/6/231/full/4373_memphis_grizzlies-primary-2019.png",
                               TeamAbbr == "MIA" ~ "https://content.sportslogos.net/logos/6/214/full/burm5gh2wvjti3xhei5h16k8e.gif",
                               TeamAbbr == "MIL" ~ "https://content.sportslogos.net/logos/6/225/full/8275_milwaukee_bucks-primary-2016.png",
                               TeamAbbr == "MIN" ~ "https://content.sportslogos.net/logos/6/232/full/9669_minnesota_timberwolves-primary-2018.png",
                               TeamAbbr == "NOP" ~ "https://content.sportslogos.net/logos/6/4962/full/2681_new_orleans_pelicans-primary-2014.png",
                               TeamAbbr == "NYK" ~ "https://content.sportslogos.net/logos/6/216/full/2nn48xofg0hms8k326cqdmuis.gif",
                               TeamAbbr == "OKC" ~ "https://content.sportslogos.net/logos/6/2687/full/khmovcnezy06c3nm05ccn0oj2.png",
                               TeamAbbr == "ORL" ~ "https://content.sportslogos.net/logos/6/217/full/wd9ic7qafgfb0yxs7tem7n5g4.gif",
                               TeamAbbr == "PHI" ~ "https://content.sportslogos.net/logos/6/218/full/7034_philadelphia_76ers-primary-2016.png",
                               TeamAbbr == "PHX" ~ "https://content.sportslogos.net/logos/6/238/full/4370_phoenix_suns-primary-2014.png",
                               TeamAbbr == "POR" ~ "https://content.sportslogos.net/logos/6/239/full/9725_portland_trail_blazers-primary-2018.png",
                               TeamAbbr == "SAC" ~ "https://content.sportslogos.net/logos/6/240/full/4043_sacramento_kings-primary-2017.png",
                               TeamAbbr == "SAS" ~ "https://content.sportslogos.net/logos/6/233/full/827.png",
                               TeamAbbr == "TOR" ~ "https://content.sportslogos.net/logos/6/227/full/4578_toronto_raptors-primary-2016.png",
                               TeamAbbr == "UTA" ~ "https://content.sportslogos.net/logos/6/234/full/6749_utah_jazz-primary-2017.png",
                               TeamAbbr == "WAS" ~ "https://content.sportslogos.net/logos/6/219/full/5671_washington_wizards-primary-2016.png",
                               TRUE ~ "error"))
# Team logo scatter: offensive vs. defensive net rebounds per 100, then saved
# (ggsave defaults to the plot just displayed)
summary_stats4disc %>%
  ungroup() %>%
  mutate(team_logo = as.factor(team_logo)) %>%
  mutate(team_logo = fct_reorder(team_logo, desc(netREBper100_TRUE))) %>%
  ggplot() +
  geom_point(aes(x = netREBper100_FALSE, y = netREBper100_TRUE)) +
  labs(x = "Defensive Net REB Per 100 Opportunities",
       y = "Offensive Net REB Per 100 Opportunities",
       title = "Offensive vs. Defensive Team Net Rebound Ratings",
       subtitle = "Data From First 40 Games of 2015-2016 Season") +
  geom_image(aes(x = netREBper100_FALSE, y = netREBper100_TRUE,
                 image=team_logo), size = 0.06) +
  #size = I(win_perc/8))) +
  geom_hline(aes(yintercept = 0), color = "black") +
  geom_vline(aes(xintercept = 0), color = "black") +
  theme(panel.background = element_rect(fill = 'white', colour = 'black'),
        panel.grid.major = element_line(colour = 'grey', linetype = "dashed"))
ggsave("~/Desktop/Deep_Learning/nba-movement-data/rebounding_data/images/TeamsPlot.png",
       height = 5, width = 6)
# Baseline sanity check: league-wide observed team rebound frequency vs. the
# model's mean predicted probability, split by offense/defense.
# `teamRebProb` flips the defensive-rebound probability into the perspective
# of the row's team; `teamReboundFlag` flips the observed outcome likewise.
# NOTE(review): `distinct()` reduces the data to one row per team-play so a
# 5-player lineup is not counted five times -- confirm that is the intent.
baselineTeamPreds = data_tot3 %>%
  filter(player_id != -1) %>%
  mutate(teamRebProb = if_else(IsOnOffense, 1 - defRebProbability,
                               defRebProbability),
         teamReboundFlag = if_else(IsOnOffense, !IsDefensiveRebound,
                                   IsDefensiveRebound)) %>%
  inner_join(team_mapping) %>%
  select(team_id, TeamName, TeamAbbr,
         GAME_ID, GAME_EVENT_ID, teamRebProb, teamReboundFlag, IsOnOffense) %>%
  distinct() %>%
  group_by(IsOnOffense) %>%
  summarize(reboundPerc = mean(teamReboundFlag),
            reboundProb = mean(teamRebProb))
# Player-Team Stats -------------------------------------------------------
# Plays each (real, non -1) player was on the floor for, restricted to plays
# where the player's own rebound probability exceeded 0.1.
# NOTE(review): `player_plays` is redefined below WITHOUT the
# `rebound_prob > .1` filter -- confirm which definition each consumer needs.
player_plays = data_tot3 %>%
  filter(player_id != -1,
         rebound_prob > .1) %>%
  select(player_id, team_id, GAME_ID, GAME_EVENT_ID) %>%
  distinct() %>%
  arrange(player_id)
# Per-player on-court team rebounding: expected rebounds (eREB, summed model
# probability) vs. actual rebounds (REB), plus the net rate per play and per
# 100 plays. Restricted to forwards/centers (position contains F or C, not G)
# with more than 200 qualifying plays.
summary_stats5pt1 = data_tot3 %>%
  filter(player_id != -1) %>%
  mutate(teamRebProb = if_else(IsOnOffense, 1 - defRebProbability,
                               defRebProbability),
         teamReboundFlag = if_else(IsOnOffense, !IsDefensiveRebound,
                                   IsDefensiveRebound)) %>%
  inner_join(team_mapping) %>%
  filter(!is.na(teamRebProb)) %>%
  select(team_id, TeamName, TeamAbbr, player_id,
         GAME_ID, GAME_EVENT_ID, teamRebProb, teamReboundFlag) %>%
  # keep only plays the player was involved in (per player_plays above)
  inner_join(player_plays) %>%
  group_by(player_id, team_id, TeamName, TeamAbbr) %>%
  summarize(Plays = n(),
            eREB = sum(teamRebProb, na.rm = TRUE),
            eREBperPlay = eREB/Plays,
            REB = sum(teamReboundFlag, na.rm = TRUE),
            netREB = REB - eREB,
            netREBperPlay = netREB/Plays,
            netREBper100 = 100*netREBperPlay) %>%
  left_join(players_info, by = c("player_id" = "playerid")) %>%
  select(position, firstname, lastname, player_id, everything()) %>%
  arrange(desc(netREB)) %>%
  filter(Plays > 200) %>%
  filter(grepl('F', position)|grepl('C', position),
         !grepl('G', position))
# for all players, get a list of plays that they played in
# (unlike the earlier definition, this one has no rebound_prob filter)
player_plays = data_tot3 %>%
  filter(player_id != -1) %>%
  select(player_id, team_id, GAME_ID, GAME_EVENT_ID) %>%
  distinct() %>%
  arrange(player_id)
# Plays by a player's team in which that player was NOT on the floor:
# cross every team play with the team's roster (inner_join on player/team
# pairs), then remove the plays the player actually appeared in (anti_join).
player_team_not_plays = data_tot3 %>%
  filter(player_id != -1) %>%
  select(team_id, GAME_ID, GAME_EVENT_ID) %>%
  distinct() %>%
  inner_join(player_plays %>%
               select(player_id, team_id) %>%
               distinct()) %>%
  anti_join(player_plays) %>%
  arrange(player_id)
# Same expected-vs-actual team rebounding summary as summary_stats5pt1, but
# computed over the plays each player was OFF the floor ("not_on_" columns).
summary_stats5pt2 = player_team_not_plays %>%
  inner_join(data_tot3 %>%
               filter(player_id != -1) %>%
               mutate(teamRebProb = if_else(IsOnOffense, 1 - defRebProbability,
                                            defRebProbability),
                      teamReboundFlag = if_else(IsOnOffense, !IsDefensiveRebound,
                                                IsDefensiveRebound)) %>%
               inner_join(team_mapping) %>%
               select(team_id, TeamName, TeamAbbr,
                      GAME_ID, GAME_EVENT_ID, teamRebProb, teamReboundFlag) %>%
               distinct(),
             by = c("team_id", "GAME_ID", "GAME_EVENT_ID")) %>%
  select(team_id, TeamName, TeamAbbr, player_id,
         GAME_ID, GAME_EVENT_ID, teamRebProb, teamReboundFlag) %>%
  group_by(player_id, team_id, TeamName, TeamAbbr) %>%
  summarize(not_on_Plays = n(),
            not_on_eREB = sum(teamRebProb, na.rm = TRUE),
            not_on_eREBperPlay = not_on_eREB/not_on_Plays,
            not_on_REB = sum(teamReboundFlag, na.rm = TRUE),
            not_on_netREB = not_on_REB - not_on_eREB,
            not_on_netREBperPlay = not_on_netREB/not_on_Plays,
            not_on_netREBper100 = 100*not_on_netREBperPlay) %>%
  left_join(players_info, by = c("player_id" = "playerid")) %>%
  select(position, firstname, lastname, player_id, everything()) %>%
  arrange(desc(not_on_eREB))
# On/off comparison: difference in the team's net rebounding rate per 100
# plays with the player on vs. off the floor, for players with enough plays
# in both states.
summary_stats5 = summary_stats5pt1 %>%
  inner_join(summary_stats5pt2) %>%
  filter(Plays > 500,
         not_on_Plays > 200) %>%
  mutate(netREBper100_diff = netREBper100 - not_on_netREBper100) %>%
  arrange(desc(netREBper100_diff))
# Checking Player Rebound Probs
# Shooter sanity check: how often the shooter grabs their own rebound vs.
# the model's mean predicted probability for the shooter's row.
shooter_check = data_tot3 %>%
  filter(IsShooter) %>%
  summarize(reboundFreq = mean(IsRebounder),
            reboundProb = mean(rebound_prob))
# Calibration check: bin predicted rebound probabilities into 1%-wide buckets
# (labelled by the bucket midpoint) and compare the empirical rebound
# frequency in each bucket against the bucket's predictions.
# `miss` is the gap between the observed outcome and the bucket midpoint.
small_probs_check1 = data_tot3 %>%
  mutate(rebound_prob_group = (floor(rebound_prob*100) + .5)/100,
         miss = IsRebounder - rebound_prob_group) %>%
  group_by(rebound_prob_group) %>%
  summarize(count = n(),
            reboundFreq = mean(IsRebounder),
            reboundProb = mean(rebound_prob),
            miss = mean(miss),
            miss_perc = miss/reboundProb) %>%
  arrange(rebound_prob_group)
# Off Reb Prob vs. Distance -----------------------------------------------
# Empirical offensive-rebound probability as a function of the shooter's
# distance from the point (5.25, 25) -- presumably the basket location in
# court feet; TODO confirm against the tracking-data coordinate system.
# Distances are binned into 1-foot buckets (labelled by midpoint); sparse
# buckets (<= 100 shots) are dropped.
offRebvsDist = data_tot3 %>%
  filter(IsShooter) %>%
  mutate(offRebProbability = 1- defRebProbability,
         dist = sqrt((x_loc - 5.25)^2 + (y_loc - 25)^2)) %>%
  mutate(dist_disc = floor(dist) + 0.5) %>%
  group_by(dist_disc) %>%
  summarize(count = n(),
            offRebProb = 1 - mean(IsDefensiveRebound)) %>%
  filter(count > 100) %>%
  arrange(dist_disc)
# quick visual: scatter plus loess smooth of off-reb probability by distance
offRebvsDist %>%
  ggplot() +
  geom_point(aes(x = dist_disc, y = offRebProb)) +
  geom_smooth(aes(x = dist_disc, y = offRebProb))
# Play-level accuracy of the model's defensive-rebound call, evaluated on the
# actual rebounder's row of each play (one row per play after the filter).
# Fix: the original scored `IsDefensiveRebound * (defRebProbability >= .5)`,
# which only credits correctly-predicted DEFENSIVE rebounds and counts every
# correctly-predicted offensive rebound (prob < .5 and IsDefensiveRebound
# FALSE) as a miss, understating accuracy. Accuracy is agreement between the
# thresholded prediction and the observed outcome.
correct_team_prob = data_tot3 %>%
  group_by(GAME_ID, GAME_EVENT_ID) %>%
  filter(IsRebounder) %>%
  mutate(correct_pred = (defRebProbability >= .5) == IsDefensiveRebound) %>%
  ungroup() %>%
  summarize(accuracy = mean(correct_pred))
|
4d5cb0959a5f35f6df0bfd0aa3d09d272f8c2c4a
|
861b10b00863c66d1fefcf7451dc4cf0ab0e38c0
|
/man/ggstrip-6v.rd
|
c7967c9937cdce7235b8c98968b4139e9259fd67
|
[] |
no_license
|
rmasinidemelo/ggplot
|
c96e50f1fa0f432a596a7649bd1285cfbb5e968e
|
749060851fdc76f9885fb57196f93eb7ade02f74
|
refs/heads/master
| 2021-05-27T19:21:16.534374
| 2007-05-05T00:00:00
| 2007-05-05T00:00:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 425
|
rd
|
ggstrip-6v.rd
|
\name{ggstrip}
\alias{ggstrip}
\title{Grob strip}
\author{Hadley Wickham <h.wickham@gmail.com>}
\description{
Grob for strip labels
}
\usage{ggstrip(text, horizontal=TRUE, strip.gp=ggopt()$strip.gp, text.gp=ggopt()$strip.text.gp)}
\arguments{
\item{text}{text to display}
\item{horizontal}{orientation, horizontal or vertical}
\item{strip.gp}{}
\item{text.gp}{}
}
\details{}
\examples{}
\keyword{hplot}
\keyword{internal}
|
fe3219caa01d97ee528de0b6c773a1d0d40aca1d
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/nlme/examples/as.matrix.pdMat.Rd.R
|
1a04faebdbb3f56438bacc3dad6cda82828875c2
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 176
|
r
|
as.matrix.pdMat.Rd.R
|
# Auto-extracted example code for nlme::as.matrix.pdMat (generated from the
# package's Rd file). Requires the `nlme` package.
library(nlme)
### Name: as.matrix.pdMat
### Title: Matrix of a pdMat Object
### Aliases: as.matrix.pdMat
### Keywords: models
### ** Examples
# coerce a pdSymm positive-definite matrix object (built from a 4x4
# identity) back into a plain numeric matrix
as.matrix(pdSymm(diag(4)))
|
b7fefd870eb16a30ff125a98a8d0397801536f58
|
dfcb29724707768dcff0e0dbb4fc200c97caa1aa
|
/Faster-with-attributes/faster_multinet_SIR_only.R
|
c920d7bbd7b1935bad4ca968d305e11ffb0221b7
|
[] |
no_license
|
niebieska/MasterThesis
|
f497fe1fc9dc6c99d4867e1062999513bcd8b130
|
930574bfa88b1326c6b14e6d4fd34a2667646b77
|
refs/heads/master
| 2022-11-06T14:58:40.211424
| 2020-06-22T22:34:24
| 2020-06-22T22:34:24
| 258,297,641
| 1
| 1
| null | null | null | null |
WINDOWS-1250
|
R
| false
| false
| 7,516
|
r
|
faster_multinet_SIR_only.R
|
# Script: SIR epidemic simulation on a single layer of a multilayer network
# (multinet package). For each (beta, gamma) parameter pair it runs
# `experimentNumber` independent simulations and writes summary tables,
# per-actor state timelines and a full .RData snapshot into a nested,
# hard-coded Windows directory tree.
# (Comments below translated from Polish.)
# library
library(multinet)
# paired SIR parameters: listBeta[i] (infection) goes with listgamma[i] (recovery)
listBeta <- c(0.19,0.28,0.22)
listgamma <-c(0.1,0.08,0.02)
networkName <- "MoscowAthletics2013"
countryDirectory <-"Italy"
scritpType<-"SIR_only"
networkFileName <-"MoscowAthletics2013_4NoNatureNoLoops.edges"
network<-read_ml(paste("C:/Users/Paulina/Downloads/FullNet/",networkFileName,sep=""), name=networkName, sep=',', aligned=FALSE)
# the single layer the epidemic runs on
layerName <- "RE"
# NOTE(review): 1:length(x) misbehaves for empty vectors; seq_along() is safer
for(value in 1:length(listBeta))
{
  if(value == 1){
    # Experiment output parameters -------------------------------------------------
    # Working folder
    setwd("C:/Users/Paulina/Documents/MasterThesis/Eksperiments/")
    getwd()
    # helper variables for saving
    experimentsMainDirectory <- paste("SIR&SIS",networkName, sep="-")
    if(dir.exists(experimentsMainDirectory) == FALSE) dir.create(experimentsMainDirectory)
    # folder for the experiments
    setwd(paste("C:/Users/Paulina/Documents/MasterThesis/Eksperiments/", experimentsMainDirectory, sep=""))
    if(dir.exists(countryDirectory) == FALSE) dir.create(countryDirectory)
    setwd(paste(getwd(), countryDirectory,sep="/"))
  }
  # descend into <beta>-<gamma>/<scriptType>/ for this parameter pair
  setwd(paste(paste("C:/Users/Paulina/Documents/MasterThesis/Eksperiments/",experimentsMainDirectory,sep=""),countryDirectory,sep="/"))
  mainDirectory <-paste(listBeta[value],listgamma[value], sep = "-")
  if(dir.exists(mainDirectory) == FALSE) dir.create(mainDirectory)
  setwd(paste(getwd(),mainDirectory, sep="/") )
  if(dir.exists(scritpType) == FALSE) dir.create(scritpType)
  setwd(paste(getwd(),scritpType, sep="/") )
  getwd()
  # number of independent simulation repetitions per parameter pair
  experimentNumber <- 20 #0
  for(e in 1:experimentNumber)
  {
    # load the network
    #net <- ml_aucs()
    net<-network
    AllLayers <- layers_ml(net)
    #read_ml("C:/Users/Paulina/Downloads/FullNet/CS-Aarhus_4NoNature.edges", name="CS", sep=' ', aligned=FALSE)
    # current layer
    # network parameters
    numberOfActors <- num_actors_ml(net)
    numberOfActorsInLayer <- num_actors_ml(net,layerName)
    layerActors <- actors_ml(net,layerName)
    networkActors <- actors_ml(net)
    # variable definitions
    # duration of the "epidemic" - number of days
    time <- 150
    # SIR probabilities
    beta <- listBeta[value] # infection
    gamma <- listgamma[value] # recovery
    #probabilities <- paste(paste("beta:",beta),paste("gamma:",gamma))
    # SIR state
    numberOfSusceptible <- numberOfActorsInLayer
    numberOfInfected <- 0 #
    numberOfRecovered <- 0 # recovered individuals
    SIR_Sum<- numberOfSusceptible + numberOfInfected + numberOfRecovered
    # Initial state for the counts matrix (day, S, I, R, total)
    SIR_group_States <- matrix(cbind(0,numberOfSusceptible, numberOfInfected, numberOfRecovered, SIR_Sum))
    # helper variables
    new_infected <- NULL # new infections
    new_recovered <- NULL # new recoveries
    # add attributes (state - the actor's SIR state)
    add_attributes_ml(net, "state", type = "string", target="actor", layer ="")
    # set the state attribute to "S" (susceptible) for everyone by default
    set_values_ml(net, "state",actors_ml(net,layerName), values ="S" )
    # check the assigned values
    #get_values_ml(net,"state",actors_ml(net))
    #get_values_ml(net,"awareness",actors_ml(net))
    #get_values_ml(net,"epsilon",actors_ml(net))
    #get_values_ml(net,"beta",actors_ml(net))
    # initial state for I ---------------------------------------------------
    # (A) randomly X people
    # x<-5
    # infected <- trunc(runif(x,1,215))
    # while (n>0)
    # { (print (infected[n]))
    #   set_values_ml(net, "state",infected[n], values ="I" )
    #   print(get_values_ml(net, "state",infected[n]))
    #   n=n-1
    # }
    # (B) randomly x % of the network - preferably e.g. 1%: count how many
    # actors that is, then sample that many actors as seeds
    x<-0.01
    n<- round( x * num_actors_ml(net,layerName)) # x % of the actors from the selected layer
    infected <- trunc(runif(n,1,numberOfActorsInLayer))
    # Update the SIR state counts
    numberOfInfected <- n
    numberOfSusceptible <- numberOfSusceptible - numberOfInfected
    # infect the sampled actors
    while (n>0)
    { print( paste("infekowanie w toku - aktor :", infected[n]))
      set_values_ml(net, "state",layerActors[infected[n]], values ="I" )
      #print(paste(n, get_values_ml(net, "state",infected[n]))) # for checking
      n=n-1
    }
    # per-actor state timeline: one column appended per simulated day
    timeline_SIR<- as.matrix(layerActors)
    timeline_SIR<-cbind(timeline_SIR, get_values_ml(net,"state", layerActors ))
    startTime <- Sys.time()
    TimeMeasure <- 0
    for(i in 1:time ) # iterate over days; 1 iteration = 1 day
    {
      print (paste("czas:",Sys.time()- startTime ))
      TimeMeasure<- cbind(TimeMeasure,Sys.time()- startTime)
      startTime <- Sys.time()
      # print the SIR state to the console and record it
      SIR_group_States <- cbind(SIR_group_States,rbind(i,numberOfSusceptible,numberOfInfected,numberOfRecovered, SIR_Sum))
      print(paste("Dzień epidemii:", i))
      print(paste("Stan SIR:", paste( paste( paste("Susceptible:", numberOfSusceptible),paste("Infected:", numberOfInfected), sep = " ; "),paste("Recovered:", numberOfRecovered),sep =" ; ")))
      # stop early once everyone recovered or no infections remain
      if(numberOfRecovered == numberOfActorsInLayer) break
      if(numberOfInfected == 0) break
      # actors that will change state in the next iteration
      new_infected <- NULL
      new_recovered <- NULL
      # table of states in the network
      layersAttributes <- get_values_ml(net,"state",actors_ml(net,layerName))
      actualInfectedInLayer <- which(layersAttributes == "I")
      print (actualInfectedInLayer)
      # SIR loop
      if(length(actualInfectedInLayer) != 0)
        for (j in 1:length(actualInfectedInLayer)) # visit infected actors one by one
        {
          # if(get_values_ml(net,"state",layerActors[j]) =="I") # if the actor is infected
          # {
          # find the infected actor's neighbours
          neighbors <- neighbors_ml(net,layerActors[actualInfectedInLayer[j]],layerName,mode="all")
          for(s in 1:length(neighbors))
          { if(get_values_ml(net, "state", neighbors[s])=="S")
            {
              if( runif(1) < beta)
              { #print( value)
                if(!(neighbors[s] %in% new_infected)) # is.element(neighbors[s])
                  new_infected <- cbind(new_infected,neighbors[s]) # temporary list of the newly infected
                # }
              }
            }
            # each infected actor also gets a chance to recover this day
            if( runif(1) < gamma)
            {
              if(!is.element(layerActors[actualInfectedInLayer[j]],new_recovered)){ new_recovered=cbind(new_recovered,layerActors[actualInfectedInLayer[j]])}
            }
          }
        }
      # apply the new infections and recoveries, if any appeared
      if(!is.null(new_infected)) set_values_ml(net, "state",new_infected, values ="I" )
      if(!is.null(new_recovered)) set_values_ml(net, "state",new_recovered, values ="R" )
      # check the attribute state - recount S/I/R from the state vector
      SIR_attributes <- get_values_ml(net,"state", layerActors)
      numberOfSusceptible <- length( which('S' == SIR_attributes))
      numberOfInfected <- length( which('I' == SIR_attributes))
      numberOfRecovered <- length( which('R' == SIR_attributes))
      Sum = numberOfSusceptible + numberOfInfected + numberOfRecovered
      # save the intermediate (per-day) states
      timeline_SIR <- cbind(timeline_SIR, SIR_attributes)
    }
    SIR_group_States <- t(SIR_group_States)
    # save the results of the e-th experiment --------------------------------
    experimentDescription <- paste("eksperymentData",e, sep="-")
    # write to .dat files
    write.table(SIR_group_States,file=paste("Summary_SIR",(paste(e,".dat", sep = "")), sep="-"), col.names =TRUE, sep =";", row.names = TRUE )
    write.table(timeline_SIR,file=paste("timelineStates_SIR",(paste(e,".dat", sep = "")), sep="-"), col.names =TRUE, sep =";", row.names = TRUE )
    # save an RData snapshot of the whole workspace
    save(list = ls(all.names = TRUE), file =paste( experimentDescription,".RData",sep=""), envir = .GlobalEnv)
  }
}
|
8d7f5e3f09799f3074e998be17613b85b949639b
|
678c7a152cc00df4d1c38fae6c59511b417aef54
|
/R/aggregateData.R
|
d31186bbcf35744bcca3d5a21d9649515e04c6c6
|
[] |
no_license
|
GabrielHoffman/muscat
|
368c21e3caae95abe29d6b2be79b174ed4ef79c0
|
93a3d88fd4a6bacb92811f12af10b5895b912bd7
|
refs/heads/master
| 2023-08-24T10:27:27.795931
| 2021-10-14T17:04:31
| 2021-10-14T17:04:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,244
|
r
|
aggregateData.R
|
#' @rdname aggregateData
#' @title Aggregation of single-cell to pseudobulk data
#'
#' @description Aggregates single-cell measurements to pseudobulk data
#'   by summarizing an assay's values over groups of cells.
#'
#' @param x a \code{\link[SingleCellExperiment]{SingleCellExperiment}}.
#' @param assay character string specifying the assay slot to use as
#' input data. Defaults to the 1st available (\code{assayNames(x)[1]}).
#' @param by character vector specifying which
#' \code{colData(x)} columns to summarize by (at most 2!).
#' @param fun a character string.
#' Specifies the function to use as summary statistic.
#' Passed to \code{\link[scuttle]{summarizeAssayByGroup}}.
#' @param scale logical. Should pseudo-bulks be scaled
#' with the effective library size & multiplied by 1M?
#' @param BPPARAM a \code{\link[BiocParallel]{BiocParallelParam}}
#' object specifying how aggregation should be parallelized.
#' @param verbose logical. Should information on progress be reported?
#'
#' @return a \code{\link[SingleCellExperiment]{SingleCellExperiment}}.
#' \itemize{
#' \item{If \code{length(by) == 2}, each sheet (\code{assay}) contains
#' pseudobulks for each of \code{by[1]}, e.g., for each cluster when
#' \code{by = "cluster_id"}. Rows correspond to genes, columns to
#' \code{by[2]}, e.g., samples when \code{by = "sample_id"}}.
#' \item{If \code{length(by) == 1}, the returned SCE will contain only
#' a single \code{assay} with rows = genes and colums = \code{by}.}}
#'
#' Aggregation parameters (\code{assay, by, fun, scaled}) are stored in
#' \code{metadata()$agg_pars}, and the number of cells that were aggregated
#' are accessible in \code{int_colData()$n_cells}.
#'
#' @examples
#' # pseudobulk counts by cluster-sample
#' data(example_sce)
#' pb <- aggregateData(example_sce)
#'
#' library(SingleCellExperiment)
#' assayNames(pb) # one sheet per cluster
#' head(assay(pb)) # n_genes x n_samples
#'
#' # scaled CPM
#' cpm <- edgeR::cpm(assay(example_sce))
#' assays(example_sce)$cpm <- cpm
#' pb <- aggregateData(example_sce, assay = "cpm", scale = TRUE)
#' head(assay(pb))
#'
#' # aggregate by cluster only
#' pb <- aggregateData(example_sce, by = "cluster_id")
#' length(assays(pb)) # single assay
#' head(assay(pb)) # n_genes x n_clusters
#'
#' @author Helena L Crowell & Mark D Robinson
#'
#' @references
#' Crowell, HL, Soneson, C, Germain, P-L, Calini, D,
#' Collin, L, Raposo, C, Malhotra, D & Robinson, MD:
#' On the discovery of population-specific state transitions from
#' multi-sample multi-condition single-cell RNA sequencing data.
#' \emph{bioRxiv} \strong{713412} (2018).
#' doi: \url{https://doi.org/10.1101/713412}
#'
#' @importFrom Matrix colSums
#' @importFrom purrr map
#' @importFrom S4Vectors DataFrame metadata
#' @importFrom SingleCellExperiment SingleCellExperiment int_colData<-
#' @importFrom SummarizedExperiment colData colData<-
#' @export
# Aggregate single-cell measurements to pseudobulks, summarizing `assay`
# values with `fun` over the cell groups defined by the `by` colData columns.
# Returns a SingleCellExperiment: with length(by) == 2, one assay per level
# of by[1] (genes x by[2] levels); with length(by) == 1, a single assay.
# Aggregation parameters go in metadata()$agg_pars, per-group cell counts in
# int_colData()$n_cells. See the roxygen header above for full details.
aggregateData <- function(x,
    assay = NULL, by = c("cluster_id", "sample_id"),
    fun = c("sum", "mean", "median", "prop.detected", "num.detected"),
    scale = FALSE, verbose = TRUE, BPPARAM = SerialParam(progressbar = verbose)) {
    # check validity of input arguments
    fun <- match.arg(fun)
    if (is.null(assay))
        assay <- assayNames(x)[1]
    .check_arg_assay(x, assay)
    .check_args_aggData(as.list(environment()))
    stopifnot(is(BPPARAM, "BiocParallelParam"))
    # assure 'by' colData columns are factors
    # so that missing combinations aren't dropped
    for (i in by)
        if (!is.factor(x[[i]]))
            x[[i]] <- factor(x[[i]])
    # compute pseudo-bulks (one matrix per level of by[1])
    pb <- .pb(x, by, assay, fun, BPPARAM)
    # idiom fix: use scalar && (not vectorized &) in an if() condition
    if (scale && length(by) == 2) {
        # compute library sizes; reuse pb when it already holds summed counts
        cs <- if (assay == "counts" && fun == "sum")
            pb else .pb(x, by, "counts", "sum", BPPARAM)
        ls <- lapply(cs, colSums)
        # scale pseudobulks: multiply by effective library size & divide by 1e6
        # (intended for CPM-like input assays; see the roxygen @param scale)
        pb <- lapply(seq_along(pb), function(i) pb[[i]] / 1e6 * ls[[i]])
        names(pb) <- names(ls)
    }
    # construct SCE, recording the aggregation parameters in the metadata
    md <- metadata(x)
    md$agg_pars <- list(assay = assay, by = by, fun = fun, scale = scale)
    pb <- SingleCellExperiment(pb, rowData = rowData(x), metadata = md)
    # tabulate number of cells per group (dropping unused factor levels)
    cd <- data.frame(colData(x)[, by])
    for (i in names(cd))
        if (is.factor(cd[[i]]))
            cd[[i]] <- droplevels(cd[[i]])
    ns <- table(cd)
    if (length(by) == 2) {
        ns <- asplit(ns, 2)
        ns <- map(ns, ~c(unclass(.)))
    } else ns <- c(unclass(ns))
    int_colData(pb)$n_cells <- ns
    # propagate 'colData' columns that are unique across 2nd 'by'
    # (e.g. sample-level covariates that are constant within each sample)
    if (length(by) == 2) {
        cd <- colData(x)
        ids <- colnames(pb)
        counts <- vapply(ids, function(u) {
            m <- as.logical(match(cd[, by[2]], u, nomatch = 0))
            vapply(cd[m, ], function(u) length(unique(u)), numeric(1))
        }, numeric(ncol(colData(x))))
        cd_keep <- apply(counts, 1, function(u) all(u == 1))
        cd_keep <- setdiff(names(which(cd_keep)), by)
        if (length(cd_keep) != 0) {
            m <- match(ids, cd[, by[2]], nomatch = 0)
            cd <- cd[m, cd_keep, drop = FALSE]
            rownames(cd) <- ids
            colData(pb) <- cd
        }
    }
    return(pb)
}
|
6846486c39830d78334325c6a35b0df8f01a78f2
|
4320dcc8598eb1bf08ee2ebd71dcd2558fb579d8
|
/man/gn_leaflet_basic.Rd
|
14c65e6bd8c9a58afa21f8caf579554f87dc6a6b
|
[] |
no_license
|
jacob-ogre/us.geonames
|
74716ee395fc44aa4b472ff0b71b4f2a35e593aa
|
94b2f8b5a8adb415c8c351312685a545e6aabf09
|
refs/heads/master
| 2021-01-20T10:29:47.349100
| 2017-10-24T18:36:08
| 2017-10-24T18:36:08
| 100,292,189
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 784
|
rd
|
gn_leaflet_basic.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/map.R
\name{gn_leaflet_basic}
\alias{gn_leaflet_basic}
\title{Basic leaflet map of points identified in a \code{us.geonames} search}
\usage{
gn_leaflet_basic(df, weight = 1, color = "red", fillColor = "red",
fillOpacity = 0.3, radius = 5)
}
\arguments{
\item{df}{A dataframe from a \link{us.geonames} search}
\item{weight}{Weight of circle marker border (default = 1)}
\item{color}{Color of circle marker border (default = "red")}
\item{fillColor}{Fill color of circle marker (default = "red")}
\item{fillOpacity}{Opacity of a single marker (default = 0.3)}
\item{radius}{Radius of circle marker (default = 5)}
}
\description{
Basic leaflet map of points identified in a \code{us.geonames} search
}
|
8c2c525c3776406e258006d80ab82d548d162f44
|
56208c93517c510bbe3a25fbee15735001f3fae4
|
/Binaritize.R
|
38730d0df6c36697129ec99e7ca10d24d07df0f2
|
[] |
no_license
|
AdiModi96/Hindi-Numbers-OCR
|
d83caea3fa00e7a9b8c80076fd67b9c6e73c4d43
|
c1ef253bfdd04892826d375393fe87a7e396b655
|
refs/heads/master
| 2021-07-16T14:21:09.936474
| 2017-08-11T12:20:37
| 2017-08-11T12:20:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,134
|
r
|
Binaritize.R
|
# Binaritize.R -- read each discretized CSV matrix and write a binarized
# copy (values < 1 -> 0) into the matching folder of a mirrored tree.
# NOTE(review): paths are hard-coded; both trees must already contain the
# same subdirectories in the same order for dirs_in_binaritized[f] to match.

# declaring reading location
read_loc <- ("D:/Codes/Data-Science-OCR/OCR Test Alphabets-Discretized")
# declaring writing location
write_loc <- ("D:/Codes/Data-Science-OCR/OCR Test Alphabets-Binaritized")

# list.dirs() returns the root itself first; drop it with -1 (safe even when
# the tree is empty, unlike 2:length(x))
dirs_in_discretized <- list.dirs(read_loc)[-1]
dirs_in_binaritized <- list.dirs(write_loc)[-1]

# Read discretized .csv files, binarize, and store in the proper folder
f <- 1
for (dir in dirs_in_discretized) {
  namef <- 1
  for (csv_name in list.files(dir)) {
    mat <- read.csv(file.path(dir, csv_name))
    mat <- as.matrix(mat[, 2:ncol(mat)])  # drop the row-index column write.csv added
    mat <- replace(mat, mat < 1, 0)       # binarize: anything below 1 becomes 0
    # fix: the original used paste(), which produced names like "1 .csv"
    # (with a stray space); paste0() yields the intended "1.csv"
    out_name <- paste0(namef, ".csv")
    write.csv(mat, file = file.path(dirs_in_binaritized[f], out_name))
    namef <- namef + 1
  }
  f <- f + 1
}
|
47ed5869432377141ab7a3a31b6c926a2391a52a
|
ca41bb40b940bf70e3f3895ce1953a82e449c2f6
|
/2020-08-19/app.R
|
acea9f0116015b1daf6452a0c01dbb419ba3491e
|
[
"MIT"
] |
permissive
|
colinquirk/LivestreamCode
|
e78f5183296c4352c545ace49c5f7298ec89f300
|
67aa98ec0e35028284fbe1969f69bec4c926e851
|
refs/heads/master
| 2022-12-17T09:22:33.233048
| 2020-09-24T03:46:27
| 2020-09-24T03:46:27
| 283,785,358
| 2
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,563
|
r
|
app.R
|
# Shiny app: CTA (Chicago Transit Authority) monthly ridership explorer.
# Highlights one chosen year's ridership curve against all other years and
# reports the y-value under the mouse cursor.
library(shiny)
library(lubridate)
library(tidyverse)
theme_set(theme_minimal())
# expects a local CSV with (at least) columns service_date (m/d/y format)
# and total_rides -- TODO confirm schema against the source file
ridership = read_csv("CTA_Ridership.csv") %>%
  mutate(service_date = mdy(service_date))
# UI: a year slider, the plot (with hover enabled), and a hover readout
ui <- fluidPage(
  titlePanel("CTA Ridership Data"),
  sidebarLayout(
    sidebarPanel(
      sliderInput("year", "Pick a year:", min = 2001, max = 2020, value = 2020)
    ),
    mainPanel(
      plotOutput("RidershipOverTime", hover = hoverOpts("year_hover")),
      "Number of Riders: ",
      textOutput("yearHoverOutput"),
    )
  )
)
server <- function(input, output) {
  # mean daily rides per (month, year); computed once at app start
  ridership_by_month_year = ridership %>%
    mutate(service_month = month(service_date),
           service_year = factor(year(service_date))) %>%
    group_by(service_month, service_year) %>%
    summarise(total_rides = mean(total_rides))
  # echo the y-coordinate under the cursor (empty until the user hovers)
  output$yearHoverOutput = renderText({
    if(is.null(input$year_hover$y)) {
      return(NULL)
    } else {
      return(as.character(round(input$year_hover$y)))
    }
  })
  # all years in grey, the selected year emphasized in red
  output$RidershipOverTime = renderPlot({
    annotate_data = ridership_by_month_year %>%
      filter(service_year == input$year)
    ggplot(ridership_by_month_year, aes(x = service_month, y = total_rides, group=service_year)) +
      geom_line(color="#CCCCCC") +
      geom_line(data = annotate_data, color = "firebrick2", size=2) +
      guides(color = FALSE) +
      ylim(0, 2000000)
  })
}
shinyApp(ui = ui, server = server)
|
b214361824c23ced65b0fb78618734f32d3731f8
|
aae46958c9b9ca7b33fd2e530f8cfc713d546560
|
/ITEX_analyses/old_code/TraitsImputation_Bootstrapping.R
|
6b4d4f5782280044fbb7fd6fd9a9fe05233811be
|
[] |
no_license
|
EnquistLab/PFTC4_Svalbard
|
a38f155c5f905af74b7e265ace7dc256eaa4e2f9
|
f372df13dc8002f347e2fe8810c4034ed02f06fc
|
refs/heads/master
| 2022-02-16T09:20:34.170459
| 2022-01-28T14:14:50
| 2022-01-28T14:14:50
| 130,364,868
| 2
| 13
| null | 2021-05-19T11:11:34
| 2018-04-20T13:19:53
|
HTML
|
UTF-8
|
R
| false
| false
| 1,909
|
r
|
TraitsImputation_Bootstrapping.R
|
#### COMMUNITY WEIGHTED MEANS ####
# Impute missing trait values for the 2015 ITEX community via traitstrap's
# hierarchical sampling, then bootstrap trait moments.
# Libraries
# install.packages("devtools")
#devtools::install_github("richardjtelford/traitstrap")
library("traitstrap")
library("tidyverse")
comm <- read_csv(file = "community/cleaned_data/ITEX_Svalbard_2003_2015_Community_cleaned.csv", col_names = TRUE)
traits <- read_csv(file = "traits/cleaned_Data/PFTC4_Svalbard_2018_ITEX.csv", col_names = TRUE)
# Transform data sets
# community data: keep only the 2015 survey and the columns traitstrap needs
comm <- comm %>%
  filter(Year == 2015) %>%
  select(-Spp, -FunctionalGroup) %>%
  select(-Year)
traits <- traits %>%
  # remove 3 Betula nana ind.
  filter(Taxon != "betula nana") %>%
  # select important columns
  select(Site, Treatment, PlotID, Taxon, Individual_nr:LDMC, P_percent, C_percent:dC13_permil) %>% #, ID, Flag) %>%
  # Make long data frame
  pivot_longer(cols = c(Individual_nr:dC13_permil), names_to = "Trait", values_to = "Value") %>%
  filter(!is.na(Value)) %>%
  # log transform growth traits (size traits are right-skewed) and rename
  # them with a _log suffix so downstream summaries are unambiguous
  mutate(Value = ifelse(Trait %in% c("Plant_Height_cm", "Wet_Mass_g", "Dry_Mass_g", "Leaf_Area_cm2", "Leaf_Thickness_Ave_mm"), suppressWarnings(log(Value)), Value),
         Trait = recode(Trait, "Plant_Height_cm" = "Plant_Height_cm_log", "Wet_Mass_g" = "Wet_Mass_g_log", "Dry_Mass_g" = "Dry_Mass_g_log", "Leaf_Area_cm2" = "Leaf_Area_cm2_log", "Leaf_Thickness_Ave_mm" = "Leaf_Thickness_Ave_mm_log"))
# Impute missing traits, filling gaps from higher levels of the
# Site > Treatment > PlotID hierarchy, weighted by species abundance
ImputedTraits_15 <- trait_impute(comm = comm,
                                 traits = traits,
                                 scale_hierarchy = c("Site", "Treatment", "PlotID"),
                                 taxon_col = "Taxon",
                                 trait_col = "Trait",
                                 value_col = "Value",
                                 abundance_col = "Abundance")
# Coverage Plot
autoplot(ImputedTraits_15)
# Boostrapping: nonparametric bootstrap of community trait moments
BootstrapedTraits <- trait_np_bootstrap(imputed_traits = ImputedTraits_15,
                                        nrep = 100, sample_size = 200)
# NOTE(review): current traitstrap exports trait_summarise_boot_moments();
# confirm SummariseBootMoments() exists in the installed version
SumMoments <- SummariseBootMoments(BootstrapMoments = BootstrapedTraits)
|
019516ca18f764b4b5e9b8033704f17547f7d19f
|
1b672dbb6a88af1ca8c99d0784d62f577c5e7ae0
|
/plot3.R
|
3119cfbc9ac2f40859e31f992941b8f98d069b0b
|
[] |
no_license
|
50stuck/ExData_Plotting1
|
2cfa5e9e3503fcbe655df2d59a32312d4d204417
|
dccc7a25a66dea4bf8175578e39214e18b4da977
|
refs/heads/master
| 2021-01-09T06:36:21.394778
| 2015-01-10T02:28:57
| 2015-01-10T02:28:57
| 29,040,156
| 0
| 0
| null | 2015-01-09T23:29:39
| 2015-01-09T23:29:39
| null |
UTF-8
|
R
| false
| false
| 1,660
|
r
|
plot3.R
|
# plot3.R -- reproduce the "Energy sub metering" plot from the UCI household
# power consumption data, restricted to 2007-02-01 and 2007-02-02.
data <- read.table("household_power_consumption.txt", header=TRUE, sep=";",
                   na.strings="?")
data$Date <- as.Date(data$Date, "%d/%m/%Y") # converting date to proper class
# subsetting the two days of interest
subdata <- data[(data$Date=="2007-02-01" | data$Date=="2007-02-02"),]
# combine date and time into a single POSIXlt timestamp column.
# Fix: the original built a character DateTime column, then assigned the
# parsed result to a misspelled `DataTime`, leaving a stray unused column
# and two confusingly-named near-duplicates; one parsed column suffices.
subdata$DateTime <- strptime(paste(subdata$Date, subdata$Time),
                             format="%Y-%m-%d %H:%M:%S")
# so that weekday labels appear in English regardless of the local locale
Sys.setlocale(category = "LC_TIME", locale = "C")
# open the png device with the dimensions required by the assignment
png(filename="plot3.png", width=480, height=480, bg="transparent")
# empty canvas, then one line per sub-metering channel
with(subdata, plot(DateTime, Sub_metering_1, type="n",
                   ylab="Energy Sub metering", xlab=""))
with(subdata, lines(DateTime, Sub_metering_1, col="black"))
with(subdata, lines(DateTime, Sub_metering_2, col="red"))
with(subdata, lines(DateTime, Sub_metering_3, col="blue"))
legend("topright", lwd=1, col=c("black", "red", "blue"),
       legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
dev.off() # closing the png device so the file is written
|
8926a4636256b58cde902b222f63a557d05159cb
|
91c63b8afda86eafa167101f8e2451eb570de3e1
|
/R/hello.R
|
ec6da4d4780331a11c55d9cd6938762c53139314
|
[] |
no_license
|
valenwp/gitprobability
|
0e796490049b3d4b967c95ca91a33c978dbf07ff
|
e64216474f556b012336ebde92a610fd5bd1803f
|
refs/heads/master
| 2020-04-08T13:11:45.963428
| 2018-12-06T17:49:06
| 2018-12-06T17:49:06
| 159,379,866
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 465
|
r
|
hello.R
|
#' Rejection sampling from a density
#'
#' @description Draws `n` samples from the (possibly unnormalised) density
#'   `pdf` on `[a, b]` by simple rejection sampling under a flat envelope of
#'   height `C`.
#'
#' @param n number of samples to draw (non-negative)
#' @param pdf a density function; must satisfy `pdf(x) <= C` on `[a, b]`
#' @param a lower bound of the support
#' @param b upper bound of the support
#' @param C upper bound of `pdf` over `[a, b]` (envelope height)
#'
#' @return a numeric vector of length `n` containing the accepted samples
#'
#' @examples
#' pdf<-function(x){x/2}
#' rejsample(2, pdf, 0, 5, .5)
rejsample <- function(n, pdf, a, b, C) {
  # validate inputs up front; a C below max(pdf) would silently bias samples
  stopifnot(is.function(pdf), length(n) == 1, n >= 0, a < b, C > 0)
  # preallocate as numeric (the original used rep(NA, n), a logical vector
  # that gets coerced on first assignment)
  samp <- numeric(n)
  accepted <- 0L
  while (accepted < n) {
    u <- runif(1, a, b)  # candidate point
    v <- runif(1, 0, C)  # uniform height under the envelope
    if (pdf(u) >= v) {   # accept when the point falls under the density
      accepted <- accepted + 1L
      samp[accepted] <- u
    }
  }
  samp
}
|
fa92be5724421aa808bd5493ff01bead25afe5fe
|
70a9d1afa993fdf7b36673a6a72a4635d0948cb2
|
/3.R
|
81bdd67c6ee141d1f06a2c3720053af666f238e5
|
[] |
no_license
|
blanket77/R_Coding
|
a8171ca56c68102d8a483319968e3702c66e938a
|
0033ba50226d1a9325b89596208bb924df6bd18a
|
refs/heads/master
| 2023-06-23T22:16:23.697148
| 2021-07-25T05:01:32
| 2021-07-25T05:01:32
| 388,464,562
| 0
| 0
| null | null | null | null |
UHC
|
R
| false
| false
| 937
|
r
|
3.R
|
# R session / package-management practice script (comments translated from
# Korean). Uses hard-coded Windows paths; adjust before running.
# list available packages in the library paths
library()
# Sys.setlocale("LC_ALL", locale ="English")
library()
# Sys.setlocale()
# matrix of installed packages and their metadata
installed.packages()
colnames(installed.packages())
installed.packages()[, c("Package", "Version")]
# current search path (attached packages/environments)
search()
m <- matrix(1:6, 3, 2)
m
t(m)
# NOTE: this user-defined t() masks base::t on the search path
t <- function(x) x+100
t(m)
base::t(m)
# fails here: lattice is not attached yet
xyplot(dist ~ speed, data=cars)
library(lattice)
search()
xyplot(dist ~ speed, data=cars)
library(lattice) # loading repeatedly is harmless; R skips duplicates
detach(package:lattice) # detach the package from the search path
search()
# fails again after the detach
xyplot(dist ~ speed, data=cars)
# prepend a custom library location
.libPaths("C:/myRproject/library")
.libPaths()
install.packages("ggplot2")
library(help=ggplot2)
library(ggplot2) # attach (load into memory)
ls("package:ggplot2")
search()
# built-in datasets
data()
head(AirPassengers)
head(cars)
tail(cars)
head(cars, 10)
cars
help(cars)
data(package="MASS")
data(Animals, package = "MASS") # load the Animals dataset into memory
head(Animals)
# fails: UScrime needs MASS attached first
head(UScrime)
library(MASS)
head(UScrime)
head(Rabbit)
|
1809200888cdd546b0b22c56d49770845b5e59da
|
bc536251f89d76d70f702647fc63a0b8df50032c
|
/Estructuras-Programacion-R/closure.R
|
129ddc66816724ab3defc18631e40b3590ee4ac8
|
[] |
no_license
|
Louiso/Curso-R
|
50ff8439ff42e6cb987bee9ead88a795e04e51ed
|
0c0880196ca086b51eb718983fc45cfdab1c3ad1
|
refs/heads/master
| 2021-01-24T00:03:55.885946
| 2016-06-11T03:13:49
| 2016-06-11T03:13:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 144
|
r
|
closure.R
|
# Closure demonstration in R: the inner function captures both the enclosing
# function's local variable and the global `w` through lexical scoping.
w <- 12
f <- function(y) {
  scale_factor <- 8
  # the closure sees scale_factor (enclosing frame) and w (global env)
  make_value <- function() scale_factor * (w + y)
  make_value()
}
environment(f)  # the environment f was defined in (the global environment)
ls()            # objects currently in the global environment
ls.str()        # the same listing, with a structure summary per object
|
beb8fedb69e548a00224ecafb5a5f26327c40cf8
|
2fe4c16e0377a99e198ab04d5c378ca247ae4329
|
/Rscript/R/mzkit/man/MolWeight.Rd
|
7b4e937882b35a7d8af297a4ea4562dfa4a290b7
|
[
"MIT"
] |
permissive
|
xieguigang/mzkit
|
1964d28b0fad5f6d44950fdccdd4a70877f75c29
|
6391304b550f7e4b8bb6097a6fb1c0d3b6785ef1
|
refs/heads/master
| 2023-08-31T06:51:55.354166
| 2023-08-30T08:56:32
| 2023-08-30T08:56:32
| 86,005,665
| 37
| 11
|
MIT
| 2023-03-14T14:18:44
| 2017-03-23T23:03:07
|
Visual Basic .NET
|
UTF-8
|
R
| false
| true
| 353
|
rd
|
MolWeight.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/MolWeight.R
\name{MolWeight}
\alias{MolWeight}
\title{Molecule weight calculate helper}
\usage{
MolWeight()
}
\value{
A list module that contains member function:
\enumerate{
\item \code{Eval}
\item \code{Weight}
}
}
\description{
Molecule weight calculate helper
}
|
bb3bda1706fe4355f9202c4673aef74c6d9b1340
|
35f05bea37f788d5eb969131c46b223d539a3452
|
/2021/Week 23 - Survivor/survivor.R
|
ec28e33c70dcffa57f6b74be001958bf2dc9e54e
|
[] |
no_license
|
Rohan4201/tidy-tuesdays
|
66551be773da9a1dc5e1aa976efc90ab7e009a71
|
03a438ddeba91a290b998d682344e842f10f762b
|
refs/heads/master
| 2023-07-16T15:59:14.055589
| 2021-08-31T19:50:54
| 2021-08-31T19:50:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,767
|
r
|
survivor.R
|
# Tidy Tuesday (2021 wk 23): Survivor castaways -- final placement vs. the
# extravert/introvert component of each contestant's MBTI personality type.
library(ggplot2)
library(dplyr)
library(tidyr)
library(ggtext)
#fonts
sysfonts::font_add_google(name = "Roboto","Roboto")
showtext::showtext_auto()
castaways <- readr::read_csv('https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2021/2021-06-01/castaways.csv')
# split the 4-letter MBTI code into its four axes; compute each castaway's
# finishing position from the end (order_rev = 1 is the winner), capped at 18
df <- castaways %>%
  select(season, personality_type, order) %>%
  filter(!is.na(personality_type)) %>%
  separate(personality_type, into = c("mind", "energy", "nature", "tactics"),
           sep = 1:3) %>%
  group_by(season) %>%
  mutate(order_rev = pmin(18, max(order) - order + 1)) %>%
  ungroup()
# dodged bar chart of counts by final position and E/I, emphasizing the
# extremes (final 3 and first 4 voted out) via alpha
df %>%
  count(order_rev, mind) %>%
  ggplot(aes(x = order_rev, y = n, fill = mind, col = mind, alpha = order_rev %in% c(1:3, 15:18))) +
  geom_col(position = "dodge") +
  #geom_stream(alpha = 0.5, type = "proportional") +
  labs(title = "Survivor participants and extraversion",
       subtitle = glue::glue("<span style='color:darkorange;'>Extroverts</span> are often voted out early - but more likely to make it to the final 3
                             compared to <span style='color:darkblue;'>introverts</span>"),
       x = "Final position") +
  scale_x_continuous(breaks = 1:18, labels = c(1:17, ">18")) +
  scale_fill_manual(values = c("darkorange","darkblue")) +
  scale_color_manual(values = c("darkorange", "darkblue")) +
  scale_alpha_discrete() +
  theme_light() +
  # element_markdown (ggtext) renders the HTML spans in the subtitle
  theme(axis.title.y = element_blank(),
        axis.ticks.y = element_blank(),
        plot.subtitle = element_markdown(size = 16),
        plot.title = element_text(size = 20),
        panel.grid.minor = element_blank(),
        axis.title = element_text(size = 12),
        axis.text = element_text(size = 12),
        legend.position = "none",
        text = element_text(family = "Roboto"))
|
15a3b76cd48244f3e42b84eb13b8c2703a58c8db
|
05f8b78b517ff731153deb34ee005e7883fa5510
|
/man/aliases.Rd
|
10b72b4ecbfebcdb7b539c8274fb25c8c27458ef
|
[] |
no_license
|
cran/Rd
|
468728b63f36c7c5f47422210aa40b3b57dfe19e
|
17dfa33df36554a6b7483425cad7bbc5f4fa9f3c
|
refs/heads/master
| 2020-04-17T05:11:26.453022
| 2019-05-23T03:10:27
| 2019-05-23T03:10:27
| 166,267,156
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,949
|
rd
|
aliases.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/util-aliases.R
\name{aliases}
\alias{aliases}
\alias{s}
\alias{cl}
\alias{undim}
\alias{named}
\alias{clean_Rd}
\alias{get_attr}
\alias{forward_attributes}
\alias{fwd}
\alias{is_whitespace}
\title{Internal Utilities
These utilities are used internally and not exported.
They are however documented for completeness}
\usage{
s(x, ...)
cl(x, new)
undim(x)
named(...)
clean_Rd(obj, ...)
get_attr(x, which, default = NULL, exact = TRUE)
forward_attributes(x, obj)
fwd(x, obj)
is_whitespace(x)
}
\arguments{
\item{x, obj}{An object, any object.}
\item{...}{passed on to other function(s).}
\item{new}{the new class(es) to append.}
\item{which}{name of the attribute to extract.}
\item{default}{the default value to return if not found.}
\item{exact}{exact or partial matching?}
}
\description{
Internal Utilities
These utilities are used internally and not exported.
They are however documented for completeness
}
\section{Functions}{
\itemize{
\item \code{s}: Alias for structure, but also adds automatic naming when unnamed,
\item \code{cl}: Specify an additional class for an object, or class when none is set.
\item \code{undim}: Remove the \code{dim} attribute.
\item \code{named}: Create a named list with names inferred if needed.
\item \code{clean_Rd}: Alias for \code{\link[tools:toRd.default]{tools::toRd.default()}}
\item \code{get_attr}: Alias for \code{attr(x, which) \%||\% default}.
\item \code{forward_attributes}: Forward attributes from object to value.
\item \code{fwd}: Alias for \code{forward_attributes()}.
\item \code{is_whitespace}: Check if a string is composed of only whitespace, with \link[base:grep]{regex} pattern \code{"^\\s+$"}.
}}
\seealso{
\code{\link[purrr:\%||\%]{purrr::\%||\%()}}
\code{\link[base:attributes]{base::attributes()}}
}
|
a40e7d8e28aae5fd53694483429ad4ff297f00cf
|
83058c3faf9a4b2c7e0d0de1bf7426b60a59d64f
|
/man/bm_compound_poisson.Rd
|
2de3b5432ad3dbd6ef2996e9d3ebe6352e7f493e
|
[
"MIT"
] |
permissive
|
valcourgeau/ntwk
|
55d163d11cb79eb888c2c4ad6b9e9f73e8bfb515
|
668657ee962fa6ccbd0cbf195673a36aed50b79b
|
refs/heads/main
| 2023-07-26T18:28:44.556409
| 2021-09-03T10:35:28
| 2021-09-03T10:35:28
| 391,296,572
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,019
|
rd
|
bm_compound_poisson.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/path_generation.R
\name{bm_compound_poisson}
\alias{bm_compound_poisson}
\title{Generates a (correlated) Brownian motion path with
correlated but unsynchronised Gaussian jumps.}
\usage{
bm_compound_poisson(
n,
sigma,
jump_sigma,
n_jumps,
delta_time,
synchronised = F
)
}
\arguments{
\item{n}{Length of the path}
\item{sigma}{Correlation matrix for the Brownian part.}
\item{jump_sigma}{Correlation matrix for the jump part.}
\item{n_jumps}{Number of jumps.}
\item{delta_time}{Time step.}
\item{synchronised}{Boolean to synchronise all jumps or not.}
}
\value{
A BM path with Correlated Gaussian jumps
}
\description{
Generates a (correlated) Brownian motion path with
correlated but unsynchronised Gaussian jumps.
}
\examples{
n <- 1000
sigma <- matrix(c(1.0, 0.2, 0.2, 0.2, 1.0, 0.2, 0.2, 0.2, 1.0), 3, 3)
jump_sigma <- sigma
n_jumps <- 50
delta_time <- 0.5
bm_compound_poisson(n, sigma, jump_sigma, n_jumps, delta_time)
}
|
526a43f9cc504f4d11e0f7ec818c2abc914d4def
|
3d07946ba8756030d99e4afcf2ec79023d6451d6
|
/drawNodeSupportSymbol.r
|
88ee1f76a6715cf3d8af7e16157f86a736e27d9a
|
[] |
no_license
|
samuelcrane/draw-node-support-symbol
|
a33f843ec02505250a7a4256540daa506a9e2e3f
|
4dcd86b69468900665caa90a3ec7361c4aef6a05
|
refs/heads/master
| 2020-05-19T22:03:16.371586
| 2013-06-26T17:06:06
| 2013-06-26T17:06:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,287
|
r
|
drawNodeSupportSymbol.r
|
## This R script draws filled circles on the nodes of a phylogeny corresponding to the bootstrap
## support values. It has been adapted from the excellent book, "Analysis of Phylogenetics and
## Evolution with R" by Emmanuel Paradis.
## To handle posterior probablities from a MrBayes consensus file, either change the values in the p
## matrix or use this handy bit to change the values to probabilities:
## signif(as.numeric(bayesTree$node.label)*100, digits=2)
library(ape)
targetTree <- read.tree("bootTree.tre")
co <- c("black", "grey", "white")
p <- character(length(targetTree$node.label))
p[as.numeric(targetTree$node.label) >= 90] <- co[1]
p[as.numeric(targetTree$node.label) < 90 & as.numeric(targetTree$node.label) >= 70] <- co[2]
p[as.numeric(targetTree$node.label) < 70 | targetTree$node.label == ""] <- co[3]
## To plot a circle on all nodes:
plot(targetTree)
nodelabels(cex=0.75, bg=p, pch=21, frame="n")
## To plot a circle on only those nodes with good support (no white or unfilled circles):
plot(targetTree)
for(j in 1:Nnode(targetTree))
{
if(targetTree$node.label[[j]] != "" & as.numeric(targetTree$node.label[[j]]) > 70)
{
nodelabels("", j+length(targetTree$tip.label), cex=0.75, bg=p[j], pch=21, frame="n")
}
}
|
22d578be0775bf1243a84703f4b21c01ffd487cb
|
f05ce58140ec9316e2449cda141d3df089fdd363
|
/src/main/java/time_series/m4.6/m4.6.data.R
|
5ba4bd046ab5b44b988c539cd3325f34480ed111
|
[] |
no_license
|
zhekunz2/Stan2IRTranslator
|
e50448745642215c5803d7a1e000ca1f7b10e80c
|
5e710a3589e30981568b3dde8ed6cd90556bb8bd
|
refs/heads/master
| 2021-08-05T16:55:24.818560
| 2019-12-03T17:41:51
| 2019-12-03T17:41:51
| 225,680,232
| 0
| 0
| null | 2020-10-13T17:56:36
| 2019-12-03T17:39:38
|
Java
|
UTF-8
|
R
| false
| false
| 35,769
|
r
|
m4.6.data.R
|
n <- 544
height <-
c(151.765, 139.7, 136.525, 156.845, 145.415, 163.83, 149.225, 168.91, 147.955, 165.1, 154.305,
151.13, 144.78, 149.9, 150.495, 163.195, 157.48, 143.9418, 121.92, 105.41, 86.36, 161.29, 156.21, 129.54,
109.22, 146.4, 148.59, 147.32, 137.16, 125.73, 114.3, 147.955, 161.925, 146.05, 146.05, 152.7048, 142.875,
142.875, 147.955, 160.655, 151.765, 162.8648, 171.45, 147.32, 147.955, 144.78, 121.92, 128.905, 97.79,
154.305, 143.51, 146.7, 157.48, 127, 110.49, 97.79, 165.735, 152.4, 141.605, 158.8, 155.575, 164.465,
151.765, 161.29, 154.305, 145.415, 145.415, 152.4, 163.83, 144.145, 129.54, 129.54, 153.67, 142.875, 146.05,
167.005, 158.4198, 91.44, 165.735, 149.86, 147.955, 137.795, 154.94, 160.9598, 161.925, 147.955, 113.665,
159.385, 148.59, 136.525, 158.115, 144.78, 156.845, 179.07, 118.745, 170.18, 146.05, 147.32, 113.03, 162.56,
133.985, 152.4, 160.02, 149.86, 142.875, 167.005, 159.385, 154.94, 148.59, 111.125, 111.76, 162.56, 152.4,
124.46, 111.76, 86.36, 170.18, 146.05, 159.385, 151.13, 160.655, 169.545, 158.75, 74.295, 149.86, 153.035,
96.52, 161.925, 162.56, 149.225, 116.84, 100.076, 163.195, 161.925, 145.415, 163.195, 151.13, 150.495,
141.605, 170.815, 91.44, 157.48, 152.4, 149.225, 129.54, 147.32, 145.415, 121.92, 113.665, 157.48, 154.305,
120.65, 115.6, 167.005, 142.875, 152.4, 96.52, 160, 159.385, 149.86, 160.655, 160.655, 149.225, 125.095,
140.97, 154.94, 141.605, 160.02, 150.1648, 155.575, 103.505, 94.615, 156.21, 153.035, 167.005, 149.86,
147.955, 159.385, 161.925, 155.575, 159.385, 146.685, 172.72, 166.37, 141.605, 142.875, 133.35, 127.635,
119.38, 151.765, 156.845, 148.59, 157.48, 149.86, 147.955, 102.235, 153.035, 160.655, 149.225, 114.3,
100.965, 138.43, 91.44, 162.56, 149.225, 158.75, 149.86, 158.115, 156.21, 148.59, 143.51, 154.305, 131.445,
157.48, 157.48, 154.305, 107.95, 168.275, 145.415, 147.955, 100.965, 113.03, 149.225, 154.94, 162.56,
156.845, 123.19, 161.0106, 144.78, 143.51, 149.225, 110.49, 149.86, 165.735, 144.145, 157.48, 154.305,
163.83, 156.21, 153.67, 134.62, 144.145, 114.3, 162.56, 146.05, 120.65, 154.94, 144.78, 106.68, 146.685,
152.4, 163.83, 165.735, 156.21, 152.4, 140.335, 158.115, 163.195, 151.13, 171.1198, 149.86, 163.83, 141.605,
93.98, 149.225, 105.41, 146.05, 161.29, 162.56, 145.415, 145.415, 170.815, 127, 159.385, 159.4, 153.67,
160.02, 150.495, 149.225, 127, 142.875, 142.113, 147.32, 162.56, 164.465, 160.02, 153.67, 167.005, 151.13,
147.955, 125.3998, 111.125, 153.035, 139.065, 152.4, 154.94, 147.955, 143.51, 117.983, 144.145, 92.71,
147.955, 155.575, 150.495, 155.575, 154.305, 130.6068, 101.6, 157.48, 168.91, 150.495, 111.76, 160.02,
167.64, 144.145, 145.415, 160.02, 147.32, 164.465, 153.035, 149.225, 160.02, 149.225, 85.09, 84.455, 59.6138,
92.71, 111.125, 90.805, 153.67, 99.695, 62.484, 81.915, 96.52, 80.01, 150.495, 151.765, 140.6398, 88.265,
158.115, 149.225, 151.765, 154.94, 123.825, 104.14, 161.29, 148.59, 97.155, 93.345, 160.655, 157.48, 167.005,
157.48, 91.44, 60.452, 137.16, 152.4, 152.4, 81.28, 109.22, 71.12, 89.2048, 67.31, 85.09, 69.85, 161.925,
152.4, 88.9, 90.17, 71.755, 83.82, 159.385, 142.24, 142.24, 168.91, 123.19, 74.93, 74.295, 90.805, 160.02,
67.945, 135.89, 158.115, 85.09, 93.345, 152.4, 155.575, 154.305, 156.845, 120.015, 114.3, 83.82, 156.21,
137.16, 114.3, 93.98, 168.275, 147.955, 139.7, 157.48, 76.2, 66.04, 160.7, 114.3, 146.05, 161.29, 69.85,
133.985, 67.945, 150.495, 163.195, 148.59, 148.59, 161.925, 153.67, 68.58, 151.13, 163.83, 153.035, 151.765,
132.08, 156.21, 140.335, 158.75, 142.875, 84.455, 151.9428, 161.29, 127.9906, 160.9852, 144.78, 132.08,
117.983, 160.02, 154.94, 160.9852, 165.989, 157.988, 154.94, 97.9932, 64.135, 160.655, 147.32, 146.7, 147.32,
172.9994, 158.115, 147.32, 124.9934, 106.045, 165.989, 149.86, 76.2, 161.925, 140.0048, 66.675, 62.865,
163.83, 147.955, 160.02, 154.94, 152.4, 62.23, 146.05, 151.9936, 157.48, 55.88, 60.96, 151.765, 144.78,
118.11, 78.105, 160.655, 151.13, 121.92, 92.71, 153.67, 147.32, 139.7, 157.48, 91.44, 154.94, 143.51, 83.185,
158.115, 147.32, 123.825, 88.9, 160.02, 137.16, 165.1, 154.94, 111.125, 153.67, 145.415, 141.605, 144.78,
163.83, 161.29, 154.9, 161.3, 170.18, 149.86, 123.825, 85.09, 160.655, 154.94, 106.045, 126.365, 166.37,
148.2852, 124.46, 89.535, 101.6, 151.765, 148.59, 153.67, 53.975, 146.685, 56.515, 100.965, 121.92, 81.5848,
154.94, 156.21, 132.715, 125.095, 101.6, 160.655, 146.05, 132.715, 87.63, 156.21, 152.4, 162.56, 114.935,
67.945, 142.875, 76.835, 145.415, 162.56, 156.21, 71.12, 158.75)
weight_s <-
c(0.829868947760389, 0.0594590877885262, -0.254482930150008, 1.18425748334745,
0.384957253626638, 1.86029213547276, 0.178872616084165, 1.3498956032414, -0.0503243172574648,
1.28248474049386, 0.970468747205254, 0.381105204326779, 0.0286426933896516, 0.821335421060046,
-0.119661204654932, 0.879945588658561, 0.456220165674036, 0.186576714683883, -1.08652557891962,
-1.47173050890555, -1.70670551619697, 0.908835958407505, 0.483184510773051, -0.816882127929469,
-1.33305673411062, -0.0079517749590119, 0.155760320285009, -0.00987779960894186, -0.562646874138754,
-0.878514916727218, -1.20593910721526, 0.31947241552903, 1.32485728279231, 0.128795975185994,
0.196206837933532, 0.747049887813414, 0.219319133732687, -0.00217370100922298, 0.800978578011445,
0.833720997060249, 0.93772632815645, 0.935800303506521, 1.42308453993872, 0.238579380231984,
0.970468747205254, -0.462493592342411, -1.03259688872159, -0.832290325128906, -1.51795510050386,
0.383031228976709, 0.200058887233391, 0.461260970201047, 0.614154186968268, -0.923969098465558,
-1.37157722710921, -1.5526235442026, 1.56175831473366, 0.754753986413133, 0.585263817219323,
1.03873886308812, 1.27092859259428, 0.698899271565173, 0.843351120309897, 1.12840276849949,
0.816386775210882, 0.681565049715806, 0.461998239623825, 0.0594590877885262, 1.38071199764027,
0.107609704036767, -0.75139728983186, -0.678208353134533, 0.862611366809193, 0.117239827286416,
-0.408564902144381, 0.785570380812007, 0.793274479411726, -1.54106739630302, 1.49049540268626,
0.157686344934939, 0.427329795925091, -0.545312652289387, 0.787496405461937, 0.515926929821855,
0.99550706765434, 0.255913602081351, -1.23290345231428, 1.02440382362939, 0.25976565138121,
0.0459769152390184, 0.723937592014258, 0.452368116374176, 0.816386775210882, 1.36530380044083,
-1.14045426911765, 0.879945588658561, 0.48896258472284, -0.036842144707957, -1.20401308256533,
1.43656671248823, -0.554942775539035, 1.06291793040188, 0.789422430111866, 0.361844957827482,
-0.196702190652119, 1.45775298363746, 0.500518732622418, 0.294434095079944, -0.214036412501486,
-1.25601574811343, -1.29838829041188, 0.702751320865032, 0.37340110572706, -1.17897476211624,
-1.39468952290837, -1.63929465344943, 0.814460750560952, 0.128795975185994, 0.639192507417353,
0.452368116374176, 1.30752306094294, 1.21699990239625, 1.12647674384956, -1.75678215709514,
0.461998239623825, 0.949282476056028, -1.52951124840344, 0.415773648025513, 1.38649007159006,
0.444664017774458, -1.10193377611906, -1.39468952290837, 1.18810953264731, 0.993581043004411,
0.469702338223543, 0.916540057007224, 0.196206837933532, 0.964690673255465, -0.427825148643677,
1.64072532538078, -1.62388645625, 0.837573046360108, 0.250135528131562, 0.171168517484446, -0.92473950832553,
0.0864234328875413, 0.442737993124528, -1.07496943102004, -1.27912804391259, 0.608376113018478,
0.831794972410319, -0.98059422317349, -1.13529555719263, 1.3306353567421, -0.177441944152822,
0.357992908527623, -1.51795510050386, 1.05912043577825, 0.912688007707365, 1.21122182844646,
1.25552039539484, 1.34219150464168, 0.450442091724247, -0.899701187876444, 0.361844957827482,
0.956986574655747, 0.592967915819041, 0.702751320865032, 0.43118184522495, 1.07832612760132,
-1.5526235442026, -1.53528932235323, 0.577559718619604, -0.231370634350852, 1.43656671248823,
1.15921916289836, 0.0594590877885262, 0.899205835157857, 1.45004888503774, 0.440811968474598,
0.989728993704551, 0.743197838513555, 1.77939910017571, 0.908835958407505, -0.277595225949164,
-0.231370634350852, -0.805325980029891, -0.761027413081509, -0.957481927374334, -0.0214339475085197,
0.681565049715806, 0.562151521420167, 0.675786975766017, 0.230875281632266, 0.37725315502692,
-1.52758522375351, 0.65460070461679, 1.22470400099597, 1.13418084244928, -1.17319668816646,
-1.48521268145506, 0.236653355582055, -1.56803174140204, 0.685417099015665, 0.325250489478819,
1.07832612760132, 0.20776298583311, 0.246283478831703, 0.592967915819041, 0.265543725330999,
-0.308411620348039, 0.758606035712992, -0.890071064626796, 0.340658686678256, 0.989728993704551,
0.384957253626638, -1.22519935371456, 1.29011159293308, 0.637266482767424, 0.619932260918057,
-1.44091411450668, -1.12504607191821, 0.0132344961902143, 0.652674679966861, 0.849129194259686,
0.683491074365735, -1.00563254362258, 0.870315465408912, 0.379179179676849, 0.190428763983743,
0.442737993124528, -1.21942127976477, 0.178872616084165, 0.864537391459123, 0.225097207682477,
0.300212169029733, 0.991655018354481, 1.26900256794435, 0.678664412229122, 0.350288809927905,
-0.576129046688261, 0.25976565138121, -1.0268188147718, 0.515926929821855, -0.254482930150008,
-0.999854469672786, 0.668082877166298, 0.165390443534657, -1.33305673411062, 0.0324947426895105,
0.357992908527623, 0.835647021710178, 0.822164849160671, 0.731641690613977, 0.37725315502692,
0.0671631863882446, 0.508222831222136, 0.851055218909616, 0.0748672849879634, 1.42308453993872,
0.20968901048304, 0.806756651961234, 0.0401988412892294, -1.4486182131064, 0.384957253626638,
-1.38505939965872, 0.621858285567986, 1.00706321555392, 1.33641343069189, 0.157686344934939,
-0.0079517749590119, 1.55212819148401, -0.959407952024263, 0.598745989768831, 0.597138121468594,
0.608376113018478, 0.612228162318338, 0.331028563428608, 0.575633693969674, -0.761027413081509,
-0.0811407116563389, -0.192850141352259, 0.0228646194398627, 0.947354685003132, 1.19388760659709,
0.0999056054370486, 0.332954588078537, 1.0186193634535, 0.567929595369956, -0.123513253954792,
-0.967112050623982, -1.28683214251231, 0.97012090169801, -0.136995426504299, 0.560225496770237,
0.851055218909616, 0.485110535422981, -0.0522503419073943, -0.782213684230735, -0.115809155355073,
-1.59884813580091, 0.384957253626638, 0.279025897880507, 0.0228646194398627, 1.03980563460272,
0.689269148315524, -0.703246673583619, -1.377355301059, 0.924244155606943, 1.5771665119331,
0.533261151671222, -1.20786513186519, 1.11106854665012, 1.02439743740329, -0.0926968595559172,
0.255913602081351, 1.62724315283127, 0.31947241552903, 1.12455071919963, 0.296360119729874,
0.566003570720026, 1.29018883909358, 0.643044556717213, -1.64122067809936, -1.62003440695014,
-2.01872150948558, -1.59692211115098, -1.17512271281639, -1.64699875204915, 0.388809302926498,
-1.31572251226125, -1.95708872068783, -1.61233030835042, -1.40239362150808, -1.74907805849542,
0.427329795925091, 0.469685353579635, -0.458641543042552, -1.55069751955267, 0.512074880521996,
0.354140859227764, 0.492814634022699, 0.720085542714399, -1.02104074082201, -1.34076083271034,
0.831794972410319, 0.469702338223543, -1.25986779741329, -1.52373317445365, 0.876093539358701,
0.696973246915243, 1.1746273600978, 0.812534725911023, -1.54106739630302, -2.03412970668501,
-0.454789493742693, 0.539039225621011, 0.531335127021292, -1.6373686287995, -1.62388645625,
-1.90701207978966, -1.55647559350246, -1.93012437558881, -1.57958788930161, -1.88967785794029,
1.19581363124702, 0.616080211618197, -1.56610571675211, -1.55647559350246, -1.91856822768923,
-1.79337662544381, 0.787496405461937, -0.474049740241989, -0.267965102699516, 1.41538044133901,
-1.05956123382061, -1.84152724169205, -1.85500941424155, -1.62966453019978, 1.37108187439062,
-1.87812171004071, -0.570350972738472, 0.806756651961234, -1.68551924504774, -1.46787845960569,
0.648822630667001, 0.673860951116087, 0.901131859807787, 0.745123863163485, -1.05185713522089,
-1.18667886071596, -1.67781514644803, 0.562151521420167, -0.574203022038332, -1.12504607191821,
-1.47943460750527, 1.38841609623999, 0.304064218329593, -0.614649539686854, 1.032101536003,
-1.79337662544381, -1.90508605513973, 0.726221415172763, -1.10000775146913, 0.155760320285009,
0.933874278856591, -1.92242027698909, -0.506792159290794, -1.88775183329036, 0.577559718619604,
1.04750973320244, 0.350288809927905, 0.132648024485853, 1.08603022620103, 0.625710334867846,
-1.87426966074085, 0.529409102371363, 0.754753986413133, 0.267469749980929, -0.0561023912072537,
-0.870810818127499, 0.250135528131562, 0.124943925886134, 0.887649687258279, -0.000247676359293498,
-1.78182047754423, 0.550595373520589, 0.854907268209475, -0.391230680295014, 1.04365768390258,
0.569855620019886, -0.497162036041145, -1.03644893802145, 0.854907268209475, 0.242431429531844,
0.752827961763203, 1.41345441668908, 0.88187161330849, 0.856833292859404, -1.51602907585393,
-1.96671884393747, 0.806756651961234, -0.00409972565915294, 0.0672172315251614, 0.906909933757576,
1.06291793040188, 0.741271813863625, 0.0922015068373302, -0.712876796833267, -1.31379648761132,
0.88572366260835, 0.165390443534657, -1.84152724169205, 0.793274479411726, -0.493309986741286,
-1.86656556214113, -1.93012437558881, 1.34411752929161, -0.212110387851556, 1.26322449399456,
0.874167514708772, 0.506296806572207, -1.92627232628895, -0.0965489088557762, 0.974320796505114,
0.386883278276568, -2.08998442153297, -1.99560921368642, 0.592967915819041, -0.146625549753947,
-1.27142394531287, -1.86078748819134, 0.793274479411726, 0.71430746876461, -1.04800508592103,
-1.5526235442026, 0.800978578011445, 0.356066883877693, 1.00128514160413, 0.646896606017072,
-1.62966453019978, 0.450442091724247, 0.409995574075724, -1.79722867474366, 0.652674679966861,
1.06291793040188, -0.97866819852356, -1.63159055484971, 0.928096204906802, -0.520274331840301,
1.05906588110202, 0.560225496770237, -1.21749525511484, -0.00602575030908242, -0.0926968595559172,
0.494239713585193, -0.208258338551697, 0.758606035712992, 0.425403771275161, 0.175918952539199,
0.522405688271443, 1.22470400099597, 0.500518732622418, -0.955555902724404, -1.64314670274929,
0.282877947180367, 0.525557053071503, -1.36772517780935, -0.930517582275318, 1.15921916289836,
0.192354788633672, -1.10963787471878, -1.66433297389852, -1.50254690330443, 0.48896258472284,
0.0190125701400032, 0.585263817219323, -2.1304309391815, 0.167316468184587, -2.06879815038375,
-1.44669218845647, -0.841920448378554, -1.69514936829739, 0.577559718619604, 0.571781644669815,
-0.722506920082915, -0.884292990677007, -1.44476616380654, 0.833720997060249, 0.257839626731281,
-0.735989092632423, -1.69514936829739, 0.369549056427201, 0.354140859227764, 0.775940257562359,
-1.22905140301442, -1.92819835093888, -0.0926968595559172, -1.87426966074085, -0.304559571048179,
1.12455071919963, 1.25359437074491, -1.87234363609092, 1.14958903964871)
weight_s2 <-
c(0.688682470456936, 0.00353538312064366, 0.0647615617377341, 1.40246578686443,
0.148192087119764, 3.46068682930179, 0.0319954127847931, 1.82221813965046, 0.00253253690742997,
1.6447671095996, 0.941809589302136, 0.145241176764956, 0.000820403884613591, 0.674591873887883,
0.0143188038994696, 0.774304238999661, 0.208136839567645, 0.0348108704622312, 1.18053783364662,
2.1659906908434, 2.91284371901717, 0.825982799294488, 0.233467271450992, 0.667296410930577, 1.77704025635766,
0.0000632307249987687, 0.0242612773752885, 0.000097570925114412, 0.31657150497811, 0.77178845891223,
1.45428913031114, 0.102062624283953, 1.75524681976783, 0.0165884032241111, 0.0384971232518752,
0.558083534882034, 0.0481008824212564, 0.00000472497607749699, 0.641566682433236, 0.695090700939135,
0.879330666517779, 0.875722208042897, 2.02516960781261, 0.0569201206718777, 0.941809589302136,
0.213900322957789, 1.06625633459751, 0.69270718530318, 2.3041876871457, 0.146712922371408,
0.0400235583610627, 0.212761682630811, 0.377185365370654, 0.853718894919255, 1.88122408992459,
2.41063987001224, 2.43908903363972, 0.569653580006516, 0.342533735746133, 1.0789784256896, 1.61525948747368,
0.488460191794329, 0.711241112127959, 1.27329280795731, 0.666487366739223, 0.464530916994109,
0.213442373415513, 0.00353538312064366, 1.90636562042779, 0.0115798484028807, 0.564597887166664,
0.459966570261456, 0.744098370148425, 0.0137451771021486, 0.166925279264248, 0.617120823209122,
0.629284399685945, 2.37488871994817, 2.22157654542888, 0.0248649833789405, 0.18261075448538,
0.297365888746886, 0.620150588615471, 0.266180596915406, 0.991034321749743, 0.0654917717302523,
1.52005092272846, 1.04940319386652, 0.0674781936375045, 0.00211387673489588, 0.524085637131403,
0.20463691271192, 0.666487366739223, 1.86405446749819, 1.30063593994868, 0.774304238999661, 0.23908440925884,
0.00135734362668204, 1.44964750298847, 2.06372391942925, 0.307961484122968, 1.12979452676981,
0.623187773163725, 0.130931773505173, 0.0386917518073425, 2.12504376130391, 0.250519001705952,
0.0866914363455457, 0.0458115858765061, 1.57757555950894, 1.68581215267869, 0.493859418977547,
0.139428385758191, 1.38998148970706, 1.94515886531037, 2.6872869608279, 0.663346314204309,
0.0165884032241111, 0.408567061538483, 0.20463691271192, 1.70961655489761, 1.48108876243248, 1.2689498544339,
3.08628354748786, 0.213442373415513, 0.901137219347064, 2.33940465899266, 0.172867726392443,
1.92235471861781, 0.197726088703323, 1.21425804695201, 1.94515886531037, 1.4116042615674, 0.987203289017732,
0.220620286532664, 0.840045676098806, 0.0384971232518752, 0.930628095066083, 0.183034357811985,
2.69197959334585, 2.63700722279217, 0.701528607988952, 0.0625677824336554, 0.0292986613778231,
0.855143158258142, 0.00746900975206735, 0.196016930555935, 1.15555927762755, 1.63616855272364,
0.370121494891473, 0.691882876127083, 0.96156503052122, 1.28889600218132, 1.77059045261218,
0.0314856435447332, 0.128158922556067, 2.3041876871457, 1.12173609748312, 0.832999399412839,
1.46705831770519, 1.57633146325243, 1.80147803513229, 0.202898077996915, 0.809462227466284,
0.130931773505173, 0.91582330407134, 0.351610949190778, 0.493859418977547, 0.185917783651593,
1.16278723746765, 2.41063987001224, 2.35711330333184, 0.333575228571956, 0.0535323704399158,
2.06372391942925, 1.34378906763078, 0.00353538312064366, 0.808571133981939, 2.10264176899919,
0.19431519155045, 0.979563480979423, 0.55234302717122, 3.16626115770613, 0.825982799294488,
0.0770591094697676, 0.0535323704399158, 0.648549934111104, 0.579162723461533, 0.916771641248469,
0.000459414105797978, 0.464530916994109, 0.316014333035009, 0.456688036614979, 0.053303395668778,
0.142319942977765, 2.33351661583007, 0.428502082484798, 1.49989989005554, 1.28636618337895, 1.37639046912474,
2.20585670915493, 0.0560048107082464, 2.4587235420443, 0.46979659962305, 0.105787880906211, 1.16278723746765,
0.0431654582822889, 0.0606555519454459, 0.351610949190778, 0.0705134700626652, 0.0951177275657028,
0.575483117420181, 0.792226500085878, 0.116048340809354, 0.979563480979423, 0.148192087119764,
1.50111345634257, 1.66438792222033, 0.406108570058763, 0.384316008126974, 2.07623348538456, 1.2657286639386,
0.000175151889408797, 0.425984237869844, 0.721020388544104, 0.467160048737627, 1.01129681279281,
0.757449009329932, 0.143776850300408, 0.036263114152376, 0.196016930555935, 1.48698825754314,
0.0319954127847931, 0.747424901230946, 0.0506687529064481, 0.0901273464335372, 0.983379675427625,
1.61036751744936, 0.460585384426299, 0.122702250360708, 0.331924678437925, 0.0674781936375045,
1.05435687836937, 0.266180596915406, 0.0647615617377341, 0.999708960524649, 0.446334730762799,
0.0273539988125906, 1.77704025635766, 0.0010559083024575, 0.128158922556067, 0.698305944893091,
0.675955039195389, 0.535299563444478, 0.142319942977765, 0.00451089360582208, 0.258290446175444,
0.724294985633294, 0.00560511036146894, 2.02516960781261, 0.0439694811173563, 0.650856295483699,
0.00161594684099666, 2.09849472734357, 0.148192087119764, 1.91838954058297, 0.386707727329555,
1.0141763201218, 1.78600085773367, 0.0248649833789405, 0.0000632307249987687, 2.40910192279943,
0.920463618407391, 0.358496760264257, 0.356573936111041, 0.370121494891473, 0.374823322735689,
0.109579909805608, 0.331354149633173, 0.579162723461533, 0.00658381508809713, 0.0371911770195864,
0.000522790822129746, 0.897480899197384, 1.42536761718614, 0.00998112999774324, 0.110858757722548,
1.0375854076024, 0.322544025297082, 0.0152555239025009, 0.935305718462124, 1.65593696300281,
0.941134563911359, 0.0187677468830948, 0.313852607231459, 0.724294985633294, 0.235332231578371,
0.0027300982294396, 0.61185824779782, 0.0134117604640554, 2.55631536135405, 0.148192087119764,
0.0778554516880232, 0.000522790822129746, 1.08119575775157, 0.475091958819608, 0.494555883906425,
1.89710762535533, 0.85422725917359, 2.48745420636321, 0.284367455881718, 1.45893817677571, 1.23447331535521,
1.04939010975842, 0.00859270777152944, 0.0654917717302523, 2.64792027843625, 0.102062624283953,
1.2646143200524, 0.0878293205663052, 0.32036004206782, 1.66458724052163, 0.413506301923636, 2.69360531422093,
2.62451147970228, 4.07523653285972, 2.5501602290829, 1.38091339017694, 2.71260488925146, 0.151172674042189,
1.73112572927106, 3.83019626064351, 2.59960902322535, 1.96670786964656, 3.05927405471012, 0.18261075448538,
0.220604331367227, 0.210352065004453, 2.4046627971468, 0.262220683261616, 0.125415748174579,
0.242866263506927, 0.51852318882629, 1.04252419441836, 1.79763961053011, 0.691882876127083,
0.220620286532664, 1.58726686695902, 2.32176278693061, 0.767539889706056, 0.485771706915576,
1.37974943509032, 0.660212680811301, 2.37488871994817, 4.13768366361846, 0.206833483618735,
0.290563286758099, 0.282317017206733, 2.68097602657677, 2.63700722279217, 3.63669507246367, 2.42261627316883,
3.7253801052421, 2.49509790002833, 3.5708824067898, 1.4299702406762, 0.379554827147523, 2.45268711604363,
2.42261627316883, 3.68090404429861, 3.21619972068821, 0.620150588615471, 0.224723156223498,
0.0718052962647622, 2.003301793725, 1.12267000821544, 3.39122258189392, 3.4410599269248, 2.65580648099128,
1.8798655062825, 3.52734115772624, 0.325300232103721, 0.650856295483699, 2.84097512542632, 2.15466717217438,
0.420970806065648, 0.454088581439077, 0.812038628760641, 0.555209571455675, 1.10640343291509,
1.40820671847014, 2.81506366565041, 0.316014333035009, 0.329709110517953, 1.2657286639386, 2.18872675788428,
1.92769925629829, 0.0924550488683862, 0.377794056637262, 1.06523358061976, 3.21619972068821,
3.62935287748784, 0.527397543855531, 1.21001705329217, 0.0242612773752885, 0.872121168709918,
3.69569972137882, 0.256838292718625, 3.56360698409111, 0.333575228571956, 1.09727664115385,
0.122702250360708, 0.0175954983999995, 1.17946165222227, 0.391513423160432, 3.51288676117362,
0.280273997673652, 0.569653580006516, 0.0715400671548608, 0.00314747829917174, 0.758311480967884,
0.0625677824336554, 0.0156109846158398, 0.787921967289721, 0.000000061343578952882, 3.17488421419594,
0.303155265342277, 0.730866437237387, 0.153061445204099, 1.0892213611689, 0.324735427668248,
0.247170090080577, 1.07422640112579, 0.730866437237387, 0.0587729980248533, 0.566749940012538,
1.99785338805786, 0.777697542359319, 0.73416329175229, 2.29834415883454, 3.86798301109876, 0.650856295483699,
0.000016807750480317, 0.00451815621390715, 0.82248562794817, 1.12979452676981, 0.549483902028669,
0.00850111786307425, 0.508193327463259, 1.72606121085984, 0.784506406504349, 0.0273539988125906,
3.39122258189392, 0.629284399685945, 0.243354743018688, 3.48406699777124, 3.7253801052421, 1.80665193254898,
0.0449908166345375, 1.59573612222782, 0.764168843772111, 0.256336456345215, 3.71052507502665,
0.00932169180124097, 0.949301014502359, 0.149678671010024, 4.36803488225052, 3.98245613375013,
0.351610949190778, 0.0214990518406473, 1.61651884871494, 3.46253007620945, 0.629284399685945,
0.510235159932904, 1.09831466011634, 2.41063987001224, 0.641566682433236, 0.126783625794371, 1.0025719347972,
0.418475218876407, 2.65580648099128, 0.202898077996915, 0.168096370761682, 3.23003090932087,
0.425984237869844, 1.12979452676981, 0.95779144280135, 2.6620877386748, 0.861362565562409, 0.270685380371872,
1.1216205405144, 0.313852607231459, 1.48229469622714, 0.0000363096667874069, 0.00859270777152944,
0.244272894484774, 0.0433715355763131, 0.575483117420181, 0.18096836861513, 0.0309474778624889,
0.27290770313836, 1.49989989005554, 0.250519001705952, 0.913087083231451, 2.69993108675587,
0.0800199330009782, 0.276210216033203, 1.87067216201362, 0.865862970923504, 1.34378906763078,
0.0370003647103047, 1.2312962130104, 2.77000424800589, 2.25764719662972, 0.23908440925884,
0.000361477823328542, 0.342533735746133, 4.53873598662175, 0.0279948005257638, 4.27992578703121,
2.09291828814096, 0.708830041397946, 2.87353138083905, 0.333575228571956, 0.326934249181319, 0.5220162495677,
0.781974093360485, 2.08734926808026, 0.695090700939135, 0.0664812731129262, 0.541679944473897,
2.87353138083905, 0.136566505106234, 0.125415748174579, 0.60208328330594, 1.5105673512517, 3.71794888056342,
0.00859270777152944, 3.51288676117362, 0.0927565323170511, 1.2646143200524, 1.57149884636334,
3.50567069161017, 1.32155496008045)
a_mean <- 0
a_scale <- 100
b_mean <-
c(0, 0, 0)
b_scale <-
c(10, 10, 10)
sigma_scale <- 25
weight_s3 <-
c(0.571516197099123, 0.000210210655336425, -0.0164807119921092, 1.66088060323296,
0.0570476188668237, 6.43788849188427, 0.00572310318750868, 2.45980425486087, -0.000127448190795744,
2.10938871972768, 0.913996772235939, 0.0553521683476704, 0.0000234985769226662, 0.55406620078339,
-0.00171340532382827, 0.681345599387375, 0.0949562234304209, 0.00649489784612934, -1.28268455313941,
-3.18775458171964, -4.9713664430663, 0.75068286902492, 0.112807769337567, -0.545102512120667,
-2.36889548052324, -0.0000005027964956853, 0.00377894433449838, -0.00009637860459392, -0.178117967717335,
-0.678027673712306, -1.75378413544027, 0.0326061931152264, 2.32545153226745, 0.00213651957002787,
0.00755339882278787, 0.416916242124137, 0.0105494438644078, -0.0000000102706852, 0.513881168994894,
0.579511712234283, 0.824571517149081, 0.819501108073943, 2.88198755963189, 0.0135799671126263,
0.913996772235939, -0.0989275287679497, -1.10101297368508, -0.576533488475113, -3.49765345222101,
0.0561956309626848, 0.0080070685488349, 0.0981386601518951, 0.231649971405543, -0.788809877681556,
-2.58024412082982, -3.74281621877449, 3.80926757866252, 0.42994831038443, 0.200472601709176,
1.12077682319743, 2.05287946708948, 0.341384472233641, 0.599825988723571, 1.43678712960951,
0.544111472051027, 0.316608037535619, 0.0986100007790979, 0.000210210655336425, 2.63214188401359,
0.00124610405942463, -0.424237322261826, -0.311953170113961, 0.641867712114226, 0.0016114821894771,
-0.0681998103880208, 0.48479184009541, 0.499195254562788, -3.65986357615994, 3.31124962767738,
0.00392086834589313, 0.0780350164479643, -0.162157381492955, 0.488366359379788, 0.137329738144714,
0.986581671589894, 0.0167602352101785, -1.87407603032541, 1.07501264432577, 0.0175285169242738,
0.00009718953146604, 0.379405294154166, 0.092571214744118, 0.544111472051027, 2.54500064870399,
-1.48331581028232, 0.681345599387375, 0.116903330718136, -0.000050007450312, -1.74539455870628,
2.9646770864178, -0.17090100075832, 1.20087886017354, 0.49195840630691, 0.0473770020622566,
-0.00761075234067234, 3.09778888340094, 0.125389453231696, 0.0255249146115813, -0.00980534749201108,
-1.98145974658209, -2.18883875887207, 0.347060359008109, 0.0520627134118477, -1.63875309617336,
-2.71289268984069, -4.40525514716955, 0.540269536948683, 0.00213651957002787, 0.261153004512923,
0.092571214744118, 2.23536307089845, 1.80248487932052, 1.42944250013107, -5.42192786796297,
0.0986100007790979, 0.855433770848025, -3.57814574049669, 0.0718738452280622, 2.6653257314379,
0.0879216770216485, -1.33803195486078, -2.71289268984069, 1.67714047949379, 0.980866473559623,
0.10362586444394, 0.769935511860271, 0.00755339882278787, 0.897768243579751, -0.0783067013378124,
4.41679909421078, -4.28220031412562, 0.587581453302073, 0.0156504253030631, 0.00501500843232078,
-0.790784663715575, 0.000645497463044184, 0.0867841424527646, -1.24219089918122, -2.09286908035668,
0.225173076406663, 0.575504697859299, -0.942905114134749, -1.46327790495979, 2.35601025855576,
-0.00558687380348021, 0.0458799854396128, -3.49765345222101, 1.18805362439451, 0.760268562271535,
1.77693305800847, 1.97911630201602, 2.41792851455315, 0.0913938346597596, -0.728274127592528,
0.0473770020622566, 0.87643060675314, 0.20849401172081, 0.347060359008109, 0.0801643730150268,
1.25386385900272, -3.74281621877449, -3.61885088618213, 0.192659615152489, -0.0123858185069881,
2.9646770864178, 1.55774603809092, 0.000210210655336425, 0.727071881816765, 3.04893335277106,
0.0856564620918726, 0.969502378299492, 0.410500143911684, 5.63404225494359, 0.75068286902492,
-0.0213912409047015, -0.0123858185069881, -0.522294111286346, -0.440758709189172, -0.877792278024715,
-0.000009847057828347, 0.316608037535619, 0.177647938106209, 0.308623827132556, 0.0123064364869852,
0.0536906475116132, -3.56464550172532, 0.280497765124311, 1.83693339644443, 1.458971881563,
-1.61477674000102, -3.27616635790963, 0.0132537263828443, -3.8553565572579, 0.322006622441054,
0.0344075600456722, 1.25386385900272, 0.00896818449758287, 0.0149384603435815, 0.20849401172081,
0.018724409526456, -0.0293354124823617, 0.436564966325878, -0.705137884356997, 0.0395328753713053,
0.969502378299492, 0.0570476188668237, -1.83916323656314, 2.14724615359426, 0.258799380063056,
0.238249891825157, -2.99167413410201, -1.42400306147841, 0.00000231804701308956, 0.278029126122628,
0.61223946156926, 0.31929972361243, -1.01699298620624, 0.659219587078499, 0.05451718815343,
0.00690554000623834, 0.0867841424527646, -1.81326512400844, 0.00572310318750868, 0.646176774421795,
0.0114053947959948, 0.0270573261617064, 0.975173390085605, 2.04356051497741, 0.312582909202999,
0.0429812252543281, -0.191231448560749, 0.0175285169242738, -1.08263348019373, 0.137329738144714,
-0.0164807119921092, -0.999563472552505, 0.298188591107256, 0.00452408999606085, -2.36889548052324,
0.000034311468592074, 0.0458799854396128, 0.583537283092423, 0.555746472839472, 0.391647477583442,
0.0536906475116132, 0.000302965988025369, 0.131269101832913, 0.61641502755328, 0.000419639394821082,
2.88198755963189, 0.00921991698695113, 0.525082645852321, 0.000064959190593, -3.03991768213764,
0.0570476188668237, -2.65708346539141, 0.240477404333049, 1.0213396660805, 2.38683553350251,
0.00392086834589313, -0.00000050279649568, 3.73923501053533, -0.883100115049078, 0.214648497553341,
0.212923890374009, 0.225173076406663, 0.229477394072524, 0.0362740801235868, 0.190738613165524,
-0.440758709189172, -0.000534215441661942, -0.00717232374528413, 0.00001195341319464, 0.850232734555466,
1.70172873300336, 0.000997170835370425, 0.0369109320124095, 1.0569045874206, 0.183182297775969,
-0.00188425939798299, -0.904545431342241, -2.13091290996623, 0.913014311760851, -0.0025710954887743,
0.175828232798878, 0.61641502755328, 0.114162144863268, -0.000142648565928991, -0.478603894236894,
-0.00155320465116682, -4.08716005002015, 0.0570476188668237, 0.0217236873121431, 0.0000119534131946,
1.12423344101864, 0.327466229827146, -0.3477947802584, -2.61299124446261, 0.789514551851328,
3.92312947424318, 0.1516421170213, -1.76220055327435, 1.37158447237007, 1.07499253927287,
-0.000796517025502502, 0.0167602352101785, 4.30881014232845, 0.0326061931152264, 1.42212294312507,
0.0260291079588237, 0.181324927726404, 2.14763187941859, 0.265902976620259, -4.42080074033772,
-4.25179889855331, -8.22676764512534, -4.07240725680034, -1.6227426892292, -4.46765686739958,
0.0587773420158781, -2.2776810935566, -7.49603389972611, -4.19142841800746, -2.75809857176209,
-5.3509091240178, 0.0780350164479643, 0.103614623379415, -0.0964761956758296, -3.72890463489612,
0.134276625051588, 0.0444148408192381, 0.119688048766627, 0.37338105183598, -1.0644596757938,
-2.41020478112744, 0.575504697859299, 0.10362586444394, -1.99974641158275, -3.53774698165814,
0.672436738571566, 0.338569883828509, 1.62069143653658, 0.536445729645992, -3.65986357615994,
-8.41658525703158, -0.094065695304002, 0.156625009087981, 0.150004948197812, -4.38974604048034,
-4.28220031412562, -6.93522143369973, -3.77074310160916, -7.19044694946139, -3.94122642550663,
-6.74781741741921, 1.70997790607818, 0.233836218229754, -3.84116731384016, -3.77074310160916,
-7.06206554856411, -5.76785740184113, 0.488366359379788, -0.106529953834109, -0.0192413135879562,
2.83543417693771, -1.18953761907815, -6.24502876719889, -6.38319855941485, -4.3280736211462,
2.57744952195609, -6.62477600704579, -0.185535303812408, 0.525082645852321, -4.78851824860799,
-3.16278952965429, 0.273135385825522, 0.305992563379491, 0.731753879770841, 0.413699900748395,
-1.16377834534462, -1.67108914432671, -4.72315645644376, 0.177647938106209, -0.189319967652979,
-1.42400306147841, -3.23807811198681, 2.67644867615441, 0.0281122721647902, -0.232210943008523,
1.09942921475963, -5.76785740184113, -6.91422955608333, 0.383007390657403, -1.33102813803122,
0.00377894433449838, 0.814451527504542, -7.10468808204158, -0.130163632955433, -6.72720561734432,
0.192659615152489, 1.14940796162434, 0.0429812252543281, 0.00233400810260392, 1.2809310049584,
0.24497399511097, -6.58409707808592, 0.148379605526441, 0.42994831038443, 0.0191348038755295,
-0.000176581058856474, -0.660345841137119, 0.0156504253030631, 0.00195049770485107, 0.699398687848649,
-0.00000000001519335, -5.65707370668624, 0.166915886555864, 0.624823029284607, -0.0598823333341378,
1.13677424305475, 0.185052308476312, -0.122883585232933, -1.11338081264143, 0.624823029284607,
0.0142484219290374, 0.426665202169057, 2.82387469524761, 0.685829386346461, 0.629055550768614,
-3.48435657111221, -7.60723507595794, 0.525082645852321, -0.0000000689071659167, 0.000303697952297043,
0.745920386359033, 1.20087886017354, 0.407316928745654, 0.00078381587677719, -0.362279231454048,
-2.2676931562298, 0.694855887708747, 0.00452408999606085, -6.24502876719889, 0.499195254562788,
-0.120049325051978, -6.50323947423225, -7.19044694946139, 2.42835253186764, -0.00954301956610999,
2.01577295555008, 0.668011578978142, 0.129782329255618, -7.14748176802508, -0.000899999172099651,
0.924923720573051, 0.0579081749284381, -9.12912485661619, -7.94742615361376, 0.20849401172081,
-0.00315231029532353, -2.05528077230576, -6.44303264329676, 0.499195254562788, 0.364464785566379,
-1.15103934974355, -3.74281621877449, 0.513881168994894, 0.0451434505633172, 1.00386038170174,
0.270710198793399, -4.3280736211462, 0.0913938346597596, 0.0689187680304816, -5.80510417053982,
0.278029126122628, 1.20087886017354, -0.937360025887679, -4.34343721060303, 0.799427328147258,
-0.140830655411913, 1.187870046002, 0.175828232798878, -1.80468675933844, -0.000000218792985866,
-0.000796517025502502, 0.120729365406781, -0.00903248393955876, 0.436564966325878, 0.0769846264903896,
0.0054442478892991, 0.142568536492573, 1.83693339644443, 0.125389453231696, -0.872505752083222,
-4.43638286285322, 0.0226358743808272, 0.145164227166653, -2.55856541541308, -0.805700718285463,
1.55774603809092, 0.00711719733321946, -1.36629291295414, -4.61020940779517, -3.39222080404991,
0.116903330718136, 0.00000687262247008, 0.200472601709176, -9.66946357067543, 0.00468399115150281,
-8.85430255198987, -3.02780853853121, -0.596778506277948, -4.87106490501205, 0.192659615152489,
0.186935002695785, -0.377160352708394, -0.691494209649684, -3.0157315945687, 0.579511712234283,
0.0171415066440572, -0.398670530830525, -4.87106490501205, 0.0504680231015694, 0.0444148408192381,
0.467180657922402, -1.85656492240368, -7.16894290037745, -0.000796517025502502, -6.58409707808592,
-0.0282498896943977, 1.42212294312507, 1.97002210743321, -6.56382020966677, 1.51924509740188)
|
1a08226d520b1c7160d205a76c4e50b292703b2c
|
1d3c33bfd5fb2d08b91ea2fda0ae178fe2f94919
|
/fakeWeblogGenerator.R
|
79a7fcf33c1b723371326d8ce069b835dcf03c9c
|
[] |
no_license
|
jkebinger/drillable-stacked-time-series
|
dc3b35d25055bb92892d823402f85f622f358e82
|
77645d9e22b2cdb4a299e182dbc65f54700a0226
|
refs/heads/master
| 2021-01-21T07:39:47.447951
| 2010-01-17T23:49:01
| 2010-01-17T23:49:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 942
|
r
|
fakeWeblogGenerator.R
|
# Generate a stochastically growing sequence of `length` values starting at
# `initialValue`. Each step multiplies the previous value by a random
# log-normal growth factor (meanlog = 0.15, sdlog = 1/6), so the series
# tends to grow by roughly 16% per step on average.
#
# Args:
#   initialValue: numeric starting value (first element of the result).
#   length: number of values to generate (>= 1).
# Returns:
#   A numeric vector of `length` values.
generateGrowingSequence <- function(initialValue, length) {
  # Vectorized form of the recurrence x[i] = r[i-1] * x[i-1]:
  #   x = initialValue * cumprod(c(1, r))
  # This avoids growing the result with append() inside a loop (O(n^2)
  # copying) and fixes the length == 1 case: the original
  # `for (i in 2:length)` iterated over 2:1 and indexed an empty
  # randomSequence, producing a malformed result.
  growthFactors <- rlnorm(length - 1, 0.15, 1/6)
  initialValue * cumprod(c(1, growthFactors))
}
# Build a fake monthly web-access log as a list of parallel vectors:
# one row per (controller, action, month) combination, with a randomly
# growing hit count per controller/action pair.
#
# Args:
#   controllers: number of controllers to simulate.
#   actionsPerController: number of actions per controller.
#   startDate, endDate: anything accepted by as.Date(); the log covers
#     monthly dates from startDate to endDate.
# Returns:
#   A list with components date, controller, action, hits, all of equal
#   length (controllers * actionsPerController * number of months).
generateFakeWeblog <- function(controllers, actionsPerController, startDate, endDate)
{
  dateSeq <- seq(as.Date(startDate), as.Date(endDate), "months")
  nDates <- length(dateSeq)
  nPairs <- controllers * actionsPerController

  data <- list()
  # Date cycles fastest; the label vectors below repeat to match.
  data$date <- rep(dateSeq, nPairs)
  # Controller varies slowest, then action, then date -- same row order as
  # the original nested append() loops, but built in one shot instead of
  # growing the vectors element-blocks at a time (O(n^2) copying).
  data$controller <- rep(paste("controller", seq_len(controllers)),
                         each = actionsPerController * nDates)
  data$action <- rep(rep(paste("action", seq_len(actionsPerController)),
                         each = nDates),
                     times = controllers)
  # One independent growth series (starting at 100 hits) per pair,
  # concatenated in the same order as the labels above.
  hitsList <- lapply(seq_len(nPairs),
                     function(i) generateGrowingSequence(100, nDates))
  data$hits <- round(unlist(hitsList))
  data
}
|
716a2df323e30f561606b215436fb6eaa2016e0a
|
4201e9b754760dc35fc0aeef9df5a8b9d801c47f
|
/bin/R-3.5.1/src/library/base/man/environment.Rd
|
6bffe932a732d6ac200ec798eb541a537188852e
|
[
"GPL-1.0-or-later",
"GPL-2.0-or-later",
"GPL-2.0-only",
"LGPL-2.1-only",
"LGPL-3.0-only",
"GPL-3.0-only",
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
lifebit-ai/exomedepth
|
cbe59cb7fcf2f9183d187f8d466c6620fb1a0c2e
|
5a775ae5e2a247aeadc5208a34e8717c7855d080
|
refs/heads/master
| 2020-03-27T12:55:56.400581
| 2018-10-11T10:00:07
| 2018-10-11T10:00:07
| 146,578,924
| 0
| 0
|
MIT
| 2018-08-29T09:43:52
| 2018-08-29T09:43:51
| null |
UTF-8
|
R
| false
| false
| 6,121
|
rd
|
environment.Rd
|
% File src/library/base/man/environment.Rd
% Part of the R package, https://www.R-project.org
% Copyright 1995-2012 R Core Team
% Distributed under GPL 2 or later
\name{environment}
\alias{environment}
\alias{environment<-}
\alias{.GlobalEnv}
\alias{globalenv}
\alias{emptyenv}
\alias{baseenv}
\alias{is.environment}
\alias{new.env}
\alias{parent.env}
\alias{parent.env<-}
\alias{.BaseNamespaceEnv}
\alias{environmentName}
\alias{env.profile}
\alias{enclosure}
\title{Environment Access}
\description{
Get, set, test for and create environments.
}
\usage{
environment(fun = NULL)
environment(fun) <- value
is.environment(x)
.GlobalEnv
globalenv()
.BaseNamespaceEnv
emptyenv()
baseenv()
new.env(hash = TRUE, parent = parent.frame(), size = 29L)
parent.env(env)
parent.env(env) <- value
environmentName(env)
env.profile(env)
}
\arguments{
\item{fun}{a \code{\link{function}}, a \code{\link{formula}}, or
\code{NULL}, which is the default.}
\item{value}{an environment to associate with the function}
\item{x}{an arbitrary \R object.}
\item{hash}{a logical, if \code{TRUE} the environment will use a hash table.}
\item{parent}{an environment to be used as the enclosure of the
environment created.}
\item{env}{an environment}
\item{size}{an integer specifying the initial size for a hashed
environment. An internal default value will be used if
\code{size} is \code{NA} or zero. This argument is ignored if
\code{hash} is \code{FALSE}.}
}
\value{
If \code{fun} is a function or a formula then \code{environment(fun)}
returns the environment associated with that function or formula.
If \code{fun} is \code{NULL} then the current evaluation environment is
returned.
The replacement form sets the environment of the function or formula
\code{fun} to the \code{value} given.
\code{is.environment(obj)} returns \code{TRUE} if and only if
\code{obj} is an \code{environment}.
\code{new.env} returns a new (empty) environment with (by default)
enclosure the parent frame.
\code{parent.env} returns the enclosing environment of its argument.
\code{parent.env<-} sets the enclosing environment of its first
argument.
\code{environmentName} returns a character string, that given when
the environment is printed or \code{""} if it is not a named environment.
\code{env.profile} returns a list with the following components:
\code{size} the number of chains that can be stored in the hash table,
\code{nchains} the number of non-empty chains in the table (as
reported by \code{HASHPRI}), and \code{counts} an integer vector
giving the length of each chain (zero for empty chains). This
function is intended to assess the performance of hashed environments.
When \code{env} is a non-hashed environment, \code{NULL} is returned.
}
\details{
Environments consist of a \emph{frame}, or collection of named
objects, and a pointer to an \emph{enclosing environment}. The most
common example is the frame of variables local to a function call; its
\emph{enclosure} is the environment where the function was defined
(unless changed subsequently). The enclosing environment is
distinguished from the \emph{parent frame}: the latter (returned by
\code{\link{parent.frame}}) refers to the environment of the caller of
a function. Since confusion is so easy, it is best never to use
\sQuote{parent} in connection with an environment (despite the
presence of the function \code{parent.env}).
When \code{\link{get}} or \code{\link{exists}} search an environment
with the default \code{inherits = TRUE}, they look for the variable
in the frame, then in the enclosing frame, and so on.
The global environment \code{.GlobalEnv}, more often known as the
user's workspace, is the first item on the search path. It can also
be accessed by \code{globalenv()}. On the search path, each item's
enclosure is the next item.
The object \code{.BaseNamespaceEnv} is the namespace environment for
the base package. The environment of the base package itself is
available as \code{baseenv()}.
If one follows the chain of enclosures found by repeatedly calling
\code{parent.env} from any environment, eventually one reaches the
empty environment \code{emptyenv()}, into which nothing may
be assigned.
The replacement function \code{parent.env<-} is extremely dangerous as
it can be used to destructively change environments in ways that
violate assumptions made by the internal C code. It may be removed
in the near future.
The replacement form of \code{environment}, \code{is.environment},
\code{baseenv}, \code{emptyenv} and \code{globalenv} are
\link{primitive} functions.
System environments, such as the base, global and empty environments,
have names as do the package and namespace environments and those
generated by \code{attach()}. Other environments can be named by
giving a \code{"name"} attribute, but this needs to be done with care
as environments have unusual copying semantics.
}
\seealso{
For the performance implications of hashing or not, see
\url{https://en.wikipedia.org/wiki/Hash_table}.
The \code{envir} argument of \code{\link{eval}}, \code{\link{get}},
and \code{\link{exists}}.
\code{\link{ls}} may be used to view the objects in an environment,
and hence \code{\link{ls.str}} may be useful for an overview.
\code{\link{sys.source}} can be used to populate an environment.
}
\examples{
f <- function() "top level function"
##-- all three give the same:
environment()
environment(f)
.GlobalEnv
ls(envir = environment(stats::approxfun(1:2, 1:2, method = "const")))
is.environment(.GlobalEnv) # TRUE
e1 <- new.env(parent = baseenv()) # this one has enclosure package:base.
e2 <- new.env(parent = e1)
assign("a", 3, envir = e1)
ls(e1)
ls(e2)
exists("a", envir = e2) # this succeeds by inheritance
exists("a", envir = e2, inherits = FALSE)
exists("+", envir = e2) # this succeeds by inheritance
eh <- new.env(hash = TRUE, size = NA)
with(env.profile(eh), stopifnot(size == length(counts)))
}
\keyword{data}
\keyword{programming}
|
274a92d111bb751d25fa0a29fcf4f6a660ece56d
|
45dfbdeaeaa213f373ac41a492e2ec71553f1540
|
/ui.R
|
da7b025c8a10deb76f913e2370bd8f4342060382
|
[] |
no_license
|
AnaLira/WineQualityApp
|
71fef132d3b8a9bffb7ccced258fc67975c970b8
|
64f43d18057f694cdfeb6aaad98ea368dd241fab
|
refs/heads/master
| 2016-09-01T13:21:28.933936
| 2015-09-24T22:28:15
| 2015-09-24T22:28:15
| 43,170,844
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,476
|
r
|
ui.R
|
library(shiny)

# ui.R -- Shiny UI definition for the "Wine Quality Modeling" assignment app.
# The sidebar collects the user's choices (analysis type + predictor
# variables); the main panel shows an introduction and a single plot that the
# server builds from those inputs (presumably from a "wine" data set loaded
# server-side -- not visible in this file; TODO confirm against server.R).
shinyUI(fluidPage(
# Application title
headerPanel("Assignment - Wine Quality Modeling"),
# Sidebar collecting the input variables:
# - one radio-button list selecting the kind of output the user wants;
# - one checkbox list selecting which data-set variables are considered in
#   the linear modeling function or graphs.
# A submit button avoids recalculating results on every variable selection.
sidebarPanel(
h4("Model Configuration",align="center"),
# Choice values are numeric codes (1..3) interpreted by the server.
radioButtons("TypeAnalysis", label = h5("¿What would you like to see about wine quality related to the selected variables?"),
choices = list("Data discovery density graphs" = 1, "Plots with resulting linear model" = 2,
"Data discovery boxplot graphs" = 3),selected = 1),
# Checkbox values are R expressions as strings (e.g. "wine$density");
# the server presumably parses/evaluates them -- TODO confirm.
checkboxGroupInput("ModelVariables",
label = h5("Select the variables to consider in the linear model:"),
choices = list("Density"="wine$density",
"pH"="wine$pH",
"Sulphates"="wine$sulphates",
"Alcohol"="wine$alcohol",
"Fixed acidity" = "wine$fixed.acidity",
"Volatile acidity" = "wine$volatile.acidity",
"Citric Acid" = "wine$citric.acid",
"Residual Sugar" = "wine$residual.sugar",
"Chlorides"="wine$chlorides",
"Free Sulfure Dioxide"="wine$free.sulfur.dioxide"
),
selected = c("wine$density","wine$pH")),
# Inputs are only sent to the server when this button is pressed.
submitButton( "Submit")
),
# Main panel: dataset description, source link, and the resulting plot
# (rendered server-side into output$WineLMplot).
mainPanel(
h5('Based on a dataset containing different measures for different Italian red wines, this application shows how the different variables measured affect resulting wine quality.
The dataset used and description of the data source is available at:'),
a('https://archive.ics.uci.edu/ml/datasets/Wine+Quality',href='https://archive.ics.uci.edu/ml/datasets/Wine+Quality'),
h5('Wine quality is defined on a scale from 1 to 10, being 10 the top quality value.'),
plotOutput("WineLMplot")
)
)
)
|
c648e4f8bd2245d5a955b83a92666fee51ecb32e
|
de9468da30d02ade3b7a600fa096dd50e12fb3d8
|
/3 Getting and cleaning data/Week 3/SolQuizz3.R
|
48438d4b4a4b9aef6d0591ab2d5e4fd89a0b0b1a
|
[] |
no_license
|
MariaBravo/DataScienceSpecialization01
|
0bb9d39bd71e0f1215895811191923225d7701a8
|
e727c681225a99fafeb48914251b22590348c367
|
refs/heads/master
| 2021-01-10T16:17:25.586351
| 2015-11-13T10:06:29
| 2015-11-13T10:06:29
| 46,025,439
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,486
|
r
|
SolQuizz3.R
|
## Question 1
##***********************
## Codebook excerpt for the two ACS housing variables used below. In the
## original file this was pasted in as bare text, which made the whole
## script unparseable; it is kept here as comments for reference.
##
## ACR 1 -- Lot size
##   b  N/A (GQ/not a one-family house or mobile home)
##   1  House on less than one acre
##   2  House on one to less than ten acres
##   3  House on ten or more acres
## AGS 1 -- Sales of Agriculture Products
##   b  N/A (less than 1 acre/GQ/vacant/2 or more units in structure)
##   1  None
##   2  $1 - $999
##   3  $1000 - $2499
##   4  $2500 - $4999
##   5  $5000 - $9999
##   6  $10000+
library(data.table)  # the := operator below requires a data.table
housingData <- read.table("housing.csv", sep=",", header=TRUE)
## The original converted to data.frame() here and then used the data.table
## := operator, which errors on a plain data.frame.
housingData <- data.table(housingData)
## Subsetting reminders (bare console transcripts in the original, now
## commented out so the file parses):
##   housingData[c(1,2)]    # data.frame: columns 1,2
##   housingData[c(1,2),]   # rows 1,2
##   housingData[,c(1,2)]   # columns 1,2
##   DT[2]                  # 2nd row
##   DT[,v]                 # v column (as vector)
##   DT[,list(v)]           # v column (as data.table)
##   dataIris["Sepal.Length", "Sepal.Length"==5.9]  # leftover; dataIris undefined
## Houses on ten or more acres (ACR == 3) that sold $10000+ of agricultural
## products (AGS == 6).
logicalVector <- housingData[,x:=((ACR==3) & (AGS==6))]
logicalVector <- logicalVector$x
which(logicalVector)
##*****************************************************************
## Question 2
##*****************************
library(jpeg)
z <- readJPEG("getdata_jeff.jpg", native=TRUE)
quantile(z, probs=c(0.3, 0.8), na.rm=TRUE)
##*****************************************************************
## Question 3
##*****************************
library(data.table)
fileUrl <- "https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2FGDP.csv"
download.file(fileUrl, destfile="GDP", method="auto")
## "Reading GDP.R"
##GDPdata <- read.table("GDP.csv", sep=",",blank.lines.skip=TRUE, allowEscapes=TRUE, header=FALSE, strip.white=TRUE, encoding="UTF-8", fill=TRUE, quote="", skip=5, as.is=TRUE, colClasses=c("character","character","character","character","character","character","character","character"), nrows=190)
##GDPdata <- data.table(GDPdata)
## To catch the V4 starting with a "
##bad <- !(substr(GDPdata$V4,1,1) %in% letters) & !(substr(GDPdata$V4,1,1) %in% LETTERS)
##GDPdata[bad, x:=1]
##substr(GDPdata$V4,start=2,stop=length(GDPdata$V4))
##
##GDPdata[,nchar(V3)] = 0
##.
##GDPdata[,nchar(V9)]
##GDPdata[,nchar(V10)]
##GDPdata[,nchar(V11)]
##GDPdata[,nchar(V12)]
##A <- scan("GDP.csv", skip=5, sep=",", blank.lines.skip=TRUE, allowEscapes=TRUE, what=list("","","","","","","","","","","",""), strip.white=c(TRUE), quote="", fill=TRUE)
## "Reading EDU.R"
library(data.table)
fileUrl <- "https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2FEDSTATS_Country.csv"
download.file(fileUrl, destfile="EDU.csv", method="auto")
EDUdata <- read.table("EDU.csv", sep=",",blank.lines.skip=TRUE, allowEscapes=TRUE, header=FALSE, strip.white=TRUE, encoding="UTF-8", fill=TRUE, quote="", skip=11, as.is=TRUE, colClasses=c("character","character","character","character","character","character","character","character"))
## NOTE(review): DTgdp and DTedu are never defined in this file (the GDP
## reading code above is commented out), so this merge fails as written --
## presumably they come from an interactive session; verify before sourcing.
dat1 <- merge(DTgdp, DTedu, by.x="V1", by.y="V1", all=FALSE)
library(plyr)
library(stringr)
## Numeric GDP rank, trimmed of whitespace.
dat2 <- dat1[,x:=as.numeric(str_trim(Rank))+0]
dat3 <- arrange(dat2, desc(x))
# Question 4
##----------------------------------------------------
## Mean GDP rank per income group (V3).
dat4 <- arrange(dat3, V3)
dat4[,mean(x), by=V3]
##----------------------------------------------------
# Question 5
##----------------------------------------------------
## Split the GDP rank into quintiles and cross-tabulate against income group.
brks <- with(dat4, quantile(x, probs = c(0, 0.20, 0.4, 0.6, 0.8, 1)))
dat4 <- within(dat4, quartile <- cut(x, breaks = brks, labels = 1:5,
                                     include.lowest = TRUE))
table(dat4$quartile, dat4$V3)
|
ece44e53818e1c4937393f0f2ba47f608cf4482d
|
753e3ba2b9c0cf41ed6fc6fb1c6d583af7b017ed
|
/service/paws.ssm/man/get_patch_baseline.Rd
|
d8d2cc530d4a9d95b4b4d1bff96c9c4c4b876133
|
[
"Apache-2.0"
] |
permissive
|
CR-Mercado/paws
|
9b3902370f752fe84d818c1cda9f4344d9e06a48
|
cabc7c3ab02a7a75fe1ac91f6fa256ce13d14983
|
refs/heads/master
| 2020-04-24T06:52:44.839393
| 2019-02-17T18:18:20
| 2019-02-17T18:18:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 491
|
rd
|
get_patch_baseline.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/paws.ssm_operations.R
\name{get_patch_baseline}
\alias{get_patch_baseline}
\title{Retrieves information about a patch baseline}
\usage{
get_patch_baseline(BaselineId)
}
\arguments{
\item{BaselineId}{[required] The ID of the patch baseline to retrieve.}
}
\description{
Retrieves information about a patch baseline.
}
\section{Accepted Parameters}{
\preformatted{get_patch_baseline(
BaselineId = "string"
)
}
}
|
8f6b6278883abd331fba539b8b8ef41581e07704
|
85bd593fc4603e99bbb6e8e097960ab832a469d3
|
/man/cartesian.Rd
|
9b13e8996478528bc280a5197633787dba551f55
|
[] |
no_license
|
cran/GeodesiCL
|
2dab609c79d45ceba619cf478ad5f5382c1d7667
|
09c72a0c6deefe2b168024406087c2fcf8ae34b3
|
refs/heads/master
| 2023-04-25T15:26:14.862721
| 2021-05-25T11:20:02
| 2021-05-25T11:20:02
| 370,748,265
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,467
|
rd
|
cartesian.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Cartesian.R
\name{cartesian}
\alias{cartesian}
\title{To convert from Geographic coordinate to Cartesian coordinate.}
\usage{
cartesian(a, longlat_df, digits = 4)
}
\arguments{
\item{a}{Selection of Ellipsoid.}
\item{longlat_df}{Point name, Sexagesimal longitude and latitude as dataframe.}
\item{digits}{Number of digits the seconds are \code{\link{round}ed} to. DEFAULT: 4}
}
\value{
data.frame with the data in the following order: "Pt", "X", "Y", "Z".
}
\description{
With this function it is possible to convert from Geographic coordinate to Cartesian coordinate and obtain the decimal precision that you assign.
}
\note{
create data frame of epsg codes by epsg <- rgdal::make_EPSG()
}
\examples{
# Point name
Pto <- "St1"
# Longitude
g <- -71
m <- 18
s <- 44.86475
# Value in sexagesimal
sexa_long <- sexagesimal(g,m,s)
# Latitude
g <- -33
m <- 38
s <- 30.123456
# Value in sexagesimal
sexa_lat <- sexagesimal(g, m, s)
print(sexa_lat)
# ELLIPSOIDAL HEIGHT (h)
h <- 31.885
# Longitude and Latitude as data.frame
longlat_df <- data.frame(Pto, sexa_long, sexa_lat, h)
# To see the available ellipsoids and their order, open the Ellipsoids data set in the package and look for its number
Ellip <- Ellipsoids
#View(Ellip)
# We choose the number 5 which is GRS80
value <- cartesian(5, longlat_df, digits = 4)
print(value)
}
\references{
https://github.com/OSGeo/PROJ & https://github.com/cran/rgdal
}
|
fafb4a5298b6a7bfe9014caddf7bcc8617812457
|
0d658054756f19a535a5ea48382437f071656882
|
/plot3.R
|
6334648137b0913fe91c7a11affcf8b37833b268
|
[] |
no_license
|
visheshtayal/ExploratoryDataAnalysis1
|
b8eb87852e41e448d654f875a48c9e17844f7bdf
|
23bd0400143e2efe95626fa6b77ab1acc30ac6ce
|
refs/heads/master
| 2022-12-12T03:51:43.391429
| 2020-09-05T19:49:52
| 2020-09-05T19:49:52
| 292,792,856
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 500
|
r
|
plot3.R
|
## plot3.R -- energy sub-metering for 1-2 Feb 2007, written to plot3.png.
library(lubridate)
library(data.table)

## Read the raw file; "?" encodes missing values in the source data.
DT <- data.table::fread("household_power_consumption.txt", na.strings = "?")

## Build a single POSIXct timestamp column from the Date and Time fields.
DT[, dateTime := as.POSIXct(paste(Date, Time), format = "%d/%m/%Y %H:%M:%S")]

## Keep only the two target days: the half-open interval [2007-02-01, 2007-02-03).
DT <- DT[(dateTime >= "2007-02-01") & (dateTime < "2007-02-03")]

png("plot3.png", height = 480, width = 480)
plot(DT[, dateTime], DT[, Sub_metering_1], type = "l",
     xlab = "", ylab = "Energy sub metering")
lines(DT[, dateTime], DT[, Sub_metering_2], col = "red")
lines(DT[, dateTime], DT[, Sub_metering_3], col = "blue")
## Fix: without a legend the three same-style series are indistinguishable.
legend("topright", lty = 1, col = c("black", "red", "blue"),
       legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
dev.off()
|
9021b611ad4190ba052287220942b5af6ff21839
|
87f61a31940ea636e2330909b72c0658496eac0d
|
/plot1.R
|
4b32b8c6aee705bae80fcca30c440e0c2210a912
|
[] |
no_license
|
Vcub38/ExData_Plotting1
|
e4bc1ed4f9ba4c62b6cd389f54ff5bc517a5151a
|
d21dabc50c7cc9bcdfbd2548e1decb3f69527118
|
refs/heads/master
| 2020-03-28T17:37:15.897885
| 2018-09-14T16:17:23
| 2018-09-14T16:17:23
| 148,807,983
| 0
| 0
| null | 2018-09-14T15:28:53
| 2018-09-14T15:28:53
| null |
UTF-8
|
R
| false
| false
| 970
|
r
|
plot1.R
|
## plot1.R -- histogram of global active power for 1-2 Feb 2007.
## Run from the folder containing "household_power_consumption.txt".

## Read the full dataset; "?" marks missing values in the raw file.
power <- read.table("household_power_consumption.txt", sep = ";",
                    header = TRUE, na.strings = c("?"))

## Keep only the two target days (dates are stored as d/m/Y strings).
feb_days <- power[power$Date %in% c("1/2/2007", "2/2/2007"), ]

## Fuse Date and Time into one POSIXct timestamp, then prepend it to the
## measurement columns (columns 3 through 9 of the original file).
DateTime <- as.POSIXct(paste(feb_days$Date, feb_days$Time),
                       format = "%d/%m/%Y %H:%M:%S")
feb_days <- cbind(DateTime, feb_days[, 3:9])

## Draw the histogram with custom axis tick positions and save it as a PNG.
png(filename = "plot1.png", width = 480, height = 480)
hist(feb_days$Global_active_power, freq = TRUE, col = "red",
     main = "Global Active Power",
     xlab = "Global Active Power (kilowatts)", breaks = 11,
     xaxt = "n", yaxt = "n")
axis(side = 1, at = c(0, 2, 4, 6))
axis(side = 2, at = c(0, 200, 400, 600, 800, 1000, 1200))
dev.off()
|
3ee278c64f2a7b87bc2e8bda3cac34faa9656f05
|
ee81f0c7496bf294ebc89046a85bf8e5b3b28ff4
|
/man/plotCyclopsSimulationFit.Rd
|
740d851554d4419779485cd09a2645cba171aead
|
[
"Zlib",
"Apache-2.0"
] |
permissive
|
OHDSI/Cyclops
|
34f5bf493fb89de9bb34970016c2089611fddb33
|
c7710e1cda715470c1f8476aeda21221d9b529a2
|
refs/heads/main
| 2023-08-04T22:33:15.297098
| 2023-04-14T20:32:38
| 2023-04-14T20:32:38
| 16,480,696
| 36
| 36
| null | 2023-05-31T21:01:22
| 2014-02-03T13:50:33
|
C++
|
UTF-8
|
R
| false
| true
| 623
|
rd
|
plotCyclopsSimulationFit.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Simulation.R
\name{plotCyclopsSimulationFit}
\alias{plotCyclopsSimulationFit}
\title{Plot Cyclops simulation model fit}
\usage{
plotCyclopsSimulationFit(fit, goldStandard, label)
}
\arguments{
\item{fit}{A Cyclops simulation fit generated by \code{fitCyclopsSimulation}}
\item{goldStandard}{Numeric vector. True relative risks.}
\item{label}{String. Name of estimate type.}
}
\description{
\code{plotCyclopsSimulationFit} generates a plot that compares \code{goldStandard} coefficients to their
Cyclops model \code{fit}.
}
\keyword{internal}
|
0f4f507632bb161920452d72e61b638eb1f5343d
|
b136b4bfef2449633275481a5aa62c60e32f07bd
|
/R/bolshev.rec.vec.R
|
79e26429e1fada6c76c01def6d6ebdf53ec2dd00
|
[] |
no_license
|
cran/MHTcop
|
cba5339e5c2875ee8d9dfc318aeb132c8e89dcae
|
496ee271b9e68adff69523e19dee05c469678ee4
|
refs/heads/master
| 2020-03-08T17:36:15.334807
| 2019-01-21T15:10:03
| 2019-01-21T15:10:03
| 128,273,287
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,658
|
r
|
bolshev.rec.vec.R
|
#' Distribution function of the order statistics of i.i.d. uniform random variables
#'
#' \code{bolshev.rec.vec} is a vectorized and unrolled implementation of the Bolshev recursion described in Shorack, Wellner (1986)
#' which can be utilized to calculate probabilities for order statistics of i.i.d. uniform random variables.
#'
#' Denote by \eqn{U_1,\cdots,U_n} n i.i.d. uniform random variables on \eqn{[0,1]}. Denote by \eqn{U_{1:n},\cdots,U_{n:n}} their order statistics.
#' Then the return value \code{p} contains the probabilities \deqn{p[i,j] = P\left(\bigcap\limits_{k=i}^n\left\{m[n-k+1,j] \le U_{k:n}\right\}\right)}{p[i,j] = P(\forall k=i,\cdots,n: m[n-k+1,j] \le U_{k:n})}
#' @param m matrix whose columns are p-values sorted in descending order
#' @return matrix p containing the calculated probabilities
#' @references G. R. Shorack and J. A. Wellner (1986). Empirical Processes with Applications to Statistics
#'
#' @export
#' @examples
#' bolshev.rec.vec(cbind(rev(c(0.7,0.8,0.9))))
#' #result: c(0.016, 0.079, 0.271)
#' #monte carlo simulation
#' sim <- function(v) mean(replicate(1e4,all(v <= sort(runif(3)))))
#' set.seed(0)
#' c(sim(c(0.7,0.8,0.9)),sim(c(0,0.8,0.9)),sim(c(0,0,0.9)))
#' #similar result: c(0.0176, 0.0799, 0.2709)
bolshev.rec.vec <- function (m)
{
  dim.row <- nrow(m)
  dim.col <- ncol(m)
  # s is the 0-based row index, used in the recursion factor k / (k - s).
  s <- 0:(dim.row-1)
  # summands holds the per-order terms of the recursion; start with only
  # the first-order term m[1, ] present (all other rows zeroed).
  summands <- m
  summands[-1,] <- 0
  # Fix: guard the recursion for single-row input. Without it,
  # 2:dim.row evaluates to c(2, 1) when dim.row == 1 and the loop
  # indexes summands[2, ] out of bounds. For one row the recursion is
  # empty and the result is simply 1 - m (P(m <= U_{1:1})).
  if (dim.row >= 2) {
    for (k in 2:dim.row) {
      # Fk = 1 - column sums of the terms accumulated so far; na.rm=TRUE
      # discards NaN rows produced by the Inf * 0 factor below, which are
      # always overwritten on their own iteration.
      Fk <- 1 - .colSums(summands,dim.row,dim.col,na.rm=TRUE)
      summands <- (k / (k-s)) * summands * m
      summands[k,] <- k * Fk * m[k,]
    }
  }
  # Cumulative sums down each column give the distribution; reverse the
  # row order so p[i, ] corresponds to the i-th order statistic.
  ret <- 1 - matrixStats::colCumsums(summands)
  ret[dim.row:1,]
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.