blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
3bd31872f504541d755c2325f9d8cb39f17f627d
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/BatchExperiments/R/getExperimentParts.R
|
8fd4f9f332ac22f4b407cd518924375b2341f6e6
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 788
|
r
|
getExperimentParts.R
|
#' @title Get all parts required to run a single job.
#'
#' @param reg [\code{\link{ExperimentRegistry}}]\cr
#'   Registry.
#' @param id [\code{integer(1)}]\cr
#'   Id of a job.
#' @return [named list]. Returns the \link{Job}, \link{Problem}, \link{Instance} and \link{Algorithm}.
#' @family get
#' @export
getExperimentParts = function(reg, id) {
  checkExperimentRegistry(reg, strict = TRUE)
  id = BatchJobs:::checkId(reg, id)
  parts = namedList(c("job", "prob", "instance", "algo"))
  parts$job = dbGetJobs(reg, id)[[1L]]
  parts$prob = loadProblem(reg, parts$job$prob.id)
  # 'insert' retains the "instance" slot even when calcDynamic() yields NULL
  parts = insert(parts, list(instance = calcDynamic(reg, parts$job, parts$prob$static, parts$prob$dynamic)))
  parts$algo = loadAlgorithm(reg, parts$job$algo.id)
  parts
}
|
4385cf0c2cd74a651a089de74cbefe18b2ff1b52
|
8bfeb902acc4577317c621cbb837de1919364c0c
|
/man/AOCF.Rd
|
00ae45bc63e41d3dec6272002b8b9239337dba15
|
[] |
no_license
|
melren/effiplot
|
65e1a8bf6c04712afa4f027ca70deea24ca2924e
|
e1208fa929c7cfe789178b66cc5c12296f6add08
|
refs/heads/master
| 2020-04-01T19:05:41.612326
| 2018-11-14T23:06:43
| 2018-11-14T23:06:43
| 153,533,582
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 642
|
rd
|
AOCF.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{AOCF}
\alias{AOCF}
\title{Average Observation Carried Forward}
\usage{
AOCF(x)
}
\arguments{
\item{x}{data series/vector with missing values to fill (vector)}
}
\value{
returns the same data series with missing values filled in with the average numeric value
}
\description{
This is a function to handle missing values in a numeric dataframe column. It fills empty/missing row values
with the average numeric value rounded to the minimum number of decimal places in the input data.
}
\examples{
AOCF(c(NA, 2.230, 1.1, 2390.141, NA, 1341.012, 10.24))
}
|
d72f1dc039aca4fd257a7bffd6ae2e106ee6efd1
|
376c7deac1b0a2a81aa876238c11683399b0e352
|
/man/plotPurity.Rd
|
bd7c7c0367a5121db23a4993fcf5b00153611cd8
|
[] |
no_license
|
svkucheryavski/supure
|
dd127b0f4108b60fe0be6395117cd4e81ce07816
|
703e54c7bba4b74f0d4a04645641c8973abc6365
|
refs/heads/master
| 2021-01-10T03:13:52.248797
| 2015-09-23T08:12:00
| 2015-09-23T08:12:00
| 42,987,569
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 891
|
rd
|
plotPurity.Rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/plots.R
\name{plotPurity}
\alias{plotPurity}
\title{Purity spectra plot}
\usage{
plotPurity(obj, ncomp = 1, origspec = NULL, col = c("#62B7EF", "#D84E5E",
"#F0F0F0"), lty = 1, lwd = 1, xlab = "Wavenumbers",
ylab = c("Weighted std", "Purity"), main = sprintf("Purity for C\%d",
ncomp))
}
\arguments{
\item{obj}{object with unmixing results}
\item{ncomp}{which component to make the plot for}
\item{origspec}{original spectral data used for unmixing}
\item{col}{colors for the std, purity and original spectra}
\item{lty}{line type for the component spectra}
\item{lwd}{line width for the component spectra}
\item{xlab}{label for x axis}
\item{ylab}{label for y axis}
\item{main}{main title for the plot}
}
\description{
Shows a plot with purity spectra for every component, or for a user-selected component
}
|
25a226b6c1bcad101c37e6cdd8a2dbfa0ddf268c
|
d6e11b703be86c2964ede85d928e0c8986361558
|
/MCA.R
|
97fee3eb9ae38d5daa582f1f67b2aa7e12071ed1
|
[] |
no_license
|
polserra95/BankMarketing_ML
|
e736d06e11aa36c04833e7e90ba45a33a0c07aea
|
7437985638e0171da4d5d19f84d8352afde49218
|
refs/heads/master
| 2020-03-21T16:17:49.320223
| 2018-06-26T16:07:55
| 2018-06-26T16:07:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,699
|
r
|
MCA.R
|
## MCA.R -- Multiple Correspondence Analysis of the bank-marketing data.
## Steps: load data, recode variable types, run an exploratory MCA, choose the
## number of significant dimensions (eigenvalues above the mean, then 80% of
## the retained variance) and re-run the MCA keeping only those dimensions.

## Load workspace and libraries
setwd("C:/Users/Pol/Desktop/MVA_Project")  # NOTE(review): machine-specific path
library("dplyr")          # Data manipulation
library("reshape2")       # Data reshaping for ggplot
library("ggplot2")        # Data visualization
library("RColorBrewer")   # Colors on plots
library("readr")          # CSV file I/O, e.g. the read_csv function
library("dataQualityR")   # DQR generation
library("randomForest")   # Random Forest for variable importance
library("scales")         # Colour palette
library("fmsb")           # Radar plots
library("caret")          # For Machine Learning, TOP
library("RANN")
library("VIM")
library("chemometrics")
library("corrplot")       # To correlation plot of continuous variables
library("FactoMineR")     # To PCAs / MCA

## Read data and apply MCA
rm(list = ls())  # clean workspace (packages stay attached)
data <- read.csv("phpkIxskf.csv", stringsAsFactors = FALSE)
head(data)
colnames(data) <- c("age", "job", "marital", "education", "credit_in_default",
                    "balance", "housing_loan", "personal_loan",
                    "contact_type", "last_contact_day",
                    "last_contact_month", "last_contact_duration",
                    "number_contacts_campaign", "last_day_contact_previous_campaign",
                    "number_contacts_before_campaign",
                    "outcome_previous_campaign", "subscription")
head(data)
summary(data)

## Column indices of the supplementary variables for MCA
quanti.vars <- c("age", "balance", "last_contact_day", "last_contact_duration",
                 "number_contacts_campaign", "last_day_contact_previous_campaign",
                 "number_contacts_before_campaign")
quanti.idx <- match(quanti.vars, colnames(data))
quali.idx  <- match("subscription", colnames(data))

## Recode types: continuous variables as numeric, categorical as factor
data[quanti.vars] <- lapply(data[quanti.vars], as.numeric)
data$subscription <- as.factor(data$subscription - 1)  # recode to 0/1
factor.vars <- c("education", "housing_loan", "outcome_previous_campaign", "job",
                 "credit_in_default", "personal_loan", "last_contact_month",
                 "marital", "contact_type")
data[factor.vars] <- lapply(data[factor.vars], as.factor)
sapply(data, class)

## Exploratory MCA with all dimensions
mca <- MCA(data, quanti.sup = quanti.idx, quali.sup = quali.idx)

## Interpret the first factorial components
dimdesc(mca)

## Decide the number of significant dimensions that we retain
## (by subtracting the average eigenvalue and representing the
## new obtained eigenvalues in a new screeplot).
plot(mca[["eig"]][, 1], type = "l", xlab = "Index", ylab = "Eigenvalue")
## Find average eigenvalue
avg <- mean(mca[["eig"]][, 1])
abline(h = avg, col = "red", lty = 2)
## Take all those dimensions > than average
lmb <- mca[["eig"]][, 1][mca[["eig"]][, 1] > avg]
## New screeplot
plot(lmb, type = "l", xlab = "Index", ylab = "Eigenvalue")
plot(cumsum(100 * lmb / sum(lmb)), type = "o", xlab = "Component Number",
     ylab = "Contribution to total variance(%)", pch = 16, ylim = c(0, 100))
abline(h = 80, col = "red", lty = 2)
## We take dimensions that explain 80% of the new dimension set
nd <- which.min(abs(cumsum(100 * lmb / sum(lmb)) - 80))
L <- 1:nd

## Final MCA retaining the selected number of dimensions
## (fix: was hard-coded to ncp = 11 despite nd being computed above)
mca <- MCA(data, quanti.sup = quanti.idx, quali.sup = quali.idx, ncp = nd)

rm(list = setdiff(ls(), c("data", "mca")))
|
b60b0f457e1012b4609291153c0641ddc0d56d16
|
81a96b248fe00c49b348b3b31513e48a14212ca7
|
/plot2.R
|
a66b001d9d98377cce42be72ce5eb9b8b9713047
|
[] |
no_license
|
nisamvp/ExData_Plotting1
|
5f31a533d708734d2603d302a79f0eb190c2318b
|
d0d08eddd1c4807c231465021a8c145f29794b6a
|
refs/heads/master
| 2020-12-25T08:41:53.335671
| 2016-06-04T19:44:13
| 2016-06-04T19:44:13
| 60,402,321
| 0
| 0
| null | 2016-06-04T09:09:48
| 2016-06-04T09:09:48
| null |
UTF-8
|
R
| false
| false
| 635
|
r
|
plot2.R
|
# Produce plot2.png: Global Active Power on 1-2 Feb 2007 as a line chart.
power <- read.table("household_power_consumption.txt", header = TRUE, sep = ";")
power$Date <- as.Date(power$Date, "%d/%m/%Y")

# Restrict to the two target days
feb <- subset(power, Date >= "2007-02-01" & Date <= "2007-02-02")

# Numeric power values; build a full timestamp from Date + Time
feb$Global_active_power <- as.numeric(as.character(feb$Global_active_power))
feb$Date <- strptime(paste(feb$Date, feb$Time), "%Y-%m-%d %H:%M:%S")

png(file = "plot2.png", width = 480, height = 480, units = "px")
with(feb, plot(Date, Global_active_power, type = "l",
               ylab = "Global Active Power(kilowatts)", xlab = ""))
dev.off()
|
df2c9104d1312cf2ccebc4c6ae4f17d6d874ff39
|
41762d55e0600512241940c77b33239970c41557
|
/1. MSY proxy reference points.R
|
e23e25c74420536db0df0cc47b7bcb3193282292
|
[] |
no_license
|
JuanMatiasBraccini/Git_reference.points
|
cf8365b16294307e56ae5d754c2aba965e474cd4
|
d295633004b5492ba1cc9e31fde2be0f6c9dcda8
|
refs/heads/master
| 2021-06-16T13:51:03.284644
| 2021-04-28T05:37:41
| 2021-04-28T05:37:41
| 191,690,517
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 102,584
|
r
|
1. MSY proxy reference points.R
|
###########################MSY PROXY REFERENCE POINTS###########################
#notes: This script implements different MSY proxy reference points
# FIRST: the Spawning Potential Ratio reference points derived in Brooks
# et al (2010) for assessment of data poor fisheries using a biological reference point
# which is compared to abundance data ("overfished state").
# To determine if "Overfishing" is occurring (page 169), need current estimate of F (tagging, etc)
# Also, due to the relationship between steepness (lower bound=0.2) and alpha, the Maximum
# Reproductive Rate at low density (lower bound=1), this approach can also be used to evaluate
# if life history parameter combinations satisfy the lower bounds
# Finally, by reparameterising the S-R relationship in terms of alpha,
# we avoid the need for estimating steepness!!
# Gummy shark not included as whiskery is used as representative of this life history group
# assumptions: for steepness: if using maturity schedule, assumes that reproduction occurs immediately once
# reaching maturing for first age at maturity
# for reference point: in addition, these assumes constant catchability and selectivity thru time
# required: life history parameters and an index of abundance for stock status assessment
# life history data only for steepness
# F.mer is estimated by finding F that yields SPR.mer
# F.lim is estimated by finding F that yields SPR.lim, which is estimated by finding the SPR.lim
# that yields p*optimal depletion
# F.targ is estimated by finding F that yields SPR.tar, which is estimated by finding the SPR.tar
# that yields 1.2*optimal depletion
# SECOND: Zhou et al (2012) FMSY proxy
# THIRD: Per recruit analysis
#VERY IMPORTANT: check if simulations make sense (growth in particular) by tracking
# selectivity and growth curve. Resample if too much variability
rm(list=ls(all=TRUE))
library(triangle)
library(TeachingDemos) #for grey scale
library(mvtnorm) #for multivariate normal pdf
library(PBSmapping)
if(!exists('handl_OneDrive')) source('C:/Users/myb/OneDrive - Department of Primary Industries and Regional Development/Matias/Analyses/SOURCE_SCRIPTS/Git_other/handl_OneDrive.R')
#source sigmas for var-covar matrix
source(handl_OneDrive("Analyses/Reference Points/5.Derive var_covar Matrix.r"))
#source indirect estimation of M
source(handl_OneDrive("Analyses/SOURCE_SCRIPTS/M_indirect.R"))
#---DATA SECTION----
#Abundance index
species.names <- c("gummy","whiskery", "dusky","sandbar")
abundance.species.list <-depletion.species.list<- vector("list", length(species.names))
names(abundance.species.list) <-names(depletion.species.list) <- species.names
N.sp=length(species.names)
#abundance series
abundance.species.list$whiskery=c(3.76,0.81) #first and last year of cpue data.
#initial depletion level of the abundance series
depletion.species.list$Whiskery=0.95 #dummy. Use real value. Note: if catchability/selectivity change, depletion
# will change if inferred from cpue
#Empirical F at age estimates (McAuley et al 2007)
Dusky.F.emp=c(.21,.145,.05,.03,.03,.025,.022,.02,.03,.01,0)
names(Dusky.F.emp)=0:10 #age classes studied
Sandb.F.emp=c(0,0.06,.28,.08,.04,.03,.04,.03,.025,.022,.02,.01,0)
names(Sandb.F.emp)=c("0-3","3-6","6-9","9-12","12-15","15-18","18-24",25:30)
Empirc.F=list(Dusky=Dusky.F.emp,Sandbar=Sandb.F.emp)
#Maturity ogive
#Whiskery
# FL.w=c(92,96,100,104,108,112,116,120,124,128,132)
# Prop.M.w=c(0,0,0,0.35,0.4,0.25,0.58,0.7,0.8,1,1)
# fit.Mat=function(DAT,theta)fit=nls(Prop.M~1/(1+exp(-log(19)*((FL-L50)/(L95-L50)))),data=DAT,start=theta)
# Whis.Fit=fit.Mat(DAT=data.frame(Prop.M=Prop.M.w,FL=FL.w),theta=c(L50=115,L95=125))
#---PARAMETERS SECTION----
#Management decisions
Prop.FMsy=0.75 #management parameter. Proportion of Fmsy (as used by NMFS)
#Prop.FMsy=0.65 #sensitivity
#Prop.FMsy=0.85
#Life history par vectors (Gummy,Whiskery,Dusky,Sandbar)
#Min values
Min.max.age=c(16,15,40,30)
Min.breed.freq=c(1,0.5,0.5,0.5)
Min.age.mat=c(4,6,26,13)
Min.fec=c(1,4,2,4)
#Max values
Max.max.age=c(21,20,56,40) #put +1 because using floor in random sample
Max.breed.freq=c(1,0.5,0.333,0.5)
Max.age.mat=c(6,8,36,20) #put +1 because using floor in random sample
Max.fec=c(31,28,18,10)
#Mean values
#Whisk.Fec.relation=0.314*(100:140)-17.8 #Simfendorfer & Unsworth 1998
#Mean.fec=c(mean(Whisk.Fec.relation),9.9,7.7) #mean Sandbar fec from (Table 4 Baremore & Hale 2010)
#Mean.fec=c(mean(Whisk.Fec.relation),9.9,6.5)
#Sd.fec=c(sd(Whisk.Fec.relation),2.7,2.3) #sd=2 fec is CV=30% as found for dusky
species.list <-vector("list", length(species.names))
names(species.list) <- species.names
pars.names <- c("max.age", "M","fec","breed.freq","age.mat")
pars.list <- vector("list", length(pars.names))
names(pars.list) <- pars.names
#Fill in species list of pars
for (i in 1:N.sp)
{
species.list[[i]]=list(max.age=c(Min.max.age[i],Max.max.age[i]),fec=c(Min.fec[i],Max.fec[i]),
breed.freq=c(Min.breed.freq[i],Max.breed.freq[i]),
age.mat=c(Min.age.mat[i],Max.age.mat[i]))
}
#Average water temperature
Temperature=c(18,18,18,24)
#Growth pars (female)
Linf.g=201.9
#Linf.w=128.2 #Simfendorfer et al 2000, tag-recapture
Linf.w=120.7 #Simfendorfer et al 2000, age and growth
Linf.d=374.4
Linf.s=244.2
K.g=0.123
#K.w=0.288
K.w=0.369
K.d=.0367
K.s=.040
to.g=-1.55
to.w=-0.6
to.d=-3.3
to.s=-4.8
#Size at birth
Size.birth=c(33.5,25,75.3,42.5)
#FL to TL pars
b.g=NA
b.w=8.891
b.d=4.000
b.s=5.8188
a.g=NA
a.w=1.046
a.d=1.100
a.s=1.11262
#TL to TW pars
bwt.g=0.927e-6 #(reported as 0.927e-9 but I changed scale to match others)
bwt.w=0.0000163
bwt.d=1.2334e-5
bwt.s=0.000002
awt.g=3.206
awt.w=2.733
awt.d=2.855
awt.s=3.2069
#Selectivity
#gummy #Walker 2010
theta1=186
theta2=36695
mesh= 6.5 #(in inches, = 165 mm)
alphabeta.g=theta1*mesh
beta.g=-0.5*(theta1*mesh-((theta1^2)*(mesh^2)+4*theta2)^0.5)
alpha.g=alphabeta.g/beta.g
mesh= 7 #(in inches, = 178 mm)
alphabeta.g7=theta1*mesh
beta.g7=-0.5*(theta1*mesh-((theta1^2)*(mesh^2)+4*theta2)^0.5)
alpha.g7=alphabeta.g7/beta.g7
#whiskery #Simpfendorfer & Unsworth 1998
# alpha.w=64.01339
# beta.w=18.53164
# alphabeta.w=alpha.w*beta.w #old, used in paper as it was the values used in Simpfendorfer for stock assessment
theta1=173.70
theta2=26415
mesh= 6.5 #(in inches, = 16.5 cm)
alphabeta.w=theta1*mesh
beta.w=-0.5*(theta1*mesh-((theta1^2)*(mesh^2)+4*theta2)^0.5)
alpha.w=alphabeta.w/beta.w
mesh= 7 #(in inches)
alphabeta.w7=theta1*mesh
beta.w7=-0.5*(theta1*mesh-((theta1^2)*(mesh^2)+4*theta2)^0.5)
alpha.w7=alphabeta.w7/beta.w7
#dusky #Simpfendorfer & Unsworth 1998
theta1=130.13
theta2=29237
mesh= 6.5 #(in inches, = 16.5 cm)
alphabeta.d=theta1*mesh
beta.d=-0.5*(theta1*mesh-((theta1^2)*(mesh^2)+4*theta2)^0.5)
alpha.d=alphabeta.d/beta.d
mesh= 7 #(in inches)
alphabeta.d7=theta1*mesh
beta.d7=-0.5*(theta1*mesh-((theta1^2)*(mesh^2)+4*theta2)^0.5)
alpha.d7=alphabeta.d7/beta.d7
#sandbar #McAuley et al 2005
theta1=135.58
theta2=117001
mesh= 6.5
alphabeta.s=theta1*mesh
beta.s=-0.5*(theta1*mesh-((theta1^2)*(mesh^2)+4*theta2)^0.5)
alpha.s=alphabeta.s/beta.s
mesh= 7
alphabeta.s7=theta1*mesh
beta.s7=-0.5*(theta1*mesh-((theta1^2)*(mesh^2)+4*theta2)^0.5)
alpha.s7=alphabeta.s7/beta.s7
#Put pars in data frame for loop
Growth.pars=data.frame(Linf=c(Linf.g,Linf.w,Linf.d,Linf.s),K=c(K.g,K.w,K.d,K.s),to=c(to.g,to.w,to.d,to.s))
Length.conv=data.frame(b=c(b.g,b.w,b.d,b.s),a=c(a.g,a.w,a.d,a.s))
Weight.conv=data.frame(bwt=c(bwt.g,bwt.w,bwt.d,bwt.s),awt=c(awt.g,awt.w,awt.d,awt.s))
Sel.pars=data.frame(alphabeta=c(alphabeta.g,alphabeta.w,alphabeta.d,alphabeta.s),
alpha=c(alpha.g,alpha.w,alpha.d,alpha.s),
beta=c(beta.g,beta.w,beta.d,beta.s))
#---PROCEDURE SECTION----
#1. Priors
#1.1. Natural mortality
#source external function
Wght.G=c(1,1,1,1) # weight given to methods using growth parameters (high uncertainty for whiskery so give lower weight)
Wght.noG=c(1,10,1,1) # weight given to methods not using growth parameters
#1.2 Max age
# Draw one integer maximum age from a triangular distribution on [MIN, MAX]
# with mode MODE, truncated down to a whole year.
A.max=function(MIN,MAX,MODE)
{
  draw=rtriangle(1, a=MIN, b=MAX, c=MODE)
  floor(draw)
}
#1.3 Fecundity
# Fecundity-at-length-class for a given species.
# gummy: length-based regression on total length (mm).
# whiskery/dusky/sandbar: a single value repeated over all length classes,
# drawn from a triangular prior on [Min.fec, Max.fec] (or replaced by the
# prior mean when USE.MEAN == "YES", or by a linear ramp across length
# classes when USE.MEAN == "Linear").
# Relies on globals: total.length, mid.FL.fem, Min.fec, Max.fec, USE.MEAN.
FEC=function(spec.)
{
  if (spec.=="gummy")
  {
    Fec=1.12*exp(-3.033+0.00396*total.length*10) #add 10 scaler as it was calculated in mm
  } else
  {
    # identical sampling scheme for the other species; only the species
    # index into Min.fec/Max.fec differs (previously triplicated code)
    #Fec=0.314*mid.FL.fem-17.8 #whiskery: underestimates Fec because mid.FL only gets to Linf (i.e. 120)!!
    k=match(spec., c("gummy","whiskery","dusky","sandbar"))
    n=length(mid.FL.fem)
    Fec=rep(round(rtriangle(1, a=Min.fec[k], b=Max.fec[k], c=mean(c(Min.fec[k],Max.fec[k])))),n)
    if(USE.MEAN=="YES")Fec=rep(mean(c(Min.fec[k],Max.fec[k])),n)
    if(USE.MEAN=="Linear")Fec=round(seq(Min.fec[k],Max.fec[k],length.out=n))
  }
  return(Fec)
}
#1.4 Reproductive periodicity
# Draw one reproductive periodicity value uniformly between MIN and MAX.
REP.PER=function(MIN,MAX)
{
  runif(n = 1, min = MIN, max = MAX)
}
#1.5 age at maturity
# Draw one integer age at maturity: uniform on [MIN, MAX), truncated down.
AGE.MAT=function(MIN,MAX)
{
  floor(runif(n = 1, min = MIN, max = MAX))
}
#1.6 growth pars (multivariate normal distribution)
# Draw one (Linf, k) pair from a multivariate normal with mean
# (Linf.mean, k.mean) and covariance 'sigma', redrawing until the simulated
# growth coefficient k exceeds 0.001 (guards against nonsense growth curves).
# Returns a 1 x 2 matrix: column 1 = Linf, column 2 = k.
GROWTH=function(Linf.mean,k.mean,sigma)
{
  repeat
  {
    pars=rmvnorm(1, mean=c(Linf.mean, k.mean), sigma=sigma)
    if (pars[2] > 0.001) break
  }
  return(pars)
}
#2. Spawning potential ratio method (Brooks et al 2010)
#2.1. Analytically-derived SPR.mer
# Analytical SPR-based quantities following Brooks et al. (2010).
# Builds unexploited spawners-per-recruit (phi.o), the maximum lifetime
# reproductive rate at low density (alpha), Beverton-Holt steepness (h),
# the SPR at maximum excess recruitment (SPR.mer), the optimal depletion
# level (Dep.MER) and the overfished threshold multiplier (p).
#
# Arguments:
#   max.age    maximum age (treated as a plus group when Plus.gp == "Y")
#   M          natural mortality at age (indexed over ages 0..max.age)
#   fec        fecundity (scalar or vector over length/age classes)
#   breed.freq breeding frequency
#   sex.ratio  proportion of female offspring
#   age.mat    age at maturity
#   sel.age    selectivity at age
#   F.mult     fishing mortality multiplier (0 = unexploited)
#   Plus.gp    "Y" to treat max.age as a plus group
#   MAT        maturity form: "knife" edge or logistic "ogive"
#
# Returns a list of the above (plus fecundity/maturity at age) when
# h >= 0.2, otherwise the length-1 string "Bad joint priors" -- callers
# test length(result) > 1 to detect that failure case.
# Uses global: spawn.time (fraction of the year at which spawning occurs).
SPR=function(max.age,M,fec,breed.freq,sex.ratio,age.mat,sel.age,F.mult=0,Plus.gp="Y",MAT)
{
  #age classes 0..max.age
  age=0:max.age
  #survivorship at age; only the age-0 element (surv[1]) is used below
  surv=exp(-M)
  #per-capita female fecundity (pups scaled by breeding frequency and sex ratio)
  #fecundity=rep(fec*breed.freq*sex.ratio,length(age))
  fecundity=fec*breed.freq*sex.ratio
  #maturity at age: knife-edge or logistic ogive centred on age.mat
  if(MAT=="knife")maturity=ifelse(age>=age.mat,1,0)
  if(MAT=="ogive")maturity=plogis(age,age.mat,1)
  #if maximum age is a terminal group
  #NOTE(review): both loops start at i = 2 (age 1), so age-0 reproduction
  #is excluded by construction.
  if(!(Plus.gp=="Y"))
  {
    phi.o=0.0 #unexploited spawners per recruit
    cum.survive=1.0 #cumulative survival to the start of each age class
    z=0.0
    for (i in 2:(max.age) )
    {
      z=M[i] + F.mult*sel.age[i] #total mortality for this age class
      z.ts=(M[i]+F.mult*sel.age[i])*spawn.time #mortality accrued up to spawning time
      phi.o=phi.o+cum.survive*fecundity[i]*maturity[i]*exp(-z.ts)
      cum.survive=cum.survive*exp(-z )
    }
  }
  #if maximum age is plus group: same accumulation plus a geometric-series
  #term for survivors pooled beyond max.age
  if(Plus.gp=="Y")
  {
    phi.o=0.0
    cum.survive=1.0
    z=0.0
    for (i in 2:(max.age) )
    {
      z=M[i] + F.mult*sel.age[i]
      z.ts=(M[i]+F.mult*sel.age[i])*spawn.time
      phi.o=phi.o+cum.survive*fecundity[i]*maturity[i]*exp(-z.ts)
      cum.survive=cum.survive*exp(-z )
    }
    #plus group: 1/(1 - exp(-z)) accumulates the pooled age classes
    z= M[max.age+1] + F.mult*sel.age[max.age+1]
    z.ts=(M[max.age+1]+F.mult*sel.age[max.age+1])*spawn.time
    phi.o=phi.o + fecundity[max.age+1]*maturity[max.age+1]*cum.survive*exp(-z.ts)/( 1- exp(-z ) )
  }
  #maximum lifetime reproductive rate at low density
  alpha=phi.o*surv[1]
  #Beverton-Holt steepness implied by alpha
  h=alpha/(4+alpha)
  #spawning potential ratio at maximum excess recruitment (MER) (Beverton-Holt relationship)
  SPR.mer=1/alpha^0.5
  #optimal depletion level (i.e. depletion at MER, the proportional reduction from unexploited level)
  Dep.MER=((alpha^0.5)-1)/(alpha-1)
  #overfished threshold: at least 0.5, larger when mean M is low
  p=max(c(0.5,(1-mean(M))))
  #steepness below its theoretical lower bound (0.2) flags an implausible
  #joint prior draw; return a length-1 marker the caller can detect
  if(h<0.2)return(Warning="Bad joint priors")
  if(h>=0.2)return(list(phi.o=phi.o,alpha=alpha,h=h,SPR.mer=SPR.mer,Dep.MER=Dep.MER,p=p,
                        fecundity=fecundity,maturity=maturity))
}
# Classify stock status from a relative abundance series.
# The last index value is scaled to the first and to the initial depletion
# level, then compared against the overfished threshold p * dep.mer.
# Returns "Overfished" or "Not overfished".
Stock.depletion=function(ab.index,init.depl,dep.mer,p)
{
  last.obs = ab.index[length(ab.index)]
  #scaled current index
  scaled.index = init.depl * (last.obs / ab.index[1])
  #current depletion level relative to the MER level
  depletion = scaled.index / dep.mer
  #stock status
  status = if (scaled.index < p * dep.mer) "Overfished" else "Not overfished"
  return(status)
}
#2.2 Find Fmer thru numerical approximation
#note: Equilibrium Per recruit function
# Equilibrium eggs-per-recruit at fishing mortality Fe, its SPR relative to
# the unexploited level, and the squared distance between that SPR and the
# global target 'Observed' (minimised over Fe to find the F attaining a
# given SPR).
# Relies on globals set in the simulation loop: mm (M at age), Sel.A
# (selectivity at age), A.MAX, spawn.time, fecundity, maturity, phieggs0
# (unexploited eggs-per-recruit) and Observed (target SPR).
EqPerRec=function(Fe=0)
{
  #survivorship
  surv=exp(-mm) #NOTE(review): computed but never used in this function
  #note: age, fecundity and maturity are set outside the function (in loop)
  #eggs-per-recruit accumulator
  phieggs=0.0
  #cumulative survival to the start of each age class
  cum.survive=1.0
  z=0.0
  for (i in 2:(A.MAX) )
  {
    z=mm[i] + Fe*Sel.A[i] #total mortality for this age class
    z.ts=(mm[i]+Fe*Sel.A[i])*spawn.time #mortality accrued up to spawning time
    phieggs=phieggs+cum.survive*fecundity[i]*maturity[i]*exp(-z.ts)
    cum.survive=cum.survive*exp(-z )
  }
  #plus group: geometric accumulation of survivors pooled beyond A.MAX
  z= mm[A.MAX+1] + Fe*Sel.A[A.MAX+1]
  z.ts=(mm[A.MAX+1]+Fe*Sel.A[A.MAX+1])*spawn.time
  phieggs=phieggs + fecundity[A.MAX+1]*maturity[A.MAX+1]*cum.survive*exp(-z.ts)/( 1- exp(-z ) )
  #Spawning potential ratio relative to the unexploited level
  SPR=phieggs/phieggs0
  #Objective function: squared distance to the target SPR
  epsilon=(Observed-SPR)^2
  return(list(phieggs0=phieggs0,phieggs=phieggs,epsilon=epsilon,SPR=SPR))
}
#2.3 Find SPR for given depletion level thru numerical approximation
# For a candidate SPR, compute the implied optimal depletion level
# (alpha = 1/SPR^2 under the Beverton-Holt parameterisation;
# depletion = (sqrt(alpha) - 1) / (alpha - 1)) and the squared distance to
# the global target 'Observed'. Minimised numerically to recover the SPR
# corresponding to a given depletion level.
Find.SPR=function(Spr)
{
  alpha = (1 / Spr)^2
  deplet = (alpha^0.5 - 1) / (alpha - 1)
  #objective function
  return(list(epsilon = (Observed - deplet)^2, Depletion = deplet))
}
#2.4 Find B from a given SPR level (B-H function)
# Equilibrium spawning biomass from the Beverton-Holt stock-recruit
# relationship, given the maximum reproductive rate (recr.par = alpha),
# unfished recruitment R0, fished spawners-per-recruit spr and unfished spr0.
Find.B <- function(recr.par, R0, spr, spr0) {
  rel.spr <- spr / spr0
  spr0 * R0 * (rel.spr * recr.par - 1.0) / (recr.par - 1.0)
}
#3. Zhou et al (2012) FMSY proxy
# Zhou et al. (2012) FMSY proxy: FMSY = 0.41 * M.
# (Fix: the original body was the assignment 'FMSY=0.41*M', which only
# created a transient local and returned its value invisibly; return the
# expression directly instead.)
Zhou=function(M) 0.41*M
#4. Standard per recruit analysis
# Standard per-recruit analysis at a given fishing mortality.
# Computes numbers-, yield-, SSB- and eggs-per-recruit from growth,
# length-weight, maturity and fecundity schedules, plus negated totals
# (Epsilon, Epsilon.mer) for use as minimisation objectives.
# Relies on globals: age (vector of age classes), Sel.A (selectivity at age)
# and sex.ratio.
Per.recruit=function(Linf,K,to,b,a,M,bwt,awt,age.mat,fec,breed.freq,fishing)
{
  N=length(age)
  #length at mid age (von Bertalanffy at age + 0.5), then FL converted to TL
  FL=Linf*(1-exp(-K*(age+0.5-to)))
  TL=b+a*FL
  #mortality at age
  Fat.age=fishing*Sel.A
  Z.at.age=Fat.age+M
  #relative numbers at age (one recruit at age 0)
  Rel.numbers=vector(length=N)
  Rel.numbers[1]=1
  #NOTE(review): survival from class i-1 to i applies Z.at.age[i] (the
  #destination class) -- confirm this indexing convention is intended.
  for(i in 2:N)Rel.numbers[i]=Rel.numbers[i-1]*exp(-Z.at.age[i])
  #harvest fraction (Baranov catch equation)
  harvest=(Fat.age/Z.at.age)*Rel.numbers*(1-exp(-Z.at.age))
  #weight at age from total length
  Wt=bwt*TL^awt
  #fecundity at age (knife-edge at age.mat, scaled by breeding frequency)
  fecundity=ifelse(age>=age.mat,fec*breed.freq,0)
  #yield per recruit
  YPR=Wt*harvest
  #Spawning stock biomass per recruit (mature biomass only)
  SSB=ifelse(age>=age.mat,Wt*Rel.numbers,0)
  #Eggs per recruit (female)
  Eggs=fecundity*sex.ratio*Rel.numbers
  #Objective functions (negated so a minimiser maximises the totals)
  Epsilon=-1*sum(YPR)
  Epsilon.mer=-1*sum(Eggs)
  #Totals per recruit
  return(list(N.per.rec=sum(Rel.numbers),Yield.per.rec=sum(YPR),SSB.per.rec=sum(SSB),
              Eggs.per.rec=sum(Eggs),Epsilon=Epsilon,Epsilon.mer=Epsilon.mer))
}
#5. Selectivity function (note: Fork length in mm)
# Gillnet selectivity-at-length for female length classes.
# Fork lengths come from the global 'mid.FL.fem' (cm) and are converted to
# mm via the *10 scaler before applying the gamma-type selectivity curve.
# Returns the selectivity for each length class.
# (Fix: the original relied on the invisible return of an assignment;
# hoist the repeated unit conversion and return the expression directly.)
Select=function(alphabeta,alpha,beta)
{
  len.mm=mid.FL.fem*10
  ((len.mm/alphabeta)^alpha)*exp(alpha-(len.mm/beta))
}
#---MAIN SECTION----
N.sim=10000
#1. Spawning Potential Ratio and derived methods
Species.SPR.MER=Species.B.MER=Species.F.MER=Species.F.est.Conv=Species.M=Species.Sel=Species.Fec=
Species.len=Species.F.40=Species.F.30=Species.F.zhou=Species.Trigged=vector("list", length=N.sp)
names(Species.SPR.MER)=names(Species.B.MER)=names(Species.F.MER)=names(Species.F.est.Conv)=
names(Species.M)=names(Species.Sel)=names(Species.Fec)=names(Species.F.40)=names(Species.F.30)=
names(Species.F.zhou)=names(Species.Trigged)=names(Species.len)=species.names
#Monte Carlo simulations
#note: obtain stock rec pars and biological reference points
#add or remove variability in growth pars
Growth.var="YES"
#Growth.var="NO"
#use Mean Fecundity for whiskery, dusky and sandbar
#USE.MEAN="YES" #for using mean
#USE.MEAN="Linear" #for using linear relation
USE.MEAN="NO" #for using random sample
# Objective function for finding the F that produces a target relative
# spawning biomass: for a trial F (the argument 'Fstart' is the F value
# being evaluated, not a starting value), compute the SPR from the
# equilibrium per-recruit model, convert it to relative spawning biomass via
# the Beverton-Holt relation, and return the absolute distance to the
# global target 'obs.biom'. Minimised with nlminb() in the loop below.
# Relies on globals: EqPerRec(), Find.B(), spr.temp (for alpha), obs.biom.
b.f <- function(Fstart)
{
  temp.spr = EqPerRec(Fstart)$SPR
  temp.SSB =Find.B(spr.temp$alpha,R0=1,spr=temp.spr,spr0=1) #R0 and spr0 =1 as it's on relative scale
  yy<-abs(temp.SSB - obs.biom )
  return(yy)
}
Fstart=0.1 #starting value of F handed to the numerical minimisers
system.time(for (a in 1:N.sp)
{
spec.=species.names[a]
WT=Weight.conv[a,]
GROW=Growth.pars[a,]
TO=Growth.pars[a,3]
SIG=SIGMA[[a]]
Lo=Size.birth[a]
AA=species.list[[a]]$max.age
FF=species.list[[a]]$fec
BF=species.list[[a]]$breed.freq
b.fem=Length.conv[a,1]
a.fem=Length.conv[a,2]
alphabeta.S=Sel.pars[a,1]
alpha.S=Sel.pars[a,2]
beta.S=Sel.pars[a,3]
sex.ratio=0.5
AMat=species.list[[a]]$age.mat
Temper=Temperature[a]
r=1
spawn.time = 0 # specify time of the year when spawning (or pupping) occurs as a fraction beteween 0 and 1
w.g=Wght.G[a]
w.ng=Wght.noG[a]
SPR.out=f.out=b.out=Store.M=Store.Fec=Convergence=Store.Sel=Store.len=f.out.30=f.out.40=
f.zhou=A.Max=vector("list", length=N.sim)
Trigged=rep(NA,N.sim)
#Threshold
for (j in 1:N.sim)
{
#1. draw random samples of input parameters
A.MAX=A.max(AA[1],AA[2],AA[1])
age=0:A.MAX
if(Growth.var=="YES") GR=GROWTH(GROW[[1]],GROW[[2]],SIG)
if(Growth.var=="NO") GR=matrix(c(GROW[[1]],GROW[[2]]),ncol=2)
Rep=REP.PER(BF[2],BF[1])
A.MAT=AGE.MAT(AMat[1],AMat[2])
if(!spec.=="gummy")
{
mid.FL.fem=Lo+(GR[1]-Lo)*(1-exp(-GR[2]*age)) #modified version
total.length=b.fem+a.fem*mid.FL.fem
}
if(spec.=="gummy")
{
total.length=Lo+(GR[1]-Lo)*(1-exp(-GR[2]*age)) #gummy pars are fn(total length)
mid.FL.fem=total.length
}
Fec=FEC(spec.)
#put a cap on gummy Fecundity to avoid predicting beyond data range
if(spec.=="gummy")
{
Fec=ifelse(Fec>Max.fec[1],Max.fec[1],Fec)
Fec=ifelse(Fec<0,0,Fec)
}
mm=M.fun(A.MAX,GR[2],GR[1],Temper,A.MAT,TO,WT[1,1],WT[1,2],w.g,w.ng)
Sel.A=Select(alphabeta.S,alpha.S,beta.S)
#2. calculate SPR.mer quantities
spr.temp=SPR(A.MAX,mm,Fec,Rep,sex.ratio,A.MAT,Sel.A,0,"Y","ogive")
if(length(spr.temp)>1)
{
#3. Calculate Threshold (i.e. MER)reference points
#3.1.1 Biomass (Bmer)
BTHR=spr.temp$Dep.MER
}
#3. Repeat if nonsense outputs (e.g. h<0.2, negative Biomass RP), i.e. obtain sensible joint priors
Trigger=0
if (length(spr.temp)==1|(BTHR*(1/Prop.FMsy))<0|(BTHR*Prop.FMsy)<0|BTHR<0)Trigger=1
Trigged[j]=Trigger
if(Trigger==1)
{ repeat
{
#3.1 draw random samples
A.MAX=A.max(AA[1],AA[2],AA[1])
age=0:A.MAX
if(Growth.var=="YES") GR=GROWTH(GROW[[1]],GROW[[2]],SIG)
if(Growth.var=="NO") GR=matrix(c(GROW[[1]],GROW[[2]]),ncol=2)
Rep=REP.PER(BF[2],BF[1])
A.MAT=AGE.MAT(AMat[1],AMat[2])
if(!spec.=="gummy")
{
mid.FL.fem=Lo+(GR[1]-Lo)*(1-exp(-GR[2]*age)) #modified version
total.length=b.fem+a.fem*mid.FL.fem
}
if(spec.=="gummy")
{
total.length=Lo+(GR[1]-Lo)*(1-exp(-GR[2]*age)) #gummy pars are fn(total length)
mid.FL.fem=total.length
}
Fec=FEC(spec.)
#3.2 put a cap on gummy Fecundity to avoid predicting beyond data range
if(spec.=="gummy")
{
Fec=ifelse(Fec>Max.fec[1],Max.fec[1],Fec)
Fec=ifelse(Fec<0,0,Fec)
}
mm=M.fun(A.MAX,GR[2],GR[1],Temper,A.MAT,TO,WT[1,1],WT[1,2],w.g,w.ng)
Sel.A=Select(alphabeta.S,alpha.S,beta.S)
#3.3 calculate SPR.mer quantities
spr.temp=SPR(A.MAX,mm,Fec,Rep,sex.ratio,A.MAT,Sel.A,0,"Y","ogive")
if(length(spr.temp)>1)
{
#3.3.1 Calculate reference points
#3.3.1.1 Threshold (i.e. MER)
#Biomass (Bmer)
BTHR=spr.temp$Dep.MER
}
#break if sensible joint prior
Trigger=0
if (length(spr.temp)==1|(BTHR*(1/Prop.FMsy))<0|(BTHR*Prop.FMsy)<0|BTHR<0)Trigger=1
if(Trigger==0)break
}
}
#store quantities of interest
SPR.out[[j]]=spr.temp
Store.M[[j]]=mm
Store.Sel[[j]]=Sel.A
Store.len[[j]]=mid.FL.fem
Store.Fec[[j]]=Fec
b.out[[j]]=BTHR
A.Max[[j]]=A.MAX
}
BTHR=unlist(b.out)
#Biomass limit (Blim = p x BTHR)
b.limt=Prop.FMsy*BTHR
b.limt=BTHR-(mean(BTHR)-mean(b.limt))
#Biomass target (Blim = p x BTHR)
b.targt=(1/Prop.FMsy)*BTHR
b.targt=BTHR+(mean(b.targt)-mean(BTHR))
for (j in 1:N.sim)
{
#3.1.2. F (Fmer)
#Set up useful vectors
spr.temp=SPR.out[[j]]
A.MAX=A.Max[[j]]
age=0:A.MAX
mm=Store.M[[j]]
Sel.A=Store.Sel[[j]]
fecundity=spr.temp$fecundity
maturity=spr.temp$maturity
lx=vector(length=length(age)) #no fishing
lz=vector(length=length(age)) #with fishing
Observed=spr.temp$SPR.mer
phieggs0=spr.temp$phi.o
fn.yield=function(Fe) EqPerRec(Fe=Fe)$epsilon
#numerical approximation
Fmer.fit <- nlminb( start=Fstart, objective=fn.yield, lower=0.0001, upper=5.0,
control=list(eval.max=500, iter.max=500 ) )
FTHR=Fmer.fit$par
FTHR.Conv=Fmer.fit$convergence
#Store convergence code
Convergence[[j]]=c(FTHR.Conv=FTHR.Conv)
#3.2.2 F limit
obs.biom <- b.limt[j]
Flim.fit = nlminb(start=Fstart, objective=b.f, lower=0, upper=3)
FLim = Flim.fit$par
Flim.Conv = Flim.fit$convergence
#3.3. F Target
obs.biom <- b.targt[j]
FTar.fit = nlminb(start=Fstart, objective=b.f, lower=0, upper=3)
FTar = FTar.fit$par
FTar.Conv = FTar.fit$convergence
# #5. Calculate F reference points for arbitrary SPR
# Observed=0.40 #target (Tsai et al 2011 page 1388)
# F40.fit=optimize(fn.yield,lower=0.0001,upper=5) #numerical approximation
# f.out.40[[j]]=F40.fit$minimum
#
# Observed=0.30 #limit
# F30.fit=optimize(fn.yield,lower=0.0001,upper=5) #numerical approximation
# f.out.30[[j]]=F30.fit$minimum
#
# #6. Calculate Fmsy proxy from Zhou et al 2012
# f.zhou[[j]]=Zhou(mean(mm))
#store
b.out[[j]]=c(BTHR[j],b.limt[j],b.targt[j])
f.out[[j]]=c(FTHR,FLim,FTar)
}
#STORE
Species.SPR.MER[[a]]=SPR.out
Species.M[[a]]=Store.M
Species.Fec[[a]]=Store.Fec
Species.Sel[[a]]=Store.Sel
Species.len[[a]]=Store.len
Species.B.MER[[a]]=b.out
Species.F.MER[[a]]=f.out
Species.F.est.Conv[[a]]=do.call(rbind,Convergence)
Species.F.40[[a]]=f.out.40
Species.F.30[[a]]=f.out.30
Species.F.zhou[[a]]=f.zhou
Species.Trigged[[a]]=Trigged
})
#Extract reference points (MSY proxies): reshape the per-species lists of
#per-draw triples into one N.sim-row data frame per species
B.lim=FMER=BMER=B.thre=B.tar=F40=FZHOU=F30=Steep=SprMer=vector("list", length=N.sp)
names(B.lim) =names(BMER) =names(FMER)= names(B.tar)=names(B.thre)=names(Steep)=names(SprMer)<-species.names
for (i in 1:N.sp)
{
#pre-allocate one row per Monte Carlo draw
Blim=Bthre=Btar=f40=fZhou=f30=H=SPRMER=data.frame(title=rep(NA,N.sim))
Fmer=data.frame(Fmer=rep(NA,N.sim),Flim=rep(NA,N.sim),Ftar=rep(NA,N.sim))
Bmer=data.frame(Bmer=rep(NA,N.sim),Blim=rep(NA,N.sim),Btar=rep(NA,N.sim))
for (j in 1:N.sim)
{
#Biomass triple (threshold, limit, target)
Bmer[j,]=Species.B.MER[[i]][[j]]
#steepness of the stock-recruit curve for this draw
H[j,1]=Species.SPR.MER[[i]][[j]]$h
#SPR at maximum excess recruitment
SPRMER[j,1]=Species.SPR.MER[[i]][[j]]$SPR.mer
#F triple (threshold, limit, target)
# f30[j,1]=Species.F.30[[i]][[j]]
Fmer[j,]=Species.F.MER[[i]][[j]]
# f40[j,1]=Species.F.40[[i]][[j]]
# fZhou[j,1]=Species.F.zhou[[i]][[j]]
}
#B.thre[[i]]=Bthre
#B.lim[[i]]=Blim
BMER[[i]]=Bmer
FMER[[i]]=Fmer
B.tar[[i]]=Btar
# FZHOU[[i]]=fZhou
# F40[[i]]=f40
# F30[[i]]=f30
Steep[[i]]=H
SprMer[[i]]=SPRMER
}
#Model checks
#1. extract how often the priors had to be resampled (0 = no resampling, 1 = resampled)
TAB.Trigger=lapply(Species.Trigged,table)
names(TAB.Trigger)=species.names
#2. check optimiser convergence codes (0 = converged)
TAB.converge=lapply(Species.F.est.Conv,table)
names(TAB.converge)=species.names
print(TAB.converge)
#3. visual check of the steepness and SPRmer distributions across species
check.cols=1:4
par(mfcol=c(1,1),las=1,mai=c(.8,1,.075,.15),omi=c(.1,.1,.1,.05),mgp=c(1.5,0.95,0))
#kernel densities of steepness, truncated to the feasible range [0.2, 1]
one=density(Steep[[1]][,1],adjust=2,na.rm =T,from=0.2,to=1)
two=density(Steep[[2]][,1],adjust=2,na.rm =T,from=0.2,to=1)
three=density(Steep[[3]][,1], adjust=2,na.rm =T,from=0.2,to=1)
four=density(Steep[[4]][,1], adjust=2,na.rm =T,from=0.2,to=1)
plot(one, type="l",lty=1,xlab="",lwd=2.5, yaxs="i",xaxs="i",ylab="",main="",
     xlim=c(0.2,1),ylim=c(0, max(one$y,two$y,three$y,four$y)),cex.axis=1.5,cex.lab=1.65,col=check.cols[1])
lines(two, lty=1,col=check.cols[2],lwd=2.5)
lines(three, lty=1,col=check.cols[3],lwd=2.5)
lines(four, lty=1,col=check.cols[4],lwd=2.5)
legend("topright",species.names,lty=1,cex=1.5,bty="n",col=check.cols,lwd=2)
mtext("Density",side=2,line=-1.5,font=1,las=0,cex=1.3,outer =T)
mtext("Steepness",side=1,line=2.1,font=1,las=0,cex=1.3)
#same check for SPR at MER
one=density(SprMer[[1]][,1],adjust=2,na.rm =T,from=0.2,to=1)
two=density(SprMer[[2]][,1],adjust=2,na.rm =T,from=0.2,to=1)
three=density(SprMer[[3]][,1], adjust=2,na.rm =T,from=0.2,to=1)
four=density(SprMer[[4]][,1], adjust=2,na.rm =T,from=0.2,to=1)
#NOTE(review): ylim omits four$y here (unlike the steepness plot above) -
#the fourth species' curve may be clipped; confirm if intentional
plot(one, type="l",lty=1,xlab="",lwd=2.5, yaxs="i",xaxs="i",ylab="",main="",
     xlim=c(0.2,1),ylim=c(0, max(one$y,two$y,three$y)),cex.axis=1.5,cex.lab=1.65,col=check.cols[1])
lines(two, lty=1,col=check.cols[2],lwd=2.5)
lines(three, lty=1,col=check.cols[3],lwd=2.5)
lines(four, lty=1,col=check.cols[4],lwd=2.5)
legend("topright",species.names,lty=1,cex=1.5,bty="n",col=check.cols,lwd=2)
mtext("Density",side=2,line=-1.5,font=1,las=0,cex=1.3,outer =T)
mtext("SprMer",side=1,line=2.1,font=1,las=0,cex=1.3)
#4.check variability in growth, selectivity and M
par(mfcol=c(4,3),las=1,mai=c(.6,.5,.075,.15),omi=c(.1,.1,.1,.05),mgp=c(1.5,0.6,0))
# Overlay every Monte Carlo draw of an age-schedule on one panel.
# DATA: list of numeric vectors (one per draw); LEG: panel label.
# First draw is plotted invisibly just to set up the axes, then all draws
# are drawn as lines. Iterates over the list itself (seq_along) instead of
# the global N.sim: equivalent for every call site (length(DATA) == N.sim)
# and safe if DATA is empty.
fn.plot.1=function(DATA,LEG)
{ plot(DATA[[1]],col="transparent",ylab="",xlab="")
  for (j in seq_along(DATA)) lines( DATA[[j]])
  legend("topleft",LEG,bty='n',cex=.9)
}
#one column of panels each for selectivity, growth and M, one row per species
for(p in 1:N.sp)fn.plot.1(Species.Sel[[p]],"Select")
for(p in 1:N.sp)fn.plot.1(Species.len[[p]],"Growth")
for(p in 1:N.sp)
{
fn.plot.1(Species.M[[p]],"M")
legend("topright",species.names[p],cex=1.5,bty="n")
}
#Stock satus assessment
# Whiskery.stock.status=Stock.depletion(abundance.species.list$Whiskery,depletion.species.list$Whiskery,
#                    Species.SPR.MER$Whiskery$Dep.MER,Species.SPR.MER$Whiskery$p)
#---REPORT SECTION----
SP.names=c("gummy shark","whiskery shark","dusky shark","sandbar shark")
#NOTE(review): setwd() makes all subsequent relative writes depend on call
#order; handl_OneDrive is a project helper (presumably builds a user path)
setwd(handl_OneDrive("Analyses/Reference Points/Outputs"))
#setwd("C:/Matias/Analyses/Reference Points/Outputs/p_0.65")
#setwd("C:/Matias/Analyses/Reference Points/Outputs/p_0.85")
write.table(do.call(rbind,TAB.Trigger),"Trigged.times.csv",sep=",")
#1. Natural mortality
# Function for filling in ages to max age for all iterations
# Pad each draw's age-vector with trailing NAs out to length A, then stack
# the draws into one matrix (one row per Monte Carlo draw, one column per age).
# Data: list of numeric vectors, one per draw (draws may have different max age)
# A:    target number of age classes (columns of the returned matrix)
# Returns a length(Data) x A matrix (NULL for an empty list, where the
# original 1:length(Data) loop would have errored).
# NOTE: a draw longer than A is passed through unchanged; callers always use
# A = max age + 1 so this does not occur in practice.
add.missing.age=function(Data,A)
{
  padded=lapply(Data,function(datos)
  {
    # right-pad short draws so every row has A entries
    if(length(datos)<A) datos=c(datos,rep(NA,A-length(datos)))
    datos
  })
  do.call(rbind,padded)
}
#Figure 1: boxplots of natural mortality at age per species, plus export of
#the median M-at-age schedule for the population dynamics models
tiff(file="Figure1.tiff",width = 2400, height = 2400,units = "px", res = 300, compression = "lzw")    #create tiff
par(mfcol=c(4,2),las=1,mai=c(.6,.6,.075,.1),omi=c(.1,.1,.1,.05),mgp=c(1.5,0.95,0))
for (j in 1:N.sp)
{
#matrix of M draws, NA-padded to the species' maximum age
mSim=add.missing.age(Species.M[[j]],(Max.max.age[j]+1))
boxplot(mSim,col="gray60",ylab="",xaxt="n",cex.axis=1.5,cex.lab=1.5,outline=F,
        border="gray10",varwidth=T,xlim=c(0,Max.max.age[j]))
axis(1,at=seq(1,Max.max.age[j]+1,1),labels=F,tck=-0.02)
axis(1,at=seq(1,Max.max.age[j],2),labels=seq(0,Max.max.age[j]-1,2),tck=-0.04,cex.axis=1.5)
#  legend("topright",species.names[j],cex=1.5,bty="n")
##Calculate median and export value for population dynamics
colMedian=function(x) median(x,na.rm=T)   #also used by the selectivity export below
Median.M=apply(mSim,2,colMedian)
Median.M=Median.M[!is.na(Median.M)]
write.csv(cbind(Median.M,Age=seq(0,Max.max.age[j]-1,1)),
          paste(handl_OneDrive("Data/Population dynamics/Parameter inputs for models/"),
                names(Species.M)[j],".M_at_age.csv",sep=""),row.names=F)
}
mtext(expression(" Natural mortality  " (year^-1)),side=2,line=-1.25,font=1,las=0,cex=1.2,outer=T)
mtext("Age",side=1,line=-2,font=1,las=0,cex=1.2,outer=T)
#dev.off()
#2. Selectivity
#2.1 Box plots of selectivity at age (drawn into the same Figure1 device,
#which is closed at the end of this loop)
#tiff(file="Figure2.tiff",width = 2400, height = 2400,units = "px", res = 300, compression = "lzw")    #create tiff
#par(mfcol=c(3,1),las=1,mai=c(.6,.6,.075,.15),omi=c(.1,.1,.1,.05),mgp=c(1.5,0.95,0))
for (j in 1:N.sp)
{
SelSim=add.missing.age(Species.Sel[[j]],(Max.max.age[j]+1))
boxplot(SelSim,col="gray60",ylab="",xaxt="n",cex.axis=1.5,cex.lab=1.5,
        varwidth=T,outline=F,border="gray10",xlim=c(0,Max.max.age[j]))
if(j==3)mtext(" Relative selectivity",side=2,line=3,font=1,las=0,cex=1.2,outer=F)
axis(1,at=seq(1,Max.max.age[j]+1,1),labels=F,tck=-0.02)
axis(1,at=seq(1,Max.max.age[j],2),labels=seq(0,Max.max.age[j]-1,2),tck=-0.04,cex.axis=1.5)
if(!j==2)legend("topright",SP.names[j],cex=1.75,bty="n")
if(j==2)legend("bottomright",SP.names[j],cex=1.75,bty="n")
##Calculate median and export selectivity at age for population dynamics
##(colMedian is defined in the M export loop above)
Median.Sel=apply(SelSim,2,colMedian)
Median.Sel=Median.Sel[!is.na(Median.Sel)]
write.csv(cbind(Median.Sel,Age=seq(0,Max.max.age[j]-1,1)),
          paste(handl_OneDrive("Data/Population dynamics/Parameter inputs for models/"),
                names(Species.M)[j],".Selectivity_at_age.csv",sep=""),row.names=F)
}
mtext("Age",side=1,line=-2,font=1,las=0,cex=1.2,outer=T)
dev.off()
#2.2 Mean behaviour of selectivity at the mean growth parameters
#lengths beyond the whiskery growth curve, used to extend its selectivity curve
ext=seq(130,200,10)
# Plot mean gillnet selectivity for one species, against fork length and
# against age (two panels per call).
# species: species name (controls axis labelling and the whiskery extension)
# age:     fine age grid; Lo/Linf/K: growth pars (modified VB from size at birth)
# alphabeta/alpha/beta: gamma-function gillnet selectivity parameters
# (lengths are converted cm -> mm via the *10 inside the selectivity formula)
fn.plot.sel=function(species,age,Lo,Linf,K,alphabeta,alpha,beta)
{
  mid.FL.fem=Lo+(Linf-Lo)*(1-exp(-K*age)) #use modified version
  sel.fem=((mid.FL.fem*10/alphabeta)^alpha)*(exp(alpha-(mid.FL.fem*10/beta)))
  # plot(age,mid.FL.fem,pch=19,cex=1.5)
  #panel 1: selectivity vs fork length (x label only on the bottom species)
  if(!(species =="sandbar shark"))
  {
    plot(mid.FL.fem,sel.fem,xlim=c(Lo,200), yaxs="i",xaxs="i",type='l',lwd=2,xlab="",
         ylab="",cex.axis=1.5,ylim=c(0,1.1))
  }
  if(species =="sandbar shark")
  {
    plot(mid.FL.fem,sel.fem,xlim=c(Lo,200), yaxs="i",xaxs="i",type='l',lwd=2,xlab="Fork length (cm)",
         ylab="",cex.lab=2,cex.axis=1.5,ylim=c(0,1.1))
  }
  #whiskery: extend the curve past Linf using the global ext length grid
  if (species =="whiskery shark")
  {
    sel.fem.ext=((c(mid.FL.fem,ext)*10/alphabeta)^alpha)*(exp(alpha-(c(mid.FL.fem,ext)*10/beta)))
    lines(c(mid.FL.fem,ext),sel.fem.ext,col=1,lwd=2)
  }
  #panel 2: selectivity vs age
  if(!(species =="sandbar shark"))
  {
    plot(age,sel.fem,type='l', yaxs="i",xaxs="i",lwd=2,xlab="",ylab="",cex.axis=1.5,ylim=c(0,1.1))
  }
  if(species =="sandbar shark")
  {
    plot(age,sel.fem,type='l', yaxs="i",xaxs="i",lwd=2,xlab="Age",ylab="",cex.lab=2,cex.axis=1.5,ylim=c(0,1.1))
  }
  legend("topright",species,cex=1.5,bty="n")
}
#draw the two mean-selectivity panels for each species
par(mfrow=c(4,2),las=1,mai=c(.7,.6,.075,.15),omi=c(.1,.3,.1,.05))
for (a in 1:N.sp)
{
age=seq(0,max(species.list[[a]]$max.age),by=.1)
GR=Growth.pars[a,];ab=Sel.pars[a,1];al=Sel.pars[a,2];be=Sel.pars[a,3]
fn.plot.sel(species.names[a],age,Size.birth[a],GR[1,1],GR[1,2],ab,al,be)
}
mtext("Relative selectivity",side=2,line=0,font=1,las=0,cex=1.3,outer =T)
#Check fecundity: boxplots of the fecundity-at-age draws per species
par(mfcol=c(4,1),las=1,mai=c(.6,.6,.075,.1),omi=c(.1,.1,.1,.05),mgp=c(1.5,0.95,0))
for (j in 1:N.sp)
{
FecSim=add.missing.age(Species.Fec[[j]],(Max.max.age[j]+1))
boxplot(FecSim,col="gray60",ylab="",xaxt="n",cex.axis=1.5,cex.lab=1.5,outline=F,
        border="gray10",varwidth=T,xlim=c(0,Max.max.age[j]))
axis(1,at=seq(1,Max.max.age[j]+1,1),labels=F,tck=-0.02)
axis(1,at=seq(1,Max.max.age[j],2),labels=seq(0,Max.max.age[j]-1,2),tck=-0.04,cex.axis=1.5)
legend("topleft",SP.names[j],cex=1.75,bty="n")
}
mtext("Fecundity",side=2,line=-1.25,font=1,las=0,cex=1.2,outer=T)
mtext("Age",side=1,line=-2,font=1,las=0,cex=1.2,outer=T)
#3. SPR reference points
#3.1. Biomass reference points: density of limit/threshold/target per species
tiff(file="Figure2.tiff",width = 2400, height = 2400,units = "px", res = 300, compression = "lzw")    #create tiff
par(mfcol=c(4,1),las=1,mai=c(.6,.5,.075,.15),omi=c(.1,.1,.1,.05),mgp=c(1.5,0.6,0))
for (i in 1:N.sp)
{
#columns of BMER are (Bmer=threshold, Blim=limit, Btar=target)
one=density(BMER[[i]][,2],adjust=2,na.rm =T,from=0,to=1)
two=density(BMER[[i]][,1],adjust=2,na.rm =T,from=0,to=1)
three=density(BMER[[i]][,3], adjust=2,na.rm =T,from=0,to=1)
plot(one, type="l",lty=1,col=1,xlab="",lwd=2.5, yaxs="i",xaxs="i",yaxt='n',
     ylab="",main="",xlim=c(0,.7),ylim=c(0, max(one$y,two$y,three$y)*1.05),cex.axis=1.75,cex.lab=1.65)
lines(two, lty=1,col="gray60",lwd=2.5)
lines(three, lty=5,col="gray40",lwd=2.5)
legend("topleft",SP.names[i],cex=1.75,bty="n")
if(i==1)legend("topright",c("Limit","Threshold","Target"),lty=c(1,1,5),
               cex=1.75,bty="n",col=c(1,"gray60","gray40"),lwd=2)
}
mtext("Density",side=2,line=-1.5,font=1,las=0,cex=1.5,outer =T)
mtext("Relative biomass",side=1,line=2.1,font=1,las=0,cex=1.5)
dev.off()
#3.2. F reference points: same layout as Figure 2 but for fishing mortality,
#with a per-species x-axis limit
tiff(file="Figure3.tiff",width = 2400, height = 2400,units = "px", res = 300, compression = "lzw")    #create tiff
par(mfcol=c(4,1),las=1,mai=c(.6,.5,.075,.15),omi=c(.1,.1,.1,.05),mgp=c(1.5,0.6,0))
XLim=c(.4,.4,.3,.1)
for (i in 1:N.sp)
{
#columns of FMER are (Fmer=threshold, Flim=limit, Ftar=target)
one=density(FMER[[i]][,2],adjust=2,na.rm =T,from=0,to=1)
two=density(FMER[[i]][,1],adjust=2,na.rm =T,from=0,to=1)
three=density(FMER[[i]][,3], adjust=2,na.rm =T,from=0,to=1)
plot(one, type="l",lty=1,col=1,xlab="",lwd=2.5, yaxs="i",xaxs="i",yaxt='n',
     ylab="",main="",xlim=c(0,XLim[i]),ylim=c(0, max(one$y,two$y,three$y)*1.05),cex.axis=1.75,cex.lab=1.65)
lines(two, lty=1,col="gray60",lwd=2.5)
lines(three, lty=5,col="gray40",lwd=2.5)
legend("topright",SP.names[i],cex=1.75,bty="n")
if(i==1)legend("topleft",c("Limit","Threshold","Target"),lty=c(1,1,5),
               cex=1.75,bty="n",col=c(1,"gray60","gray40"),lwd=2)
}
mtext("Density",side=2,line=-1.5,font=1,las=0,cex=1.5,outer =T)
mtext(expression("Fishing mortality " (year^-1)),side=1,line=2.6,font=1,las=0,cex=1.5)
dev.off()
#Extract median, 60th percentile and empirical 95% CI for every reference
#point, then export each table to CSV (one row per species)
BLIM.list=BTHRE.list=BTARG.list=FMER.list=FLIM.list=FTAR.fin.list=FZHOU.list=
  F40.list=F30.list=SprMer.list=Steep.list=Steep.list.percentile=vector('list',length=N.sp)
for (i in 1:N.sp)
{
#biomass limit
this=BMER[[i]][,2]
BLIM.list[[i]]=round(c(median=median(this),sixty.per=quantile(this,probs=0.6),
                       CI95=sort(this)[c(floor(0.025*length(this)),ceiling(0.975*length(this)))]),3)
#biomass threshold
this=BMER[[i]][,1]
BTHRE.list[[i]]=round(c(median=median(this),sixty.per=quantile(this,probs=0.6),
                       CI95=sort(this)[c(floor(0.025*length(this)),ceiling(0.975*length(this)))]),3)
#biomass target
this=BMER[[i]][,3]
BTARG.list[[i]]=round(c(median=median(this),sixty.per=quantile(this,probs=0.6),
                        CI95=sort(this)[c(floor(0.025*length(this)),ceiling(0.975*length(this)))]),3)
#F threshold
this=FMER[[i]][,1]
FMER.list[[i]]=round(c(median=median(this),sixty.per=quantile(this,probs=0.6),
                       CI95=sort(this)[c(floor(0.025*length(this)),ceiling(0.975*length(this)))]),3)
#F limit
this=FMER[[i]][,2]
FLIM.list[[i]]=round(c(median=median(this),sixty.per=quantile(this,probs=0.6),
                       CI95=sort(this)[c(floor(0.025*length(this)),ceiling(0.975*length(this)))]),3)
#F target
this=FMER[[i]][,3]
FTAR.fin.list[[i]]=round(c(median=median(this),sixty.per=quantile(this,probs=0.6),
                       CI95=sort(this)[c(floor(0.025*length(this)),ceiling(0.975*length(this)))]),3)
#  this=FZHOU[[i]][,1]
#  FZHOU.list[[i]]=round(c(median=median(this),sixty.per=quantile(this,probs=0.6),
#                       CI95=sort(this)[c(floor(0.025*length(this)),ceiling(0.975*length(this)))]),3)
#  this=F40[[i]][,1]
#  F40.list[[i]]=round(c(median=median(this),sixty.per=quantile(this,probs=0.6),
#                      CI95=sort(this)[c(floor(0.025*length(this)),ceiling(0.975*length(this)))]),3)
#  this=F30[[i]][,1]
#  F30.list[[i]]=round(c(median=median(this),sixty.per=quantile(this,probs=0.6),
#                      CI95=sort(this)[c(floor(0.025*length(this)),ceiling(0.975*length(this)))]),3)
#steepness summaries and full percentile grid
this=Steep[[i]][,1]
Steep.list[[i]]=round(c(median=median(this),sixty.per=quantile(this,probs=0.6),
                        CI95=sort(this)[c(floor(0.025*length(this)),ceiling(0.975*length(this)))]),3)
Steep.list.percentile[[i]]=quantile(this,probs=seq(0,1,0.05))
#export steepness mean/sd for the population dynamics models
Steep.pop.dyn=round(c(mean=mean(this),sd=sd(this)),3)
write.csv(data.frame(t(Steep.pop.dyn)),paste(handl_OneDrive("Data/Population dynamics/Parameter inputs for models/"),
                                             names(Species.M)[i],".Steepness.csv",sep=""),row.names=F)
#SPR at MER
this=SprMer[[i]][,1]
SprMer.list[[i]]=round(c(median=median(this),sixty.per=quantile(this,probs=0.6),
                        CI95=sort(this)[c(floor(0.025*length(this)),ceiling(0.975*length(this)))]),3)
}
write.csv(do.call(rbind,BLIM.list),"SPR.B.lim.csv",row.names=species.names)
write.csv(do.call(rbind,BTHRE.list),"SPR.B.thre.csv",row.names=species.names)
write.csv(do.call(rbind,BTARG.list),"SPR.B.tar.csv",row.names=species.names)
write.csv(do.call(rbind,FMER.list),"SPR.F.mer.csv",row.names=species.names)
write.csv(do.call(rbind,FLIM.list),"SPR.F.lim.csv",row.names=species.names)
write.csv(do.call(rbind,FTAR.fin.list),"SPR.F.tar.fin.csv",row.names=species.names)
#write.csv(do.call(rbind,F40.list),"SPR.F.40.csv",row.names=species.names)
#write.csv(do.call(rbind,F30.list),"SPR.F.30.csv",row.names=species.names)
#write.csv(do.call(rbind,FZHOU.list),"SPR.F.zhou.csv",row.names=species.names)
write.csv(do.call(rbind,SprMer.list),"SPR.mer.csv",row.names=species.names)
write.csv(do.call(rbind,Steep.list),"h.csv",row.names=species.names)
write.csv(do.call(rbind,Steep.list.percentile),"h_percentiles.csv",row.names=species.names)
#4. Distributions overlap
#4.1 Calculate densities overlaps and probabilities of being over BRP
# Plot the overlap between the distribution of a current-biomass estimate and
# each biomass reference point (limit/threshold/target), one panel per RP, and
# annotate each with the overlapping area (tau, % of the estimate's density)
# and the probability alpha of the estimate being below the RP point estimate.
# LIM/THR/TAR: vectors of MC draws for each reference point
# Estim:       vector of draws of the current-biomass estimate
# WHAT:        legend symbol for the estimate; Ley: panel letter for panel 1
# Side effects: draws three panels on the current device; leaks
# fn.Prob.larger/fn.Prob.smaller into the global env via <<- (used later
# for the Figure 4 probability tables). calcArea is from PBSmapping
# (presumably loaded earlier in the script).
Dens.over=function(LIM,THR,TAR,Estim,WHAT,Ley)
{
  #Calculate original densities (shared y-range for the three panels)
  Dens.lim=density(LIM,adjust=3,na.rm =T)
  Dens.th=density(THR,adjust=3,na.rm =T)
  Dens.tar=density(TAR,adjust=3,na.rm =T)
  Estimate=density(Estim,adjust=3,na.rm =T)
  #Point estimates: medians for limit/threshold, 20th percentile for target
  Lim.med=median(LIM)
  Thr.med=median(THR)
  Tar.med=quantile(TAR,prob=0.2)
  #NOTE(review): <<- makes these globals; kept because the Figure 4 section
  #below depends on them existing after this function runs
  fn.Prob.larger<<-function(DAT,Point.Est.BRP)
  {
    Fn <- ecdf(DAT)
    Prob=1-Fn(Point.Est.BRP)
    return(Prob)
  }
  #B.Lim.prob=round(fn.Prob.larger(Estim,Lim.med),2)  #median
  #B.Thr.prob=round(fn.Prob.larger(Estim,Thr.med),2)
  #B.Tar.prob=round(fn.Prob.larger(Estim,Tar.med),2)#20% percentile
  #Probability the estimate falls below the RP point estimate (empirical CDF)
  fn.Prob.smaller<<-function(DAT,Point.Est.BRP)
  {
    Fn <- ecdf(DAT)
    Prob=Fn(Point.Est.BRP)
    return(Prob)
  }
  B.Lim.prob=round(fn.Prob.smaller(Estim,Lim.med),2)  #median
  B.Thr.prob=round(fn.Prob.smaller(Estim,Thr.med),2)
  B.Tar.prob=round(fn.Prob.smaller(Estim,Tar.med),2)#20% percentile
  #Redo pairwise densities within same range so areas are comparable
  RANGO=range(Dens.lim$y, Dens.th$y,Dens.tar$y,Estimate$y)
  # Draw one RP-vs-estimate panel: hatched RP density, hatched estimate
  # density, shaded estimate mass below MED, and a legend with tau and alpha
  fn.redo=function(A,B,C,D,YRANGE,LEG,LEG1,relative.to,whatprob,prob.larger,MED)
  {
    #plot
    LWD=1.75
    lim <- range(A$x, B$x)
    #re-evaluate both densities on a common x-grid
    BRP=density(C,adjust=3,na.rm =T,from=lim[1], to=lim[2])
    estimate=density(D,adjust=3,na.rm =T,from=lim[1], to=lim[2])
    plot(BRP, col=1, ylim=YRANGE,main="",lwd=LWD,ylab="",xlab="", yaxs="i",xaxs="i",cex.axis=1.5,xlim=c(0,1))
    COL=rgb(70, 70, 70, alpha=95, maxColorValue=255)
    #polygon(BRP$x, pmin(BRP$y, estimate$y), col= COL)  #overlap
    #lines(estimate, col="gray50",lwd=2.5)
    #plot reference point (hatched)
    polygon(x=c(BRP$x, rev(BRP$x)), y=c(rep(0,length(BRP$x)), rev(BRP$y)), border=1, density=10,
            col=1,lwd=LWD)
    #plot biomass estimate (hatched, different angle)
    polygon(x=c(BRP$x, rev(BRP$x)), y=c(rep(0,length(estimate$x)), rev(estimate$y)), border="gray40", density=10,
            col="gray40", angle=135, lty=6,lwd=LWD)
    #shade the estimate's mass below the RP point estimate (alpha overlap)
    IDs=match(round(MED,3),round(estimate$x,3)) #match MED in density
    lines(c(BRP$x[IDs],BRP$x[IDs]),c(0,BRP$y[IDs]), col=1,lwd=LWD)  #plot median
    polygon(x=c(estimate$x[1:IDs], rev(estimate$x[1:IDs])), y=c(rep(0,IDs), rev(estimate$y[1:IDs])),
            border=COL, col=COL)
    #calculate overlapping area between the two densities (PBSmapping polygons)
    NN=length(BRP$x)
    BRP.pol=data.frame(PID=rep(1,(NN)),POS=1:(NN),X=c(BRP$x),Y=c(BRP$y))
    Estim.pol=data.frame(PID=rep(1,(NN)),POS=1:(NN),X=c(estimate$x),Y=c(estimate$y))
    Over.pol=data.frame(PID=rep(1,(NN)),POS=1:(NN),X=BRP$x, Y=pmin(BRP$y, estimate$y))
    area.Overlap=calcArea (Over.pol, rollup = 3)
    #tau is expressed relative to the estimate's area, except for the target
    #panel where it is relative to the RP's area
    if(relative.to=="Estim")AREA=calcArea (Estim.pol, rollup = 3)
    if(relative.to=="Tar")AREA=calcArea (BRP.pol, rollup = 3)
    Per.over=area.Overlap*100/AREA
    Per.over=abs(round(Per.over$area))
    #  polygon(Over.pol$X, Over.pol$Y, col=rgb(1,0,1,0.2))
    #build the plotmath legend: RP name, estimate symbol, tau = %, alpha = prob
    greek <- c(LEG,LEG1,"tau","alpha")
    other_stuff <- c('','',paste('=',Per.over,'%'),paste('=',prob.larger))
    .expressions <- mapply(sprintf, greek, other_stuff,MoreArgs = list(fmt = '%s~"%s"'))
    legend_expressions <-parse(text = .expressions)
    legend("topright",legend=legend_expressions,
           lty=c(1,6,0,0),cex=1.25,bty="n",col=c(1,"gray40"),
           fill=c("transparent","transparent", "transparent","transparent"),lwd=LWD,merge = TRUE,
           border=c("transparent","transparent", "transparent","transparent"))
    #         fill=c("transparent","transparent", COL,"transparent"),lwd=2,merge = TRUE,
    #         border=c("transparent","transparent", 1,"transparent"))
    #  abline(0,0,col="white",lwd=2)
    # return(Overlap=Per.over)
  }
  Lim.Est=fn.redo(Dens.lim,Estimate,LIM,Estim,RANGO,"Limit",WHAT,"Estim","Limit",B.Lim.prob,Lim.med)  #Limit Vs Estimate
  legend("topleft",Ley,bty="n",cex=1.5)
  Thr.Est=fn.redo(A=Dens.th,B=Estimate,C=THR,D=Estim,YRANGE=RANGO,LEG="Threshold",LEG1=WHAT,
                  relative.to="Estim",whatprob="Threshold",prob.larger=B.Thr.prob,MED=Thr.med) #Threshold Vs Estimate
  Tar.Est=fn.redo(Dens.tar,Estimate,TAR,Estim,RANGO,"Target",WHAT,"Tar","Target",B.Tar.prob,Tar.med)  #Target Vs Estimate
  return(list(Lim.Est=Lim.Est,Thr.Est=Thr.Est,Tar.Est=Tar.Est))
}
#Figure 4: overlap panels for a simulated "healthy" (A) and "depleted" (B)
#current-biomass distribution against the dusky shark reference points
tiff(file="Figure4.tiff",width = 2400, height = 2400,units = "px", res = 300, compression = "lzw")    #create tiff
par(mfcol=c(3,2),las=1,mai=c(.6,.5,.075,.15),omi=c(.1,.1,.1,.05),mgp=c(1.5,0.6,0))
#happy place: estimate centred well above the reference points
Dummy=rnorm(10000, mean=0.55, sd=0.03)
B.overlap.nice=Dens.over(LIM=BMER[[3]][,2],THR=BMER[[3]][,1],TAR=BMER[[3]][,3],Estim=Dummy,WHAT="Bc",Ley="A")
#nasty place: estimate centred near the limit
Dummy2=rnorm(10000, mean=0.30, sd=0.03)
B.overlap.nasty=Dens.over(LIM=BMER[[3]][,2],THR=BMER[[3]][,1],TAR=BMER[[3]][,3],Estim=Dummy2,WHAT="Bc",Ley="B")
mtext("Relative biomass",1,line=-1,outer=T,cex=1.5)
mtext("Density",2,line=-1,outer=T,las=3,cex=1.5)
dev.off()
#4.2 Calculate probability of being above or below each RP point estimate
#(fn.Prob.larger/fn.Prob.smaller were assigned globally inside Dens.over)
B.Lim.prob.above=round(fn.Prob.larger(Dummy,median(BMER[[3]][,2])),2)
B.Thr.prob.above=round(fn.Prob.larger(Dummy,median(BMER[[3]][,1])),2)
B.Tar.prob.above=round(fn.Prob.larger(Dummy,quantile(BMER[[3]][,3],prob=0.2)),2)
B.Lim.prob.below=round(fn.Prob.smaller(Dummy,median(BMER[[3]][,2])),2)
B.Thr.prob.below=round(fn.Prob.smaller(Dummy,median(BMER[[3]][,1])),2)
B.Tar.prob.below=round(fn.Prob.smaller(Dummy,quantile(BMER[[3]][,3],prob=0.2)),2)
Fig4.prob.above=cbind(B.Lim.prob.above,B.Thr.prob.above,B.Tar.prob.above)
Fig4.prob.below=cbind(B.Lim.prob.below,B.Thr.prob.below,B.Tar.prob.below)
write.csv(Fig4.prob.above,"Fig4.prob.above.csv")
write.csv(Fig4.prob.below,"Fig4.prob.below.csv")
#5. SENSITIVITY TEST
#Manually change one parameter at a time to see effect. Don't change gummy, they are ok
#Life history par vectors, species order: (Gummy, Whiskery, Dusky, Sandbar)
#Min values of the uniform-style sampling ranges
Min.max.age=c(16,15,40,30)   #NEW (less variable for dusky)
#Min.max.age=c(16,15,40,30)
Min.breed.freq=c(1,0.5,0.5,0.5)
Min.age.mat=c(4,6,26,13)  #NEW (less variable for dusky, sandbar)
#Min.age.mat=c(4,6,26,13)
Min.fec=c(1,14,2,10)   #NEW (less variable for whiskery, dusky, sandbar)
#Min.fec=c(1,4,2,4)
#Max values
Max.max.age=c(21,20,56,40)  #put +1 because using floor in random sample
Max.breed.freq=c(1,0.5,0.333,0.5)    #NEW (less variable for dusky)
#Max.breed.freq=c(1,0.5,0.333,0.5)
Max.age.mat=c(6,8,36,13)    #put +1 because using floor in random sample
Max.fec=c(31,28,18,10)
#one named list of parameter ranges per species
species.list <-vector("list", length(species.names))
names(species.list) <- species.names
pars.names <- c("max.age", "M","fec","breed.freq","age.mat")
pars.list <- vector("list", length(pars.names))
names(pars.list) <- pars.names
#Fill in species list of pars (each entry is a c(min, max) range)
for (i in 1:N.sp)
{
species.list[[i]]=list(max.age=c(Min.max.age[i],Max.max.age[i]),fec=c(Min.fec[i],Max.fec[i]),
                       breed.freq=c(Min.breed.freq[i],Max.breed.freq[i]),
                       age.mat=c(Min.age.mat[i],Max.age.mat[i]))
}
#Average water temperature (deg C), used by the empirical M estimators
Temperature=c(18,18,18,24)
#Growth pars (female), von Bertalanffy: Linf, K, t0 per species
#(suffixes: .g gummy, .w whiskery, .d dusky, .s sandbar)
Linf.g=201.9
#Linf.w=120.7 #Simfendorfer et al 2000, age and growth
Linf.w=160   #Last & Stevens max size NEW
Linf.d=374.4
Linf.s=244.2
K.g=0.123
#K.w=0.288
K.w=0.369
K.d=.0367
K.s=.040
to.g=-1.55
to.w=-0.6
to.d=-3.3
to.s=-4.8
#Size at birth (cm), used as L0 in the modified growth curve
Size.birth=c(33.5,25,75.3,42.5)
#FL to TL conversion pars (TL = b + a*FL; gummy pars already in TL so NA)
b.g=NA
b.w=8.891
b.d=4.000
b.s=5.8188
a.g=NA
a.w=1.046
a.d=1.100
a.s=1.11262
#TL to TW (total weight) power-relationship pars
bwt.g=0.927e-6 #(reported as 0.927e-9 but I changed scale to match others)
bwt.w=0.0000163
bwt.d=1.2334e-5
bwt.s=0.000002
awt.g=3.206
awt.w=2.733
awt.d=2.855
awt.s=3.2069
#Gillnet selectivity parameters (gamma selectivity curve per species)
#gummy  #Walker 2010: derive alpha/beta from theta1, theta2 and mesh size
theta1=186
theta2=36695
mesh= 6.5 #(in inches, = 16.5 cm)
alphabeta.g=theta1*mesh
beta.g=-0.5*(theta1*mesh-((theta1^2)*(mesh^2)+4*theta2)^0.5)
alpha.g=alphabeta.g/beta.g
#whiskery  #Simpfendorfer & Unsworth 1998: alpha/beta reported directly
alpha.w=64.01339
beta.w=18.53164
alphabeta.w=alpha.w*beta.w
#dusky  #Simpfendorfer & Unsworth 1998
theta1=130.13
theta2=29237
mesh= 6.5 #(in inches, = 16.5 cm)
alphabeta.d=theta1*mesh
beta.d=-0.5*(theta1*mesh-((theta1^2)*(mesh^2)+4*theta2)^0.5)
alpha.d=alphabeta.d/beta.d
#sandbar  #McAuley et al 2005
theta1=135.58
theta2=117001
mesh= 6.5
alphabeta.s=theta1*mesh
beta.s=-0.5*(theta1*mesh-((theta1^2)*(mesh^2)+4*theta2)^0.5)
alpha.s=alphabeta.s/beta.s
#Collect pars in data frames (one row per species, gummy first) for looping
Growth.pars=data.frame(Linf=c(Linf.g,Linf.w,Linf.d,Linf.s),K=c(K.g,K.w,K.d,K.s),to=c(to.g,to.w,to.d,to.s))
Length.conv=data.frame(b=c(b.g,b.w,b.d,b.s),a=c(a.g,a.w,a.d,a.s))
Weight.conv=data.frame(bwt=c(bwt.g,bwt.w,bwt.d,bwt.s),awt=c(awt.g,awt.w,awt.d,awt.s))
Sel.pars=data.frame(alphabeta=c(alphabeta.g,alphabeta.w,alphabeta.d,alphabeta.s),
                    alpha=c(alpha.g,alpha.w,alpha.d,alpha.s),
                    beta=c(beta.g,beta.w,beta.d,beta.s))
#---PROCEDURE SECTION----
#1. Priors
#1.1. Natural mortality
#source external function
Wght.G=c(1,1,1,1)  # weight given to M methods using growth parameters (high uncertainty for whiskery so give lower weight)
Wght.noG=c(1,10,1,1)  # weight given to M methods not using growth parameters
#---MAIN SECTION----
#number of Monte Carlo draws per species
N.sim=100
#1. Spawning Potential Ratio and derived methods: per-species output containers
Species.SPR.MER=Species.B.MER=Species.F.MER=Species.F.est.Conv=Species.M=Species.Sel=Species.Fec=
  Species.len=Species.F.40=Species.F.30=Species.F.zhou=Species.Trigged=vector("list", length=N.sp)
names(Species.SPR.MER)=names(Species.B.MER)=names(Species.F.MER)=names(Species.F.est.Conv)=
  names(Species.M)=names(Species.Sel)=names(Species.Fec)=names(Species.F.40)=names(Species.F.30)=
  names(Species.F.zhou)=names(Species.Trigged)=names(Species.len)=species.names
#Monte Carlo simulations
#note: obtain stock rec pars and biological reference points
#switch: add or remove variability in growth pars
Growth.var="YES"
#Growth.var="NO"
#switch: fecundity sampling scheme for whiskery, dusky and sandbar
#USE.MEAN="YES"   #for using mean
#USE.MEAN="Linear"  #for using linear relation
USE.MEAN="NO"   #for using random sample
#Main Monte Carlo loop: one pass per species; timed with system.time
system.time(for (a in 1:N.sp)
{
#unpack this species' fixed inputs into the working globals the helper
#functions (A.max, GROWTH, FEC, M.fun, Select, SPR, EqPerRec) read
spec.=species.names[a]
WT=Weight.conv[a,]
GROW=Growth.pars[a,]
TO=Growth.pars[a,3]
SIG=SIGMA[[a]]
Lo=Size.birth[a]
AA=species.list[[a]]$max.age
FF=species.list[[a]]$fec
BF=species.list[[a]]$breed.freq
b.fem=Length.conv[a,1]
a.fem=Length.conv[a,2]
alphabeta.S=Sel.pars[a,1]
alpha.S=Sel.pars[a,2]
beta.S=Sel.pars[a,3]
sex.ratio=0.5
AMat=species.list[[a]]$age.mat
Temper=Temperature[a]
r=1
spawn.time = 0  # specify time of the year when spawning (or pupping) occurs as a fraction beteween 0 and 1
w.g=Wght.G[a]
w.ng=Wght.noG[a]
#per-draw output containers for this species
SPR.out=f.out=b.out=Store.M=Store.Fec=Convergence=Store.Sel=Store.len=f.out.30=f.out.40=
  f.zhou=vector("list", length=N.sim)
#names(f.out)=c("threshold","limit","target")
Trigged=rep(NA,N.sim)
for (j in 1:N.sim)
{
#1. draw random samples of input parameters for this Monte Carlo iteration
A.MAX=A.max(AA[1],AA[2],AA[1])
age=0:A.MAX
if(Growth.var=="YES") GR=GROWTH(GROW[[1]],GROW[[2]],SIG)
if(Growth.var=="NO") GR=matrix(c(GROW[[1]],GROW[[2]]),ncol=2)
Rep=REP.PER(BF[2],BF[1])
A.MAT=AGE.MAT(AMat[1],AMat[2])
#length at age: modified von Bertalanffy anchored at size at birth
if(!spec.=="gummy")
{
mid.FL.fem=Lo+(GR[1]-Lo)*(1-exp(-GR[2]*age)) #modified version
total.length=b.fem+a.fem*mid.FL.fem
}
if(spec.=="gummy")
{
total.length=Lo+(GR[1]-Lo)*(1-exp(-GR[2]*age)) #gummy pars are fn(total length)
mid.FL.fem=total.length
}
Fec=FEC(spec.)
#put a cap on gummy Fecundity to avoid predicting beyond data range
if(spec.=="gummy")
{
Fec=ifelse(Fec>Max.fec[1],Max.fec[1],Fec)
Fec=ifelse(Fec<0,0,Fec)
}
mm=M.fun(A.MAX,GR[2],GR[1],Temper,A.MAT,TO,WT[1,1],WT[1,2],w.g,w.ng)
Sel.A=Select(alphabeta.S,alpha.S,beta.S)
#2. calculate SPR.mer quantities (SPR returns length-1 object on failure)
spr.temp=SPR(A.MAX,mm,Fec,Rep,sex.ratio,A.MAT,Sel.A,0,"Y","ogive")
if(length(spr.temp)>1)
{
#3. Calculate reference points
#3.1. Threshold (i.e. MER)
#3.1.1 Biomass (Bmer)
BTHR=spr.temp$Dep.MER
#3.1.2. F (Fmer)
#Set up useful vectors (read as globals by EqPerRec)
fecundity=spr.temp$fecundity
maturity=spr.temp$maturity
lx=vector(length=length(age)) #no fishing
lz=vector(length=length(age)) #with fishing
Observed=spr.temp$SPR.mer
phieggs0=spr.temp$phi.o
fn.yield=function(Fe) EqPerRec(Fe=Fe)$epsilon
#numerical approximation
Fstart=0.1
Fmer.fit <- nlminb( start=Fstart, objective=fn.yield, lower=0.0001, upper=5.0,
                control=list(eval.max=500, iter.max=500 ) )
FTHR=Fmer.fit$par
FTHR.Conv=Fmer.fit$convergence
#3.2. Limits
#3.2.1. Biomass (Blim = p x BTHR)
B.lim=Prop.FMsy*BTHR
#3.2.2 F: find F whose equilibrium SSB matches obs.biom (set before each fit)
obs.biom <- B.lim
b.f <- function(Fstart)
{
temp.spr = EqPerRec(Fstart)$SPR
temp.SSB =Find.B(spr.temp$alpha,R0=1,spr=temp.spr,spr0=1)  #R0 and spr0 =1 as it's on relative scale
yy<-abs(temp.SSB - obs.biom )
return(yy)
}
Flim.fit = nlminb(start=Fstart, objective=b.f, lower=0, upper=3)
FLim = Flim.fit$par
Flim.Conv = Flim.fit$convergence
#3.3. Targets
#3.3.1. Biomass (Btar = (1/p) x BTHR)
B.tar=(1/Prop.FMsy)*BTHR
#3.3.2 F
obs.biom <- B.tar
FTar.fit = nlminb(start=Fstart, objective=b.f, lower=0, upper=3)
FTar = FTar.fit$par
FTar.Conv = FTar.fit$convergence
}
#3. Repeat if nonsense outputs (e.g. h<0.2, negative Biomass RP), i.e. obtain sensible joint priors
#(Trigger==1 flags a rejected draw; the repeat block below re-draws until a
#sensible joint prior is found - it duplicates the sampling code above)
Trigger=0
if (length(spr.temp)==1|B.tar<0|B.lim<0|BTHR<0)Trigger=1
Trigged[j]=Trigger
if(Trigger==1)
{ repeat
  {
#3.1 draw random samples (same scheme as the first pass)
A.MAX=A.max(AA[1],AA[2],AA[1])
age=0:A.MAX
if(Growth.var=="YES") GR=GROWTH(GROW[[1]],GROW[[2]],SIG)
if(Growth.var=="NO") GR=matrix(c(GROW[[1]],GROW[[2]]),ncol=2)
Rep=REP.PER(BF[2],BF[1])
A.MAT=AGE.MAT(AMat[1],AMat[2])
if(!spec.=="gummy")
{
mid.FL.fem=Lo+(GR[1]-Lo)*(1-exp(-GR[2]*age)) #modified version
total.length=b.fem+a.fem*mid.FL.fem
}
if(spec.=="gummy")
{
total.length=Lo+(GR[1]-Lo)*(1-exp(-GR[2]*age)) #gummy pars are fn(total length)
mid.FL.fem=total.length
}
Fec=FEC(spec.)
#3.2 put a cap on gummy Fecundity to avoid predicting beyond data range
if(spec.=="gummy")
{
Fec=ifelse(Fec>Max.fec[1],Max.fec[1],Fec)
Fec=ifelse(Fec<0,0,Fec)
}
mm=M.fun(A.MAX,GR[2],GR[1],Temper,A.MAT,TO,WT[1,1],WT[1,2],w.g,w.ng)
Sel.A=Select(alphabeta.S,alpha.S,beta.S)
#3.3 calculate SPR.mer quantities
spr.temp=SPR(A.MAX,mm,Fec,Rep,sex.ratio,A.MAT,Sel.A,0,"Y","ogive")
if(length(spr.temp)>1)
{
#3.3.1 Calculate reference points
#3.3.1.1 Threshold (i.e. MER)
#Biomass (Bmer)
BTHR=spr.temp$Dep.MER
#F (Fmer)
#Set up useful vectors
fecundity=spr.temp$fecundity
maturity=spr.temp$maturity
lx=vector(length=length(age)) #no fishing
lz=vector(length=length(age)) #with fishing
Observed=spr.temp$SPR.mer
phieggs0=spr.temp$phi.o
fn.yield=function(Fe) EqPerRec(Fe=Fe)$epsilon
#numerical approximation
Fmer.fit <- nlminb( start=0.1 , objective=fn.yield, lower=0.0001, upper=5.0,
                control=list(eval.max=500, iter.max=500 ) )
FTHR=Fmer.fit$par
FTHR.Conv=Fmer.fit$convergence
#3.2. Limits
#3.2.1. Biomass (Blim = p x BTHR)
B.lim=Prop.FMsy*BTHR
#3.2.2 F
obs.biom <- B.lim
b.f <- function(Fstart)
{
temp.spr = EqPerRec(Fstart)$SPR
temp.SSB =Find.B(spr.temp$alpha,R0=1,spr=temp.spr,spr0=1)  #R0 and spr0 =1 as it's on relative scale
yy<-abs(temp.SSB - obs.biom )
return(yy)
}
Flim.fit = nlminb(start=Fstart, objective=b.f, lower=0, upper=3)
FLim = Flim.fit$par
Flim.Conv = Flim.fit$convergence
#3.3. Targets
#3.3.1. Biomass (Btar = (1/p) x BTHR)
B.tar=(1/Prop.FMsy)*BTHR
#3.3.2 F
obs.biom <- B.tar
FTar.fit = nlminb(start=Fstart, objective=b.f, lower=0, upper=3)
FTar = FTar.fit$par
FTar.Conv = FTar.fit$convergence
}
#break out of the repeat once a sensible joint prior was drawn
Trigger=0
if (length(spr.temp)==1|B.tar<0|B.lim<0|BTHR<0)Trigger=1
if(Trigger==0)break
  }
}
#store quantities of interest for this draw
SPR.out[[j]]=spr.temp
Store.M[[j]]=mm
Store.Sel[[j]]=Sel.A
Store.len[[j]]=mid.FL.fem
Store.Fec[[j]]=Fec
#4.5 Store all Bs and Fs as (threshold, limit, target) triples
b.out[[j]]=c(BTHR,B.lim,B.tar)
f.out[[j]]=c(FTHR,FLim,FTar)
#4.6 Store convergence code (0 = converged)
Convergence[[j]]=c(FTHR.Conv=FTHR.Conv)
#5. Calculate F reference points for arbitrary SPR levels
Observed=0.40   #target (Tsai et al 2011 page 1388)
F40.fit=optimize(fn.yield,lower=0.0001,upper=5)  #numerical approximation
f.out.40[[j]]=F40.fit$minimum
Observed=0.30   #limit
F30.fit=optimize(fn.yield,lower=0.0001,upper=5)  #numerical approximation
f.out.30[[j]]=F30.fit$minimum
#6. Calculate Fmsy proxy from Zhou et al 2012 (function defined elsewhere)
f.zhou[[j]]=Zhou(mean(mm))
}
#store this species' results
Species.SPR.MER[[a]]=SPR.out
Species.M[[a]]=Store.M
Species.Fec[[a]]=Store.Fec
Species.Sel[[a]]=Store.Sel
Species.len[[a]]=Store.len
Species.B.MER[[a]]=b.out
Species.F.MER[[a]]=f.out
Species.F.est.Conv[[a]]=do.call(rbind,Convergence)
Species.F.40[[a]]=f.out.40
Species.F.30[[a]]=f.out.30
Species.F.zhou[[a]]=f.zhou
Species.Trigged[[a]]=Trigged
})
#Extract reference points (MSY proxies) for the sensitivity run: reshape
#per-draw triples into one data frame per species
FMER=BMER=vector("list", length=N.sp)
names(BMER) =names(FMER)= species.names
for (i in 1:N.sp)
{
Fmer=data.frame(Fmer=rep(NA,N.sim),Flim=rep(NA,N.sim),Ftar=rep(NA,N.sim))
Bmer=data.frame(Bmer=rep(NA,N.sim),Blim=rep(NA,N.sim),Btar=rep(NA,N.sim))
for (j in 1:N.sim)
{
Bmer[j,]=Species.B.MER[[i]][[j]]
Fmer[j,]=Species.F.MER[[i]][[j]]
}
BMER[[i]]=Bmer
FMER[[i]]=Fmer
}
#5.1. F reference points for the sensitivity run (same layout as Figure 3)
#tiff(file="Figure.F.sensit.tiff",width = 2400, height = 2400,units = "px", res = 300, compression = "lzw")    #create tiff
par(mfcol=c(4,1),las=1,mai=c(.6,.5,.075,.15),omi=c(.1,.1,.1,.05),mgp=c(1.5,0.6,0))
XLim=c(.4,.4,.3,.06)
for (i in 1:N.sp)
{
one=density(FMER[[i]][,2],adjust=2,na.rm =T,from=0,to=1)
two=density(FMER[[i]][,1],adjust=2,na.rm =T,from=0,to=1)
three=density(FMER[[i]][,3], adjust=2,na.rm =T,from=0,to=1)
plot(one, type="l",lty=1,col=1,xlab="",lwd=2.5, yaxs="i",xaxs="i",yaxt='n',
     ylab="",main="",xlim=c(0,XLim[i]),ylim=c(0, max(one$y,two$y,three$y)*1.05),cex.axis=1.75,cex.lab=1.65)
lines(two, lty=1,col="gray60",lwd=2.5)
lines(three, lty=5,col="gray40",lwd=2.5)
legend("topright",SP.names[i],cex=1.75,bty="n")
if(i==1)legend("topleft",c("Limit","Threshold","Target"),lty=c(1,1,5),
               cex=1.75,bty="n",col=c(1,"gray60","gray40"),lwd=2)
}
mtext("Density",side=2,line=-1.5,font=1,las=0,cex=1.5,outer =T)
mtext(expression("Fishing mortality " (year^-1)),side=1,line=2.6,font=1,las=0,cex=1.5)
#dev.off()
#################################################LIZ BROOK'S###################
#2.2 Spawners per recruit (Liz Brooks)
# Spawners-per-recruit at a given fishing mortality multiplier (Liz Brooks).
# Ages 1..nages-1 contribute fecundity x maturity, discounted by survivorship
# to the start of the age and by mortality up to the within-year spawning
# time; the last age is a plus group (geometric sum -> divide by 1-exp(-z)).
# nages:      number of age classes (the last is the plus group)
# fec.age:    fecundity at age; mat.age: proportion mature at age
# M.age:      natural mortality at age; sel.age: selectivity at age
# F.mult:     fully-selected fishing mortality
# spawn.time: fraction of the year elapsed at spawning (0-1)
# Returns scalar spawners per recruit. Vectorized form of the original loop
# (identical results); seq_len keeps nages==1 from producing the 1:0 bug.
s.per.recr<-function(nages,fec.age,mat.age,M.age, F.mult, sel.age, spawn.time )
{
  ages=seq_len(nages)
  # total mortality at age and survivorship to the start of each age class
  z=M.age[ages] + F.mult*sel.age[ages]
  cum.survive=cumprod(c(1, exp(-z[-nages])))
  # per-age spawning contribution, discounted to the spawning time
  contrib=cum.survive*fec.age[ages]*mat.age[ages]*exp(-z*spawn.time)
  # plus group: survivors of the last age spawn every subsequent year
  contrib[nages]=contrib[nages]/(1-exp(-z[nages]))
  sum(contrib)
}
#2.3 Yield per recruit (Liz Brooks)
# Yield per recruit (Baranov catch equation) at fishing mortality F.mult.
# Vectorised equivalent of the original age loop; the terminal age is a
# plus group (no (1 - exp(-z)) factor). Assumes nages >= 2.
#
# Args:
#   nages   number of age classes (last one is the plus group)
#   wgt.age weight at age (pass a vector of 1s for yield in numbers)
#   M.age   natural mortality at age
#   F.mult  fully-selected fishing mortality
#   sel.age selectivity at age
ypr<-function(nages, wgt.age, M.age, F.mult, sel.age )
{
  z <- M.age + F.mult * sel.age      # total mortality at age
  f <- F.mult * sel.age              # fishing mortality at age
  # survival from recruitment to the start of each age class
  alive <- c(1, cumprod(exp(-z[seq_len(nages - 1)])))
  true.ages <- seq_len(nages - 1)
  catch <- wgt.age[true.ages] * f[true.ages] * (1 - exp(-z[true.ages])) *
    alive[true.ages] / z[true.ages]
  sum(catch) + wgt.age[nages] * f[nages] * alive[nages] / z[nages]
}
#2.4 Equilibrium SSB (includes Ricker option) (Liz Brooks)
#2.4 Equilibrium SSB (includes Ricker option) (Liz Brooks)
# Equilibrium spawning stock biomass from a Beverton-Holt stock-recruit
# relationship, on the relative scale set by R0 and spr0.
#
# Args:
#   recr.par     steepness (is.steepness = TRUE) or alpha directly (FALSE)
#   R0           unfished recruitment
#   spr          spawners per recruit at the fished level
#   spr0         unfished spawners per recruit
#   is.steepness interpret recr.par as steepness (h) or as alpha
#   SRtype       1 = Beverton-Holt; 2 = Ricker (NOT implemented)
#
# BUG FIX: the original silently fell through for SRtype != 1 and failed
# with "object 'ssb' not found"; now fails with an explicit message.
ssb.eq<-function(recr.par, R0, spr, spr0, is.steepness=TRUE, SRtype )
{
  #SRtype=1 is BH
  #SRtype=2 is Ricker (advertised but never implemented)
  sprF=spr/spr0
  if (SRtype==1) {
    if (is.steepness) alpha.hat <- 4*recr.par/(1-recr.par)
    else alpha.hat <- recr.par
    ssb=spr0*R0*(sprF*alpha.hat - 1.0)/(alpha.hat-1.0)
  } else {
    stop("ssb.eq: only SRtype = 1 (Beverton-Holt) is implemented", call. = FALSE)
  }
  return(ssb)
}
############################################
########################################## EXAMPLE USED TO SET UP SCRIPT ############################
#1.1 life history
#Dusky (Brooks et al 2010)
# Worked example: derive alpha / steepness / SPR.mer from dusky shark life
# history, then classify stock status from a scaled abundance index.
#ages
age.min=0
age.max=40
age=age.min:age.max
r=1 #age at recruitment
first=match(r,age)   # index of age-at-recruitment within the age vector
last=match(age.max,age)
Numb.pups=7.1 #mean number of pups
cycle.length=3 #reproductive cycle length
sex.ratio=0.5 #embryo sex ratio
FEc= Numb.pups*sex.ratio/cycle.length #annual female fecundity
#fecundity at age (number of female pups per year): zero until age 21
fec=c(rep(0,21),rep(FEc,length(age)-21)) #NEW
#proportion mature at age (length 41, ages 0-40)
mu=c(rep(0,11),.001,.002,.006,.013,.03,.062,.124,.226,.37,.535,.687,.803,.881,.929,.958,.975,.985,
.991,.994,.996,.998,.998,.999,.999,rep(1,6))
#survivorship (annual survival fraction at age)
surv=c(.78,.865,.875,.883,.889,.894,.899,.903,.907,.91,.913,.915,.918,.919,.922,.923,.925,.926,.927,.928,
.93,.93,.932,.933,.934,.934,.935,.935,.936,.937,.937,.938,.939,.939,.939,.94,.94,.94,.94,.941,
.942)
# instantaneous natural mortality at age
m=-log(surv)
#compounded survival: cumulative survivorship from recruitment onward
surv.compounded=cumprod(c(1,surv[first:(last)])) #NEW
#unexploited spawners per recruit
phi.o=sum(fec*mu*surv.compounded) #NEW
#maximum lifetime reproductive rate at low density
alpha=phi.o*surv[1]
#steepness (Beverton-Holt steepness implied by alpha)
h=alpha/(4+alpha)
#Spawning potential ratio at maximum excess recruitment (MER) (Beverton-Holt relationship)
SPR.mer=1/alpha^0.5
#Reference point assessment
#1. Stock status
#Depletion at MER (Proportional reduction from unexploited level)
Reference.depletion.MER=((alpha^0.5)-1)/(alpha-1) #this is the optimal level of depletion (life history dependent!)
#Abundance index
Index.1974=2.197; Index.2003=0.24; Depletion.in.1974=0.8
# scale the 2003 index to absolute depletion via the assumed 1974 depletion
Scaled.relative.index.2003=Depletion.in.1974*(Index.2003/Index.1974)
#Overfished threshold
#this is some proportion of Reference.depletion.MER
# proportion p: at least 0.5, otherwise 1 minus the largest natural mortality
p=max(c(0.5,(1-max(m))))
#Stock status: overfished if the scaled index falls below p * MER depletion
if(Scaled.relative.index.2003<(p*Reference.depletion.MER)){Stock.status="Overfished"}else
{Stock.status="Not overfished"}
#}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}
###############
### added may 2014 by liz
### comparing predicted analytic MER (which assumes maturity=1 from the first age and
### selectivity =1 for all ages) to numerically estimated MER
###
# Working directory for outputs (reused by write.csv below).
# NOTE(review): hard-coded absolute path + setwd() in a script is fragile —
# confirm/adjust before running on another machine.
wd<-("C:/liz_temp/papers/matias_sharks/liz_R/")
setwd(wd)
### dummy parameter values (recycled from above)
#Dusky (Brooks et al 2010)
#ages
age.min=0
age.max=40
age=age.min:age.max
nages <- length(age)   # 41 age classes (ages 0-40)
r=1 #age at recruitment
first=match(r,age)     # index of age 1 within the age vector
last=match(age.max,age)
Numb.pups=7.1 #mean number of pups
cycle.length=3 #reproductive cycle length
sex.ratio=0.5 #embryo sex ratio
FEc= Numb.pups*sex.ratio/cycle.length #annual female fecundity
spawn.time <- 0 # value between 0 and 1 to decrement survival until timing of spawning
# Dome-shaped "fake" selectivity: ascending logistic times descending
# logistic, rescaled so the maximum equals 1.
# (Also drops the original's harmless "1++exp" double unary plus.)
a50.up <- 1        # age at 50% selection, ascending limb
slope.up <- 2
a50.down <- 4      # age at 50% selection, descending limb
slope.down <- 0.4
ascending <- 1 / (1 + exp(-(age - a50.up) / slope.up))
descending <- 1 - 1 / (1 + exp(-(age - a50.down) / slope.down))
sel1 <- ascending * descending
sel <- sel1 / max(sel1)
#weight at age (dummy values for a and b in l-w eqn)
Linf <- 374
K <- 0.0367
t0 <- -3.3
# von Bertalanffy length -> weight for ages 1..40 (length nages-1).
# NOTE(review): wgt.age is not used below — later YPR calls pass a vector
# of 1s because yield is maximized in numbers, not weight.
wgt.age <- 1.5e-6*(Linf*(1-exp(-K*(age[2:nages]-t0)) ) )^2.8
#fecundity at age (number of female pups per year): zero until age 21
fec=c(rep(0,21),rep(FEc,length(age)-21)) #NEW
#proportion mature at age (length 41, ages 0-40)
mu=c(rep(0,11),.001,.002,.006,.013,.03,.062,.124,.226,.37,.535,.687,.803,.881,.929,.958,.975,.985,
.991,.994,.996,.998,.998,.999,.999,rep(1,6))
#survivorship (annual survival fraction at age)
surv=c(.78,.865,.875,.883,.889,.894,.899,.903,.907,.91,.913,.915,.918,.919,.922,.923,.925,.926,.927,.928,
.93,.93,.932,.933,.934,.934,.935,.935,.936,.937,.937,.938,.939,.939,.939,.94,.94,.94,.94,.941,
.942)
# instantaneous natural mortality at age
m=-log(surv)
#compounded survival: cumulative survivorship from recruitment onward
surv.compounded=cumprod(c(1,surv[first:(last)])) #NEW
#unexploited spawners per recruit
phi.o=sum(fec*mu*surv.compounded) #NEW
#maximum lifetime reproductive rate at low density
alpha=phi.o*surv[1]
#steepness (Beverton-Holt steepness implied by alpha)
steepness=alpha/(4+alpha)
#-------Spawners per recruit -----------------------------
# Spawners per recruit at fishing mortality F.mult; the terminal age is a
# plus group. Spawning output is decremented to the within-year spawning
# time. (Duplicate of the definition above, kept for the "Liz" section.)
s.per.recr <- function(nages, fec.age, mat.age, M.age, F.mult, sel.age, spawn.time) {
  total.out <- 0
  alive <- 1  # survival to the start of the current age class
  for (a in seq_len(nages - 1)) {
    z.a <- M.age[a] + F.mult * sel.age[a]
    total.out <- total.out +
      alive * fec.age[a] * mat.age[a] * exp(-z.a * spawn.time)
    alive <- alive * exp(-z.a)
  }
  # plus group: geometric accumulation over the remaining ages
  z.plus <- M.age[nages] + F.mult * sel.age[nages]
  total.out +
    fec.age[nages] * mat.age[nages] * alive * exp(-z.plus * spawn.time) /
      (1 - exp(-z.plus))
}
#-------Yield per recruit -----------------------------
# Yield per recruit (Baranov catch) at fishing mortality F.mult; the
# terminal age is a plus group (no (1 - exp(-z)) factor). Pass wgt.age as
# a vector of 1s for yield in numbers.
ypr <- function(nages, wgt.age, M.age, F.mult, sel.age) {
  total.yield <- 0
  alive <- 1  # survival to the start of the current age class
  for (a in seq_len(nages - 1)) {
    z.a <- M.age[a] + F.mult * sel.age[a]
    total.yield <- total.yield +
      wgt.age[a] * F.mult * sel.age[a] * (1 - exp(-z.a)) * alive / z.a
    alive <- alive * exp(-z.a)
  }
  z.plus <- M.age[nages] + F.mult * sel.age[nages]
  total.yield + wgt.age[nages] * F.mult * sel.age[nages] * alive / z.plus
}
#------------------------------------
#-------Equilibrium SSB ------------------------
# Equilibrium spawning stock biomass under a Beverton-Holt stock-recruit
# relationship, on the relative scale set by R0.BH and spr0.
# recr.par is steepness when is.steepness is TRUE, otherwise alpha directly.
ssb.eq <- function(recr.par, R0.BH, spr, spr0, is.steepness = TRUE) {
  alpha.BH <- if (is.steepness) 4 * recr.par / (1 - recr.par) else recr.par
  relative.spr <- spr / spr0
  spr0 * R0.BH * (relative.spr * alpha.BH - 1.0) / (alpha.BH - 1.0)
}
#------------------------------------
# Beverton-Holt recruitment for spawner abundance S (vectorised over S).
# recr.par is steepness when is.steepness is TRUE, otherwise alpha directly.
# (Drops the original's dead pre-allocation of y, which was overwritten.)
bev.holt.alpha <- function(S, R0, recr.par, spr0, is.steepness = TRUE) {
  alpha.BH <- if (is.steepness) 4 * recr.par / (1 - recr.par) else recr.par
  R0 * S * alpha.BH / (R0 * spr0 + (alpha.BH - 1.0) * S)
}
##============================================================================================
##---- Calculate Analytical SPR.MER, B.MER, and numerically solve for F.MER ---- #
R0 <- 1 #put unfished recruitment at 1 (everything on relative scale)
sr0 <- 1 #unfished spawners per recruit at 1 (relative scale)
# Analytic case: maturity fixed at 1 and selectivity at 1 for all ages;
# (nages-1) age classes because age 0 (pre-recruitment) is excluded.
sr0.calc <- s.per.recr(nages=(nages-1), fec.age=fec[first:(last)], mat.age=rep(1, (nages-1)), M.age= m[first:(last)], F.mult=0, sel.age=rep(1, (nages-1)), spawn.time=spawn.time)
#1. Spawning potential ratio at maximum excess recruitment (MER) (Beverton-Holt relationship)
SPR.MER=1/alpha^0.5
#2. Depletion at MER (Proportional reduction from unexploited level)
B.MER=((alpha^0.5)-1)/(alpha-1) #this is the optimal level of depletion (life history dependent!)
#3. Solve numerically for F.MER
#(technically, should fix maturity=1 and selectivity=1 at all ages)
# also, yield is maximized in NUMBERS, not WEIGHT, so calculating YPR with a vector of 1 for the weight at age
F.start=0.12
t.spr <- SPR.MER # the SPR that we want to match by finding an F that produces it
# Objective: |SPR(F)/SPR(0) - target SPR| -> 0 at the matching F
spr.f <- function(F.start) {
abs(s.per.recr(nages=(nages-1), fec.age=fec[first:(last)], mat.age=rep(1, (nages-1)), M.age= m[first:(last)], F.mult=F.start, sel.age=rep(1, (nages-1)), spawn.time=spawn.time)/sr0.calc - t.spr )
}
yyy <- nlminb(start=F.start, objective=spr.f, lower=0, upper=3)
F.MER <- yyy$par #find F that matches SPR.MER
#4. Yield per recruit at MER (in numbers)
# calculated with F that matches SPR
t.ypr<- ypr((nages-1), wgt.age=rep(1, (nages-1)), M.age=m[first:(last)], F.mult=F.MER, sel.age=rep(1, (nages-1)) )
# NOTE(review): "R0=R0" partially matches ssb.eq's R0.BH argument — works,
# but relies on R's partial argument matching; verify against ssb.eq above.
t.ssb <-ssb.eq( recr.par=alpha, R0=R0, spr=t.spr, spr0=sr0, is.steepness=F)
# yield = YPR * (equilibrium SSB / SPR) = YPR * equilibrium recruitment
Y.MER <- t.ypr*t.ssb/t.spr
#5. Let B.Lim = p*B.MER
p <- 0.75
B.Lim <- p*B.MER
#6. calculate corresponding F.MER that produces B.Lim
t.b.ref <- B.Lim # define a level of B/B0 that we want to match with an F
# Objective: |equilibrium SSB(F) - target biomass| -> 0 at the matching F
b.f <- function(F.start) {
temp.spr = s.per.recr(nages=(nages-1), fec.age=fec[first:(last)], mat.age=rep(1, (nages-1)), M.age= m[first:(last)], F.mult=F.start, sel.age=rep(1, (nages-1)), spawn.time=spawn.time)/sr0.calc
temp.SSB = ssb.eq( recr.par=alpha, R0=R0, spr=temp.spr, spr0=sr0, is.steepness=F)
yy<-abs(temp.SSB - t.b.ref )
return(yy)
}
yyy <- nlminb(start=F.start, objective=b.f, lower=0, upper=3)
F.Lim <- yyy$par
#7. Yield per recruit at B.Lim (in numbers)
t.ypr<- ypr((nages-1), wgt.age=rep(1, (nages-1)), M.age=m[first:(last)], F.mult=F.Lim, sel.age=rep(1, (nages-1)) )
SPR.Lim <- s.per.recr(nages=(nages-1), fec.age=fec[first:(last)], mat.age=rep(1, (nages-1)), M.age= m[first:(last)], F.mult=F.Lim, sel.age=rep(1, (nages-1)), spawn.time=spawn.time)/sr0.calc
Y.Lim <- t.ypr*B.Lim/SPR.Lim
#8. Let B.Tar = 1/p*B.MER (target biomass is the threshold scaled up by 1/p)
B.Tar <- (1/p)*B.MER
#9. calculate F.Tar that produces B.Tar
t.b.ref <- B.Tar  # define a level of B/B0 that we want to match with an F
# Objective: |equilibrium SSB(F) - target biomass| -> 0 at the matching F
b.f <- function(F.start) {
temp.spr = s.per.recr(nages=(nages-1), fec.age=fec[first:(last)], mat.age=rep(1, (nages-1)), M.age= m[first:(last)], F.mult=F.start, sel.age=rep(1, (nages-1)), spawn.time=spawn.time)/sr0.calc
temp.SSB = ssb.eq( recr.par=alpha, R0=R0, spr=temp.spr, spr0=sr0, is.steepness=FALSE)
yy<-abs(temp.SSB - t.b.ref )
return(yy)
}
yyy <- nlminb(start=F.start, objective=b.f, lower=0, upper=3)
F.Tar <- yyy$par
#10. Calculate YPR (in numbers) at B.Tar
t.ypr<- ypr((nages-1), wgt.age=rep(1, (nages-1)), M.age=m[first:(last)], F.mult=F.Tar, sel.age=rep(1, (nages-1)) )
SPR.Tar <- s.per.recr(nages=(nages-1), fec.age=fec[first:(last)], mat.age=rep(1, (nages-1)), M.age= m[first:(last)], F.mult=F.Tar, sel.age=rep(1, (nages-1)), spawn.time=spawn.time)/sr0.calc
# BUG FIX: the target yield must be scaled by SPR.Tar (the SPR at F.Tar),
# not by t.spr (= SPR.MER) — consistent with Y.MER/Y.Lim above and
# Y.Tar.num in the numeric section below (yield = YPR * B / SPR at same F).
Y.Tar <- t.ypr*B.Tar/SPR.Tar
# Collect the analytic reference points into a single named 12x1 matrix
# (same layout as MER.numeric below, so the two can be cbind-ed).
MER.analytic <- matrix(
  c(SPR.MER, B.MER, F.MER, Y.MER,
    SPR.Lim, B.Lim, F.Lim, Y.Lim,
    SPR.Tar, B.Tar, F.Tar, Y.Tar),
  nrow = 12, ncol = 1,
  dimnames = list(c("SPR.MER", "B.MER", "F.MER", "Y.MER",
                    "SPR.Lim", "B.Lim", "F.Lim", "Y.Lim",
                    "SPR.Tar", "B.Tar", "F.Tar", "Y.Tar"), NULL)
)
##============================================================================================
##---- Calculate all MER reference points NUMERICALLY ---------------------------------------
R0 <- 1 #put unfished recruitment at 1 (everything on relative scale)
sr0 <- 1 #unfished spawners per recruit at 1 (relative scale)
# Numeric case: observed maturity (mu) and dome selectivity (sel).
# NOTE(review): sel.age=sel here is the full-length vector while every call
# below uses sel[first:(last)] — harmless at F.mult=0 (selectivity has no
# effect when F=0), but inconsistent; confirm intent.
sr0.calc <- s.per.recr(nages=(nages-1), fec.age=fec[first:(last)], mat.age=mu[first:(last)], M.age= m[first:(last)], F.mult=0, sel.age=sel, spawn.time=spawn.time)
# 1.Numeric F.MER
# Given observed maturity, selectivity, find F that maximizes yield
# (still use weight at age = vector of 1s since maximizing yield in numbers)
F.start=0.12
# Objective: negative equilibrium yield (nlminb minimizes)
get.yield.f.min <- function(F.start) {
temp.spr = s.per.recr(nages=(nages-1), fec.age=fec[first:(last)], mat.age=mu[first:(last)], M.age= m[first:(last)], F.mult=F.start, sel.age=sel[first:(last)], spawn.time=spawn.time)/sr0.calc
temp.ypr = ypr(nages=(nages-1), wgt.age=rep(1, (nages-1)), M.age=m[first:(last)], F.mult=F.start, sel.age=sel[first:(last)] )
temp.SSB = ssb.eq( recr.par=alpha, R0=R0, spr=temp.spr, spr0=sr0, is.steepness=F)
yield = temp.ypr*temp.SSB/temp.spr #harvest in weight
yy=-1*yield
return(yy)
} # end get.yield.f.min function
F.nlmin <- nlminb( start=F.start , objective=get.yield.f.min, lower=0.001, upper=3.0,
control=list(eval.max=500, iter.max=500 ) )
F.MER.num <- F.nlmin$par #optimize yield in numbers
#2. Numeric SPR.MER: SPR at the yield-maximizing F
SPR.MER.num <- s.per.recr(nages=(nages-1), fec.age=fec[first:(last)], mat.age=mu[first:(last)], M.age= m[first:(last)], F.mult=F.MER.num, sel.age=sel[first:(last)], spawn.time=spawn.time)/sr0.calc
#3. Numeric B.MER: equilibrium SSB at that SPR
B.MER.num <- ssb.eq( recr.par=alpha, R0=R0, spr=SPR.MER.num, spr0=sr0, is.steepness=F)
#4. Numeric Y.MER: yield = (equilibrium recruitment) * YPR
Y.MER.num <- (B.MER.num/SPR.MER.num)*ypr(nages=(nages-1), wgt.age=rep(1, (nages-1)), M.age=m[first:(last)], F.mult=F.MER.num, sel.age=sel[first:(last)] )
#5. Next, define B.Lim as p*B.MER (p set to 0.75 in the analytic section)
B.Lim.num <- p*B.MER.num
#6. Find F.Lim.num that produces B.Lim.num
t.b.ref <- B.Lim.num # define a level of B/B0 that we want to match with an F
# Objective: |equilibrium SSB(F) - target biomass| -> 0 at the matching F
b.f <- function(F.start) {
temp.spr = s.per.recr(nages=(nages-1), fec.age=fec[first:(last)], mat.age=mu[first:(last)], M.age= m[first:(last)], F.mult=F.start, sel.age=sel[first:(last)], spawn.time=spawn.time)/sr0.calc
temp.SSB = ssb.eq( recr.par=alpha, R0=R0, spr=temp.spr, spr0=sr0, is.steepness=F)
yy<-abs(temp.SSB - t.b.ref )
return(yy)
}
yyy <- nlminb(start=F.start, objective=b.f, lower=0, upper=3)
F.Lim.num <- yyy$par
#7. Yield per recruit at B.Lim (in numbers)
t.ypr<- ypr((nages-1), wgt.age=rep(1, (nages-1)), M.age=m[first:(last)], F.mult=F.Lim.num, sel.age=sel[first:(last)] )
SPR.Lim.num <- s.per.recr(nages=(nages-1), fec.age=fec[first:(last)], mat.age=mu[first:(last)], M.age= m[first:(last)], F.mult=F.Lim.num, sel.age=sel[first:(last)], spawn.time=spawn.time)/sr0.calc
Y.Lim.num <- t.ypr*B.Lim.num/SPR.Lim.num
#8. Let B.Tar = 1/p*B.MER
# BUG FIX: use the NUMERIC threshold B.MER.num here (the original used the
# analytic B.MER), mirroring B.Lim.num <- p*B.MER.num in step 5 above.
B.Tar.num <- (1/p)*B.MER.num
#9. calculate F.Tar that produces B.Tar
t.b.ref <- B.Tar.num  # define a level of B/B0 that we want to match with an F
# Objective: |equilibrium SSB(F) - target biomass| -> 0 at the matching F
b.f <- function(F.start) {
temp.spr = s.per.recr(nages=(nages-1), fec.age=fec[first:(last)], mat.age=mu[first:(last)], M.age= m[first:(last)], F.mult=F.start, sel.age=sel[first:(last)], spawn.time=spawn.time)/sr0.calc
temp.SSB = ssb.eq( recr.par=alpha, R0=R0, spr=temp.spr, spr0=sr0, is.steepness=FALSE)
yy<-abs(temp.SSB - t.b.ref )
return(yy)
}
yyy <- nlminb(start=F.start, objective=b.f, lower=0, upper=3)
F.Tar.num <- yyy$par
#10. Calculate YPR (in numbers) at B.Tar
t.ypr<- ypr((nages-1), wgt.age=rep(1, (nages-1)), M.age=m[first:(last)], F.mult=F.Tar.num, sel.age=sel[first:(last)] )
SPR.Tar.num <- s.per.recr(nages=(nages-1), fec.age=fec[first:(last)], mat.age=mu[first:(last)], M.age= m[first:(last)], F.mult=F.Tar.num, sel.age=sel[first:(last)], spawn.time=spawn.time)/sr0.calc
Y.Tar.num <- t.ypr*B.Tar.num/SPR.Tar.num
# Collect the numeric reference points into a named 12x1 matrix (same row
# order as MER.analytic), compare the two side by side, and export as CSV.
MER.numeric <- matrix(
  c(SPR.MER.num, B.MER.num, F.MER.num, Y.MER.num,
    SPR.Lim.num, B.Lim.num, F.Lim.num, Y.Lim.num,
    SPR.Tar.num, B.Tar.num, F.Tar.num, Y.Tar.num),
  nrow = 12, ncol = 1,
  dimnames = list(c("SPR.MER", "B.MER", "F.MER", "Y.MER",
                    "SPR.Lim", "B.Lim", "F.Lim", "Y.Lim",
                    "SPR.Tar", "B.Tar", "F.Tar", "Y.Tar"), NULL)
)
MER.comparison <- cbind(MER.analytic[, 1], MER.numeric[, 1])
colnames(MER.comparison) <- c("Analytic", "Numeric")
write.csv(x = MER.comparison, file = paste(wd, "Outputs/", "MER.comparison.csv", sep = ""))
##################################################################################################
# #Not used
#
# #3. Per recruit analysis
# Fe=seq(0,1, by=0.001) #fishing vector
# SPR.LIM=c(.5,.6,.6) #limit F%SPR
#
# A.MAX=A.max(AA[1],AA[2],AA[1])
#
#
#
#
#
#
# #Outputs
# par(mfcol=c(3,1),mai=c(1,.8,.2,.1))
# #YPR
# plot(Fe,FeYield,type='l',ylab="Yield per recruit (kg)",xlab="Fishing mortality")
# arrows(Fmax,fn.yield(Fmax)*.92,Fmax,fn.yield(Fmax),col=2,length=.1)
# text(Fmax,fn.yield(Fmax)*.9,paste("Fmax=",Fmax),col=2)
#
#
# #Potential ratio
# plot(Fe,SPR,type='l',ylab="Potential ratio",xlab="Fishing mortality",ylim=c(0,1),
# yaxs="i",xaxs="i",las=1)
# lines(Fe,EPR,col=2)
# legend('topright',c("SPR","EPR"),bty='n',col=1:2,lty=1)
#
#
#
# #F limit
# arrows(FLim,fn.Lim(FLim),FLim,.04,col=2,length=.1)
# arrows(0,fn.Lim(FLim),FLim,fn.Lim(FLim),col=2,length=0)
# text(FLim,.03,FLim,col=2)
# text(.065,SPR.lim*1.05,paste(SPR.lim*100,"%SPR",sep=""),col=2)
#
#
# #4. Compare empirical F (from McAuley et al 2007) with derived Fbrp
# Store.sel=Store.age=list(Whiskery=NA,Dusky=NA,Sandbar=NA)
# fn.get.sel=function(species,age,Lo,Linf,K,alphabeta,alpha,beta)
# {
# mid.FL.fem=Lo+(Linf-Lo)*(1-exp(-K*age)) #use modified version
# sel.fem=((mid.FL.fem*10/alphabeta)^alpha)*(exp(alpha-(mid.FL.fem*10/beta)))
# return(sel.fem=sel.fem)
# }
#
# for (a in 1:N.sp)
# {
# if(!a==3)age=seq(0,max(species.list[[a]]$max.age),by=1)
# if(a==3) age=seq(0,max(species.list[[a]]$max.age),by=.1)
# GR=Growth.pars[a,];ab=Sel.pars[a,1];al=Sel.pars[a,2];be=Sel.pars[a,3]
# Store.sel[[a]]=fn.get.sel(species.names[a],age,Size.birth[a],GR[1,1],GR[1,2],ab,al,be)
# Store.age[[a]]=age
# }
#
# FMER=c(0.058,0.017)
# tiff(file="Compare_Fs.tiff",width = 2400, height = 2400,units = "px", res = 300, compression = "lzw") #create tiff
#
# par(mfcol=c(2,1),mai=c(1.3,1.3,.1,.1))
# for (a in 2:N.sp)
# {
# nn=1:length(Empirc.F[[a-1]])
# if(!a==3)
# {
# plot(as.numeric(names(Empirc.F[[a-1]])),Empirc.F[[a-1]],pch=19,cex=2,col=2,
# ylab="F at age",xlab="age",cex.lab=1.5,xaxt="n", yaxs="i",xaxs="i")
# points(FMER[a-1]*Store.sel[[a]][nn],pch=19,col=3,cex=2)
# legend("right",c("Empirical","FThr"),pch=19,col=2:3,cex=1.25,bty="n")
# axis(1,at=as.numeric(names(Empirc.F[[a-1]])),
# labels=paste(as.numeric(names(Empirc.F[[a-1]])),"+",sep=""),tck=-0.04,cex.axis=1.25)
# }
# if(a==3)
# {
# plot( nn,Empirc.F[[a-1]],pch=19,cex=2,col=2,ylab="F at age",xlab="age",cex.lab=1.5,xaxt="n")
# rango=list(seq(0,2.9,.1),seq(3,5.9,.1),seq(6,8.9,.1),seq(9,11.9,.1),seq(12,14.9,.1),
# seq(15,17.9,.1),seq(18,24,.1),25,26,27,28,29,30)
# Mean.Sel=length(rango)
# for(p in 1:length(rango))
# {
# id=match(round(rango[[p]],1),round(Store.age[[a]],1))
# Mean.Sel[p]=mean(Store.sel[[a]][id])
# }
# points(FMER[a-1]*Mean.Sel,pch=19,col=3,cex=2)
# axis(1,at=nn,labels=names(Empirc.F[[a-1]]),tck=-0.04,cex.axis=1)
#
# }
# legend("topright",species.names[a],cex=1.5,bty="n")
# }
# dev.off()
#
#
# EqPerRec=function(Fe=0)
# {
# lx[1]=1 # Unfished survival at age 0
# lz[1]=1 # Fished survival at age 0
#
# for(i in 2:length(age))
# {
# lx[i]=lx[i-1]*exp(-mm[i]) #Unfished survivorship curve
# lz[i]=lz[i-1]*exp(-mm[i]-(Fe*Sel.A[i-1])) #Fished survivorship curve
# }
#
# #Unfished eggs
# phieggs0=sum(lx*ma*fec) #total number of eggs per recruit
#
# #Fished eggs
# phieggs=sum(lz*ma*fec) #total number of eggs per recruit
#
# #Spawning potential ratio
# SPR=phieggs/phieggs0
#
# #Objective function
# epsilon=(Observed-SPR)^2 #objective function
#
#
# return(list(phieggs0=phieggs0,phieggs=phieggs,epsilon=epsilon,SPR=SPR))
# }
# BTHR=spr.temp$Dep.MER
#
# #4. calculate F reference points for MER
# #4.1. Set up useful vectors
# #fecundity
# fecundity=spr.temp$fecundity
#
# #maturity
# maturity=spr.temp$maturity
#
# #survivorship vectors
# lx=vector(length=length(age)) #no fishing
# lz=vector(length=length(age)) #with fishing
#
# #4.2 F.THRe (F.mer)
# Observed=spr.temp$SPR.mer
# phieggs0=spr.temp$phi.o
# fn.yield=function(Fe) EqPerRec(Fe=Fe)$epsilon
# #numerical approximation
# Fmer.fit <- nlminb( start=0.1 , objective=fn.yield, lower=0.0001, upper=5.0,
# control=list(eval.max=500, iter.max=500 ) )
# FTHR=Fmer.fit$par
# FTHR.Conv=Fmer.fit$convergence
#
#
# #4.3 Target reference points
# #first find SPR from FLim
# Flim.man.par=1/Prop.FMsy
# FLim=FTHR*Flim.man.par
# SPR.lim=EqPerRec(FLim)$SPR
#
# #then find B from SPR assuming B-H
# B.lim=Find.B(spr.temp$alpha,R0=1,spr=SPR.lim,spr0=1)
#
# # #4.3 Limit reference points
# # #first find SPR from BLIMIT
# # BLIMIT=spr.temp$Dep.MER*spr.temp$p
# # Observed=BLIMIT
# # fn.SPR=function(SPR) Find.SPR(SPR=SPR)$epsilon
# # SPR.fit=nlminb( start=0.6 , objective=fn.SPR, lower=0.001, upper=1.0,
# # control=list(eval.max=500, iter.max=500 ) )
# # SPR.lim.Conv=SPR.fit$convergence
# #
# # #then find F from SPR.fit
# # Observed=SPR.fit$par
# # Flim.fit=nlminb( start=FTHR , objective=fn.yield, lower=0.0001, upper=5.0,
# # control=list(eval.max=500, iter.max=500 ) )
# # FLim=Flim.fit$par
# # Flim.Conv=Flim.fit$convergence
#
# #4.4 Target reference points
# #first find SPR from Ftar
# FTar=FTHR*Prop.FMsy
# SPR.tar=EqPerRec(FTar)$SPR
#
# #then find B from SPR assuming B-H
# B.tar=Find.B(spr.temp$alpha,R0=1,spr=SPR.tar,spr0=1)
#
#
# # #first find SPR from BTAR
# # BTAR=spr.temp$Dep.MER*1.2
# # Observed=BTAR
# # SPR.fit=nlminb( start=0.8 , objective=fn.SPR, lower=0.001, upper=1.0,
# # control=list(eval.max=500, iter.max=500 ) )
# # SPR.tar.Conv=SPR.fit$convergence
# #
# # #then find F from SPR.fit
# # Observed=SPR.fit$par
# # Ftar.fin=nlminb( start=FTHR , objective=fn.yield, lower=0.0001, upper=5.0,
# # control=list(eval.max=500, iter.max=500 ) )
# # FTar=Ftar.fin$par
# # FTar.Conv=Ftar.fin$convergence
#
# system.time(for (a in 1:N.sp)
# {
# spec.=species.names[a]
# WT=Weight.conv[a,]
# GROW=Growth.pars[a,]
# TO=Growth.pars[a,3]
# SIG=SIGMA[[a]]
# Lo=Size.birth[a]
# AA=species.list[[a]]$max.age
# FF=species.list[[a]]$fec
# BF=species.list[[a]]$breed.freq
# b.fem=Length.conv[a,1]
# a.fem=Length.conv[a,2]
# alphabeta.S=Sel.pars[a,1]
# alpha.S=Sel.pars[a,2]
# beta.S=Sel.pars[a,3]
# sex.ratio=0.5
# AMat=species.list[[a]]$age.mat
# Temper=Temperature[a]
# r=1
#  spawn.time = 0      # specify time of the year when spawning (or pupping) occurs as a fraction between 0 and 1
#
# w.g=Wght.G[a]
# w.ng=Wght.noG[a]
#
# SPR.out=f.out=b.out=Store.M=Store.Fec=Convergence=Store.Sel=Store.len=f.out.30=f.out.40=
# f.zhou=vector("list", length=N.sim)
# #names(f.out)=c("threshold","limit","target")
# Trigged=rep(NA,N.sim)
# for (j in 1:N.sim)
# {
# #1. draw random samples of input parameters
# A.MAX=A.max(AA[1],AA[2],AA[1])
# age=0:A.MAX
#
# if(Growth.var=="YES") GR=GROWTH(GROW[[1]],GROW[[2]],SIG)
# if(Growth.var=="NO") GR=matrix(c(GROW[[1]],GROW[[2]]),ncol=2)
#
# Rep=REP.PER(BF[2],BF[1])
# A.MAT=AGE.MAT(AMat[1],AMat[2])
#
# if(!spec.=="gummy")
# {
# mid.FL.fem=Lo+(GR[1]-Lo)*(1-exp(-GR[2]*age)) #modified version
# total.length=b.fem+a.fem*mid.FL.fem
# }
#
# if(spec.=="gummy")
# {
# total.length=Lo+(GR[1]-Lo)*(1-exp(-GR[2]*age)) #gummy pars are fn(total length)
# mid.FL.fem=total.length
# }
#
# Fec=FEC(spec.)
#
# #put a cap on gummy Fecundity to avoid predicting beyond data range
# if(spec.=="gummy")
# {
# Fec=ifelse(Fec>Max.fec[1],Max.fec[1],Fec)
# Fec=ifelse(Fec<0,0,Fec)
# }
#
#
# mm=M.fun(A.MAX,GR[2],GR[1],Temper,A.MAT,TO,WT[1,1],WT[1,2],w.g,w.ng)
# Sel.A=Select(alphabeta.S,alpha.S,beta.S)
#
#
# #2. calculate SPR.mer quantities
# spr.temp=SPR(A.MAX,mm,Fec,Rep,sex.ratio,A.MAT,Sel.A,0,"Y","ogive")
#
# if(length(spr.temp)>1)
# {
# #3. Calculate reference points
#
# #3.1. Threshold (i.e. MER)
#
# #3.1.1 Biomass (Bmer)
# BTHR=spr.temp$Dep.MER
#
# #3.1.2. F (Fmer)
# #Set up useful vectors
# fecundity=spr.temp$fecundity
# maturity=spr.temp$maturity
# lx=vector(length=length(age)) #no fishing
# lz=vector(length=length(age)) #with fishing
# Observed=spr.temp$SPR.mer
# phieggs0=spr.temp$phi.o
# fn.yield=function(Fe) EqPerRec(Fe=Fe)$epsilon
#
# #numerical approximation
# Fstart=0.1
# Fmer.fit <- nlminb( start=Fstart, objective=fn.yield, lower=0.0001, upper=5.0,
# control=list(eval.max=500, iter.max=500 ) )
# FTHR=Fmer.fit$par
# FTHR.Conv=Fmer.fit$convergence
#
#
# #3.2. Limits
#
# #3.2.1. Biomass (Blim = p x BTHR)
# B.lim=Prop.FMsy*BTHR
#
# #3.2.2 F
# obs.biom <- B.lim
# b.f <- function(Fstart)
# {
# temp.spr = EqPerRec(Fstart)$SPR
# temp.SSB =Find.B(spr.temp$alpha,R0=1,spr=temp.spr,spr0=1) #R0 and spr0 =1 as it's on relative scale
# yy<-abs(temp.SSB - obs.biom )
# return(yy)
# }
# Flim.fit = nlminb(start=Fstart, objective=b.f, lower=0, upper=3)
# FLim = Flim.fit$par
# Flim.Conv = Flim.fit$convergence
#
#
# #3.3. Targets
#
# #3.3.1. Biomass (Blim = p x BTHR)
# B.tar=(1/Prop.FMsy)*BTHR
#
# #3.3.2 F
# obs.biom <- B.tar
# FTar.fit = nlminb(start=Fstart, objective=b.f, lower=0, upper=3)
# FTar = FTar.fit$par
# FTar.Conv = FTar.fit$convergence
#
#
# }
#
#
# #3. Repeat if nonsense outputs (e.g. h<0.2, negative Biomass RP), i.e. obtain sensible joint priors
# Trigger=0
# if (length(spr.temp)==1|B.tar<0|B.lim<0|BTHR<0)Trigger=1
# Trigged[j]=Trigger
# if(Trigger==1)
# { repeat
# {
# #3.1 draw random samples
# A.MAX=A.max(AA[1],AA[2],AA[1])
# age=0:A.MAX
#
# if(Growth.var=="YES") GR=GROWTH(GROW[[1]],GROW[[2]],SIG)
# if(Growth.var=="NO") GR=matrix(c(GROW[[1]],GROW[[2]]),ncol=2)
#
# Rep=REP.PER(BF[2],BF[1])
# A.MAT=AGE.MAT(AMat[1],AMat[2])
#
# if(!spec.=="gummy")
# {
# mid.FL.fem=Lo+(GR[1]-Lo)*(1-exp(-GR[2]*age)) #modified version
# total.length=b.fem+a.fem*mid.FL.fem
# }
#
# if(spec.=="gummy")
# {
# total.length=Lo+(GR[1]-Lo)*(1-exp(-GR[2]*age)) #gummy pars are fn(total length)
# mid.FL.fem=total.length
# }
#
# Fec=FEC(spec.)
#
# #3.2 put a cap on gummy Fecundity to avoid predicting beyond data range
# if(spec.=="gummy")
# {
# Fec=ifelse(Fec>Max.fec[1],Max.fec[1],Fec)
# Fec=ifelse(Fec<0,0,Fec)
# }
#
#
# mm=M.fun(A.MAX,GR[2],GR[1],Temper,A.MAT,TO,WT[1,1],WT[1,2],w.g,w.ng)
# Sel.A=Select(alphabeta.S,alpha.S,beta.S)
#
#
# #3.3 calculate SPR.mer quantities
# spr.temp=SPR(A.MAX,mm,Fec,Rep,sex.ratio,A.MAT,Sel.A,0,"Y","ogive")
#
# if(length(spr.temp)>1)
# {
# #3.3.1 Calculate reference points
#
# #3.3.1.1 Threshold (i.e. MER)
#
# #Biomass (Bmer)
# BTHR=spr.temp$Dep.MER
#
# #F (Fmer)
# #Set up useful vectors
# fecundity=spr.temp$fecundity
# maturity=spr.temp$maturity
# lx=vector(length=length(age)) #no fishing
# lz=vector(length=length(age)) #with fishing
# Observed=spr.temp$SPR.mer
# phieggs0=spr.temp$phi.o
# fn.yield=function(Fe) EqPerRec(Fe=Fe)$epsilon
#
# #numerical approximation
# Fmer.fit <- nlminb( start=0.1 , objective=fn.yield, lower=0.0001, upper=5.0,
# control=list(eval.max=500, iter.max=500 ) )
# FTHR=Fmer.fit$par
# FTHR.Conv=Fmer.fit$convergence
#
#
# #3.2. Limits
#
# #3.2.1. Biomass (Blim = p x BTHR)
# B.lim=Prop.FMsy*BTHR
#
# #3.2.2 F
# obs.biom <- B.lim
# b.f <- function(Fstart)
# {
# temp.spr = EqPerRec(Fstart)$SPR
# temp.SSB =Find.B(spr.temp$alpha,R0=1,spr=temp.spr,spr0=1) #R0 and spr0 =1 as it's on relative scale
# yy<-abs(temp.SSB - obs.biom )
# return(yy)
# }
# Flim.fit = nlminb(start=Fstart, objective=b.f, lower=0, upper=3)
# FLim = Flim.fit$par
# Flim.Conv = Flim.fit$convergence
#
#
# #3.3. Targets
#
# #3.3.1. Biomass (Blim = p x BTHR)
# B.tar=(1/Prop.FMsy)*BTHR
#
# #3.3.2 F
# obs.biom <- B.tar
# FTar.fit = nlminb(start=Fstart, objective=b.f, lower=0, upper=3)
# FTar = FTar.fit$par
# FTar.Conv = FTar.fit$convergence
#
#
# }
#
# #break if sensible joint prior
# Trigger=0
# if (length(spr.temp)==1|B.tar<0|B.lim<0|BTHR<0)Trigger=1
#
#
# if(Trigger==0)break
# }
# }
#
#
# #store quantities of interest
# SPR.out[[j]]=spr.temp
# Store.M[[j]]=mm
# Store.Sel[[j]]=Sel.A
# Store.len[[j]]=mid.FL.fem
# Store.Fec[[j]]=Fec
#
#
#
# #4.5 Store all Bs and Fs
# b.out[[j]]=c(BTHR,B.lim,B.tar)
# f.out[[j]]=c(FTHR,FLim,FTar)
#
# #4.6 Store convergence code
# Convergence[[j]]=c(FTHR.Conv=FTHR.Conv)
#
#
# #5. Calculate F reference points for arbitrary SPR
# Observed=0.40 #target (Tsai et al 2011 page 1388)
# F40.fit=optimize(fn.yield,lower=0.0001,upper=5) #numerical approximation
# f.out.40[[j]]=F40.fit$minimum
#
# Observed=0.30 #limit
# F30.fit=optimize(fn.yield,lower=0.0001,upper=5) #numerical approximation
# f.out.30[[j]]=F30.fit$minimum
#
# #6. Calculate Fmsy proxy from Zhou et al 2012
# f.zhou[[j]]=Zhou(mean(mm))
#
#
# }
# Species.SPR.MER[[a]]=SPR.out
# Species.M[[a]]=Store.M
# Species.Fec[[a]]=Store.Fec
# Species.Sel[[a]]=Store.Sel
# Species.len[[a]]=Store.len
# Species.B.MER[[a]]=b.out
# Species.F.MER[[a]]=f.out
# Species.F.est.Conv[[a]]=do.call(rbind,Convergence)
# Species.F.40[[a]]=f.out.40
# Species.F.30[[a]]=f.out.30
# Species.F.zhou[[a]]=f.zhou
# Species.Trigged[[a]]=Trigged
# })
# for (a in 1:N.sp)
# {
# spec.=species.names[a]
# WT=Weight.conv[a,]
# GROW=Growth.pars[a,]
# TO=Growth.pars[a,3]
# SIG=SIGMA[[a]]
# Lo=Size.birth[a]
# AA=species.list[[a]]$max.age
# FF=species.list[[a]]$fec
# BF=species.list[[a]]$breed.freq
# b.fem=Length.conv[a,1]
# a.fem=Length.conv[a,2]
# alphabeta.S=Sel.pars[a,1]
# alpha.S=Sel.pars[a,2]
# beta.S=Sel.pars[a,3]
# sex.ratio=0.5
# AMat=species.list[[a]]$age.mat
# Temper=Temperature[a]
# r=1
#  spawn.time = 0      # specify time of the year when spawning (or pupping) occurs as a fraction between 0 and 1
#
# w.g=Wght.G[a]
# w.ng=Wght.noG[a]
#
# SPR.out=f.out=b.out=Store.M=Store.Fec=Convergence=Store.Sel=Store.len=f.out.30=f.out.40=
# f.zhou=vector("list", length=N.sim)
# #names(f.out)=c("threshold","limit","target")
# Trigged=rep(NA,N.sim)
# for (j in 1:N.sim)
# {
# #1. draw random samples of input parameters
# A.MAX=A.max(AA[1],AA[2],AA[1])
# age=0:A.MAX
#
# if(Growth.var=="YES") GR=GROWTH(GROW[[1]],GROW[[2]],SIG)
# if(Growth.var=="NO") GR=matrix(c(GROW[[1]],GROW[[2]]),ncol=2)
#
# Rep=REP.PER(BF[2],BF[1])
# A.MAT=AGE.MAT(AMat[1],AMat[2])
#
# if(!spec.=="gummy")
# {
# mid.FL.fem=Lo+(GR[1]-Lo)*(1-exp(-GR[2]*age)) #modified version
# total.length=b.fem+a.fem*mid.FL.fem
# }
#
# if(spec.=="gummy")
# {
# total.length=Lo+(GR[1]-Lo)*(1-exp(-GR[2]*age)) #gummy pars are fn(total length)
# mid.FL.fem=total.length
# }
#
# Fec=FEC(spec.)
#
# #put a cap on gummy Fecundity to avoid predicting beyond data range
# if(spec.=="gummy")
# {
# Fec=ifelse(Fec>Max.fec[1],Max.fec[1],Fec)
# Fec=ifelse(Fec<0,0,Fec)
# }
#
#
# mm=M.fun(A.MAX,GR[2],GR[1],Temper,A.MAT,TO,WT[1,1],WT[1,2],w.g,w.ng)
# Sel.A=Select(alphabeta.S,alpha.S,beta.S)
#
#
# #2. calculate SPR.mer quantities
# spr.temp=SPR(A.MAX,mm,Fec,Rep,sex.ratio,A.MAT,Sel.A,0,"Y","ogive")
# if(length(spr.temp)>1)
# {
# #3. Calculate reference points
#
# #3.1. Threshold (i.e. MER)
#
# #3.1.1 Biomass (Bmer)
# BTHR=spr.temp$Dep.MER
#
# #3.1.2. F (Fmer)
# #Set up useful vectors
# fecundity=spr.temp$fecundity
# maturity=spr.temp$maturity
# lx=vector(length=length(age)) #no fishing
# lz=vector(length=length(age)) #with fishing
# Observed=spr.temp$SPR.mer
# phieggs0=spr.temp$phi.o
# fn.yield=function(Fe) EqPerRec(Fe=Fe)$epsilon
#
# #numerical approximation
# Fmer.fit <- nlminb( start=0.1 , objective=fn.yield, lower=0.0001, upper=5.0,
# control=list(eval.max=500, iter.max=500 ) )
# FTHR=Fmer.fit$par
# FTHR.Conv=Fmer.fit$convergence
#
#
# #3.2. Limits
#
# #3.2.1. F (Flim=(1/p) x Fmer)
# #first find SPR from FLim
# Flim.man.par=1/Prop.FMsy
# FLim=FTHR*Flim.man.par
# SPR.lim=EqPerRec(FLim)$SPR
#
# #3.2.2 Biomass (Blim)
# #then find B from SPR assuming B-H
# B.lim=Find.B(spr.temp$alpha,R0=1,spr=SPR.lim,spr0=1)
#
#
# #3.3. Targets
#
# #3.3.1. F (Ftar=p x Fmer)
# #first find SPR from Ftar
# FTar=FTHR*Prop.FMsy
# SPR.tar=EqPerRec(FTar)$SPR
#
# #3.3.2 Biomass (Btar)
# #then find B from SPR assuming B-H
# B.tar=Find.B(spr.temp$alpha,R0=1,spr=SPR.tar,spr0=1)
#
# }
#
#
# #3. Repeat if nonsense outputs (e.g. h<0.2, negative Biomass RP), i.e. obtain sensible joint priors
# Trigger=0
# if (length(spr.temp)==1|B.tar<0|B.lim<0|BTHR<0)Trigger=1
# Trigged[j]=Trigger
# if(Trigger==1)
# { repeat
# {
# #3.1 draw random samples
# A.MAX=A.max(AA[1],AA[2],AA[1])
# age=0:A.MAX
#
# if(Growth.var=="YES") GR=GROWTH(GROW[[1]],GROW[[2]],SIG)
# if(Growth.var=="NO") GR=matrix(c(GROW[[1]],GROW[[2]]),ncol=2)
#
# Rep=REP.PER(BF[2],BF[1])
# A.MAT=AGE.MAT(AMat[1],AMat[2])
#
# if(!spec.=="gummy")
# {
# mid.FL.fem=Lo+(GR[1]-Lo)*(1-exp(-GR[2]*age)) #modified version
# total.length=b.fem+a.fem*mid.FL.fem
# }
#
# if(spec.=="gummy")
# {
# total.length=Lo+(GR[1]-Lo)*(1-exp(-GR[2]*age)) #gummy pars are fn(total length)
# mid.FL.fem=total.length
# }
#
# Fec=FEC(spec.)
#
# #3.2 put a cap on gummy Fecundity to avoid predicting beyond data range
# if(spec.=="gummy")
# {
# Fec=ifelse(Fec>Max.fec[1],Max.fec[1],Fec)
# Fec=ifelse(Fec<0,0,Fec)
# }
#
#
# mm=M.fun(A.MAX,GR[2],GR[1],Temper,A.MAT,TO,WT[1,1],WT[1,2],w.g,w.ng)
# Sel.A=Select(alphabeta.S,alpha.S,beta.S)
#
#
# #3.3 calculate SPR.mer quantities
# spr.temp=SPR(A.MAX,mm,Fec,Rep,sex.ratio,A.MAT,Sel.A,0,"Y","ogive")
#
# if(length(spr.temp)>1)
# {
# #3.3.1 Calculate reference points
#
# #3.3.1.1 Threshold (i.e. MER)
#
# #Biomass (Bmer)
# BTHR=spr.temp$Dep.MER
#
# #F (Fmer)
# #Set up useful vectors
# fecundity=spr.temp$fecundity
# maturity=spr.temp$maturity
# lx=vector(length=length(age)) #no fishing
# lz=vector(length=length(age)) #with fishing
# Observed=spr.temp$SPR.mer
# phieggs0=spr.temp$phi.o
# fn.yield=function(Fe) EqPerRec(Fe=Fe)$epsilon
#
# #numerical approximation
# Fmer.fit <- nlminb( start=0.1 , objective=fn.yield, lower=0.0001, upper=5.0,
# control=list(eval.max=500, iter.max=500 ) )
# FTHR=Fmer.fit$par
# FTHR.Conv=Fmer.fit$convergence
#
#
# #3.3.1.2 Limits
# #F (FLim)
# #note: Flim=(1/p) x Fmer
# # first find SPR from FLim
# Flim.man.par=1/Prop.FMsy
# FLim=FTHR*Flim.man.par
# SPR.lim=EqPerRec(FLim)$SPR
#
# #Biomass (Blim)
# #then find B from SPR assuming B-H
# B.lim=Find.B(spr.temp$alpha,R0=1,spr=SPR.lim,spr0=1)
#
#
# #3.3.1.3 Targets
# #F (Tar)
# #note:Ftar=p x Fmer)
# # first find SPR from Ftar
# FTar=FTHR*Prop.FMsy
# SPR.tar=EqPerRec(FTar)$SPR
#
# #Biomass (Btar)
# #then find B from SPR assuming B-H
# B.tar=Find.B(spr.temp$alpha,R0=1,spr=SPR.tar,spr0=1)
#
# }
#
# #break if sensible joint prior
# Trigger=0
# if (length(spr.temp)==1|B.tar<0|B.lim<0|BTHR<0)Trigger=1
#
#
# if(Trigger==0)break
# }
# }
#
#
# #store quantities of interest
# SPR.out[[j]]=spr.temp
# Store.M[[j]]=mm
# Store.Sel[[j]]=Sel.A
# Store.len[[j]]=mid.FL.fem
# Store.Fec[[j]]=Fec
#
#
#
# #4.5 Store all Bs and Fs
# b.out[[j]]=c(BTHR,B.lim,B.tar)
# f.out[[j]]=c(FTHR,FLim,FTar)
#
# #4.6 Store convergence code
# Convergence[[j]]=c(FTHR.Conv=FTHR.Conv)
# # Convergence[[j]]=c(FTHR.Conv=FTHR.Conv,SPR.lim.Conv=SPR.lim.Conv,Flim.Conv=Flim.Conv,
# # SPR.tar.Conv=SPR.tar.Conv,FTar.Conv=FTar.Conv)
#
# #5. Calculate F reference points for arbitrary SPR
# Observed=0.40 #target (Tsai et al 2011 page 1388)
# F40.fit=optimize(fn.yield,lower=0.0001,upper=5) #numerical approximation
# f.out.40[[j]]=F40.fit$minimum
#
# Observed=0.30 #limit
# F30.fit=optimize(fn.yield,lower=0.0001,upper=5) #numerical approximation
# f.out.30[[j]]=F30.fit$minimum
#
# #6. Calculate Fmsy proxy from Zhou et al 2012
# f.zhou[[j]]=Zhou(mean(mm))
#
#
# # #7. Calculate F from standard per recruit analysis #fix, not working well
# # age=0:A.MAX
# #
# # #7.1 YPR
# # fn.yield=function(Fe) Per.recruit(Linf=GR[1,1],K=GR[1,2],to=GR[1,3],b=b.fem,a=a.fem,
# # M=mm,bwt=WT[1,1],awt=WT[1,2],age.mat=A.MAT,fec=Fec,breed.freq=Rep,fishing=Fe)$Epsilon
# # Fmax.optim=optimize(fn.yield,lower=0.0001,upper=5)
# # Fmax=Fmax.optim$minimum
# #
# # #7.2 Potential ratio
# # fn.yield=function(Fe) Per.recruit(Linf=GR[1,1],K=GR[1,2],to=GR[1,3],b=b.fem,a=a.fem,
# # M=mm,bwt=WT[1,1],awt=WT[1,2],age.mat=A.MAT,fec=Fec,breed.freq=Rep,fishing=Fe)$Epsilon.mer
# # Fmer.optim=optimize(fn.yield,lower=0.0001,upper=5)
# # Fmer=Fmer.optim$minimum
#
#
# }
# Species.SPR.MER[[a]]=SPR.out
# Species.M[[a]]=Store.M
# Species.Fec[[a]]=Store.Fec
# Species.Sel[[a]]=Store.Sel
# Species.len[[a]]=Store.len
# Species.B.MER[[a]]=b.out
# Species.F.MER[[a]]=f.out
# Species.F.est.Conv[[a]]=do.call(rbind,Convergence)
# Species.F.40[[a]]=f.out.40
# Species.F.30[[a]]=f.out.30
# Species.F.zhou[[a]]=f.zhou
# Species.Trigged[[a]]=Trigged
# }
|
90009afed4557a686555266387f2d2794898c639
|
d8b9a31582ece8819768a738d091e0e3d7f3b93d
|
/R/printf.R
|
776120848b4164228af98bcf88cb1e25ee25507b
|
[
"MIT"
] |
permissive
|
PriceLab/tfBindingSites
|
c44bcef9d4cac90bc4b1f20f3541aab3aa7cee53
|
9df2839311f4a76889ddd913918b66bbe3e56a54
|
refs/heads/master
| 2020-03-28T18:54:42.487163
| 2018-09-16T21:54:24
| 2018-09-16T21:54:24
| 148,926,020
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 55
|
r
|
printf.R
|
# C-style formatted printing: formats via sprintf and prints the result
# without surrounding quotes. Returns the noquote string (invisibly, as
# print() does), so it can also be captured.
printf <- function(...) {
  formatted <- noquote(sprintf(...))
  print(formatted)
}
|
281490c8fba4238b60337c21798db5104c514738
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/kzfs/examples/kzrc2.Rd.R
|
0548a3d50eaaf0c1127e0da5a80d19c0578b8488
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,519
|
r
|
kzrc2.Rd.R
|
# Extracted example for kzfs::kz.rc2: build two synthetic 2D plane waves,
# mix them (with optional Gaussian noise), reconstruct each directional
# component with KZFT, and score reconstructions by correlation with the
# clean signals. Requires the `kzfs` package and an interactive device.
library(kzfs)
### Name: kzrc2
### Title: Reconstruct 2D Wave Signals For Given Directions with KZFT
### Aliases: kzrc2 kz.rc2
### Keywords: KZFT reconstruction
### ** Examples
dx <- 100 # The x and y scale of the wave field
dy <- 100 # Enlarge them to 300 to get better result.
b <- expand.grid(x=1:dx, y=dy:1)
# Wave #1: direction q1 = 30 deg, frequency f1 = 0.1, random phase.
q1 <- pi/6; f1 <- 0.1;
b$v1 <- sin(f1*2*pi*(b$x*cos(q1)+b$y*sin(q1))+runif(1))
a1 <- array(0,c(dx,dy))
a1[as.matrix(b[,1:2])] <- b$v1
# Wave #2: direction q2 = -60 deg, frequency f2 = 0.15, random phase.
q2 <- -pi/3; f2 <- 0.15;
b$v2 <- sin(f2*2*pi*(b$x*cos(q2)+b$y*sin(q2))+runif(1))
a2 <- array(0,c(dx,dy))
a2[as.matrix(b[,1:2])] <- b$v2
# Mixture field: wave #1 plus 2.5x wave #2; independent N(0,1) noise matrix.
a <- array(0,c(dx,dy))
a[as.matrix(b[,1:2])] <- b$v1 + 2.5*b$v2
noise <- matrix(rnorm(dx*dy,0,1),ncol=dy)
# 3D surface views of the component waves and their (noisy) mixture.
persp(1:(dx/2), 1:(dy/2), a1[1:(dx/2), 1:(dy/2)], zlab="",
main="wave #1", theta=0, phi=45, ticktype="detailed", col="lightblue")
persp(1:(dx/2), 1:(dy/2), a2[1:(dx/2), 1:(dy/2)],
main="wave #2", theta=90, phi=-110, ticktype="detailed", col="lightblue")
persp(1:(dx/2), 1:(dy/2), a[1:(dx/2), 1:(dy/2)],
main="wave #1 + #2 ", theta=90, phi=-110, ticktype="detailed", col="lightblue")
persp(1:(dx/2), 1:(dy/2), a[1:(dx/2), 1:(dy/2)] + 5*noise[1:(dx/2), 1:(dy/2)],
main="wave #1 + #2 + 5*noise", theta=90, phi=-110, ticktype="detailed", col="lightblue")
# Heat-map views of the same fields.
image(x=1:dim(a1)[1] , y=1:dim(a1)[2], z=a1)
box(); mtext("wave #1")
image(x=1:dim(a2)[1] , y=1:dim(a2)[2], z=a2)
box(); mtext("wave #2")
image(x=1:dim(a)[1] , y=1:dim(a)[2], z=a+0*noise)
box(); mtext("wave #1 + #2 ")
image(x=1:dim(a)[1] , y=1:dim(a)[2], z=a+7*noise)
box(); mtext("wave #1 + #2 + 7*noise")
# Reconstruct both directions; compare with/without averaging (avg, rlvl),
# scoring each reconstruction by correlation against the noise-free field.
rc0 <- kz.rc2(a+0*noise, angle=c(q1,q2)*180/pi,f=c(f1,f2), m = 50, avg=FALSE)
cor(as.vector(a[1:dim(rc0)[1],1:dim(rc0)[2]]), as.vector(rc0), use="pairwise.complete.obs")
rc0 <- kz.rc2(a+0*noise, angle=c(q1,q2)*180/pi,f=c(f1,f2), m = 50, avg=TRUE, rlvl=15)
cor(as.vector(a[1:dim(rc0)[1],1:dim(rc0)[2]]), as.vector(rc0), use="pairwise.complete.obs")
rc <- kz.rc2(a+7*noise, angle=c(q1,q2)*180/pi,f=c(f1,f2), m = 50, avg=TRUE, rlvl=15, plot=TRUE)
cor(as.vector(a[1:dim(rc)[1],1:dim(rc)[2]]), as.vector(rc), use="pairwise.complete.obs")
dev.new();image(x=1:dim(rc)[1] , y=1:dim(rc)[2], z=a[1:dim(rc)[1],1:dim(rc)[2]])
box();title("Signal without noise")
# Single-direction reconstruction of wave #2 only, from the noisy mixture.
rc <- kz.rc2(a+7*noise, angle=q2*180/pi, f=f2, m = 50, avg=TRUE, rlvl=21, plot=TRUE)
cor(as.vector(a2[1:dim(rc)[1],1:dim(rc)[2]]), as.vector(rc), use="pairwise.complete.obs")
dev.new();image(x=1:dim(rc)[1] , y=1:dim(rc)[2], z=a2[1:dim(rc)[1],1:dim(rc)[2]])
box();title("Signal without noise")
|
81f8745d5caf4568d46f05fd16b344db18e1c6b7
|
7bb3f64824627ef179d5f341266a664fd0b69011
|
/Business_Statistics_:_A_First_Course_by_David_M._Levine,_Kathryn_A._Szabat,_David_F._Stephan,_P._K._Vishwanathan/CH8/EX8.4/Ex8_4.R
|
7e819dc334df9dc59d78fc085772c9b32e577060
|
[
"MIT"
] |
permissive
|
prashantsinalkar/R_TBC_Uploads
|
8bd0f71834814b1d03df07ce90b2eae3b7d357f8
|
b3f3a8ecd454359a2e992161844f2fb599f8238a
|
refs/heads/master
| 2020-08-05T23:06:09.749051
| 2019-10-04T06:54:07
| 2019-10-04T06:54:07
| 212,746,586
| 0
| 0
|
MIT
| 2019-10-04T06:03:49
| 2019-10-04T06:03:48
| null |
UTF-8
|
R
| false
| false
| 379
|
r
|
Ex8_4.R
|
# Confidence interval estimate for the population proportion:
#   p +/- z * sqrt(p * (1 - p) / n)
X <- 35     # number of sampled items having the characteristic
n <- 200    # sample size
z <- 1.645  # critical value from the standardized normal distribution
p <- X / n  # sample proportion
p
Pop_prop_upper <- p + z * sqrt((p * (1 - p)) / n)
Pop_prop_upper
Pop_prop_lower <- p - z * sqrt((p * (1 - p)) / n)
Pop_prop_lower
|
c8bd1d711b9d681f91e2729fe99fa2acef7e6ee2
|
7b842e47b36c5eccaee6b71c77e22519b49c0168
|
/man/bufferPoint.Rd
|
c63b7ddf7edd049389cc07cdbcdb3deda4247fdd
|
[] |
no_license
|
cran/geoknife
|
5dc92ca0aa7e7afe2afac3fd848e3b7fc99c07c4
|
e6dba004a958f5954317cfcd7faaec1d8d094ae9
|
refs/heads/master
| 2023-07-20T09:34:10.506446
| 2023-07-06T07:00:12
| 2023-07-06T07:00:12
| 48,080,629
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 723
|
rd
|
bufferPoint.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/util-bufferPoint.R
\name{bufferPoint}
\alias{bufferPoint}
\title{Create linear ring from point}
\usage{
bufferPoint(point)
}
\arguments{
\item{point}{A vector of longitude and latitude values}
}
\value{
A linear ring (with closure) as a numeric vector
}
\description{
Buffer ring creation from point
}
\details{
bufferPoint is a function that takes a longitude and latitude pair and creates
a buffered ring (i.e., a feature with non-zero area) centered on that point.
}
\examples{
linearRing = bufferPoint(c(-111.48, 36.95))
}
\seealso{
\linkS4class{simplegeom}
}
\author{
Jordan S. Read
}
\keyword{internal}
\keyword{methods}
|
c546b8de4a2c18d1587819cbe6eabeb6f48183d8
|
ba46cd68bfcf8b0375cbea758e052fe1077dbd18
|
/genotype_qc.R
|
3312cb3dbbb47ef277cbfc5d19f58d46964445da
|
[] |
no_license
|
nuada/easd_nh
|
9e207fe8c3d02eb45f4258718b29956a90590178
|
f432a1039703f7a5709a0d37405cfbdc1f8181f7
|
refs/heads/master
| 2021-01-10T05:44:39.174206
| 2016-04-07T10:31:16
| 2016-04-07T10:31:16
| 54,579,357
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 15,523
|
r
|
genotype_qc.R
|
#' Configuration
# Attach required packages; plink2R (with its RcppEigen dependency) reads
# PLINK binary filesets, plyr provides ddply, ggplot2/printr drive the plots
# and tabular report output.
library(RcppEigen)
library(printr)
library(plyr)
library(ggplot2)
library(plink2R) # install_github("gabraham/plink2R", subdir = 'plink2R')
# Absolute input locations for phenotype and genotype data.
data_dir <- '/data'
phenotype_dir <- paste(data_dir, 'phenotype', sep='/')
genotype_dir <- paste(data_dir, 'genotype', sep='/')
raw_genotypes <- paste(data_dir, 'genotype', 'easd_nh', sep='/')
resources_dir <- '/resources/arrays'
# Scratch directory under which all intermediate PLINK filesets are written.
temp_dir = tempdir()
#' PLINK wrapper.
#' Runs PLINK on the binary fileset `infile`, always producing a new binary
#' fileset (--make-bed) at `outfile` (a fresh scratch prefix by default).
#' Extra command-line flags are passed through `...`. Log lines are echoed,
#' except the routine lines ending in "done."; the output prefix is returned
#' so calls can be chained.
plink_path <- '/usr/bin/plink'
plink <- function(..., infile, outfile = NULL) {
  if (is.null(outfile)) {
    outfile <- tempfile(tmpdir = temp_dir)
  }
  cmd <- paste(plink_path, '--noweb', '--bfile', infile,
               '--make-bed', '--out', outfile, ...)
  log_lines <- system(cmd, intern = TRUE)
  # Suppress the uninformative "... done." progress lines from the log echo.
  print(grep('done\\.$', log_lines, invert = TRUE, value = TRUE))
  outfile
}
#' Write a data frame to a temporary whitespace-delimited file (no header,
#' no row names, no quoting) and return its path. Used to hand ID lists
#' (--keep/--remove/--update-ids inputs) to PLINK.
to_file <- function(x) {
  path <- tempfile(tmpdir = temp_dir)
  write.table(x, file = path, col.names = FALSE, row.names = FALSE,
              quote = FALSE)
  path
}
#' Compare two consecutive QC steps and report sample statistics.
#' Reads both PLINK filesets and returns a list with before/after SNP and
#' individual counts; when counts differ it also includes the removed
#' individual keys ("FID IID") and/or the number of SNPs dropped.
sample_stats <- function(genotype_a, genotype_b) {
  before <- read_plink(genotype_a)
  after <- read_plink(genotype_b)
  # Build a "FID IID" key per row of a .fam table.
  fam_key <- function(fam) {
    apply(fam[, 1:2], 1, function(row) paste(row[1], row[2]))
  }
  stats <- list()
  stats$snps <- c(dim(before$bim)[1], dim(after$bim)[1])
  stats$individuals <- c(dim(before$fam)[1], dim(after$fam)[1])
  if (stats$individuals[1] != stats$individuals[2]) {
    stats$individuals_removed <- setdiff(fam_key(before$fam), fam_key(after$fam))
  }
  if (stats$snps[1] != stats$snps[2]) {
    stats$snps_removed <- stats$snps[1] - stats$snps[2]
  }
  stats
}
#' # Update raw genotypes with data from phenotype.csv
phenotype <- read.csv(paste(data_dir, 'phenotype.csv', sep='/'), sep='\t')
#' Convert exported text file to binary file (only once; skipped if the
#' .bed already exists)
if (!file.exists(paste(raw_genotypes, 'bed', sep='.'))) {
system(paste(plink_path, '--noweb', '--file', paste(genotype_dir, 'easd_nh_2015-11-13-101820', sep='/'), '--make-bed', '--out', raw_genotypes))
}
#' Update FIDs
# PLINK --update-ids expects four columns: old FID, old IID, new FID, new IID.
genotypes <- plink('--update-ids',
to_file(subset(phenotype, select=c(OMICRON_ID, OMICRON_ID, FAMILY_ID, OMICRON_ID))),
infile=raw_genotypes)
#' Update Sex field
genotypes <- plink('--update-sex',
to_file(subset(phenotype, select=c(FAMILY_ID, OMICRON_ID, SEX_PLINK))),
infile=genotypes)
#' Drop excluded individuals (not included in phenotype file)
genotypes_with_phenotype <- plink('--keep',
to_file(subset(phenotype, select=c(FAMILY_ID, OMICRON_ID))),
infile=genotypes)
sample_stats(genotypes, genotypes_with_phenotype)
#' Remove SNPs with unknown location (chromosome code 0)
genotypes_no_chr0 <- plink('--not-chr', 0, infile = genotypes_with_phenotype)
sample_stats(genotypes_with_phenotype, genotypes_no_chr0)
#' Plot missingness per sample and per marker.
#' Reads PLINK --missing output (<prefix>.imiss / <prefix>.lmiss), joins the
#' per-sample table to the global `phenotype` sheet, prints diagnostic plots,
#' and returns the annotated per-sample missingness table.
missingnes <- function (genotypes) {
missing_per_sample <- read.table(paste(genotypes, 'imiss', sep='.'), header = T)
# Attach array/plate/well metadata so missingness can be mapped to lab layout.
missing_per_sample <- merge(missing_per_sample, phenotype, by.x='IID', by.y='OMICRON_ID', all.x=T)
print(qplot(factor(ARRAY_ID), F_MISS, fill=factor(PLATE), data=missing_per_sample, geom = 'boxplot') + coord_flip()+ ggtitle('Fraction missing per sample by array'))
print(qplot(ARRAY_COL, ARRAY_ROW, fill=F_MISS, facets = ~ARRAY_ID, data=missing_per_sample, geom='tile') + scale_fill_gradient(low="white", high="red") + ggtitle('Fraction missing per sample vs location by array'))
print(qplot(WELL_COL, WELL_ROW, fill=F_MISS, facets = ~PLATE, data=missing_per_sample, geom='tile') + scale_fill_gradient(low="white", high="red") + ggtitle('Fraction missing per sample vs location by plate'))
missing_per_marker <- read.table(paste(genotypes, 'lmiss', sep='.'), header = T)
# Exclude markers on Y chromosome (PLINK chromosome code 24)
missing_per_marker <- subset(missing_per_marker, CHR != 24)
print(qplot(factor(CHR), F_MISS, color=factor(CHR), data = missing_per_marker, geom='boxplot') + ggtitle('Missingnes per marker'))
return(missing_per_sample)
}
missing_per_sample <- missingnes(plink('--missing', infile=genotypes_no_chr0))
#' Drop failed plate #16
genotypes_no_plate_16 <- plink('--remove', to_file(subset(phenotype, PLATE == 16, select=c('FAMILY_ID', 'OMICRON_ID'))), infile=genotypes_no_chr0)
sample_stats(genotypes_no_chr0, genotypes_no_plate_16)
#' Drop failed arrays (mean per-array missingness > 0.5)
# TODO remove - does not remove any arrays
mean_missingnes <- ddply(missing_per_sample, .(ARRAY_ID), summarize, mean=mean(F_MISS))
genotypes_no_failures <- plink('--remove', to_file(subset(phenotype, ARRAY_ID %in% mean_missingnes[mean_missingnes$mean > 0.5, 1], select=c('FAMILY_ID', 'OMICRON_ID'))), infile=genotypes_no_plate_16)
sample_stats(genotypes_no_plate_16, genotypes_no_failures)
#' Plot missingness again after removing the failed plate/arrays
invisible(missingnes(plink('--missing', infile=genotypes_no_failures)))
#' Filter by missingness per subject (--mind: drop samples with >1% missing)
genotypes_mind_1pct <- plink('--mind', '0.01', infile=genotypes_no_failures)
sample_stats(genotypes_no_failures, genotypes_mind_1pct)
#' Sex check
# Merge then re-split the X pseudo-autosomal regions (hg19 coordinates) so
# PLINK's --check-sex sees consistently coded X-chromosome data.
genotypes <- plink('--merge-x', infile=genotypes_mind_1pct)
genotypes <- plink('--split-x', 'hg19', infile=genotypes)
genotypes <- plink('--check-sex', infile=genotypes)
sex_check <- read.table(paste(genotypes, 'sexcheck', sep='.'), header = T)
sex_check <- merge(sex_check, phenotype, by.x='IID', by.y='OMICRON_ID', all.x=T)
# Cross-tabulate check status overall and against country / plate.
table(sex_check$STATUS)
table(sex_check$COUNTRY, sex_check$STATUS)
table(sex_check$PLATE, sex_check$STATUS)
# NOTE: `F` below is the F column of the .sexcheck table (resolved through
# `data=sex_check`), not the logical FALSE.
qplot(seq_along(F), F, color=factor(STATUS), data=sex_check) + geom_hline(yintercept=0.2)
qplot(ARRAY_COL, ARRAY_ROW, fill=factor(STATUS), facets = ~ARRAY_ID, data=sex_check, geom='tile') + ggtitle('Sex check per sample vs location by array')
qplot(WELL_COL, WELL_ROW, fill=factor(STATUS), facets = ~PLATE, data=sex_check, geom='tile') + ggtitle('Sex check per sample vs location by plate')
#' Sex check on replicates
# Replicate samples are identified by an underscore in their OMICRON_ID.
replicates <- subset(phenotype, grepl('.*_.*', phenotype$OMICRON_ID), select=c('FAMILY_ID', 'OMICRON_ID'))
genotypes <- plink('--keep', to_file(replicates), infile=genotypes_mind_1pct)
genotypes <- plink('--merge-x', infile=genotypes)
genotypes <- plink('--split-x', 'hg19', infile=genotypes)
genotypes <- plink('--check-sex', infile=genotypes)
replicates_sex_check <- read.table(paste(genotypes, 'sexcheck', sep='.'), header = T)
replicates_sex_check <- merge(replicates_sex_check, phenotype, by.x='IID', by.y='OMICRON_ID', all.x=T)
replicates_sex_check$SAMPLE_ID <- factor(replicates_sex_check$SAMPLE_ID)
table(replicates_sex_check$STATUS)
# A sample's sex call is reproducible when all of its replicates agree.
sex_check_reproducibility <- ddply(replicates_sex_check, .(SAMPLE_ID), function (df){ all(df$STATUS == df$STATUS[1]) })
# Fraction of samples with concordant replicate calls, and count of failures.
sum(sex_check_reproducibility$V1)/length(unique(replicates_sex_check$SAMPLE_ID))
sum(!sex_check_reproducibility$V1)
sex_check_reproducibility_failed <- replicates_sex_check[replicates_sex_check$SAMPLE_ID %in% subset(sex_check_reproducibility, V1==F)$SAMPLE_ID,1:6]
sex_check_reproducibility_failed
#' Drop all samples flagged by sex check
genotypes_no_sex_errors <- plink('--remove', to_file(subset(sex_check, STATUS=='PROBLEM', select=c(FAMILY_ID, IID))), infile=genotypes_mind_1pct)
sample_stats(genotypes_mind_1pct, genotypes_no_sex_errors)
#' Drop chr >= 23 (keep autosomes only)
genotypes_autosomes <- plink('--chr', '1-22', infile = genotypes_no_sex_errors)
sample_stats(genotypes_no_sex_errors, genotypes_autosomes)
#' Analyze heterozygosity
genotypes <- plink('--het', infile=genotypes_autosomes)
heterozygosity <- read.table(paste(genotypes, 'het', sep='.'), header = T)
# H = observed homozygous calls as a percentage of non-missing genotypes
# (O.HOM / N.NM columns of PLINK --het output).
heterozygosity$H <- heterozygosity$O.HOM/heterozygosity$N.NM*100
# Histogram with mean +/- 2 SD guides for spotting outliers.
qplot(H, data=heterozygosity) +
geom_vline(xintercept=mean(heterozygosity$H)+2*sd(heterozygosity$H), color='red') +
geom_vline(xintercept=mean(heterozygosity$H)-2*sd(heterozygosity$H), color='red')
# TODO remove samples with extreme heterozygosity
#' Remove replicates by missingness: keep only the replicate with the lowest
#' F_MISS per SAMPLE_ID. For single-replicate groups the 2:nrow(df) index
#' produces an NA row, which the complete.cases() filter below discards.
genotypes <- plink('--missing', infile=genotypes_autosomes)
missing_per_sample <- read.table(paste(genotypes, 'imiss', sep='.'), header = T)
missing_per_sample <- merge(missing_per_sample, phenotype, by.x='IID', by.y='OMICRON_ID', all.x=T)
replicates_to_remove <- ddply(subset(missing_per_sample, grepl('.*_.*', missing_per_sample$IID)), .(SAMPLE_ID), function (df){ df[order(df$F_MISS)[2:nrow(df)], c('FAMILY_ID', 'IID')] })[,2:3]
replicates_to_remove <- replicates_to_remove[complete.cases(replicates_to_remove),]
genotypes_unique <- plink('--remove', to_file(replicates_to_remove), infile=genotypes_autosomes)
sample_stats(genotypes_autosomes, genotypes_unique)
#' KING wrapper.
#' Runs KING kinship estimation on the PLINK .bed fileset at `infile` and
#' returns a list with the between-family pairs table ('all', from .kin0)
#' and the within-family pairs table ('family', from .kin).
king_path <- '/usr/bin/king'
king <- function (infile) {
  prefix <- tempfile(tmpdir = temp_dir)
  status <- system(paste(king_path, '-b', paste0(infile, '.bed'),
                         '--kinship', '--prefix', prefix))
  # Echo the exit status of the KING run (as the original code did).
  print(status)
  list(
    'all'    = read.table(paste0(prefix, '.kin0'), sep = '\t', header = TRUE),
    'family' = read.table(paste0(prefix, '.kin'), sep = '\t', header = TRUE)
  )
}
#' Kinship coefficient! See: http://cphg.virginia.edu/quinlan/?p=300
kinship <- king(genotypes_unique)
# Horizontal guides mark the expected kinship values for relationship
# degrees (duplicate/MZ = .5 down to distant relatives).
qplot(Kinship, data=kinship[['all']])
qplot(seq_along(Kinship), Kinship, data=kinship[['all']]) +
geom_hline(yintercept=c(.5, .375, .25, .125, .0938, .0625, .0313, .0078, .002), color='blue') +
scale_y_log10()
# Cryptic duplicates
subset(kinship[['all']], Kinship == 0.5)
qplot(Kinship, data=kinship[['family']])
qplot(seq_along(Kinship), Kinship, data=kinship[['family']]) +
geom_hline(yintercept=c(.5, .375, .25, .125, .0938, .0625, .0313, .0078, .002), color='blue') +
scale_y_log10()
# Cryptic duplicates
subset(kinship[['family']], Kinship == 0.5)
# TODO remove cryptic duplicates
#' Filter by missingness per marker (--geno), stratified by MAF:
#' * maf > 0.1 -> geno 0.05
#' * maf <= 0.1 -> geno 0.03
#' * maf <= 0.05 -> geno 0.01
# NOTE(review): the variable names look inverted relative to the flags —
# `genotypes_maf_lt_01` is built with --maf 0.1 (keeps MAF >= 0.1) and the
# `_gt_` variables use --max-maf (keep MAF <= threshold). Confirm intent.
genotypes_maf_lt_01 <- plink('--maf', '0.1', '--geno', '0.05', infile=genotypes_unique)
sample_stats(genotypes_unique, genotypes_maf_lt_01)
genotypes_maf_gt_01 <- plink('--max-maf', '0.1', '--geno', '0.03', infile=genotypes_unique)
sample_stats(genotypes_unique, genotypes_maf_gt_01)
genotypes_maf_gt_005 <- plink('--max-maf', '0.05', '--geno', '0.01', infile=genotypes_unique)
sample_stats(genotypes_unique, genotypes_maf_gt_005)
# Recombine the three MAF strata into a single fileset.
genotypes_maf_geno_filtered <- plink('--merge-list', to_file(c(genotypes_maf_gt_01, genotypes_maf_gt_005)), infile=genotypes_maf_lt_01)
sample_stats(genotypes_unique, genotypes_maf_geno_filtered)
#' HWE filtering (drop markers with Hardy-Weinberg p < 1e-5)
genotypes_hwe_filtered <- plink('--hwe', '1e-5', infile=genotypes_maf_geno_filtered)
sample_stats(genotypes_maf_geno_filtered, genotypes_hwe_filtered)
#' MAF filtering (drop markers with MAF < 1%)
genotypes_maf_01 <- plink('--maf', '0.01', infile=genotypes_hwe_filtered)
sample_stats(genotypes_hwe_filtered, genotypes_maf_01)
#' # Population structure
#' PCA against HapMap3 reference panels: extract the shared SNP set, LD-prune,
#' merge with HapMap genotypes (flipping/dropping strand mismatches), convert
#' to EIGENSTRAT format and run smartpca.
convertf_path <- '/usr/bin/convertf'
smartpca_path <- '/usr/bin/smartpca'
#' Extract HapMap SNPs
hapmap_snps <- plink('--extract', paste(resources_dir, 'hapmap3', 'hapmap3r2_CEU.CHB.JPT.YRI.no-at-cg-snps.txt', sep='/'), infile=genotypes_maf_01)
#' LD pruning (exclude known high-LD regions, then --indep-pairwise 50 5 0.2)
genotypes_pruned <- plink('--exclude', paste(resources_dir, 'high_ld_hg19.txt', sep='/'), '--range', '--indep-pairwise 50 5 0.2', infile=hapmap_snps)
# First merge attempt; mismatching SNPs are reported in *-merge.missnp.
genotypes_with_hapmap <- plink('--bmerge', paste(resources_dir, 'hapmap3',
'hapmap3r2_CEU.CHB.JPT.YRI.founders.no-at-cg-snps', sep='/'),
'--extract', paste(genotypes_pruned, 'prune.in', sep='.'), infile=genotypes_pruned)
#' Flip failed SNPs (strand-flip the merge mismatches and retry)
genotypes_flipped <- plink('--extract', paste(resources_dir, 'hapmap3', 'hapmap3r2_CEU.CHB.JPT.YRI.no-at-cg-snps.txt', sep='/'),
'--flip', paste0(genotypes_with_hapmap, '-merge.missnp'), infile=hapmap_snps)
genotypes_with_hapmap_2 <- plink('--bmerge', paste(resources_dir, 'hapmap3',
'hapmap3r2_CEU.CHB.JPT.YRI.founders.no-at-cg-snps', sep='/'),
'--extract', paste(genotypes_pruned, 'prune.in', sep='.'), infile=genotypes_flipped)
# Drop SNPs that still fail after flipping, then merge for the final time.
genotypes <- plink('--exclude', paste0(genotypes_with_hapmap_2, '-merge.missnp'), '--extract', paste(genotypes_pruned, 'prune.in', sep='.'), infile = genotypes_flipped)
genotypes_with_hapmap_3 <- plink('--bmerge', paste(resources_dir, 'hapmap3',
'hapmap3r2_CEU.CHB.JPT.YRI.founders.no-at-cg-snps', sep='/'),
'--extract', paste(genotypes_pruned, 'prune.in', sep='.'), infile=genotypes)
# Convert to PED/MAP
genotypes_merged <- tempfile(tmpdir = temp_dir)
system(paste(plink_path, '--noweb', '--bfile', genotypes_with_hapmap_3, '--recode', '--output-missing-phenotype', '1', '--out', genotypes_merged))
# Setup convertf (write its parameter file line by line)
convertf_params <- tempfile(tmpdir = temp_dir)
eigenstrat_input <- tempfile(tmpdir = temp_dir)
cat(paste0('genotypename: ', genotypes_merged, '.ped\n'), file=convertf_params)
cat(paste0('snpname: ', genotypes_merged, '.map\n'), file=convertf_params, append = T)
cat(paste0('indivname: ', genotypes_merged, '.ped\n'), file=convertf_params, append = T)
cat('outputformat: EIGENSTRAT\n', file=convertf_params, append = T)
cat(paste0('genotypeoutname: ', eigenstrat_input, '.eigenstratgeno\n'), file=convertf_params, append = T)
cat(paste0('snpoutname: ', eigenstrat_input, '.snp\n'), file=convertf_params, append = T)
cat(paste0('indivoutname: ', eigenstrat_input, '.ind\n'), file=convertf_params, append = T)
cat('familynames: NO\n', file=convertf_params, append = T)
# Convert PED/MAP to EIGENSTRAT
system(paste(convertf_path, '-p', convertf_params))
# Setup smartpca (parameter file; outlier removal disabled: numoutlieriter 0)
smartcpa_params <- tempfile(tmpdir = temp_dir)
eigenstrat_output <- tempfile(tmpdir = temp_dir)
cat(paste0('genotypename: ', eigenstrat_input, '.eigenstratgeno\n'), file=smartcpa_params)
cat(paste0('snpname: ', eigenstrat_input, '.snp\n'), file=smartcpa_params, append = T)
cat(paste0('indivname: ', eigenstrat_input, '.ind\n'), file=smartcpa_params, append = T)
cat(paste0('evecoutname: ', eigenstrat_output, '.evec\n'), file=smartcpa_params, append = T)
cat(paste0('evaloutname: ', eigenstrat_output, '.eval\n'), file=smartcpa_params, append = T)
cat('altnormstyle: NO\n', file=smartcpa_params, append = T)
cat('numoutevec: 10\n', file=smartcpa_params, append = T)
cat('numoutlieriter: 0\n', file=smartcpa_params, append = T)
cat('numoutlierevec: 2\n', file=smartcpa_params, append = T)
cat('outliersigmathresh: 8.0\n', file=smartcpa_params, append = T)
cat('qtmode: 0\n', file=smartcpa_params, append = T)
cat('nsnpldregress: 2\n', file=smartcpa_params, append = T)
# cat(paste0('evaloutname: ', eigenstrat_output, '.outlier\n'), file=smartcpa_params, append = T)
# Run smartpca
system(paste(smartpca_path, '-p', smartcpa_params))
# Load eigenvectors: first column is the sample ID, then the 10 PCs.
population_structure <- read.table(paste0(eigenstrat_output, '.evec'), header = F, skip=1)
names(population_structure) <- c('IID', paste0('PC', 1:10), 'group')
#' Add group labels from HapMap3; study samples get the 'EASD' label.
relationships_w_pops <- read.table(paste(resources_dir, 'hapmap3', 'relationships_w_pops_121708.txt', sep='/'), header = T)
relationships_w_pops <- relationships_w_pops[,c(2,7)]
population_structure <- merge(population_structure, relationships_w_pops, by='IID', all.x=T)
population_structure$population <- as.character(population_structure$population)
population_structure[is.na(population_structure$population),'population'] <- 'EASD'
population_structure$population <- factor(population_structure$population)
population_structure <- subset(population_structure, select=-group)
#' Population structure PC1 vs PC2
qplot(PC1, PC2, color=population, data=population_structure)
|
ceca37f9e3a417431214a580abac88a8aa961f9e
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/DAAG/examples/powerplot.Rd.R
|
82d3be4e3379b625599086d35de8b09b063550e3
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 525
|
r
|
powerplot.Rd.R
|
# Extracted example for DAAG::powerplot: draw six power/transformation
# functions in a 2x3 panel layout. Requires the DAAG package and a graphics
# device.
library(DAAG)
### Name: powerplot
### Title: Plot of Power Functions
### Aliases: powerplot
### Keywords: models
### ** Examples
# Save the current graphics parameters while setting up the 2x3 grid and
# tightened margins; restored via par(oldpar) at the end.
oldpar <- par(mfrow = c(2, 3), mar = par()$mar - c(
1, 1, 1.0, 1), mgp = c(1.5, 0.5, 0), oma=c(0,1,0,1))
# on.exit(par(oldpar))
powerplot(expr="sqrt(x)", xlab="")
powerplot(expr="x^0.25", xlab="", ylab="")
powerplot(expr="log(x)", xlab="", ylab="")
powerplot(expr="x^2")
powerplot(expr="x^4", ylab="")
powerplot(expr="exp(x)", ylab="")
par(oldpar)
|
a20003a741f6cccf76bfcf83b1eb6c82d8b5dbba
|
0500ba15e741ce1c84bfd397f0f3b43af8cb5ffb
|
/cran/paws.media.services/man/mediaconnect_add_flow_vpc_interfaces.Rd
|
1972a2b9f6e8c00e4119f7546aaa633fff86c71e
|
[
"Apache-2.0"
] |
permissive
|
paws-r/paws
|
196d42a2b9aca0e551a51ea5e6f34daca739591b
|
a689da2aee079391e100060524f6b973130f4e40
|
refs/heads/main
| 2023-08-18T00:33:48.538539
| 2023-08-09T09:31:24
| 2023-08-09T09:31:24
| 154,419,943
| 293
| 45
|
NOASSERTION
| 2023-09-14T15:31:32
| 2018-10-24T01:28:47
|
R
|
UTF-8
|
R
| false
| true
| 1,118
|
rd
|
mediaconnect_add_flow_vpc_interfaces.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mediaconnect_operations.R
\name{mediaconnect_add_flow_vpc_interfaces}
\alias{mediaconnect_add_flow_vpc_interfaces}
\title{Adds VPC interfaces to flow}
\usage{
mediaconnect_add_flow_vpc_interfaces(FlowArn, VpcInterfaces)
}
\arguments{
\item{FlowArn}{[required] The flow that you want to mutate.}
\item{VpcInterfaces}{[required] A list of VPC interfaces that you want to add.}
}
\value{
A list with the following syntax:\preformatted{list(
FlowArn = "string",
VpcInterfaces = list(
list(
Name = "string",
NetworkInterfaceIds = list(
"string"
),
RoleArn = "string",
SecurityGroupIds = list(
"string"
),
SubnetId = "string"
)
)
)
}
}
\description{
Adds VPC interfaces to flow
}
\section{Request syntax}{
\preformatted{svc$add_flow_vpc_interfaces(
FlowArn = "string",
VpcInterfaces = list(
list(
Name = "string",
RoleArn = "string",
SecurityGroupIds = list(
"string"
),
SubnetId = "string"
)
)
)
}
}
\keyword{internal}
|
bd8a4606adf86eefe4d71e429dd02615d57ec4eb
|
0a906cf8b1b7da2aea87de958e3662870df49727
|
/diffrprojects/inst/testfiles/dist_mat_absolute/libFuzzer_dist_mat_absolute/dist_mat_absolute_valgrind_files/1609963218-test.R
|
72dcdb09b6d9bef720d00de7b3eb93b1f46306c0
|
[] |
no_license
|
akhikolla/updated-only-Issues
|
a85c887f0e1aae8a8dc358717d55b21678d04660
|
7d74489dfc7ddfec3955ae7891f15e920cad2e0c
|
refs/heads/master
| 2023-04-13T08:22:15.699449
| 2021-04-21T16:25:35
| 2021-04-21T16:25:35
| 360,232,775
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 320
|
r
|
1609963218-test.R
|
# Fuzzer-generated regression input (libFuzzer/valgrind harness) for the
# internal routine diffrprojects:::dist_mat_absolute.
# x: fuzzed integer payload (negative and extreme values, trailing zeros);
# y: deliberately empty integer vector.
testlist <- list(x = c(-42L, -1L, -2745758L, 1869573160L, 711158895L, 1936021353L, 1850564976L, 1449747831L, 690508613L, 1481646179L, 1869509492L, 691004672L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L), y = integer(0))
# Invoke with the fuzzed arguments and dump the result structure; the harness
# appears to check only that the call completes without crashing.
result <- do.call(diffrprojects:::dist_mat_absolute,testlist)
str(result)
|
6e8dd12744652a479ed479d842942b5b2bfcd1a3
|
7ee443860d07d81a6b0e275f393797d65710461a
|
/man/h_GenROT.Rd
|
22a26cf090ff7fef799696dd8673e09a9178c535
|
[] |
no_license
|
cran/locpolExpectile
|
72a27664bd2345d758a897392044328caf7cc5a2
|
fbf7c76f915014bbb90ce2342db1c8a666bbaf88
|
refs/heads/master
| 2023-07-01T08:59:57.908278
| 2021-08-03T08:50:05
| 2021-08-03T08:50:05
| 392,367,208
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,918
|
rd
|
h_GenROT.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/h_GenROT.R
\name{h_GenROT}
\alias{h_GenROT}
\alias{compDerEst_exp}
\title{Rule-of-Thumb bandwidth selectors}
\usage{
h_GenROT(X, Y, j = 0, p = 1, kernel = gaussK, omega)
compDerEst_exp(X, Y, p, omega)
}
\arguments{
\item{X}{The covariate data values.}
\item{Y}{The response data values.}
\item{j}{The order of derivative to estimate. In default setting, \code{j=0}.}
\item{p}{The order of the local polynomial estimator. In default setting,
\code{p=1}.}
\item{kernel}{The kernel used to perform the estimation. In default setting,
\code{kernel=gaussK}. See details in \code{\link[locpol]{Kernels}}.}
\item{omega}{Numeric vector of level between 0 and 1 where 0.5 corresponds
to the mean.}
}
\value{
\code{\link{h_GenROT}} provides the general Rule-of-Thumb bandwidth selector
for the expectile regression proposed by Adam and Gijbels (2021a).
\code{\link{compDerEst_exp}} returns a data frame whose
components are:
\itemize{
\item \code{X} The covariate data values.
\item \code{Y} The response data values.
\item \code{fit} The fitted values for the parametric estimation
(leading to the Rule-of-Thumb expression).
\item \code{der} The derivative estimation at \eqn{X} values.
}
}
\description{
General Rule-of-Thumb bandwidth selector for univariate expectile regression
proposed by Adam and Gijbels (2021a) see Formula (24). The weight function \eqn{k_0(x)}
is chosen to be equal to the indicator function on \eqn{[min(X_i)+0.1,max(X_i)-0.1]}.
}
\examples{
library(locpol)
data(mcycle)
y=mcycle$accel
x=mcycle$times
h=h_GenROT(X=x,Y=y,j=0,p=1,kernel=gaussK,omega=0.1)
#h=1.887636
}
\references{
{
Adam, C. and Gijbels, I. (2021a). Local polynomial expectile regression.
Annals of the Institute of Statistical Mathematics doi:10.1007/s10463-021-00799-y.
}
}
|
9977e2ea12a8310d4cd0bf2d59329c84558c9522
|
7f02263a680124a9b6fed6e709013034be0dc2e8
|
/SciDataEpi2020/functions/plot_volcano_v2.r
|
7129bb339b2bed7b9cf21ac55fdfa1f989f50a0b
|
[] |
no_license
|
Hindrance/EpiSciData2020
|
a8fa07e67a240a81d76391b614175593369e2810
|
b271bb99bd793992fea7f41fe46ef1a981e33e61
|
refs/heads/master
| 2022-11-10T14:53:32.093301
| 2020-06-23T11:21:55
| 2020-06-23T11:21:55
| 266,233,813
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,035
|
r
|
plot_volcano_v2.r
|
# PLOT VOLCANO # by Vincent
require(RColorBrewer)
# This function takes a data frame with three columns and plots a volcano plot!
# Submit data in the form of:
# Column 1: data label text
# Column 2: Log fold changes
# Column 3: p values
# Column 4: base means
# OR submit a DESeq results file and specify the argument: input.format="DESeq"
# Note: colour inputs may be given as RGB vectors (e.g., c(255, 0, 0) for pure red)
# or as colour names (e.g., c("white", "blue"), as in the defaults below).
plot.volcano <- function(data, input.format="DESeq", colourset1 = c("white", "blue"), colourset2 = c("white", "red"),
custom.significant, fold.change=2, p.level=0.05, basemean.cut, fade_style, lines, plot.legend,
legendPosition, cex, xlab, ylab, new.plot=T, report = F, do_you_want_text, ...)
{
# Input:
# data = dataframe(dataLabels,logFoldChange,Pvalue,BaseMean) (or see below to use a DESeq results object).
# input.format = can be specified to be a "DESeq" format. For ease of use...
# colourset1 = vector of colours e.g c("white, "blue")
# colourset2 = vector of colours e.g c("white, "red")
# fold.change = FC cut off, (non-log) This is log2d in the function.
# p.level = p value cutoff (non-log) This is -log10d in the function
# basemean.cut = base mean cut off. This is the 1st quartile by default.
# fade_style = (0:4) 0:1 are fold-change gradient. 2:3 include a cross-fade for p.values. 0 and 2 plot significant values as the max colour in gradient.
# lines = (T / F) includes the cut off lines as dashed lines.
# new.plot = (T / F) to plot a new plot OR just the points..
# report = (T / F) to report the data alongisde calculated values for plotting and the colour scheme for all points
# plot.legend = (T / F) to plot a legend (FALSE by default)
# legendPosition = legend position, "topleft" by default.
if(missing(fade_style)){fade_style = 0}
if(missing(lines)){do_you_want_lines = F} else {do_you_want_lines = lines}
if(missing(plot.legend)){plot.legend = F}
if(missing(legendPosition)){legendPosition = "topleft"}
if(missing(cex)){cex=2}
if(missing(do_you_want_text)){do_you_want_text = F} else {do_you_want_text = do_you_want_text}
if(missing(xlab)){xlab="log2 fold-change"}
if(missing(ylab)){ylab="-log10 p"}
if(input.format == "DESeq") {data = data.frame("gene" = rownames(data), "L2FC" = data[,2], "p.adj" = data[,6], "basemean" = data[,1])}
if(missing(basemean.cut)){basemean.cut = summary(data[,4])[2]}
# Data manipulation
# data[is.na(data[,3]),3] <- 1 # REPLACE NA PADJ
data <- data[!is.na(data[,3]),] # REMOVE NA PADJ
data1 <- data[data[,2] > 0,]
# check for positive fold change genes - create dummy otherwise
if(length(data1[,1]) != 0) {pos = data1} else {pos <- data.frame(0,0,1,0)}
pos <- pos[which(as.character(pos[,3]) != "NA"),]
data2 <- data[data[,2] < 0,]
# check for negative fold change genes - create dummy otherwise
if(length(data2[,1]) != 0) {neg = data2} else {neg <- data.frame(0,0,1,0)}
neg <- neg[which(as.character(neg[,3]) != "NA"),]
# remove zeros
pos[pos[,3] == 0,3] <- min(pos[pos[,3] != 0,3])
neg[neg[,3] == 0,3] <- min(neg[neg[,3] != 0,3])
pos[,3] <- -log10(pos[,3])
neg[,3] <- -log10(neg[,3])
output<- data.frame(
data,
"x" = numeric(length(data[,1])),
"y" = numeric(length(data[,1])),
"z" = numeric(length(data[,1])),
"x_r" = numeric(length(data[,1])),
"x_r2" = numeric(length(data[,1])),
"y_r" = numeric(length(data[,1])),
"z_r" = numeric(length(data[,1])),
"cols" = character(length(data[,1])),
stringsAsFactors=F)
# Let's go!
###########################################################
# calculate values and colour gradients
if(length(data2[,1]) != 0) {
# negative change
x <- neg[,2]
y <- neg[,3]
z <- neg[,4]
res.vec1 <- which(y > -log10(p.level) & x < -log2(fold.change) & z > basemean.cut)
if(!missing(custom.significant)){
res.vec1 <- match(as.character(custom.significant)[data[custom.significant,2] < 0], rownames(neg))
}
# text?
if(do_you_want_text == T){
txt.norm.x <- rnorm(length(x), x, (max(x)-min(x))/(cex*50))
txt.norm.y <- rnorm(length(y), y, (max(y)-min(y))/(cex*50))
if(length(x[res.vec1]) != 0){text(txt.norm.x[res.vec1], txt.norm.y[res.vec1], neg[res.vec1,1])}}
# colour grading
x_r2 <- x/(min(x)/2)
x_r2[x_r2 > 0.6] <- 0.6
x_r <- x/min(x)
y_r <- y/max(y)
z_r <- x_r * y_r
z_r <- z_r/max(z_r)
if(fade_style <= 1){r = x_r} else {r = z_r}
cols = rgb(colorRamp(colourset1)(normalise(r))/255)
if(fade_style %in% c(0,2)) {cols[res.vec1] = rgb(colorRamp(colourset1)(1)/255)}
# if(fade_style <= 3) {cols[res.vec1] = rgb(colorRamp(colourset1)(1)/255)}
#output / output separately to remove characters
output[rownames(neg),c("x", "y", "z", "x_r", "x_r2", "y_r", "z_r")] = cbind(x, y, z, x_r, x_r2, y_r, z_r)
output[rownames(neg), "cols"] = cols
}
####
if(length(data1[,1]) != 0) {
# positive change
x <- pos[,2]
y <- pos[,3]
z <- pos[,4]
res.vec2 <- which(y > -log10(p.level) & x > log2(fold.change) & z > basemean.cut)
if(!missing(custom.significant)){
res.vec2 <- match(as.character(custom.significant)[data[custom.significant,2] > 0], rownames(pos))
}
# text?
if(do_you_want_text == T){
txt.norm.x <- rnorm(length(x), x, (max(x)-min(x))/(cex*50))
txt.norm.y <- rnorm(length(y), y, (max(y)-min(y))/(cex*50))
if(length(x[res.vec2]) != 0){text(txt.norm.x[res.vec2], txt.norm.y[res.vec2], neg[res.vec2,1])}}
# colour grading
x_r2 <- x/(min(x)/2)
x_r2[x_r2 > 0.6] <- 0.6
x_r <- x/min(x)
y_r <- y/max(y)
z_r <- x_r * y_r
z_r <- z_r/max(z_r)
if(fade_style <= 1){r = x_r} else {r = z_r}
cols = rgb(colorRamp(colourset2)(normalise(r))/255)
if(fade_style %in% c(0,2)) {cols[res.vec2] = rgb(colorRamp(colourset2)(1)/255)}
#output / output separately to remove characters
output[rownames(pos),c("x", "y", "z", "x_r", "x_r2", "y_r", "z_r")] = cbind(x, y, z, x_r, x_r2, y_r, z_r)
output[rownames(pos), "cols"] = cols
}
# significant features
# OLD res.vec <- as.numeric(c(rownames(neg)[res.vec1[length(res.vec1):1]], rownames(pos)[res.vec2[length(res.vec2):1]]))
# OLD_edited? res.vec <- as.numeric(c(res.vec1[length(res.vec1):1], res.vec2[length(res.vec2):1]))
res.vec <- c(rownames(neg)[res.vec1[1:length(res.vec1)]], rownames(pos)[res.vec2[length(res.vec2):1]])
# Make the plot!
######################################################
if(report==F){
# Initialise plot
par(cex=cex); if(new.plot==T) {
plot(c(min(neg[,2])+min(neg[,2])/15,max(pos[,2])+max(pos[,2])/15),c(0,max(neg[,3],pos[,3])+max(neg[,3],pos[,3])/15), pch=NA, col="white", xlab=xlab, ylab=ylab,
...)
}
# plot all
points(output$x, output$y, pch=16, col=output$cols)
# Plot significant
# OLD points(output$x[res.vec], output$y[res.vec], pch=21, bg=output$cols[res.vec])
points(output[res.vec,"x"],output[res.vec,"y"], pch=21, bg=output[res.vec,"cols"])
# lines?
if(do_you_want_lines == T){
lines(c(-10000,10000),rep(-log10(p.level),2), lty=2, lwd=1)
lines(c(-log2(fold.change),-log2(fold.change)),c(-10000,10000), lty=2, lwd=1)
lines(c(log2(fold.change),log2(fold.change)),c(-10000,10000), lty=2, lwd=1)
}
# Legend?
if(plot.legend == T){
par(lend=2); legend(legendPosition, legend=c("Reduced expression", "Increased expression"), pt.bg=c(rgb(colorRamp(colourset1)(1)/255), rgb(colorRamp(colourset2)(1)/255)), lty=0, pch=21, lwd=1, bty="n")
}
} else {
rownames(output) <- output[,1]
output <- output[,-1]
return(output)}
##################
# function(data, input.format="DESeq", colourset1, colourset2, custom.significant, fold.change=2,
# p.level=0.05, basemean.cut, fade_style, lines, plot.legend, legendPosition, cex,
# xlab, ylab, new.plot=T, report = F, ...)
# Input:
# data = dataframe(dataLabels,logFoldChange,Pvalue,BaseMean) (or see below to use a DESeq results object).
# input.format = can be specified to be a "DESeq" format. For ease of use...
# colourset1 = vector of colours e.g c("white, "blue")
# colourset2 = vector of colours e.g c("white, "red")
# fold.change = FC cut off, (non-log) This is log2d in the function.
# p.level = p value cutoff (non-log) This is -log10d in the function
# basemean.cut = base mean cut off. This is the 1st quartile by default.
# custom.significant = a custom vector of significant rows for data
# fade_style = (0:4) 0:1 are fold-change gradient. 2:3 include a cross-fade for p.values. 0 and 2 plot significant values as the max colour in gradient.
# lines = (T / F) includes the cut off lines as dashed lines.
# new.plot = (T / F) to plot a new plot OR just the points..
# report = (T / F) to report the data alongisde calculated values for plotting and the colour scheme for all points
# plot.legend = (T / F) to plot a legend (FALSE by default)
# legendPosition = legend position, "topleft" by default.
}
############## EXAMPLE ##############################
# Build a mock differential-expression table (label, log fold change,
# p-value, base mean) and demonstrate plot.volcano() on it.
set.seed(1)  # make the example reproducible
# Random 5-letter labels. sample() weights every letter equally;
# the previous round(runif(5, 1, 26)) under-sampled the endpoints A and Z.
data.labels <- sapply(seq_len(1000), function(i) {
  paste(sample(LETTERS, 5, replace = TRUE), collapse = "")
})
logfoldchange <- rnorm(1000, 0, 2)       # symmetric log2 fold changes
pvalues <- abs(rnorm(1000, 0.3, 0.5))^2  # skewed towards small values
pvalues[pvalues > 1] <- 1                # clamp into the valid [0, 1] range
basemeans <- abs(rnorm(1000, 500, 300))  # strictly non-negative expression means
# Create dataframe with the correct column order...
t1 <- data.frame(data.labels,
                 logfoldchange,
                 pvalues,
                 basemeans
)
# Plot multiple versions...
# par(mfrow=c(2,2), mar=c(5,5,2,2));
# Here is the function:
# plot.volcano(t1, cex=1.5)
# plot.volcano(t1, legend=F, cex=1.5)
# plot.volcano(t1, legend=F, lines=T, cex=1.5, colour1=c(195,65,173), colour2=c(65,220,205), fade=1)
# plot.volcano(t1, legend=F, lines=T, text=T, cex=1.5, fade=2); par(mfrow=c(1,1))
######################################################
|
bcbea9f922957c8b0cf0380eb723d5dcbf8713f8
|
4b1f08bf4923f93562ad98c020123d43e2c93746
|
/R/mal.phi.R
|
3b47ee6bcadc25637bda0a9ba3edb2c99b5dfb6d
|
[] |
no_license
|
cran/Biodem
|
5f187736005c885301cc1bafc8c79544320e537a
|
f98922e7dce81df5ffdd79f403cbfd3ee32bca7f
|
refs/heads/master
| 2021-06-01T13:27:02.409287
| 2021-01-05T14:50:09
| 2021-01-05T14:50:09
| 17,678,116
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 637
|
r
|
mal.phi.R
|
mal.phi <- function(S, P, N, n) {
  ## Iterate the Malecot migration model for `n` generations and return the
  ## accumulated phi (kinship) matrix.
  ##   S: systematic-pressure matrix
  ##   P: migration matrix (its transpose is also powered each cycle)
  ##   N: vector of population sizes, one per subdivision
  ##   n: number of generations (cycles) to run
  if (n < 1) {
    return("Number of cycles too low!!!")
  }
  ## Start from a zero matrix with one row/column per population.
  phi <- diag(0 / N)
  P.t <- t(P)
  for (cycle in seq_len(n)) {
    ## Matrix powers for the current generation.
    S.pow  <- mtx.exp(S, cycle)
    P.pow  <- mtx.exp(P, cycle)
    Pt.pow <- mtx.exp(P.t, cycle)
    ## Diagonal drift matrix D with entries (1 - phi[i, i]) / (2 * N[i]),
    ## computed from phi BEFORE this cycle's update.
    D <- diag(diag((1 - phi) / (2 * N)))
    phi <- phi + (S.pow %*% Pt.pow %*% D %*% P.pow %*% S.pow)
  }
  phi
}
|
c3b0bee68cc18a366a60291021f383f36b5ad39e
|
f9e41ee88d03b59bed2487201f95e36d2566cf82
|
/cachematrix.R
|
897813ee60b5d30d6d82f831c5abf2e9d3c7c056
|
[] |
no_license
|
lrahm/ProgrammingAssignment2
|
3cd9f8ce0b32f97084afd74e6fbdb9e4b6459501
|
188d8bb71be93b161e2a87b3d0f86a727559ac33
|
refs/heads/master
| 2021-01-17T05:37:11.469757
| 2014-06-17T17:38:16
| 2014-06-17T17:38:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 957
|
r
|
cachematrix.R
|
## makeCacheMatrix() and cacheSolve() work together: the inverse of a matrix
## is memoised in the closure's environment so it is only computed once.
## makeCacheMatrix() wraps a matrix in a list of accessors:
## 'set', 'get', 'setinverse', 'getinverse'.
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  # Replace the wrapped matrix and drop any stale cached inverse.
  set <- function(value) {
    x <<- value
    cached_inverse <<- NULL
  }
  # Return the wrapped matrix.
  get <- function() {
    x
  }
  # Store a computed inverse in the cache.
  setinverse <- function(inverse) {
    cached_inverse <<- inverse
  }
  # Return the cached inverse, or NULL if none has been stored yet.
  getinverse <- function() {
    cached_inverse
  }
  list(
    set = set,
    get = get,
    setinverse = setinverse,
    getinverse = getinverse
  )
}
## cacheSolve() returns the inverse of the special "matrix" produced by
## makeCacheMatrix(), reusing the cached inverse when one is available and
## computing (and storing) it otherwise. Extra arguments go to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getinverse()
  # Fast path: an inverse was already computed for the current matrix.
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  # Slow path: invert, remember the result, return it.
  inv <- solve(x$get(), ...)
  x$setinverse(inv)
  inv
}
|
8bf3a819f5c04202d2ff2ee01ecdada9abd85971
|
f1183482e47167a9020046c061a53b88179193ec
|
/scripts/dai_lm/plot_coefficients.R
|
1d07fad489a8de71077e80dc5229ccb0e948ad28
|
[
"MIT"
] |
permissive
|
morrislab/plos-medicine-joint-patterns
|
a7a4ff4ce5f16d673fe2af48429ebe43b5132458
|
cfdc6dd4854ec33e7e2efbf36d648b65d278df33
|
refs/heads/master
| 2020-04-17T09:33:09.077084
| 2019-01-18T19:33:32
| 2019-01-18T19:33:32
| 166,462,950
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,445
|
r
|
plot_coefficients.R
|
# Plots significant coefficients from linear regression.
# Command-line script: reads a coefficient table (columns include `term`,
# `estimate`, `p`), keeps significant non-intercept terms, and writes a
# bar chart to --output.
library(argparse)
library(data.table)
library(grid)
library(ggplot2)
# NOTE(review): rm(list = ls()) in a script is an anti-pattern; harmless here
# only because the script is run non-interactively via Rscript.
rm(list = ls())
# Get arguments.
parser <- ArgumentParser()
parser$add_argument('--input', required = TRUE)
parser$add_argument('--output', required = TRUE)
parser$add_argument('--figure-width', type = 'double', default = 7)
parser$add_argument('--figure-height', type = 'double', default = 7)
args <- parser$parse_args()
# Load the data.
message('Loading data')
dt.coefficients <- fread(args$input)
# Filter the coefficients: keep p < 0.05 and drop the intercept row.
message('Filtering coefficients')
dt.coefficients <- dt.coefficients[p < 0.05 & term != '(Intercept)']
# Generate the plot.
message('Generating plot')
theme_set(
  theme_classic(base_size = 8) +
    theme(
      axis.text = element_text(size = rel(1)),
      axis.text.x = element_text(angle = 45, hjust = 1, vjust = 1),
      axis.ticks.length = unit(4.8, 'pt')
    )
)
pl <- ggplot(dt.coefficients, aes(x = term, y = estimate)) +
  geom_col(colour = NA, fill = grey(0.8), width = 0.8) +
  labs(x = 'Term', y = 'Coefficient')
# All-positive estimates: anchor the axis at zero with no expansion;
# otherwise keep the default limits and mark zero with a reference line.
if (min(dt.coefficients$estimate) >= 0) {
  pl <- pl + scale_y_continuous(limits = c(0, NA), expand = c(0, 0), breaks = pretty)
} else {
  pl <- pl +
    scale_y_continuous(breaks = pretty) +
    geom_hline(yintercept = 0)
}
# Write the plot.
message('Writing plot')
ggsave(args$output, pl, width = args$figure_width, height = args$figure_height)
|
3171470f23ade3833b7e021f1d3185957ad9f2e4
|
9c712aff1298a97802edafce4f582ef6f2c61a4a
|
/Fuzzy matching function.R
|
c7fd00984006bfcc720d2e944a7556756b2a6776
|
[] |
no_license
|
DanielGardiner/Fuzzy-matching-function
|
20f31fe0be650945f8705c54a43cbffe3a9b5217
|
33f19962b2cdd0496a1b48a7a3b84ff263a531e6
|
refs/heads/master
| 2021-01-23T10:10:27.450332
| 2017-06-01T10:27:41
| 2017-06-01T10:27:41
| 93,045,509
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,005
|
r
|
Fuzzy matching function.R
|
fuzzy.match = function(data, id.col, parameter.df, n.results){
  # Author: Daniel Gardiner
  # email: daniel.gardiner@phe.gov.uk
  #
  # Fuzzy record matching: scores every pair of rows in `data` by comparing
  # the fields named in `parameter.df`, then returns the top-scoring pairs.
  #
  # Args: data:         data.frame of records to compare against each other
  #       id.col:       name of the column holding a unique row identifier
  #       parameter.df: data.frame with one row per field to compare; columns
  #                     match.cols (field name), thresholds, NA.value,
  #                     weightings, func ("jarowinkler" or "levenshteinSim")
  #       n.results:    number of best-scoring pairs to return
  #
  # Returns: a list of data.frames, one per candidate pair, each holding the
  #          two matched records plus their combined overall.score.
  ###############################################################################
  # Functions needed for analysis
  # Perform Fuzzy matching #
  fuzz.m = function(vec1, vec2 = NULL, FUN, thresh, na.value = 0){
    # function to create a data.frame of raw and adjusted jarowinkler or
    # levenshtein edit distance scores (dependent on FUN argument)
    # NOTE: all missing entries (NAs) will be assigned a score of 0
    #
    # step 1: an an upper triangular matrix is created with entry i,j the
    # raw score of the ith entry of vec1 compared to the jth entry of vec2
    # step 2: this matrix is 'melted' to give a data.frame of all raw comparisons
    # step 3: an adjusted score column is added
    #
    # Args: vec1: a vector
    # vec2: a vector
    # FUN: either jarowinkler or levenshteinSim
    # thresh: threshold value
    #
    # output: var1: comparitor 1
    # var2: comparitor 2
    # raw.score: raw jarowinkler OR levenshtein edit distance score
    # adj.score: adjusted score (according to thresh argument)
    #load packages
    library(RecordLinkage)
    library(reshape2)
    # if two vectors are supplied determine shorter and longer vector
    if(is.null(vec2)){
      NULL
    } else if (length(vec1) < length(vec2)) {
      short.vec = vec1
      long.vec = vec2
    } else if (length(vec1) > length(vec2)){
      short.vec = vec2
      long.vec = vec1
    } else {
      short.vec = vec1 # note: else vectors are actually of equal length,
      long.vec = vec2 # short/long is conventiant naming convention
    }
    # create a square matrix consisting of vec1^2 or vec1 * vec2 NAs
    if(is.null(vec2)){
      mat = rep(NA, length(vec1)^2)
      mat = matrix(mat, nrow = length(vec1), ncol = length(vec1),
                   dimnames = list(vec1, vec1))
    } else {
      mat = rep(NA, length(short.vec) * length(long.vec))
      mat = matrix(rep(NA, length(short.vec) * length(long.vec)),
                   nrow = length(short.vec), ncol = length(long.vec))
    }
    # take the ith entry of vec and generate a jaro winkler score compared to each other
    # entry of vec, store as a row in a matrix, loop over all entries in vec to fill
    # all rows of the matrix
    if(is.null(vec2)){
      for(i in 1:length(vec1)){
        # eval(call(...)) dispatches on the comparator name held in FUN
        mat[i, ] = eval(call(FUN, vec1[i], vec1))
      }
    } else {
      for(i in 1:length(short.vec)){
        mat[i, ] = eval(call(FUN, short.vec[i], long.vec))
      }
    }
    # replace all NA values with 0's as raw score
    mat[is.na(mat)] = 0
    # convert matrix to upper triangular form
    # (non-upper cells become the sentinel string "REMOVE" and are dropped
    # after melting, so each unordered pair is scored exactly once)
    mat = ifelse(upper.tri(mat), mat, "REMOVE")
    # assign row and column names
    if(is.null(vec2)){
      mat = matrix(mat, nrow = length(vec1), ncol = length(vec1),
                   dimnames = list(vec1, vec1))
    } else {
      mat = matrix(mat, nrow = length(short.vec), ncol = length(long.vec),
                   dimnames = list(short.vec, long.vec))
    }
    # melt data to give each row as a comparison
    mat.m = melt(mat, id.vars = row.names(mat), measure.vars = colnames(mat))
    # keep only those elements on the upper triangular matrix
    mat.m = mat.m[mat.m[, 3] != "REMOVE", ]
    # make raw score numeric
    mat.m[, 3] = as.numeric(as.character(mat.m[, 3]))
    # add column of adjusted scores
    # (linear rescale of [thresh, 1] onto [0, 1]; raw scores below thresh map to 0)
    mat.m[, 4] = ifelse(mat.m[, 3] >= thresh,
                        1 - ((1 - mat.m[, 3])/(1 - thresh)), 0)
    # if Var1 or Var2 is NA assign value na.value
    mat.m[is.na(mat.m$Var1) | is.na(mat.m$Var2), 4] = na.value
    # rename columns
    colnames(mat.m) = c("var1", "var2", "raw.score", "adj.score")
    # output data.frame from function
    # note: each row represents a single comparison between entries
    mat.m
  }
  ####
  # derive results using the matching function
  # format parameters
  char.cols = c("match.cols", "func")
  parameter.df[, char.cols] = sapply(parameter.df[, char.cols], as.character)
  num.cols = c("thresholds", "NA.value", "weightings")
  parameter.df[, num.cols] = sapply(parameter.df[, num.cols], as.numeric)
  # format data
  data[, id.col] = as.character(data[, id.col])
  data[, parameter.df$match.cols] = sapply(data[, parameter.df$match.cols], as.character)
  # generate results by applying function to each field
  # (one column of weighted adjusted scores per compared field)
  results = NULL
  for(i in 1:nrow(parameter.df)){
    temp = fuzz.m(vec1 = data[, parameter.df[i, "match.cols"]],
                  FUN = parameter.df[i, "func"],
                  thresh = parameter.df[i, "thresholds"],
                  na.value = parameter.df[i, "NA.value"])
    temp = parameter.df[i, "weightings"] * temp$adj.score
    results = cbind(results, temp)
  }
  # sum accross fields to give overall scores for each comparison
  overall.score = round(apply(results, 1, sum), 2)
  # append overall scores onto each comparison id
  # (a self-comparison of the id column with thresh = 1 is used purely to
  # regenerate the var1/var2 pair layout in the same row order as above)
  final.results = fuzz.m(vec1 = data[, id.col],
                         FUN = "levenshteinSim",
                         thresh = 1,
                         na.value = 1)[, 1:2]
  final.results = data.frame(final.results, overall.score)
  # order results by overall score
  final.results = final.results[rev(order(final.results$overall.score)), ]
  # convert ids to characters
  final.results$var1 = as.character(final.results$var1)
  final.results$var2 = as.character(final.results$var2)
  # generate matches
  # (cap n.results at the number of available pairs)
  n.results = min(n.results, nrow(final.results))
  list.results = vector("list", n.results)
  for(i in 1:n.results){
    list.results[[i]] = data.frame(data[data[, id.col] %in% final.results[i, c("var1", "var2")], ],
                                   overall.score = final.results[i, c("overall.score")])
  }
  list.results
}
########
# EXAMPLE
# A tiny demo data set: seven people, some with deliberately misspelled
# names ("Stve"/"Corel", "Maryl") and one missing surname.
example.people <- data.frame(
  ID = c(1, 2, 3, 4, 5, 6, 7),
  Firstname = c("Steve", "Stve", "Clint", "Julianne", "Meryl", "Mark", "Maryl"),
  Surname = c("Carell", "Corel", NA, "Moore", "Streep", "Ruffalo", "Streep")
)
data <- example.people
# One parameter row per field to compare: thresholds, NA scores and
# weightings per field, all scored with the Levenshtein similarity.
parameters <- data.frame(
  match.cols = c("Firstname", "Surname"),
  thresholds = c(0.3, 0.3),
  NA.value = c(0.1, 0.1),
  weightings = c(0.6, 0.8),
  func = "levenshteinSim"
)
# Run the matcher and keep the five best-scoring candidate pairs.
fuzzy.match(data, id.col = "ID", parameter.df = parameters, n.results = 5)
|
c3d77f4f28af155b898bced05eaa278cc299c72d
|
b059a52cd2c7573ccd216b23a77b2acb49ee267c
|
/quaterly_reviews/FY20Q4_StrongRecovery.R
|
58bdc941b17fc1b77766e935e0aa41c64d929c59
|
[] |
no_license
|
gsarfaty/SA-Scripts
|
f6b977282b709065ac9ec4f9492e4c9555d42eed
|
022505bbcd2f3ce41325ea4fb1b2c06285accbcb
|
refs/heads/main
| 2023-03-06T23:21:00.973088
| 2021-02-21T16:20:24
| 2021-02-21T16:20:24
| 323,477,541
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,372
|
r
|
FY20Q4_StrongRecovery.R
|
# FY20 Q4 review: TX_CURR growth, TX_NET_NEW and achievement summaries for
# DSP districts, with bar charts and gt tables written to "Quarterly Reviews".
# NOTE(review): usaid_lightblue / usaid_blue / usaid_black and si_style_yline()
# are not defined in this script — presumably exported by glitr; confirm.
library(extrafont)
library(tidyverse)
library(ICPIutilities)
library(here)
library(glitr)
library(scales)
library(patchwork)
library(formattable)
library(gt)
library(glamr)
# MER
df<-read_msd(here("Processed_Files/MSD_genie", "msd_fy17to20_2020-11-17_attributes.txt"))
# Data - proxy linkage by metro ---------------------------------------------------------------------
# FY19 -> FY20 cumulative TX_CURR growth, restricted to high-burden DSP metros.
annual_growth_metros <-df%>%
  filter(indicator %in% c("TX_CURR"),
         standardizeddisaggregate %in% c("Total Numerator"),
         fiscal_year %in% c("2019","2020"),
         DSP=="Yes",
         HIGHBURDEN=="YES")%>%
  group_by(fiscal_year,agency_lookback,Partner_lookback,short_name,indicator) %>%
  summarise_at(vars(targets:cumulative), sum, na.rm = TRUE)%>%
  ungroup() %>%
  reshape_msd(clean=TRUE) %>%
  filter(period_type=="cumulative") %>%
  spread(period,val) %>%
  mutate(fy20growth=(FY20-FY19)/(FY19),
         # eThekwini appears under two agencies, so tag it to keep labels unique
         label=case_when(
           short_name=="kz eThekwini MM" ~ paste0(short_name," ",agency_lookback),
           TRUE ~ short_name))
# Same FY19 -> FY20 growth, but for ALL DSP districts (not just high-burden metros).
fy_growth<-df %>%
  filter(indicator %in% c("TX_CURR"),
         standardizeddisaggregate %in% c("Total Numerator"),
         fiscal_year %in% c("2019","2020"),
         DSP=="Yes")%>%
  group_by(fiscal_year,agency_lookback,Partner_lookback,short_name,indicator) %>%
  summarise_at(vars(targets:cumulative), sum, na.rm = TRUE)%>%
  ungroup() %>%
  reshape_msd(clean=TRUE) %>%
  filter(period_type=="cumulative") %>%
  spread(period,val) %>%
  mutate(fy20growth=(FY20-FY19)/(FY19),
         label=case_when(
           short_name=="kz eThekwini MM" ~ paste0(short_name," ",agency_lookback),
           TRUE ~ short_name))
# Quarter-on-quarter TX_CURR growth (FY20 Q3 -> Q4) per DSP district,
# flagged by agency when growth exceeds 2%.
growth<-df %>%
  filter(indicator %in% c("TX_CURR"),
         standardizeddisaggregate %in% c("Total Numerator"),
         fiscal_year=="2020",
         DSP=="Yes")%>%
  group_by(fiscal_year,agency_lookback,Partner_lookback,short_name,indicator) %>%
  summarise_at(vars(targets:cumulative), sum, na.rm = TRUE)%>%
  ungroup() %>%
  reshape_msd(clean=TRUE) %>%
  filter(period %in% c("FY20Q3","FY20Q4")) %>%
  spread(period,val) %>%
  mutate(q4_growth=(FY20Q4-FY20Q3)/(FY20Q3),
         label=case_when(
           short_name=="kz eThekwini MM" ~ paste0(short_name," ",agency_lookback),
           TRUE ~ short_name
         ),
         flag=case_when(
           q4_growth >.02 & agency_lookback=="USAID" ~ "USAID",
           q4_growth >.02 & agency_lookback=="HHS/CDC" ~ "CDC",
           TRUE ~ "NO"
         ),
         # NOTE(review): cutoff .019 here vs .02 for `flag` — confirm intended
         top10=case_when(
           q4_growth >.019 ~ "YES",
           TRUE ~ "NO")) %>%
  arrange(desc(q4_growth))
prinf(growth)
write_tsv(growth,here("Quarterly Reviews","FY20Q4_growth.txt"))
# Agency-level TX_NET_NEW for FY20 Q4.
NN<-df %>%
  filter(indicator %in% c("TX_NET_NEW"),
         standardizeddisaggregate %in% c("Total Numerator"),
         fiscal_year=="2020",
         DSP=="Yes")%>%
  group_by(fiscal_year,agency_lookback,indicator) %>%
  summarise_at(vars(targets:cumulative), sum, na.rm = TRUE)%>%
  ungroup() %>%
  reshape_msd(clean=TRUE) %>%
  filter(period %in% c("FY20Q4"))
prinf(NN)
# District-level TX_NET_NEW for FY20 Q4, sorted descending.
NN_psnu<-df %>%
  filter(indicator %in% c("TX_NET_NEW"),
         standardizeddisaggregate %in% c("Total Numerator"),
         fiscal_year=="2020",
         DSP=="Yes")%>%
  group_by(fiscal_year,agency_lookback,short_name,indicator) %>%
  summarise_at(vars(targets:cumulative), sum, na.rm = TRUE)%>%
  ungroup() %>%
  reshape_msd(clean=TRUE) %>%
  filter(period %in% c("FY20Q4")) %>%
  mutate(label=case_when(
    short_name=="kz eThekwini MM" ~ paste0(short_name," ",agency_lookback),
    TRUE ~ short_name)) %>%
  arrange(desc(val))
prinf(NN_psnu)
# Partner-level achievement (cumulative / targets) for key indicators.
partner_ach<-df %>%
  filter(indicator %in% c("HTS_TST_POS","TX_NEW","TX_CURR","TX_NET_NEW"),
         standardizeddisaggregate %in% c("Total Numerator"),
         fiscal_year=="2020",
         DSP=="Yes")%>%
  group_by(fiscal_year,agency_lookback,Partner_lookback,indicator) %>%
  summarise_at(vars(targets:cumulative), sum, na.rm = TRUE)%>%
  ungroup() %>%
  reshape_msd(clean=TRUE) %>%
  filter(period_type %in% c("cumulative","targets")) %>%
  spread(period_type,val) %>%
  mutate(ach=(cumulative/targets))
# Viz - NN & New trend -----------------------------------------------------------
#Q4 growth
# Horizontal bar chart of Q4 growth per district, dotted reference at 2%.
viz<-growth %>%
  ggplot(aes(y =reorder(label,q4_growth),
             x = q4_growth,
             fill=agency_lookback))+
  scale_x_continuous(labels = scales::percent_format(accuracy=1))+
  geom_col()+
  scale_fill_manual(values=c(usaid_lightblue,usaid_blue))+
  geom_vline(xintercept = .02, linetype="dotted")+
  # NOTE(review): the y positions 28.05 / 21.05 are hard-coded to the current
  # number of districts; re-check if the data changes.
  annotate(geom = "text", x = 0, y = 28.05, #determine text placement on coordinate plane
           label = "USAID", #what you want your text to say
           hjust="left", size=5, color="white", fontface="bold",family="Source Sans Pro")+
  annotate(geom = "text", x = 0, y = 21.05, #determine text placement on coordinate plane
           label = "CDC", #what you want your text to say
           hjust="left", size=5, color=usaid_black, fontface="bold", family="Source Sans Pro")+
  si_style_yline()+
  theme(axis.title.y = element_blank(),
        axis.title.x = element_blank(),
        axis.text = element_text(size=14),
        legend.position = "none")
print(viz)
ggsave(here("Quarterly Reviews","FY20Q4_growth.png"),
       width=9, height=5, dpi=300, units="in")
#annual growth - metros
metro_fy20_growth_viz<-annual_growth_metros %>%
  ggplot(aes(x =reorder(label,-fy20growth),
             y = fy20growth,
             fill=agency_lookback))+
  scale_y_continuous(labels=percent)+
  geom_bar(stat="identity")+
  scale_fill_manual(values=c(usaid_lightblue,usaid_blue))+
  geom_vline(xintercept = .02, linetype="dotted")+
  si_style_yline()+
  theme(axis.title.y = element_blank(),
        axis.title.x = element_blank(),
        legend.position = "none")
print(metro_fy20_growth_viz)
ggsave(here("Quarterly Reviews","FY20_annual_growth_metros.png"))
#Table
# gt table of Q4 growth with green/yellow/orange fill by growth band.
growth_table<- growth %>%
  select(label,FY20Q3,FY20Q4,q4_growth) %>%
  arrange(-q4_growth) %>%
  gt()
growth_table<-growth_table %>%
  cols_label(label="District", q4_growth="Q4 Growth") %>%
  tab_style(
    style = list(
      cell_text(weight = "bold")),
    locations = cells_column_labels(everything())) %>%
  opt_table_lines(extent = "default") %>%
  fmt_number(
    columns = vars(FY20Q3),
    decimals=0) %>%
  fmt_number(
    columns = vars(FY20Q4),
    decimals=0) %>%
  fmt_percent(
    columns=vars(q4_growth),
    placement="right",
    decimals=1) %>%
  tab_style(
    style = cell_borders(
      sides = "right",
      weight = px(1.5),
    ),
    locations = cells_body(
      columns = everything(),
      rows = everything()
    )) %>%
  tab_style(style = cell_fill(color = "#91cf60"),
            locations = cells_body(
              columns = vars(q4_growth),
              rows = q4_growth >= .021)) %>%
  tab_style(style = cell_fill(color = "#ffffbf"),
            locations = cells_body(
              columns = vars(q4_growth),
              rows = q4_growth <.021)) %>%
  tab_style(style = cell_fill(color = "#fc8d59"),
            locations = cells_body(
              columns = vars(q4_growth),
              rows = q4_growth <0))
growth_table
#annual growth table
# Same styling for annual FY19 -> FY20 growth; color cutoffs at 4.1%.
fy_growth_table<-fy_growth %>%
  select(label,FY19,FY20,fy20growth) %>%
  arrange(-fy20growth) %>%
  gt() %>%
  cols_label(label="District", fy20growth="FY20 Growth") %>%
  tab_style(
    style = list(
      cell_text(weight = "bold")),
    locations = cells_column_labels(everything())) %>%
  opt_table_lines(extent = "default") %>%
  fmt_number(
    columns = vars(FY19),
    decimals=0) %>%
  fmt_number(
    columns = vars(FY20),
    decimals=0) %>%
  fmt_percent(
    columns=vars(fy20growth),
    placement="right",
    decimals=1) %>%
  tab_options(
    column_labels.border.top.color = "white",
    column_labels.border.top.width = px(3),
    column_labels.border.bottom.color = "black",
    table_body.hlines.color = "white",
    table.border.bottom.color = "white",
    table.border.bottom.width = px(3)) %>%
  tab_source_note(md("**Data**: TX_CURR | 2020-11-17 genie")) %>%
  tab_style(
    style = cell_borders(
      sides = "right",
      weight = px(1.5),
    ),
    locations = cells_body(
      columns = everything(),
      rows = everything()
    )) %>%
  tab_style(style = cell_fill(color = "#91cf60"),
            locations = cells_body(
              columns = vars(fy20growth),
              rows = fy20growth >= .041)) %>%
  tab_style(style = cell_fill(color = "#ffffbf"),
            locations = cells_body(
              columns = vars(fy20growth),
              rows = fy20growth <.0411)) %>%
  tab_style(style = cell_fill(color = "#fc8d59"),
            locations = cells_body(
              columns = vars(fy20growth),
              rows = fy20growth <0))
fy_growth_table
#NN column
NN_viz<-NN %>%
  ggplot(aes(x=reorder(agency_lookback,-val),
             y=val,
             fill=agency_lookback))+
  geom_bar(stat="identity")+
  scale_fill_manual(values=c(usaid_lightblue,usaid_blue))+
  scale_y_continuous(labels=comma)+
  si_style_yline()+
  theme(axis.title.y = element_blank(),
        axis.title.x = element_blank(),
        legend.position = "none")
print(NN_viz)
ggsave(here("Quarterly Reviews","FY20Q4_NN.png"),
       width=3, height=4, dpi=300, units="in")
|
69a8a34eb5b167235f2ef5a05c3c313d3b900e04
|
a1355acdd7419f9d0a844cf2524d68b9af0af193
|
/复杂点的图/子弹图.R
|
850379058860004c516bd8b3187920b9fd67b67a
|
[] |
no_license
|
ShydowLi/data-view--with-R
|
d9ededa91f167e5abb70234a8e6b53273db5cb66
|
41148b22b57eb376a1c19179c55d08f4bafa7d01
|
refs/heads/master
| 2020-04-09T01:26:11.890871
| 2018-12-01T03:13:41
| 2018-12-01T03:13:41
| 159,904,146
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 885
|
r
|
子弹图.R
|
# Bullet chart (performance vs. target relationship)
library(ggplot2)
library(reshape2)
library(ggmap)
library(dplyr)
library(tidyr)
# Five KPIs with three qualitative bands (pass 0.6 / good 0.2 / excellent 0.2
# stacked to 1.0), a target value and an actual ("fact") value each.
KPI<-c("KPI1","KPI2","KPI3","KPI4","KPI5")
INDEX<-1:5
good<-rep(0.2,5)
excellent<-good
pass<-rep(0.6,5)
target<-c(0.84,0.85,0.7,0.92,0.78)
fact<-c(0.91,0.8,0.68,0.91,0.8)
mydata<-data.frame(KPI,INDEX,excellent,good,pass,target,fact)
# Long format: one row per (KPI, band) for the stacked background bars.
dat<-gather(mydata,perform,scope,-KPI,-target,-INDEX,-fact)
color<-c("#43546C","#8695B2","#D9DBDF")
# Stacked band background per KPI.
p<-ggplot()+geom_bar(data=dat,aes(KPI,scope,fill=perform),stat = 'identity',position = 'stack',width = 0.7)+ylim(-0.15,1.2)
# Thick black "bullet" bar for the actual value; darkest band at the bottom.
p1<-p+geom_linerange(data=dat,aes(x=KPI,ymin=0,ymax=fact),col="#000002",size=5)+scale_fill_manual(values=sort(color,decreasing=T) )
# Numeric labels above each bullet.
# NOTE(review): fact + 0.5 can exceed the ylim upper bound (1.2), so labels
# for high values may be clipped — confirm the intended offset.
p2<-p1+geom_text(data = mydata,aes(x=KPI,y=fact+0.5),label=fact)+theme(
  legend.direction="horizontal",
  legend.position=c(0.5,.88),
  legend.text=element_text(size=12)
)
|
abfe62d054d482254daac2ba3db87bf1163d2ba3
|
1579b8ae1811c912c4cb698a29ba149014819bff
|
/10_extreme_year_analyses.R
|
8ef98c7694bcf72017c6c7ff9cdb4f601ea84ef8
|
[] |
no_license
|
alexanderm10/multiple_limiting_factors_ch2
|
10b0dde79927a1e5a520a384d8c5a626b5603002
|
3e1a748869d509eef552e189b4e18d9e751b4587
|
refs/heads/master
| 2021-01-17T06:49:40.662734
| 2017-01-10T18:33:35
| 2017-01-10T18:33:35
| 53,061,297
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,347
|
r
|
10_extreme_year_analyses.R
|
## Extreme-year analyses: compare log(BAI) between climatically extreme years
## and ambient years, by canopy class, with tree/site/plot terms in the ANOVA.
library(ggplot2)
library(mgcv)
library(lsmeans)
library(car)
library(moments)
# Running ANOVA between Extreme years and normal years BAI for Canopy.Class
load("processed_data/gam2_weights_processed.Rdata")
summary(gam2.weights)
# Running ANOVA's to determine differences between ambient years and extreme years
summary(gam2.weights)
# Truncating to 1950 because that's the extent of the climate analysis
gam2.weights <- gam2.weights[gam2.weights$Year >= 1950, ]
# Need to log transform the data to meet assumptions for the ANOVA test
head(gam2.weights)
## Temperature extremes -----------------------------------------------------
gam2.temp.anova <- aov(log(BA.inc) ~ TreeID + Site + PlotID + group + Temp.Mark*Canopy.Class, data=gam2.weights)
summary(gam2.temp.anova)
TukeyHSD(gam2.temp.anova)
# lsmeans(gam2.temp.anova, pairwise~Temp.Mark*Canopy.Class, adjust="tukey")
# Residual diagnostics for the temperature model.
hist(resid(gam2.temp.anova))
plot(resid(gam2.temp.anova)~predict(gam2.temp.anova))
skewness(log(gam2.weights$BA.inc))
kurtosis(log(gam2.weights$BA.inc))
## Precipitation extremes ---------------------------------------------------
gam2.precip.anova <- aov(log(BA.inc) ~ TreeID + Site + PlotID + group + Precip.Mark*Canopy.Class, data=gam2.weights)
# BUG FIX: the diagnostics below referred to `gam2.lme.precip`, an object that
# is never created in this script; they now use the aov fit above.
anova(gam2.precip.anova)
summary(gam2.precip.anova)
TukeyHSD(gam2.precip.anova)
lsmeans(gam2.precip.anova, pairwise~Precip.Mark*Canopy.Class, adjust="tukey")
hist(resid(gam2.precip.anova))
plot(resid(gam2.precip.anova)~predict(gam2.precip.anova))
# Recoding the ambient in temp and precip to be unique so that maybe it will plot prettier
gam2.weights$Temp.Mark <- recode(gam2.weights$Temp.Mark, "'A'='1A-Temp';'cold'='3Cool';'hot'='2Hot'")
gam2.weights$Precip.Mark <- recode(gam2.weights$Precip.Mark, "'A'='4A-Precip';'dry'='5Dry';'wet'='6Wet'")
# Reordering factors
# gam2.weights$Temp.Mark <- factor(gam2.weights$Temp.Mark, levels=c("A-Temp", "Hot", "Cool"))
# gam2.weights$Precip.Mark <- factor(gam2.weights$Precip.Mark, levels=c("A-Precip", "Dry", "Wet"))
# summary(gam2.weights)
gam2.weights$State <- factor(gam2.weights$State, levels=c("MO", "IN", "OH", "MA", "ME"))
# using boxplots to show the differences
ggplot(data=gam2.weights) + facet_grid(Canopy.Class~State) +
  geom_boxplot(aes(x=Temp.Mark, y=log(BA.inc))) +
  #geom_boxplot(aes(x=Precip.Mark, y=log(BA.inc)))
  scale_y_continuous(limits=c(-2.5,5), expand=c(0,0))
hist(log(gam2.weights$BA.inc), breaks=100)
|
9778b917d00e2ebab8b83e3e32fabe38b09e6abd
|
8315a8d8e3a026fa88a9a2554b7a8353eb485f79
|
/Lab3-Parte2/regressaologistica.R
|
699ca6617c53ecdec415259c036425f8ffa2832f
|
[] |
no_license
|
brunabarbosa/AD2
|
8432e57d645c13ae9193a983ce8b08f98f23e615
|
d48c5a58fbb09be94947b0def2dd2abb0d7932e5
|
refs/heads/master
| 2020-06-17T20:10:01.064567
| 2017-03-30T03:01:46
| 2017-03-30T03:01:46
| 74,972,998
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,547
|
r
|
regressaologistica.R
|
## Logistic regression predicting student dropout (EVADIU) from
## first-semester course grades.
##
## Fixes applied in review:
##  * glm() was fitted on `train` (which still contains NAs and a numeric
##    outcome) instead of the prepared `train.clean`.
##  * roc.curve() belongs to the ROSE package; the package loaded here is
##    pROC, whose API is roc() / auc().
library(ISLR)
library(readr)
library(caret)
library(dplyr)
library(reshape2)
library(pROC)
# NOTE(review): setwd() makes the script non-portable; kept only so the
# relative read below keeps working in the original environment.
setwd("~/AD2/Lab3-Parte2")
treino_classificacao_v2 <- read_csv("treino_classificacao_v2.csv")
## Reshape table: one row per student, one column per course (best attempt),
## plus the credit-weighted GPA (cra).
classificacao.clean <- treino_classificacao_v2 %>%
  filter(!is.na(MAT_MEDIA_FINAL))
classificacao.clean$CREDITOS <- 4  # every course carries 4 credits
classificacao.cra <- classificacao.clean %>%
  group_by(MAT_ALU_MATRICULA, EVADIU) %>%
  mutate(cra.contrib = MAT_MEDIA_FINAL*CREDITOS) %>%
  summarise(cra = sum(cra.contrib)/sum(CREDITOS))
classificacao.model.input <- classificacao.clean %>%
  group_by(MAT_ALU_MATRICULA,disciplina) %>%
  filter(MAT_MEDIA_FINAL == max(MAT_MEDIA_FINAL)) %>%  # keep best attempt per course
  ungroup() %>%
  select(MAT_ALU_MATRICULA,disciplina,MAT_MEDIA_FINAL) %>%
  mutate(disciplina = as.factor(gsub(" ",".",disciplina))) %>%
  dcast(MAT_ALU_MATRICULA ~ disciplina, mean) %>%
  merge(classificacao.cra)
## 75/25 train/test split, stratified on the outcome.
split <- createDataPartition(y=classificacao.model.input$EVADIU, p = 0.75, list = FALSE)
train <- classificacao.model.input[split,]
test <- classificacao.model.input[-split,]
## Keep only rows with grades in all six predictor courses.
train.clean <- train %>%
  filter(!is.na(Álgebra.Vetorial.e.Geometria.Analítica)) %>%
  filter(!is.na(Cálculo.Diferencial.e.Integral.I)) %>%
  filter(!is.na(Introdução.à.Computação)) %>%
  filter(!is.na(Laboratório.de.Programação.I)) %>%
  filter(!is.na(Programação.I)) %>%
  filter(!is.na(Leitura.e.Produção.de.Textos))
test.clean <- test %>%
  filter(!is.na(Álgebra.Vetorial.e.Geometria.Analítica)) %>%
  filter(!is.na(Cálculo.Diferencial.e.Integral.I)) %>%
  filter(!is.na(Introdução.à.Computação)) %>%
  filter(!is.na(Laboratório.de.Programação.I)) %>%
  filter(!is.na(Programação.I)) %>%
  filter(!is.na(Leitura.e.Produção.de.Textos))
train.clean$EVADIU <- as.factor(train.clean$EVADIU)
train.clean$MAT_ALU_MATRICULA <- as.factor(train.clean$MAT_ALU_MATRICULA)
is.factor(train.clean$EVADIU)
is.factor(train.clean$MAT_ALU_MATRICULA)
## BUG FIX: fit on the cleaned training set (was `data = train`).
model <- glm(EVADIU ~ Cálculo.Diferencial.e.Integral.I
             + Introdução.à.Computação
             + Laboratório.de.Programação.I
             + Leitura.e.Produção.de.Textos
             + Programação.I
             + Álgebra.Vetorial.e.Geometria.Analítica,
             family=binomial, data=train.clean)
## Classify with a 0.5 probability cut-off.
fitted.results <- predict(model,newdata=test.clean,type='response')
fitted.results <- ifelse(fitted.results > 0.5,1,0)
fitted.results
misClasificError <- mean(fitted.results != test.clean$EVADIU)
print(paste('Accuracy',1-misClasificError))
## BUG FIX: use pROC's API (roc.curve() is from the ROSE package).
roc.obj <- roc(test.clean$EVADIU, fitted.results)
plot(roc.obj)
auc(roc.obj)
|
89947255dfc65c9222360897d1fd2172898476bc
|
6ce9e9dad626bf0f92ad2295d75f46923e7041e9
|
/R/optimize_function.R
|
4226db4d6061abcf4cf76fc28a121b7f9874af29
|
[] |
no_license
|
smdrozdov/saddle_free_newton
|
da6af09e3ddbfcdaa649078a0d29fd8fe2da1318
|
4513ada03dd9e74dbfa772890d193bf16b644efa
|
refs/heads/master
| 2020-04-13T15:37:04.764281
| 2019-02-14T19:49:13
| 2019-02-14T19:49:13
| 163,297,255
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,893
|
r
|
optimize_function.R
|
# Implementation of Ganguli-Bengio method of non-convex function optimzation.
# Link to article: https://arxiv.org/abs/1406.2572
# Optimization methods available: GD - Gradient Descent. Here mostly for comparison.
# SFN - Saddle-free Newton. Escapes saddle points, takes O(d^3),
# where d is domain dimension.
# ASFN - Approximate Saddle-free Newton. Escapes saddle points,
# takes O(d), where d is domain dimension.
# P0 TODO(smdrozdov): Port to Python.
# P1 TODO(smdrozdov): Test Krylov subspace with Direction curvature.
# TODO(smdrozdov): Krylov subspace unstable if gradient is exactly zero.
# TODO(smdrozdov): Switch from point.container to function.container.
# TODO(smdrozdov): Add previous delta to Krylov.
# TODO(smdrozdov): Optimize in the subspace.
OptimizeFunction <- function(L,
                             max.steps,
                             input.dimension,
                             epsilon,
                             epsilon.stop,
                             learning.rate,
                             EdgeDistance,
                             optimization.method,
                             k,
                             m){
  # Calculates global minimum of function defined in euclidian space.
  # Args:
  # L: function to be optimized, takes vector as input.
  # max.steps: amount of steps, set to 500 by default.
  # input.dimension: dimension of L input.
  # epsilon: size of step.
  # epsilon.stop: stop in this case.
  # learning.rate: 0.1 by default.
  # EdgeDistance: distance from given point to the domain boundary.
  # optimization.mehtod: GD, SFN or ASFN.
  # k: Krylov subspace dimension in ASFN.
  # m: NOTE(review): accepted but never referenced in this body — confirm
  #    whether it is vestigial or intended for a future variant.
  #
  # All three branches share the same convergence rule: stop when the sum of
  # the last 10 step lengths falls below epsilon.stop (the is.na() guard
  # skips the check during the first 9 iterations, when shifts[1:10]
  # still contains NAs).
  p <- numeric(input.dimension)
  if (optimization.method == "GD"){
    # Gradient descent.
    shifts <- {}
    for (step in 1:max.steps){
      p <- as.vector(p)
      # pointContainer / NumericGradient / NumericHessian / KrylovSubspace
      # are defined elsewhere in this package.
      point.container <- pointContainer(p = p,
                                        L = L,
                                        input.dimension = input.dimension,
                                        epsilon = epsilon,
                                        EdgeDistance = EdgeDistance)
      delta <- -t(NumericGradient(point.container))
      p <- p + delta * learning.rate
      shifts <- c(sqrt(sum(delta * delta)), shifts)
      s <- sum(shifts[1:10])
      if (!is.na(s) && s < epsilon.stop) {
        break
      }
    }
  } else if (optimization.method == "SFN"){
    # Saddle-free Newton.
    shifts <- {}
    for (step in 1:max.steps){
      p <- as.vector(p)
      point.container <- pointContainer(p = p,
                                        L = L,
                                        input.dimension = input.dimension,
                                        epsilon = epsilon,
                                        EdgeDistance = EdgeDistance)
      # NOTE(review): epsilon.shift is computed but never used below —
      # confirm whether a step-size clamp was intended here.
      epsilon.shift <- min(point.container$epsilon, point.container$EdgeDistance(point.container$p) / exp(1))
      gradient <- NumericGradient(point.container)
      hessian <- NumericHessian(point.container)
      # Compute hessian absolute value, |H| in Ganguli's notation.
      # (eigendecompose, take |eigenvalues|, reassemble — this is the O(d^3) step)
      ev <- eigen(hessian)
      vectors <- ev$vectors
      values <- ev$values
      hessian.pos <- vectors %*% diag(abs(values)) %*% t(vectors)
      delta <- - gradient %*% solve(hessian.pos)
      p <- p + delta * learning.rate
      shifts <- c(sqrt(sum(delta * delta)), shifts)
      s <- sum(shifts[1:10])
      if (!is.na(s) && s < epsilon.stop) {
        break
      }
    }
  } else if (optimization.method == "ASFN"){
    # Approximate Saddle-free Newton.
    shifts <- {}
    # NOTE(review): `one` is defined but not referenced below.
    one <- function(v){ return (1)}
    for (step in 1:max.steps){
      p <- as.vector(p)
      point.container <- pointContainer(p = p,
                                        L = L,
                                        input.dimension = input.dimension,
                                        epsilon = epsilon,
                                        EdgeDistance = EdgeDistance)
      gradient <- NumericGradient(point.container)
      # Project the problem onto a k-dimensional Krylov subspace so the
      # eigendecomposition below is k x k rather than d x d.
      krylov.subspace <- KrylovSubspace(point.container, k)
      V <- krylov.subspace$subspace
      V.multiplied.by.hessian <- krylov.subspace$subspace.multiplied.by.hessian
      hessian.subspace <- V.multiplied.by.hessian %*% t(V)
      # Compute hessian absolute value.
      ev <- eigen(hessian.subspace)
      vectors <- ev$vectors
      values <- ev$values
      hessian.subspace.pos <- vectors %*% diag(abs(values)) %*% t(vectors)
      delta <- - t(V %*% gradient) %*% solve(hessian.subspace.pos) %*% V
      p <- p + delta * learning.rate
      shifts <- c(sqrt(sum(delta * delta)), shifts)
      s <- sum(shifts[1:10])
      if (!is.na(s) && s < epsilon.stop) {
        break
      }
    }
  }
  return(p)
}
|
01ed6451416db0224afc77c1d970edbc0b93c464
|
8cccb9e03969141f6618c2137ded8318ee3f6082
|
/PenFin_pensionSimInputs.R
|
4f7a9292b97809f3764e25a2da746af4a8a4c03d
|
[] |
no_license
|
yaning19/PenSimMacro_Data
|
54a300398e62707756ee4786f33edf4595faf9c9
|
f2043b338d5f71f932af7ffadbf6589d4f23d577
|
refs/heads/master
| 2022-04-03T05:02:38.089695
| 2020-02-10T14:19:40
| 2020-02-10T14:19:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,442
|
r
|
PenFin_pensionSimInputs.R
|
# This script is for modeling tax revenue of stylized governments
#**********************************************************************
# Packages ####
#**********************************************************************
library(tidyverse)
library(broom)
library(readxl)
library(magrittr)
library(ggrepel)
library(stringr)
library(forcats)
library(grid)
library(gridExtra)
library(scales)
library(knitr)
library(xlsx)
# packages for econometric and time series modeling
library(plm)
library(astsa) # companion package
library(TSA) # companion package; arimax: flexible transfer function model
library(tseries) #
library(forecast) # Arima
library(MSwM)
library(TTR)
library(dynlm)
library(broom)
#library(MSBVAR)
# packages for ts
library(zoo)
library(xts)
library(timetk)
library(tidyquant)
library(lubridate)
library(feather)
library(psych) # describe
options(tibble.print_max = 60, tibble.print_min = 60)
# check tidyquant, timetk, sweep (broom ), tibbletime
# Intro to zoo cran.r-project.org/web/packages/zoo/vignettes/zoo-quickref.pdf
# sweep: http://www.business-science.io/code-tools/2017/07/09/sweep-0-1-0.html
#**********************************************************************
# Global settings and tools ####
#**********************************************************************
# Project directory layout: raw inputs, processed outputs, and report figures.
dir_data_raw <- "data_raw/"
dir_data_out <- "data_out/"
dir_fig_out <- "outputs_report/"
# Local path to the pension simulation model repository.
# Need to make sure "Model_Main" is in branch "penSimMacro"
# NOTE(review): hard-coded absolute Windows path -- will only work on the
# original author's machine; confirm before running elsewhere.
dir_penSim <- "C:/Git/PenSim-Projects/Model_Main/"
# NBER recession periods, post-WWII
# NBER recession periods, post-WWII.
# Each row holds the quarter in which a recession peaked (began) and
# troughed (ended), encoded as year + (quarter - 1)/4.  The - 1/4 shift
# converts "1953+2/4" style quarter labels to that zero-based encoding.
recessionPeriods <- data.frame(
  peak = c(1953 + 2/4, 1957 + 3/4, 1960 + 2/4, 1969 + 4/4, 1973 + 4/4,
           1980 + 1/4, 1981 + 3/4, 1990 + 3/4, 2001 + 1/4, 2007 + 4/4) - 1/4,
  trough = c(1954 + 2/4, 1958 + 2/4, 1961 + 1/4, 1970 + 4/4, 1975 + 1/4,
             1980 + 3/4, 1982 + 4/4, 1991 + 1/4, 2001 + 4/4, 2009 + 2/4) - 1/4
)
# Compute one-period log returns from a vector of (strictly positive) levels.
# The first element is NA because it has no predecessor.
# Errors on non-positive values, for which log returns are undefined.
get_logReturn <- function(x){
  if(any(x <= 0, na.rm = TRUE)) stop("Non-positive value(s)")
  # Shift by one position explicitly.  The original called `lag(x)`, which
  # silently depends on dplyr::lag masking stats::lag; with only stats
  # attached, stats::lag on a plain vector is a no-op shift and every log
  # return would come out 0.
  x_lag <- c(NA, x[-length(x)])
  log(x/x_lag)
}
# RIG colors and theme
# Hex color constants used across report figures.
RIG.blue <- "#003598"
RIG.red <- "#A50021"
RIG.green <- "#009900"
RIG.yellow <- "#FFFF66"
RIG.purple <- "#9966FF"
RIG.yellow.dark <- "#ffc829"
RIG.orange <- "#fc9272"
# Six-color categorical palette built from the constants above,
# ordered for contrast between adjacent series.
demo.color6 <- c(RIG.red,
                 RIG.orange,
                 RIG.purple,
                 RIG.green ,
                 RIG.blue,
                 RIG.yellow.dark)
# House ggplot2 theme: horizontal major gridlines only, centered title and
# subtitle, left-aligned caption.  Returns a theme object to add to a plot.
RIG.theme <- function() {
  theme(
    plot.title = element_text(hjust = 0.5),
    plot.subtitle = element_text(hjust = 0.5),
    plot.caption = element_text(hjust = 0, size = 9),
    panel.grid.major.y = element_line(size = 0.5, color = "gray80"),
    panel.grid.major.x = element_blank(),
    panel.grid.minor.x = element_blank(),
    panel.grid.minor.y = element_blank()
  )
}
# Lighter variant of RIG.theme: centers title/subtitle and left-aligns the
# caption, leaving gridlines at ggplot2 defaults.
RIG.themeLite <- function() {
  centered <- element_text(hjust = 0.5)
  theme(
    plot.title = centered,
    plot.subtitle = centered,
    plot.caption = element_text(hjust = 0, size = 9)
  )
}
# Geometric mean of a vector of periodic returns: compounds (1 + x) over all
# periods and takes the length(x)-th root, minus 1.
# `na.rm` (new, default FALSE for backward compatibility) drops missing
# periods before compounding; with the default, any NA propagates as before.
get_geoReturn <- function(x, na.rm = FALSE) {
  if (na.rm) x <- x[!is.na(x)]
  prod(1 + x)^(1/length(x)) - 1
}
#**********************************************************************
# Outline ####
#**********************************************************************
# Goals:
# Path of state tax revenue derived from simulated GDP, stock returns, and estimated elasticities.
# What to have in the results
# 1. a deterministic simulation with constant GDP growth and stock return
# 2. A single stochastic simulation, how different stylized governments respond to GDP and stocks differently
# 3. A scenario, if hard to find a stochastic simulation that makes sense, construct a scenario that is similar to history.
# 4. Distribution of 2000 simulations (quanitles)
# 5. Risk measures: probability of sharp decline in tax revenue, and difference stylized governments.
# Calculating growth rates of tax revenues
# Trend growth rates:
# - Trend GDP growth rates (real)
# - CBO projection: 1.9%
# - Need to determined mean (and SD) in recession and expansion. (currently we may want to use SD estimated using historical data)
# - Trend stock returns (real)
# - Trend real return = Assumed overall nominal mean stock return (expansion and recession) - assumed inflation
# - Issue:
# 1. Estimated mean stock return is different from projected mean capital return. When using projected mean, how should the SD be adjusted?
# 2. With a target overall mean stock return (and SD), how to determine mean return (and SD) in recession and expansion periods.
# - Trend growth of taxes:
# - Use assumed trend growth rates
# - To ensure that the share of each major tax category stays stable over the long run,
# we assume that the trend growth rates of all tax categories equal the trend GDP growth rates (CBO projection: 1.9%)
# Cyclical growth rates:
# - Cyclical GDP growth rate: (real)
# - Cyclical GDP growth = simulated real rate - assumed overall trend
# - Potential issue: Growth will be above trend in expansion periods. (Since recessions are not very often, overall trend should be close to rate in expansion)
# - Issue to think about: do we need use a different trend for each single simulation?
# - Cyclical Stock returns (real)
# - Cyclical real stock return = simulated real rate - assumed overall trend
# - Cyclical tax growth
# - cyclical tax growth = e_GDP*GDP_cycle + e_stock*stock_cycle + recession adjustment
# Converting to nominal rates:
# - adding assumed inflation rates to simulated real rates.
# - Need to check whether the nominal numbers make sense, especially the asset returns (will be used in pension finance model)
#**********************************************************************
# Importing simulations of GDP growth and stock return ####
#**********************************************************************
# Notes
# 1. Simulations are generated by Model_simulation(3).R
# 2. What inputs to include:
# - simulated path of real GDP growth
# - Recession and expansion periods in each simulation
# - simulated path stock return
# - simulated path of bond return
# Assumptions:
# Inflation assumptions (annual GDP price index rates from CBO).
infl_hist <- 0.022 #(CBO, GDP price index: 1987-2016)
infl_proj <- 0.02 #(CBO, GDP price index: 2017-2047)
# Use the projected rate for converting nominal simulated rates to real.
infl <- infl_proj
# Loading simulation outputs:
# load("policyBrief_out/sim_results_historical.RData")
# sim_results <- sim_results_historical
# Loads `sim_results` (forward-looking macro simulations) into the workspace.
load(paste0(dir_data_out, "MacroModel_sim_results_forward.RData"))
#sim_results <- sim_results_forward
# dfs to use:
# df_sim_gdp_y
# df_sim_stockreturn_y
# df_sim_bondreturn_y
# Join the regime, GDP-growth, stock-return and bond-return simulations into
# one long data frame keyed by (sim, year); derive real returns by
# subtracting assumed inflation, and flag years containing any recession
# quarter (recession_nqtr != 0).
df_sim <-
  sim_results$df_sim_gdp_regimes_y %>%
  left_join(sim_results$df_sim_gdp_y %>% rename(gdp_chg = return_y)) %>%
  left_join(sim_results$df_sim_stockreturn_y %>% rename(stockreturn = return_y)) %>%
  left_join(sim_results$df_sim_bondreturn_y %>% rename(bondreturn = return_y)) %>%
  ungroup() %>%
  mutate(sim = str_extract(sim, "\\d+") %>% as.numeric) %>%
  mutate(stockreturn_real = stockreturn - infl,
         bondreturn_real = bondreturn - infl,
         recessionYear = recession_nqtr != 0 )
df_sim %>% head
# Calculating geometric mean of GDP growth and stock return
# (per-simulation geometric means; their averages serve as trend rates).
sim_geoMeans <-
  df_sim %>%
  group_by(sim) %>%
  summarise(gdp_chg_geoMean = get_geoReturn(gdp_chg),
            stockReturn_real_geoMean = get_geoReturn(stockreturn_real),
            bondReturn_real_geoMean = get_geoReturn(bondreturn_real))
trend_growth_gdp <- sim_geoMeans$gdp_chg_geoMean %>% mean
trend_growth_stock_real <- sim_geoMeans$stockReturn_real_geoMean %>% mean
trend_growth_bond_real <- sim_geoMeans$bondReturn_real_geoMean %>% mean
# Print trend rates for inspection.
trend_growth_gdp
trend_growth_stock_real
trend_growth_bond_real
df_sim$stockreturn %>% mean
head(df_sim)
# Portfolio weights for a stylized 70/30 stock/bond allocation.
stock_pct <- 0.7
bond_pct <- 0.3
# Nominal blended portfolio return per (sim, year) -- the input the pension
# finance model consumes.
penSimInputs_returns <-
  df_sim %>%
  select(sim, year, stockreturn, bondreturn) %>%
  mutate(return70_30_nom = stockreturn * stock_pct + bondreturn * bond_pct)
# Save locally and into the pension model repo's input folder.
save(penSimInputs_returns, file = paste0(dir_data_out, "penSimInputs_returns.RData"))
save(penSimInputs_returns, file = paste0(dir_penSim, "IO_penSimMacro/penSimInputs_returns.RData"))
# source(paste0(dir_penSim, "Model_RunControl.R"))
|
8830380c9ae1cf730446c29a7610ddec9ea65481
|
2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0
|
/fuzzedpackages/Rfast/man/colShuffle.Rd
|
fcff0ea0c7a8deb7690086c1345fd1964cb768b4
|
[] |
no_license
|
akhikolla/testpackages
|
62ccaeed866e2194652b65e7360987b3b20df7e7
|
01259c3543febc89955ea5b79f3a08d3afe57e95
|
refs/heads/master
| 2023-02-18T03:50:28.288006
| 2021-01-18T13:23:32
| 2021-01-18T13:23:32
| 329,981,898
| 7
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,013
|
rd
|
colShuffle.Rd
|
\name{Column and row-wise Shuffle}
\alias{colShuffle}
\alias{rowShuffle}
\title{
Column and row-wise Shuffle
}
\description{
Column and row-wise shuffle of a matrix.
}
\usage{
colShuffle(x)
rowShuffle(x)
}
\arguments{
\item{x}{
A matrix with the data.
}
}
\details{
The functions are written in C++ in order to be as fast as possible.
}
\value{
A vector with the column/row Shuffle.
}
%\references{
%Tsagris M.T., Preston S. and Wood A.T.A. (2011). A data-based power transformation for compositional data. In Proceedings of the 4th Compositional Data Analysis Workshop, Girona, Spain.
%}
\author{
R implementation and documentation: Manos Papadakis <papadakm95@gmail.com>.
}
\seealso{
\code{\link{Median}, \link{colVars}, \link{colMeans} (built-in R function)
}
}
\examples{
x <- matrix( rnorm(100 * 100), ncol = 100 )
system.time( colShuffle(x) )
system.time( rowShuffle(x) )
x<-NULL
}
\keyword{ Column-wise Shuffle }
\keyword{ Row-wise Shuffle }
|
f61384d08e5091c9f71f63ccb7f007749c54630e
|
c04ba0c23c254f71ad9a9d48e2ee5528c75c3dd4
|
/R/EM_noDE.R
|
27db50d14a49b6de1d42a892c956196ae0f736f4
|
[] |
no_license
|
cz-ye/DECENT
|
121cb865635965ccee27efc21cd15e87925eb12b
|
2abd887ad749897ecf3500676ac7cc6a3b2a04d1
|
refs/heads/master
| 2023-01-04T00:40:50.476513
| 2023-01-03T00:33:26
| 2023-01-03T00:33:26
| 112,068,191
| 14
| 4
| null | 2023-01-03T00:33:27
| 2017-11-26T08:45:11
|
R
|
UTF-8
|
R
| false
| false
| 15,396
|
r
|
EM_noDE.R
|
#' Fitting the unrestricted model with EM algorithm
#'
#' Fit the DECENT model assuming no differentially-expressed (DE) genes.
#' All cells are treated as a single cell type; the EM algorithm alternates
#' between imputing the pre-dropout counts (E-step) and updating the ZINB
#' parameters, size factors and dropout-dispersion (tau) parameters (M-step).
#'
#' @param data.obs Observed count matrix for endogeneous genes, rows represent genes, columns represent cells.
#' @param spikes Observed count matrix for spike-ins, rows represent spike-ins, columns represent cells. Only needed if \code{use.spikes} is \code{TRUE}.
#' @param spike.conc A vector of theoretical count for each spike-in in one cell (only needed if \code{use.spikes} is \code{TRUE}).
#' @param use.spikes If \code{TRUE}, use spike-ins to estimate capture efficiencies.
#' @param CE.range A two-element vector of the lower limit and upper limit for the estimated range of
#' capture efficiencies (ONLY needed if \code{use.spikes} is \code{FALSE}, default [0.02, 0.10]).
#' @param tau.init initial estimates (intcp,slope) that link Beta-Binomial dispersion parameter to the mean expression.
#' @param tau.global whether to use the same tau parameters across cell. Default TRUE
#' @param tau.est Methods to estimate tau parameters. The default 'endo' corresponds to using endogeneous genes. Other options
#' are 'none' which means tau.init is not further estimated and 'spikes' corresponds to using spike-ins.
#' @param normalize Method for estimating size factors, either 'ML' (maximum likelihood, Ye et al., 2017) or 'TMM' (Robinson et al., 2010).
#' @param GQ.approx If \code{TRUE}, use Gaussian-Quadrature approximation to speed up E-step.
#' @param maxit maximum number of iterations for EM algorithm.
#' @param parallel If \code{TRUE}, run DECENT in parallel (requires a registered foreach backend).
#'
#' @return A list containing estimates of the no-DE model: \code{est.pi0},
#'   \code{est.mu}, \code{est.disp}, \code{est.sf}, \code{CE}, the
#'   log-likelihood trace (\code{loglik}), per-gene log-likelihoods
#'   (\code{logl}), the Gauss-Quadrature object (\code{GQ}) and the per-cell
#'   tau parameters (\code{tau0}, \code{tau1}).
#' @examples
#'
#' @import MASS
#' @import ZIM
#' @import statmod
#' @import edgeR
#'
#' @export
fitNoDE <- function(data.obs, spikes, spike.conc, use.spikes, CE.range, tau.init, tau.global, tau.est,
                    normalize, GQ.approx, maxit, parallel)
{
  ncell <- ncol(data.obs)
  ngene <- nrow(data.obs)
  # No-DE model: every cell belongs to one cell type.
  cell.type <- rep(1, ncell)
  cell.type.names <- NULL
  ncelltype <- length(unique(cell.type))
  XW <- as.matrix(model.matrix(~cell.type)[,1])
  # Get capture efficiency. Calculate with spike-ins, if available;
  # If not, randomize and sort by libsize.
  if (use.spikes) {
    # CE = observed spike-in total / theoretical spike-in total per cell.
    capeff.spike <- apply(spikes, 2, sum)/sum(spike.conc)
    DO.coef <- matrix(0, ncell, 2)
    # Dropout model coefficients: intercept = logit(CE), slope left at 0.
    DO.coef[,1] <- log(capeff.spike/(1-capeff.spike))
    CE <- capeff.spike
  } else {
    obs.ls <- log10(colSums(data.obs))
    max.ls <- max(obs.ls)
    min.ls <- min(obs.ls)
    # generate rand.CE within CE.range but following the dist of obs.ls closely
    ls.wt <- (obs.ls-min.ls)/(max.ls-min.ls)
    rand.CE <- (1-ls.wt)*CE.range[1] + ls.wt*CE.range[2]
    CE <- rand.CE
    DO.coef <- matrix(0, ncell, 2)
    DO.coef[, 1] <- log(CE/(1-CE))
  }
  # Initialize size factor
  # (trimmed mean of CE-adjusted counts per cell, scaled to mean 1).
  data.obs.adj <- sweep(data.obs,2,CE,'/')
  est.sf <- apply(data.obs.adj, 2, mean, trim = 0.025)
  est.sf <- est.sf/mean(est.sf)
  # Initialize other ZINB parameters
  # NOTE(review): random initial values without a set.seed here, so repeated
  # runs start from different points -- confirm whether that is intended.
  est.disp <- rbeta(ngene, 0.1 ,0.6)
  est.pi0 <- matrix(0, ngene, ncelltype)
  # start with small pi0 (close to zero)
  est.pi0[, 1] <- rbeta(ngene, 1,9)
  if(ncelltype>1) {
    for (K in 2:ncelltype) {
      est.pi0[, K] <- est.pi0[, 1]
    }
  }
  est.mu <- matrix(0, ngene, ncelltype)
  # start with est.mu close to method of moments estimate
  est.mu[, 1] <- rowMeans( sweep(data.obs.adj,2, est.sf, '/' ))/(1-est.pi0[,1])
  #....new block of codes, june 7 2019
  # use MoM to get a reasonable starting value
  # Negative ZINB log-likelihood in the parameterization
  # p = (logit(pi0), log(mu), -log(size)); minimized per gene below.
  dlogZINB <- function(p,y,sf) {
    pi0 <- 1/(1+exp(-p[1]))
    mu <- exp(p[2])
    size<- exp(-p[3])
    -sum(ZIM::dzinb(y,omega=pi0,lambda=sf*mu,k=size,log=TRUE))
  }
  print('Finding starting values for EM algorithm...')
  # Per-gene ML fit of the ZINB to the observed counts (with effective size
  # factor sf*CE) to refine the starting values; on optimizer failure fall
  # back to the method-of-moments starting point.
  if (parallel) {
    temp <- foreach (i = 1:ngene, .combine = 'rbind', .packages = c('ZIM', 'DECENT')) %dopar% {
      out <- tryCatch(optim(par = c(0,log(mean(data.obs.adj[i,], na.rm=T)),0),
                            fn = dlogZINB, y = data.obs[i, ], sf = est.sf*CE,lower=-30),
                      error = function(e) {
                        list(p = c(0,log(mean(data.obs.adj[i,], na.rm=T)),0))
                      })
      new.pi0 <- rep(1/(1 + exp(-out$p[1])), ncelltype)
      new.mu <- exp(out$p[2])
      new.disp <- exp(out$p[length(out$p)])
      return(c(new.pi0, new.mu, new.disp))
    }
    est.pi0 <- as.matrix(temp[, 1:ncelltype])
    est.mu <- as.matrix(temp[, (ncelltype+1):(2*ncelltype)])
    est.disp <- temp[, 2*ncelltype+1]
  } else {
    for (i in 1:ngene) {
      out <- tryCatch(optim(par = c(0,log(mean(data.obs.adj[i,], na.rm=T)),0),
                            fn = dlogZINB, y = data.obs[i, ], sf = est.sf*CE,lower=-30),
                      error = function(e) {
                        list(p = c(0,log(mean(data.obs.adj[i,], na.rm=T)),0))
                      })
      est.pi0[i, ] <- rep(1/(1 + exp(-out$p[1])), ncelltype)
      est.mu[i, ] <- exp(out$p[2])
      est.disp[i] <- exp(out$p[length(out$p)])
    }
  }
  #...end of new block
  # Initialize other variables
  loglik.vec <- rep(0, maxit)
  data.imp <- data.obs   # E[Y | Z, expressed]: imputed pre-dropout counts
  PE <- matrix(0, ngene, ncell)  # P(expressed | observed count)
  # tau parameters: either one (intercept, slope) pair shared by all cells,
  # or one pair per cell.
  if (tau.global) {
    tau0 <- tau.init[1]; tau1 <- tau.init[2]
    tau.old <- c(tau0, tau1)
  } else{
    if (is.null(dim(tau.init))) {
      tau0 <- rep(tau.init[1],ncell)
      tau1 <- rep(tau.init[2],ncell)
    } else {
      if(dim(tau.init) != c(ncell, 2)) {
        stop('tau.init must be either a vector of 2 or a matrix with shape (#cells, 2)')
      }
      tau0 <- tau.init[, 1]; tau1 <- tau.init[, 2]
    }
    tau.old <- cbind(tau0,tau1)
  }
  tau.conv<- FALSE
  # Optional one-off tau estimation from spike-ins (simulated true counts
  # via Poisson, then ML fit of the compound beta-binomial model).
  if (tau.est == 'spikes') {
    if (tau.global) {
      set.seed(1)
      y.sim <- t(sapply(1:nrow(spikes), function(i) { rpois(ncell, spike.conc[i])}))
      est.p <- optim(par = c(tau0, tau1), f = cbbinom.logl, z = spikes, y = y.sim, prob = CE, c = spike.conc)$p
      tau0 <- est.p[1]; tau1 <- est.p[2]
    } else {
      est.p <- matrix(0, ncell, 2)
      for (j in 1:ncell) {
        set.seed(1)
        y.sim <- t(sapply(1:nrow(spikes), function(i) rpois(10, spike.conc[i])))
        est.p[j, ] <- optim(par = c(tau0[j], tau1[j]), f = cbbinom.logl, z = spikes[, j], y = y.sim,
                            prob = CE[j], c = spike.conc)$p
      }
      tau0 <- est.p[, 1]; tau1 <- est.p[, 2]
    }
  }
  iter <- 1
  converge <- FALSE
  # 16-point Gauss-Legendre rule reused by the GQ-approximated E-step.
  if (GQ.approx) gq <- gauss.quad(16, kind = 'legendre') else gq <- NULL
  message('No-DE model fitting started at ', Sys.time())
  # Begin EM algorithm
  for (iter in 1:maxit) {
    # E-step gene by gene: impute E[Y | Z] and P(expressed | Z), either
    # exactly (EstepByGene) or via Gauss-Quadrature (Estep2ByGene).
    # Non-finite imputations fall back to the observed counts.
    if (parallel) {
      if (!GQ.approx) {
        temp <- foreach (i = 1:ngene, .combine = 'rbind', .packages = c('DECENT')) %dopar% {
          out <- EstepByGene(par = DO.coef, z = data.obs[i, ], sf = est.sf,
                             pi0 = est.pi0[i, cell.type], mu = est.mu[i, cell.type], disp = est.disp[i])
          return(c(ifelse(is.na(out$EYZ0E1) | is.infinite(out$EYZ0E1),data.obs[i, ],out$EYZ0E1), 1 - out$PE0Z0))
        }
      } else {
        temp <- foreach (i = 1:ngene, .combine = 'rbind', .packages = c('MASS','ZIM', 'DECENT')) %dopar% {
          out <- Estep2ByGene(par = DO.coef,z = data.obs[i, ], sf = est.sf,
                              pi0 = est.pi0[i, cell.type], mu = est.mu[i, cell.type], disp = est.disp[i],
                              k = tau1, b = tau0, GQ.object = gq)
          return(c(ifelse(is.na(out$EYZ0E1) | is.infinite(out$EYZ0E1),data.obs[i, ],out$EYZ0E1), 1 - out$PE0Z0))
        }
      }
      data.imp <- temp[, 1:ncell]
      PE <- temp[, (ncell+1):(2*ncell)]
    } else {
      if (!GQ.approx) {
        for (i in 1:ngene) {
          # use E-step with expected value evaluated using GQ integral
          out <- EstepByGene(par = DO.coef, z = data.obs[i, ], sf = est.sf,
                             pi0 = est.pi0[i, cell.type], mu = est.mu[i, cell.type], disp = est.disp[i])
          data.imp[i, ] <- ifelse(is.na(out$EYZ0E1) | is.infinite(out$EYZ0E1),data.obs[i, ],out$EYZ0E1)
          PE[i, ]<- 1 - out$PE0Z0
        }
      } else {
        for (i in 1:ngene) {
          out <- Estep2ByGene(par = DO.coef,z = data.obs[i, ], sf = est.sf,
                              pi0 = est.pi0[i, cell.type], mu = est.mu[i, cell.type], disp = est.disp[i],
                              k = tau1, b = tau0, GQ.object = gq)
          data.imp[i, ] <- ifelse(is.na(out$EYZ0E1) | is.infinite(out$EYZ0E1), data.obs[i, ], out$EYZ0E1)
          PE[i, ] <- 1 - out$PE0Z0
        }
      }
    }
    # M-step 1: Update SF
    data.imp <- as.matrix(data.imp)
    # Expected count weighted by the probability of being expressed.
    data.imp2 <- data.imp*PE
    # M-step 2: Estimate SF by maximum-likelihood
    if (normalize == 'ML') {
      for (i in 1:ncell) {
        # p0: marginal probability of a zero under the current ZINB fit.
        p0 <- est.pi0[, cell.type[i]] +
          (1 - est.pi0[, cell.type[i]])*dnbinom(0, mu = est.sf[i]*est.mu[, cell.type[i]], size = 1/est.disp)
        w <- ((p0 - est.pi0[, cell.type[i]]*(1-PE[, i]))*(1 - est.pi0[, cell.type[i]]))/p0
        est.sf[i] <- sum(data.imp2[, i], na.rm=T)/sum(w, na.rm=T)
      }
    } else if (normalize == 'TMM') {
      tmm <- calcNormFactors(ifelse(is.na(data.imp2) | is.infinite(data.imp2), data.obs.adj, data.imp2))
      est.sf <- colSums(ifelse(is.na(data.imp2) | is.infinite(data.imp2), data.obs.adj, data.imp2))*tmm
    } else {
      stop('Normalization method should either be "ML" or "TMM"')
    }
    # Re-center size factors to mean 1 each iteration.
    est.sf <- est.sf/mean(est.sf)
    # M-step 3: Update pi_0, mu and phi, gene-by-gene
    # (only when imputation added mass, i.e. sum(imputed) > sum(observed);
    # otherwise the previous estimates are kept).
    loglik <- rep(0, ngene)
    if (parallel) {
      temp <- foreach (i = 1:ngene, .combine = 'rbind', .packages = c('ZIM', 'DECENT')) %dopar% {
        if (sum(data.imp[i, ])>sum(data.obs[i, ])) {
          # Clamp pi0 away from 0/1 before taking the logit starting value.
          prop0 <- ifelse(est.pi0[i, 1] < 0.01,
                          0.025, ifelse(est.pi0[i, 1] > 0.99, 0.975, est.pi0[i,1]))
          out <- tryCatch(optim(par = c(log(prop0/(1-prop0)), log(mean(data.imp[i, ], na.rm=T)), rep(0, ncelltype-1), -2),
                                fn = MstepNB, y = data.imp[i, ], sf = est.sf, status = PE[i, ], ct = cell.type,lower=-30),
                          #gr = zinbGrad, method = 'L-BFGS-B'),
                          error = function(e) {
                            list(p = c(log(prop0/(1-prop0)), log(mean(data.imp[i, ], na.rm=T)), rep(0, ncelltype-1), -2))
                          })
          new.pi0 <- rep(1/(1 + exp(-out$p[1])), ncelltype)
          new.mu <- exp(out$p[2])
          new.disp <- exp(out$p[length(out$p)])
          if(!GQ.approx){
            new.loglik <- -loglI(p = out$p, sf = est.sf, ct = cell.type, DO.par = DO.coef, z = data.obs[i, ])
          } else {
            # rho: beta-binomial dropout dispersion implied by tau, floored
            # at 1e-05 for numerical stability.
            rho <- 1/(1+exp(-tau0-tau1*log(est.mu[i]*(1-est.pi0[i]))))
            rho <- ifelse(rho<1e-05,1e-05,rho)
            new.loglik <- -loglI.GQ(p=out$p, z=data.obs[i,], sf = est.sf, XW=XW, DO.par=DO.coef,rho=rho, GQ.object=gq)
          }
          return(c(new.pi0, new.mu, new.disp, new.loglik))
        } else {
          return(c(est.pi0[i, ], est.mu[i, ], est.disp[i], loglik[i]))
        }
      }
      est.pi0 <- as.matrix(temp[, 1:ncelltype])
      est.mu <- as.matrix(temp[, (ncelltype+1):(2*ncelltype)])
      est.disp <- temp[, 2*ncelltype+1]
      loglik <- temp[, 2*ncelltype+2]
    } else {
      for (i in 1:ngene) {
        if (sum(data.imp[i, ])>sum(data.obs[i, ])) {
          prop0 <- ifelse(est.pi0[i, 1] < 0.01,
                          0.025, ifelse(est.pi0[i, 1] > 0.99, 0.975, est.pi0[i,1]))
          out <- tryCatch(optim(par = c(log(prop0/(1-prop0)), log(mean(data.imp[i, ], na.rm=T)), rep(0, ncelltype-1), -2),
                                fn = MstepNB, y = data.imp[i, ], sf = est.sf, status = PE[i, ], ct = cell.type,lower=-30),
                          #gr = zinbGrad, method = 'L-BFGS-B')
                          error = function(e) {
                            list(p = c(log(prop0/(1-prop0)), log(mean(data.imp[i, ], na.rm=T)), rep(0, ncelltype-1), -2))
                          })
          est.pi0[i, ] <- rep(1/(1 + exp(-out$p[1])), ncelltype)
          est.mu[i, ] <- exp(out$p[2])
          est.disp[i] <- exp(out$p[length(out$p)])
          if (!GQ.approx) {
            loglik[i] <- -loglI(p = out$p, sf = est.sf, ct = cell.type, DO.par = DO.coef, z = data.obs[i, ])
          } else {
            loglik[i] <- -loglI2(p = out$p, sf = est.sf, ct = cell.type, DO.par = DO.coef, k = tau1, b = tau0,
                                 z = data.obs[i, ], GQ.object = gq)
          }
        }
      }
    }
    # update tau1 and tau0 when no-spikeins: NOTE this code is not parallelized yet and will only update (k,b) until (k,b) converges
    # NOTE(review): both inner branches use %dopar%, so a foreach backend is
    # assumed here even when parallel = FALSE -- confirm.
    if(!tau.conv & tau.est=='endo') {
      if(tau.global) {
        logit.rho <- foreach (i = 1:ngene, .combine = 'rbind', .packages = c('DECENT')) %dopar% {
          out.rho <- optim(p=-2,fn=update.rho,x=data.obs[i,],sf=est.sf,size=mean(data.imp2[i,],na.rm=T),CE=CE,method='Brent',upper=10,lower=-10)
          out.rho$p
        }
        # Regress per-gene logit(rho) on log mean expression to refresh tau.
        data.rho <- data.frame(y=logit.rho,x=log(rowMeans(data.imp2,na.rm=T)),w=rowMeans(data.imp2,na.rm=T))
        rho.model <- glm(y ~ x,data=data.rho)
        # weighted est is better when number of genes is small
        if(ngene<=5000)
          rho.model <- glm(y ~ x,weights=w, data=data.rho)
        tau.old <- c(tau0, tau1)
        tau.new <- coef(rho.model)
        tau0 <- tau.new[1] ; tau1 <- tau.new[2]
        tau.reltol <- sum( abs(tau.old-tau.new)/abs(tau.old) )
        tau.conv <- ifelse(tau.reltol< 1e-04, TRUE,FALSE)
      } else {
        tau.old <- cbind(tau0,tau1)
        tau.new <- foreach (i = 1:ncell, .combine = 'rbind', .packages = c('DECENT')) %dopar% {
          #print(i)
          #size.bb <- rowMeans(data.imp2,na.rm=TRUE)*est.sf[i]
          size.bb <- ifelse(!is.na(data.imp2[,i]) & is.finite(data.imp2[,i]),data.imp2[,i],data.obs.adj[,i])
          out.tau <- optim(p=tau.old[i,],fn=update.rho3,z=data.obs[,i], size=size.bb,CE=rep(CE[i],ngene))
          out.tau$p
        }
        tau0 <- tau.new[, 1]; tau1 <- tau.new[, 2]
        tau.reltol <- apply( abs(tau.new-tau.old)/abs(tau.old), 2, mean)
        tau.conv <- ifelse(any(tau.reltol > 1e-04), FALSE,TRUE)
      }
    }
    loglik.vec[iter] <- sum(loglik)
    message('EM iteration ', iter, ' finished at ', Sys.time(), ' Log-likelihood: ', loglik.vec[iter])
    # Declare convergence after at least 5 iterations once the relative
    # log-likelihood improvement drops below 1e-03 (or maxit is reached).
    if (iter > 5) {
      if ( (loglik.vec[iter] - loglik.vec[iter-1])/abs(loglik.vec[iter-1]) < 1e-03 | iter == maxit ) converge <- TRUE
    }
    if (converge) {
      # NOT CALCULATING SE
      break
    }
  } # end of EM loop
  message('No-DE model fitting finished at ', Sys.time())
  # Output
  # Label estimates with gene/cell names before returning.
  rownames(est.mu) <- rownames(data.obs)
  rownames(est.pi0) <- rownames(data.obs)
  names(est.disp) <- rownames(data.obs)
  names(est.sf) <- colnames(data.obs)
  if (!is.null(cell.type.names)){
    colnames(est.mu) <- cell.type.names
    colnames(est.pi0) <- cell.type.names
  }
  output <- list()
  output[['est.pi0']] <- est.pi0
  output[['est.mu']] <- est.mu
  output[['est.disp']] <- est.disp
  output[['est.sf']] <- est.sf
  output[['CE']] <- CE
  output[['loglik']] <- loglik.vec[1:iter]
  output[['logl']] <- loglik
  output[['GQ']] <- gq
  # Always return per-cell tau vectors, replicating the global pair if needed.
  if(tau.global) {
    output[['tau1']] <- rep(tau1, ncell)
    output[['tau0']] <- rep(tau0, ncell)
  } else {
    output[['tau1']] <- tau1
    output[['tau0']] <- tau0
  }
  return(output)
}
|
efbefb3b51835e14dcb7552f77452e33a4e06cad
|
bc7cb0d6281727d4283b8635143cec8e1c864287
|
/R/enr.R
|
879c7198f3503db6e35fb305705084af70d1d24b
|
[] |
no_license
|
josue-rodriguez/GGMnonreg
|
e8cfb3e1db1c96c226e91705642227cf4a3ee7eb
|
33ecd010df57525411261a08005a4d2f946327d3
|
refs/heads/master
| 2021-01-02T00:30:37.231210
| 2020-01-27T20:45:52
| 2020-01-27T20:45:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,243
|
r
|
enr.R
|
#' Expected Network Replicability
#'
#' @param net true network (partial correlation matrix; zero entries are
#'   treated as absent edges)
#' @param n sample size
#' @param replications number of networks
#' @param type pearson or spearman
#'
#' @return An object of class \code{enr}: a list with the average
#'   replicability (\code{ave_pwr}), the cumulative probabilities
#'   (\code{cdf}), per-edge success probabilities (\code{p_s}), the number
#'   of nodes (\code{p}) and nonzero edges (\code{n_nonzero}), plus the
#'   inputs \code{n}, \code{replications}, \code{var_pwr} and \code{type}.
#' @export
#'
#' @examples
#' # correlations
#' cors <- cor(GGMnonreg::ptsd)
#'
#' # inverse
#' inv <- solve(cors)
#'
#' # partials
#' pcors <- -cov2cor(inv)
#'
#' # set values to zero
#' pcors <- ifelse(abs(pcors) < 0.05, 0, pcors )
#'
#' enr(net = pcors, n = 500, replications = 2)
enr <- function(net, n, replications, type = "pearson"){
  # variables
  p <- ncol(net)
  # control variables
  c <- p - 2
  # off diagonals
  pcs <- net[upper.tri(net)]
  # which are edges
  which_nonzero <- which(pcs != 0)
  # how many non zeroes
  n_nonzero <- length(which_nonzero)
  # deciles of the edge indices, used as thresholds for the CDF below
  qs <- round( quantile(1:length(which_nonzero),
                        probs = seq(0, 0.9, 0.1)))
  # probability of success
  # (per-edge power raised to the number of replications, i.e. probability
  # an edge is detected in every one of the `replications` networks)
  p_s <- power_z(pcs[which_nonzero],
                 n = n,
                 c = c,
                 type = type)^replications
  # average power
  ave_pwr <- mean(p_s)
  #var
  # variance of the Poisson-binomial count of replicated edges
  var_pwr <- sum((1 - p_s) * p_s)
  # CDF greater
  # (probability that MORE than each decile of edges replicates)
  cdf <- 1 - poibin::ppoibin(qs, p_s)
  returned_object <- list(ave_pwr = ave_pwr,
                          cdf = cdf,
                          p_s = p_s,
                          p = p,
                          n_nonzero = n_nonzero,
                          n = n,
                          replications = replications,
                          var_pwr = var_pwr,
                          type = type)
  class(returned_object) <- "enr"
  returned_object
}
#' Print Method for \code{enr} Objects
#'
#' @param x object of class \code{enr}
#' @param ... currently ignored
#'
#' @return No return value; called for its side effect of printing a
#'   formatted replicability summary to the console.
#' @export
print.enr <- function(x,...){
  # Header: model description and inputs.
  cat("GGMnonreg: Nonregularized GGMs \n")
  cat("Method: Expected Network Replicability \n")
  cat("Nodes:", x$p, "\n")
  cat("Networks:", x$replications, "\n")
  cat("Sample Size:", x$n, "\n")
  cat("Number of Edges:", x$n_nonzero, "\n")
  cat("----\n\n")
  # Summary statistics: mean replicability and expected replicated edges
  # (SD from the Poisson-binomial variance).
  cat("Average Replicability:", round(x$ave_pwr, 2), "\n")
  cat("Average Number of Edges:",
      round(round(x$ave_pwr, 2) * x$n_nonzero),
      paste0( "(SD = ", round(sqrt(x$var_pwr), 2), ")"), "\n\n")
  cat("----\n\n")
  cat("Cumulative Probability:" , "\n\n")
  # Table: probability that more than each decile of edges replicates.
  dat <- data.frame(prop = seq(0, .90, .10),
                    round(quantile(1:x$n_nonzero,
                                   probs = seq(0, 0.9, 0.1))),
                    prob = round(x$cdf, 2))
  colnames(dat) <- c("Proportion", "Edges", "Probability")
  print(dat, row.names = F, right = T)
  cat("----\n")
  cat(paste0( "note: \nProbability that more edges than corresponding proportion \n",
              "and number of edges are replicated \n"))
}
#' Summary Method for \code{enr} Objects
#'
#' Delegates to the \code{print} method, so \code{summary()} and
#' \code{print()} produce identical output for \code{enr} objects.
#'
#' @param object object of class \code{enr}
#' @param ... currently ignored
#'
#' @return The value of \code{print(object)}; called for its printed output.
#' @export
summary.enr <- function(object, ...) print(object)
#' Plot \code{enr} Objects
#'
#' @param x Object of class \code{enr}
#' @param iter number of samples
#' @param fill fill color for density
#' @param alpha transparency
#' @param ... currently ignored
#'
#' @return A \code{ggplot} object: a density of the simulated number of
#'   replicated edges, with a dotted line at the simulated mean and the
#'   x-axis labeled as a percentage of all edges.
#' @export
#' @examples
#' # correlations
#' cors <- cor(GGMnonreg::ptsd)
#'
#' # inverse
#' inv <- solve(cors)
#'
#' # partials
#' pcors <- -cov2cor(inv)
#'
#' # set values to zero
#' pcors <- ifelse(abs(pcors) < 0.05, 0, pcors )
#'
#' est <- enr(net = pcors, n = 500, replications = 2)
#'
#' # plot
#' plot(est)
plot.enr <- function(x, iter = 100000,
                     fill = "#009E73",
                     alpha = 0.5, ...){
  # random variable
  # (draws from the Poisson-binomial with per-edge success probs x$p_s)
  y <- poibin::rpoibin(iter, pp = x$p_s)
  # data frame
  dat <- data.frame(y = y)
  # plot
  ggplot(dat, aes(x = y )) +
    geom_vline(xintercept = mean(y),
               linetype = "dotted") +
    geom_density(adjust = 2,
                 fill = fill, alpha = alpha) +
    # relabel edge counts as percentages of the total number of edges
    scale_x_continuous(limits = c(min(y), max(y) ),
                       breaks = seq(min(y), max(y), length.out = 5),
                       labels = paste0(round(seq(min(y), max(y),
                                                 length.out = 5) / x$n_nonzero, 2)*100, "%")) +
    xlab("Replicated Edges") +
    scale_y_continuous(expand = c(0,0))
}
|
d85efece125b4f638de91199b0000e280e9760f7
|
2f59a3fae7e935fe8106efa13d89cd04fdbaf4fc
|
/ggplot2-2-variables-to-visuals.R
|
071474762e4cffe1bd1a744d8270a90f888b6866
|
[] |
no_license
|
jyuill/proj-datacamp-ggplot2
|
e88505ef8522a599149fa013e303f7a61f5ae4de
|
731827676d75edb25651dbe0ce4a88d1ab88e69e
|
refs/heads/master
| 2020-12-03T03:31:21.273574
| 2018-01-14T23:33:47
| 2018-01-14T23:33:47
| 58,614,209
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 427
|
r
|
ggplot2-2-variables-to-visuals.R
|
# Inspect the built-in iris data set.
str(iris)
head(iris)
library(tidyr)
# Tidy/long format: one row per (Species, flower Part, Measure) value.
# NOTE(review): `%>%` comes from magrittr; this relies on tidyr (or an
# already-attached dplyr) re-exporting it -- confirm before running standalone.
iris.tidy <- iris %>%
  gather(key, Value, -Species ) %>%
  separate(key, c("Part","Measure"),"\\.")
library(ggplot2)
# Jittered measurement values by species, colored by flower part and
# faceted by measure (Length vs Width).
ggplot(iris.tidy, aes(x = Species, y = Value, col = Part)) +
  geom_jitter() +
  facet_grid(. ~ Measure)
# Add a per-flower id so observations stay matched through the reshape.
iris$Flower <- 1:nrow(iris)
# Wide format: one row per (flower, part), with Length and Width as columns.
iris.wide <- iris %>%
  gather(key, value, -Species,-Flower) %>%
  separate(key, c("Part","Measure"),"\\.") %>%
  spread(Measure, value)
|
15a18a3dd7d81e3ae6982343e4dbfd6f6a7dca72
|
66a2afd9c0dab1d55e6d236f3d85bc1b61a11a66
|
/R/utils-xml.R
|
08b9d9b9901c7c40bd7093a265a3a7fc27d9af29
|
[
"MIT"
] |
permissive
|
StevenMMortimer/salesforcer
|
833b09465925fb3f1be8da3179e648d4009c69a9
|
a1e1e9cd0aa4e4fe99c7acd3fcde566076dac732
|
refs/heads/main
| 2023-07-23T16:39:15.632082
| 2022-03-02T15:52:59
| 2022-03-02T15:52:59
| 94,126,513
| 91
| 19
|
NOASSERTION
| 2023-07-14T05:19:53
| 2017-06-12T18:14:00
|
R
|
UTF-8
|
R
| false
| false
| 17,346
|
r
|
utils-xml.R
|
#' xmlToList2
#'
#' This function is an early and simple approach to converting an
#' XML node or document into a more typical R list containing the data values.
#' It differs from xmlToList by not including attributes at all in the output.
#'
#' @importFrom XML xmlApply xmlSApply xmlValue xmlAttrs xmlParse xmlSize xmlRoot
#' @param node the XML node or document to be converted to an R list
#' @return \code{list} parsed from the supplied node
#' @note This function is meant to be used internally. Only use when debugging.
#' @keywords internal
#' @export
xmlToList2 <- function(node){
  if (is.character(node)) {
    node <- xmlParse(node)
  }
  if (inherits(node, "XMLAbstractDocument")) {
    node <- xmlRoot(node)
  }
  if (any(inherits(node, c("XMLTextNode", "XMLInternalTextNode")))) {
    # text leaf: return its character value
    xmlValue(node)
  } else if (xmlSize(node) == 0) {
    # childless element: decide based on its attributes
    x <- xmlAttrs(node)
    if(length(names(x)) == 0){
      # no attributes at all -> treat as missing
      NA
    } else if(length(x) == 1 && names(x) == "xsi:nil" && x == "true"){
      # a lone xsi:nil="true" attribute marks an explicit null value.
      # The length(x) == 1 guard fixes the original vectorized `&`
      # comparison, which made `if` error ("condition has length > 1",
      # R >= 4.2) whenever a leaf node carried multiple attributes.
      NA
    } else {
      x
    }
  } else {
    # element with children: recurse, then unwrap single text children
    if (is.list(node)) {
      tmp = vals = xmlSApply(node, xmlToList2)
      tt = xmlSApply(node, inherits, c("XMLTextNode", "XMLInternalTextNode"))
    }
    else {
      tmp = vals = xmlApply(node, xmlToList2)
      tt = xmlSApply(node, inherits, c("XMLTextNode", "XMLInternalTextNode"))
    }
    vals[tt] = lapply(vals[tt], function(x) x[[1]])
    if (any(tt) && length(vals) == 1) {
      vals[[1]]
    } else {
      vals
    }
  }
}
#' xml_nodeset_to_df
#'
#' A function specifically for parsing an XML node into a \code{data.frame}
#' with exactly one row; multi-valued or nested entries become list-columns.
#'
#' @importFrom dplyr as_tibble
#' @importFrom purrr modify_if
#' @importFrom utils capture.output
#' @param this_node \code{xml_node}; to be parsed out
#' @return \code{tbl_df} parsed from the supplied XML
#' @note This function is meant to be used internally. Only use when debugging.
#' @keywords internal
#' @export
xml_nodeset_to_df <- function(this_node){
  # capture any xmlToList grumblings about Namespace prefix
  # (printed warnings would otherwise leak to the console)
  invisible(capture.output(node_vals <- xmlToList2(as.character(this_node))))
  # replace any NULL list elements with NA so it can be turned into a tbl_df
  node_vals[sapply(node_vals, is.null)] <- NA
  # remove any duplicated named node elements
  # (keeps the first occurrence of each name)
  node_vals <- node_vals[unique(names(node_vals))]
  # make things tidy so if it's a nested list then that is one row still
  # suppressWarning about tibble::enframe
  # (entries longer than 1 or themselves lists are wrapped as list-columns)
  suppressWarnings(res <- as_tibble(modify_if(node_vals, ~(length(.x) > 1 | is.list(.x)), list),
                                    .name_repair = "minimal"))
  return(res)
}
#' Make SOAP XML Request Skeleton
#'
#' Create XML in preparation for sending to the SOAP API: a
#' \code{soapenv:Envelope} with a \code{soapenv:Header} carrying the current
#' session id and any requested SOAP headers.
#'
#' @importFrom XML newXMLNode xmlValue<-
#' @param soap_headers \code{list}; any number of SOAP headers
#' @param metadata_ns \code{logical}; an indicator of whether to use the namespaces
#' required by the Metadata API or the default ones.
#' @return \code{xmlNode}; an XML object containing just the header portion of the
#' request
#' @references \url{https://developer.salesforce.com/docs/atlas.en-us.api.meta/api/soap_headers.htm}
#' @note This function is meant to be used internally. Only use when debugging.
#' Any of the following SOAP headers are allowed:
#' \itemize{
#'    \item AllOrNoneHeader
#'    \item AllowFieldTruncationHeader
#'    \item AssignmentRuleHeader
#'    \item CallOptions
#'    \item DisableFeedTrackingHeader
#'    \item EmailHeader
#'    \item LimitInfoHeader
#'    \item LocaleOptions
#'    \item LoginScopeHeader
#'    \item MruHeader
#'    \item OwnerChangeOptions
#'    \item PackageVersionHeader
#'    \item QueryOptions
#'    \item UserTerritoryDeleteHeader
#' }
#' Additionally, Bulk API can't access or query compound address or compound geolocation fields.
#' @references \url{https://developer.salesforce.com/docs/atlas.en-us.api_asynch.meta/api_asynch}
#' @keywords internal
#' @export
make_soap_xml_skeleton <- function(soap_headers=list(), metadata_ns=FALSE){
  # Requires an authenticated session (errors otherwise).
  sf_auth_check()
  # Namespace set differs between the Metadata API and the Partner API.
  if(metadata_ns){
    these_ns = c("soapenv" = "http://schemas.xmlsoap.org/soap/envelope/",
                 "xsi" = "http://www.w3.org/2001/XMLSchema-instance",
                 "ns1" = "http://soap.sforce.com/2006/04/metadata")
    ns_prefix <- "ns1"
  } else {
    these_ns <- c("soapenv" = "http://schemas.xmlsoap.org/soap/envelope/",
                  "xsi" = "http://www.w3.org/2001/XMLSchema-instance",
                  "urn" = "urn:partner.soap.sforce.com",
                  "urn1" = "urn:sobject.partner.soap.sforce.com")
    ns_prefix <- "urn"
  }
  # Envelope > Header > SessionHeader > sessionId skeleton.
  root <- newXMLNode("soapenv:Envelope", namespaceDefinitions = these_ns)
  header_node <- newXMLNode("soapenv:Header", parent=root)
  sheader_node <- newXMLNode(paste0(ns_prefix, ":", "SessionHeader"),
                             parent=header_node)
  #namespaceDefinitions = c(""))
  # get the current session id
  # (prefer the OAuth access token; fall back to a basic session id)
  this_session_id <- sf_access_token()
  if(is.null(this_session_id)){
    this_session_id <- sf_session_id()
  }
  if(is.null(this_session_id)){
    stop("Could not find a session id in the environment. Try reauthenticating with sf_auth().")
  }
  sid_node <- newXMLNode(paste0(ns_prefix, ":", "sessionId"),
                         this_session_id,
                         parent=sheader_node)
  # Append one child node per requested SOAP header, with its named
  # options as grandchildren.
  if(length(soap_headers)>0){
    for(i in 1:length(soap_headers)){
      option_name <- names(soap_headers)[i]
      opt_node <- newXMLNode(paste0(ns_prefix, ":", option_name), parent=header_node)
      # process OwnerChangeOptions differently because it can be a list of multiple
      # different options all put under the OwnerChangeOptions node
      if (option_name == "OwnerChangeOptions"){
        options_spec <- soap_headers[[i]]$options
        for(j in 1:length(options_spec)){
          this_node <- newXMLNode(paste0(ns_prefix, ":", "options"), parent=opt_node)
          for(k in 1:length(options_spec[[j]])){
            this_node2 <- newXMLNode(paste0(ns_prefix, ":", names(options_spec[[j]])[k]),
                                     as.character(options_spec[[j]][[k]]),
                                     parent=this_node)
          }
        }
      } else {
        for(j in 1:length(soap_headers[[i]])){
          this_node <- newXMLNode(paste0(ns_prefix, ":", names(soap_headers[[i]])[j]),
                                  as.character(soap_headers[[i]][[j]]),
                                  parent=opt_node)
        }
      }
    }
  }
  return(root)
}
#' Build XML Request Body
#'
#' Parse data into XML format
#'
#' @importFrom XML newXMLNode xmlValue<-
#' @param input_data a \code{data.frame} of data to fill the XML body
#' @template operation
#' @template object_name
#' @param fields \code{character}; one or more strings indicating the fields to
#' be returned on the records
#' @template external_id_fieldname
#' @param root_name \code{character}; the name of the root node if created
#' @param ns named vector; a collection of character strings indicating the namespace
#' definitions of the root node if created
#' @param root \code{xmlNode}; an XML node to be used as the root
#' @return \code{xmlNode}; an XML node with the complete XML built using the root
#' and the input data in the format needed for the operation.
#' @note This function is meant to be used internally. Only use when debugging.
#' @keywords internal
#' @export
build_soap_xml_from_list <- function(input_data,
                                     operation = c("create", "retrieve",
                                                   "update", "upsert",
                                                   "delete", "undelete", "emptyRecycleBin",
                                                   "getDeleted", "getUpdated",
                                                   "search", "query", "queryMore",
                                                   "convertLead", "merge", "describeSObjects",
                                                   "setPassword", "resetPassword",
                                                   "findDuplicates", "findDuplicatesByIds"),
                                     object_name = NULL,
                                     fields = NULL,
                                     external_id_fieldname = NULL,
                                     root_name = NULL,
                                     ns = character(0),
                                     root = NULL){
  # ensure that if root is NULL that root_name is not also NULL
  # this is so we have something to create the root node
  stopifnot(!is.null(root_name) | !is.null(root))
  which_operation <- match.arg(operation)
  input_data <- sf_input_data_validation(input_data, operation = which_operation)

  if (is.null(root)) {
    root <- newXMLNode(root_name, namespaceDefinitions = ns)
  }

  body_node <- newXMLNode("soapenv:Body", parent = root)
  operation_node <- newXMLNode(sprintf("urn:%s", which_operation), parent = body_node)

  if (which_operation == "upsert") {
    # upsert needs the external id field used to match against existing records
    stopifnot(!is.null(external_id_fieldname))
    newXMLNode("urn:externalIDFieldName", external_id_fieldname,
               parent = operation_node)
  }

  if (which_operation == "retrieve") {
    stopifnot(!is.null(object_name))
    stopifnot(!is.null(fields))
    newXMLNode("urn:fieldList", paste0(fields, collapse = ","),
               parent = operation_node)
    newXMLNode("urn:sObjectType", object_name,
               parent = operation_node)
  }

  if (which_operation %in% c("getDeleted", "getUpdated")) {
    stopifnot(!is.null(object_name))
    newXMLNode("sObjectTypeEntityType", object_name, parent = operation_node)
    newXMLNode("startDate", input_data$start, parent = operation_node)
    newXMLNode("endDate", input_data$end, parent = operation_node)
  } else if (which_operation %in% c("search", "query")) {
    element_name <- if (which_operation == "search") "urn:searchString" else "urn:queryString"
    newXMLNode(element_name, input_data[1, 1], parent = operation_node)
  } else if (which_operation == "queryMore") {
    newXMLNode("urn:queryLocator", input_data[1, 1], parent = operation_node)
  } else if (which_operation %in% c("delete", "undelete", "emptyRecycleBin",
                                    "retrieve", "findDuplicatesByIds")) {
    # one <ids> element per record id
    for (i in seq_len(nrow(input_data))) {
      newXMLNode("urn:ids", input_data[i, "Id"], parent = operation_node)
    }
  } else if (which_operation == "merge") {
    stopifnot(!is.null(object_name))
    merge_request_node <- newXMLNode('mergeRequest',
                                     attrs = c(`xsi:type` = 'MergeRequest'),
                                     suppressNamespaceWarning = TRUE,
                                     parent = operation_node)
    master_record_node <- newXMLNode("masterRecord",
                                     attrs = c(`xsi:type` = object_name),
                                     suppressNamespaceWarning = TRUE,
                                     parent = merge_request_node)
    # field values that should win on the surviving (master) record
    for (i in seq_along(input_data$master_fields)) {
      this_node <- newXMLNode(names(input_data$master_fields)[i], parent = master_record_node)
      xmlValue(this_node) <- input_data$master_fields[i]
    }
    # ids of the records to be merged into the master record
    for (i in seq_along(input_data$victim_ids)) {
      this_node <- newXMLNode("recordToMergeIds", parent = merge_request_node)
      xmlValue(this_node) <- input_data$victim_ids[i]
    }
  } else if (which_operation == "describeSObjects") {
    for (i in seq_len(nrow(input_data))) {
      newXMLNode("urn:sObjectType", input_data[i, "sObjectType"], parent = operation_node)
    }
  } else if (which_operation == "setPassword") {
    newXMLNode("userId", input_data$userId, parent = operation_node)
    newXMLNode("password", input_data$password, parent = operation_node)
  } else if (which_operation == "resetPassword") {
    newXMLNode("userId", input_data$userId, parent = operation_node)
  } else {
    # create/update/upsert/convertLead/findDuplicates: one XML block per row
    for (i in seq_len(nrow(input_data))) {
      # `row_fields` replaces a local formerly named `list` (shadowed base::list)
      row_fields <- as.list(input_data[i, , drop = FALSE])
      if (which_operation == "convertLead") {
        this_row_node <- newXMLNode("urn:LeadConvert", parent = operation_node)
      } else {
        this_row_node <- newXMLNode("urn:sObjects", parent = operation_node)
        # if the body elements are objects we must list the type of object_name
        # under each block of XML for the row
        type_node <- newXMLNode("urn1:type", parent = this_row_node)
        xmlValue(type_node) <- object_name
      }
      # distinct index `j` so the row index `i` is not shadowed (the original
      # reused `i` here)
      for (j in seq_along(row_fields)) {
        if (typeof(row_fields[[j]]) == "list") {
          # nested list value (e.g. compound field): recurse under a child node
          this_node <- newXMLNode(names(row_fields)[j], parent = this_row_node)
          build_soap_xml_from_list(row_fields[[j]],
                                   operation = operation,
                                   object_name = object_name,
                                   external_id_fieldname = external_id_fieldname,
                                   root = this_node)
        } else if (!is.null(row_fields[[j]])) {
          if (is.na(row_fields[[j]])) {
            # NA signals that the field should be cleared on the server
            this_node <- newXMLNode("fieldsToNull", parent = this_row_node)
            xmlValue(this_node) <- names(row_fields)[j]
          } else {
            this_node <- newXMLNode(names(row_fields)[j], parent = this_row_node)
            xmlValue(this_node) <- row_fields[[j]]
          }
        }
      }
    }
  }
  return(root)
}
#' Metadata List to XML Converter
#'
#' This function converts a list of metadata to XML
#'
#' @concept metadata salesforce api
#' @importFrom XML newXMLNode xmlValue<-
#' @param input_data XML document serving as the basis upon which to add the list
#' @param metatype a character indicating the element name of each record in the list
#' @param root_name \code{character}; the name of the root node if created
#' @param ns named vector; a collection of character strings indicating the namespace
#' definitions of the root node if created
#' @param root \code{xmlNode}; an XML node to be used as the root
#' @return \code{xmlNode}; an XML node with the input data added as needed for the
#' Metadata API and its objects.
#' @note This function is meant to be used internally. Only use when debugging.
#' @keywords internal
#' @export
build_metadata_xml_from_list <- function(input_data,
                                         metatype = NULL,
                                         root_name = NULL,
                                         ns = c(character(0)),
                                         root = NULL){
  # ensure we have either an existing root node or a name to create one from
  stopifnot(!is.null(root_name) | !is.null(root))
  if (is.null(root)) {
    root <- newXMLNode(root_name, namespaceDefinitions = ns)
  }
  # seq_along() is safe for empty input; 1:length() would yield c(1, 0) and fail
  for (i in seq_along(input_data)) {
    if (!is.null(metatype)) {
      # top-level records become generic "Metadata" nodes typed via xsi:type
      this <- newXMLNode("Metadata", attrs = c(`xsi:type` = paste0("ns2:", metatype)), parent = root,
                         namespaceDefinitions = c("ns2" = "http://soap.sforce.com/2006/04/metadata"),
                         suppressNamespaceWarning = TRUE)
    } else {
      this <- newXMLNode(names(input_data)[i], parent = root,
                         suppressNamespaceWarning = TRUE)
    }
    if (typeof(input_data[[i]]) == "list") {
      # nested list: recurse to build the child subtree (children are untyped)
      build_metadata_xml_from_list(input_data = input_data[[i]], root = this, metatype = NULL)
    } else {
      xmlValue(this) <- input_data[[i]]
    }
  }
  return(root)
}
#' Bulk Binary Attachments Manifest List to XML Converter
#'
#' This function converts a list of data for binary attachments to XML
#'
#' @importFrom XML newXMLNode xmlValue<-
#' @param input_data \code{list}; data to be appended
#' @param root \code{xmlNode}; an XML node to be used as the root
#' @return \code{xmlNode}; an XML node constructed into a manifest data required
#' by the Bulk APIs for handling binary attachment data.
#' @references \url{https://developer.salesforce.com/docs/atlas.en-us.api_asynch.meta/api_asynch/binary_create_request_file.htm}
#' @note This function is meant to be used internally. Only use when debugging.
#' @keywords internal
#' @export
build_manifest_xml_from_list <- function(input_data, root = NULL){
  stopifnot(is.list(input_data))
  if (is.null(root)) {
    root <- newXMLNode("sObjects",
                       namespaceDefinitions = c("http://www.force.com/2009/06/asyncapi/dataload",
                                                "xsi" = "http://www.w3.org/2001/XMLSchema-instance"))
  }
  # seq_along() is safe for empty input; 1:length() would yield c(1, 0) and fail
  for (i in seq_along(input_data)) {
    this <- newXMLNode(names(input_data)[i], parent = root, suppressNamespaceWarning = TRUE)
    if (typeof(input_data[[i]]) == "list") {
      # nested list: recurse to build the child subtree
      build_manifest_xml_from_list(input_data = input_data[[i]], root = this)
    } else {
      xmlValue(this) <- input_data[[i]]
    }
  }
  return(root)
}
|
e81c799b36ba8d9002de82c2617d5c1523934bd4
|
ed20192f1e34c6f1fcead837c69373e79cb65d7f
|
/R/sem.R
|
c05da66da84a1dd6e131b11f3b42ba341ca2f09f
|
[] |
no_license
|
TimKDJ/jaspSem
|
a836069b23c50f7ce9f960cd04b0043be141d9d5
|
4f18040959c7cbe369dbf74536631d60481c16ce
|
refs/heads/master
| 2023-05-02T05:36:48.366958
| 2021-05-19T12:07:06
| 2021-05-19T12:10:31
| 287,003,140
| 0
| 0
| null | 2020-08-12T12:04:04
| 2020-08-12T12:04:03
| null |
UTF-8
|
R
| false
| false
| 66,029
|
r
|
sem.R
|
#
# Copyright (C) 2013-2020 University of Amsterdam
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Entry point for the JASP SEM analysis: prepares options, reads data, runs
# validation, then fills each output element (tables/plots) in turn.
SEM <- function(jaspResults, dataset, options, ...) {
  jaspResults$addCitation("Rosseel, Y. (2012). lavaan: An R Package for Structural Equation Modeling. Journal of Statistical Software, 48(2), 1-36. URL http://www.jstatsoft.org/v48/i02/")

  # Read dataset
  options <- .semPrepOpts(options)
  # TODO: don't read data if we aren't ready anyway...
  dataset <- .semReadData(dataset, options)
  ready <- .semIsReady(dataset, options)

  modelContainer <- .semModelContainer(jaspResults)

  # check for errors
  .semCheckErrors(dataset, options, ready, modelContainer)

  # Output functions
  .semFitTab(jaspResults, modelContainer, dataset, options, ready)
  .semParameters(modelContainer, dataset, options, ready)
  .semAdditionalFits(modelContainer, dataset, options, ready)
  .semRsquared(modelContainer, dataset, options, ready)
  .semMardiasCoefficient(modelContainer, dataset, options, ready)
  .semCov(modelContainer, dataset, options, ready)
  # fixed: was `.semMI(modelContainer, datset, options, ready)` — `datset`
  # referenced a non-existent object
  .semMI(modelContainer, dataset, options, ready)
  .semPathPlot(modelContainer, dataset, options, ready)
}
# helper functions
.semPrepOpts <- function(options) {
  # Backwards compatibility shim after changes to bouncontrollavaantextarea.cpp:
  # flatten each model's nested second element into the top level and rename
  # its "model" entry to "syntax".
  flatten_model <- function(m) {
    flat <- c(m[1], m[[2]])
    names(flat)[names(flat) == "model"] <- "syntax"
    flat
  }
  models <- lapply(options[["models"]], flatten_model)
  # drop models whose syntax is empty
  keep <- vapply(models, function(m) m[["syntax"]] != "", logical(1))
  options[["models"]] <- models[keep]
  options
}
.semReadData <- function(dataset, options) {
  # A dataset passed in by the caller is used as-is.
  if (!is.null(dataset)) {
    return(dataset)
  }
  # Collect the grouping variable (when set) plus every column referenced by
  # any model, then read exactly those columns.
  vars <- character()
  if (options[["groupingVariable"]] != "") {
    vars <- options[["groupingVariable"]]
  }
  for (model in options[["models"]]) {
    vars <- unique(c(vars, model[["columns"]]))
  }
  .readDataSetToEnd(columns = vars)
}
.semIsReady <- function(dataset, options) {
  # The analysis is ready as soon as at least one model references a column.
  models <- options[["models"]]
  if (length(models) == 0) {
    return(FALSE)
  }
  any(vapply(models, function(m) length(m[["columns"]]) > 0, logical(1)))
}
# Validate the dataset and options before fitting. Problems are reported either
# by setting an error on `modelContainer` or by aborting the analysis outright
# via .hasErrors(exitAnalysisIfErrors = TRUE) / .quitAnalysis().
.semCheckErrors <- function(dataset, options, ready, modelContainer) {
  # Nothing to validate until at least one model with columns is specified.
  if (!ready) return()
  if (options$Data == "varcov") {
    # Check if dataset is variance covariance matrix:
    .hasErrors(dataset, type = c("varCovMatrix", "infinity"),
               message='default', exitAnalysisIfErrors = TRUE)
  } else if (ncol(dataset) > 0) {
    # NOTE(review): returning FALSE is inconsistent with the other early exits
    # (plain return()); callers appear to ignore the return value.
    if (length(options[["models"]]) < 1) return(FALSE)
    # Only scan columns that are actually referenced by some model syntax.
    usedvars <- unique(unlist(lapply(options[["models"]], function(x) {
      .semGetUsedVars(x[["syntax"]], colnames(dataset))
    })))
    .hasErrors(dataset[,usedvars],
               type = c("infinity"), message='default', exitAnalysisIfErrors = TRUE)
  }
  # check FIML: full-information ML missing-data handling requires an ML-type estimator
  if (!options[["estimator"]] %in% c("default", "ML") && options[["missing"]] == "ml") {
    modelContainer$setError(gettext("FIML missing data handling only available with ML-type estimators"))
  }
  # Check whether grouping variable is a grouping variable:
  # every group needs at least 3 observations.
  if (options[["groupingVariable"]] != "") {
    groupfac <- factor(dataset[[.v(options[["groupingVariable"]])]])
    factab <- table(groupfac)
    if (any(factab < 3)) {
      violations <- names(table(groupfac))[table(groupfac) < 3]
      .quitAnalysis(gettextf("Grouping variable has fewer than 3 observations in group %s",
                             paste(violations, collapse = ", ")))
    }
  }
  # Check mean structure: several options are incompatible with
  # variance-covariance input because no raw data is available.
  if (options[["Data"]] == "varcov") {
    if (options[["meanstructure"]]) {
      modelContainer$setError(gettext("Mean structure can not be included when data is variance-covariance matrix"))
      return()
    }
    # NOTE(review): this modifies only the local copy of `options`
    # (copy-on-modify), so the change does not reach the caller — confirm
    # whether that is intentional.
    options$meanstructure <- FALSE
    if (options[["SampleSize"]] == 0) {
      modelContainer$setError(gettext("Please set the sample size!"))
      return()
    }
    # Check for multiple groups:
    if (options[["groupingVariable"]] != "") {
      modelContainer$setError(gettext("Multiple group analysis not supported when data is variance-covariance matrix"))
      return()
    }
  } else {
    # Raw data should have more rows than columns; otherwise the input is
    # probably a covariance matrix supplied with the wrong Data option.
    if (ncol(dataset) > 0 && !nrow(dataset) > ncol(dataset)) {
      modelContainer$setError(gettext("Not more cases than number of variables. Is your data a variance-covariance matrix?"))
      return()
    }
  }
}
# Check a lavaan model string for syntax errors and unknown variable names.
# Returns an informative printable string if there is an error, else "".
checkLavaanModel <- function(model, availableVars) {
  if (model == "") return("Enter a model")

  vvars <- availableVars
  vmodel <- model # historically translated to encoded names; now used as-is
  # Identity name mapping kept for str_replace_all() below — a leftover from
  # when variable names were base64-encoded; it now maps each name to itself.
  unvvars <- availableVars
  names(unvvars) <- vvars

  # Check model syntax
  parsed <- try(lavaan::lavParseModelString(vmodel, TRUE), silent = TRUE)
  if (inherits(parsed, "try-error")) {
    msg <- attr(parsed, "condition")$message
    # lavaan signals "NA/NaN argument" for an effectively empty model
    if (msg == "NA/NaN argument") {
      return("Enter a model")
    }
    return(stringr::str_replace_all(msg, unvvars))
  }

  # Check variable names.
  # NOTE(review): this guard is effectively always TRUE — a missing
  # `availableVars` would already have errored at `vvars <- availableVars`.
  if (!missing(availableVars)) {
    # latent variables are defined in the model, not the data, so exclude them
    latents <- unique(parsed[parsed$op == "=~",]$lhs)
    modelVars <- setdiff(unique(c(parsed$lhs, parsed$rhs)), latents)
    modelVars <- modelVars[modelVars != ""] # e.g., x1 ~ 1 yields an empty rhs entry
    modelVarsInAvailableVars <- (modelVars %in% vvars)
    if (!all(modelVarsInAvailableVars)) {
      notRecognized <- modelVars[!modelVarsInAvailableVars]
      return(paste("Variable(s) in model syntax not recognized:",
                   paste(stringr::str_replace_all(notRecognized, unvvars),
                         collapse = ", ")))
    }
  }
  # if checks pass, return empty string
  return("")
}
.semGetUsedVars <- function(syntax, availablevars) {
  # Return the subset of available variables that actually appear in the model
  # syntax. A variable counts as "used" when it is delimited by lavaan operator
  # characters or whitespace (or the start/end of the string); \Q...\E quotes
  # the name so regex metacharacters in it are matched literally.
  vars <- .unv(availablevars)
  boundary <- "[\\s\\+\\^\\=\\~\\<\\*\\>\\:\\%\\|\\+]"
  patterns <- paste0("(?<=", boundary, "|^)\\Q", vars, "\\E(?=", boundary, "|$)")
  hits <- vapply(patterns,
                 function(p) stringr::str_detect(syntax, p),
                 FUN.VALUE = logical(1),
                 USE.NAMES = FALSE)
  vars[hits]
}
.semModelContainer <- function(jaspResults) {
  # Reuse the cached container when present; otherwise create one whose
  # dependencies cover every option that invalidates the fitted models.
  modelContainer <- jaspResults[["modelContainer"]]
  if (is.null(modelContainer)) {
    modelContainer <- createJaspContainer()
    modelContainer$dependOn(c("sampling.weights", "meanstructure", "int.ov.free", "int.lv.free", "fixed.x", "orthogonal",
                              "factorStandardisation", "auto.fix.single", "auto.var", "auto.cov.lv.x",
                              "auto.cov.y", "auto.th", "auto.delta", "auto.efa", "std.ov", "missing", "estimator", "test",
                              "se", "information", "emulation", "groupingVariable", "eq_loadings", "eq_intercepts",
                              "eq_residuals", "eq_residualcovariances", "eq_means", "eq_thresholds", "eq_regressions",
                              "eq_variances", "eq_lvcovariances", "Data", "SampleSize", "group.partial"))
    jaspResults[["modelContainer"]] <- modelContainer
  }
  return(modelContainer)
}
# Fit each lavaan model specified in the options, reusing previously fitted
# models from the container cache where the specification has not changed.
# Returns a list of lavaan fits (one per model); on failure an error is set on
# `modelContainer` and the loop is aborted.
.semComputeResults <- function(modelContainer, dataset, options) {
  # find reusable results: match current model specs against cached ones
  oldmodels <- modelContainer[["models"]][["object"]]
  oldresults <- modelContainer[["results"]][["object"]]
  reuse <- match(options[["models"]], oldmodels)
  if (identical(reuse, seq_along(reuse))) return(oldresults) # reuse everything
  # create results list
  results <- vector("list", length(options[["models"]]))
  if (any(!is.na(reuse))) {
    # where possible, prefill results with old results
    results[seq_along(reuse)] <- oldresults[reuse]
  }
  # generate lavaan options list
  lavopts <- .semOptionsToLavOptions(options, dataset)
  for (i in seq_along(results)) {
    if (!is.null(results[[i]])) next # existing model is reused
    # create options
    lav_args <- lavopts
    syntax <- .semTranslateModel(options[["models"]][[i]][["syntax"]], dataset)
    lav_args[["model"]] <- syntax
    # raw data goes in as `data`; a covariance matrix needs `sample.cov` plus
    # an explicit sample size
    if (options[["Data"]] == "raw") {
      lav_args[["data"]] <- dataset
    } else {
      lav_args[["sample.cov"]] <- .semDataCovariance(dataset, options[["models"]][[i]][["syntax"]])
      lav_args[["sample.nobs"]] <- options[["SampleSize"]]
    }
    # fit the model
    fit <- try(do.call(lavaan::lavaan, lav_args))
    if (inherits(fit, "try-error")) {
      errmsg <- gettextf("Estimation failed\nMessage:\n%s", attr(fit, "condition")$message)
      modelContainer$setError(paste0("Error in model \"", options[["models"]][[i]][["modelName"]], "\" - ",
                                     .decodeVarsInMessage(names(dataset), errmsg)))
      modelContainer$dependOn("models") # add dependency so everything gets updated upon model change
      break
    }
    # bootstrap standard errors are computed here rather than inside lavaan
    # (lavaan itself was run with se = "standard"; see .semOptionsToLavOptions)
    if (options[["se"]] == "bootstrap") {
      fit <- lavBootstrap(fit, options[["errorCalculationBootstrapSamples"]])
    }
    results[[i]] <- fit
  }
  # store in model container (only when every model fitted without error)
  if (!modelContainer$getError()) {
    modelContainer[["results"]] <- createJaspState(results)
    modelContainer[["results"]]$dependOn(optionsFromObject = modelContainer)
    modelContainer[["models"]] <- createJaspState(options[["models"]])
    modelContainer[["models"]]$dependOn(optionsFromObject = modelContainer)
  }
  return(results)
}
# Coerce the input dataset to a sample covariance matrix restricted to the
# variables used in `syntax`; aborts the analysis when the data cannot be
# interpreted as a covariance matrix (non-square selection or NA cells).
.semDataCovariance <- function(dataset, syntax) {
  usedvars <- .semGetUsedVars(syntax, colnames(dataset))
  var_idx <- match(usedvars, .unv(colnames(dataset)))
  # select the same rows and columns so the submatrix stays square
  mat <- try(as.matrix(dataset[var_idx, var_idx]))
  if (inherits(mat, "try-error") || any(is.na(mat)))
    .quitAnalysis("Input data does not seem to be a covariance matrix! Please check the format of the input data.
                  All cells must be numeric, and the number of rows must equal the number of columns.")
  colnames(mat) <- rownames(mat) <- colnames(dataset)[var_idx]
  # fixed: removed leftover debug `print(mat)` that dumped the matrix to the console
  return(mat)
}
# Map the JASP QML options to a lavaan options list.
# See ?lavaan::lavOptions for documentation of the individual settings.
.semOptionsToLavOptions <- function(options, dataset) {
  # start from lavaan's defaults and overwrite only what JASP exposes
  lavopts <- lavaan::lavOptions()
  lavopts[["mimic"]] <- options[["emulation"]]
  # model features
  lavopts[["meanstructure"]] <- options[["meanstructure"]]
  lavopts[["int.ov.free"]] <- !options[["int.ov.fixed"]]
  lavopts[["int.lv.free"]] <- !options[["int.lv.fixed"]]
  lavopts[["fixed.x"]] <- options[["fixed.x"]]
  lavopts[["orthogonal"]] <- options[["orthogonal"]]
  # factor scale identification: exactly one of the three schemes is selected
  lavopts[["std.lv"]] <- options[["factorStandardisation"]] == "std.lv"
  lavopts[["effect.coding"]] <- options[["factorStandardisation"]] == "effect.coding"
  lavopts[["auto.fix.first"]] <- options[["factorStandardisation"]] == "auto.fix.first"
  lavopts[["auto.fix.single"]] <- options[["auto.fix.single"]]
  lavopts[["auto.var"]] <- options[["auto.var"]]
  lavopts[["auto.cov.lv.x"]] <- options[["auto.cov.lv.x"]]
  lavopts[["auto.cov.y"]] <- options[["auto.cov.y"]]
  lavopts[["auto.th"]] <- options[["auto.th"]]
  lavopts[["auto.delta"]] <- options[["auto.delta"]]
  lavopts[["auto.efa"]] <- options[["auto.efa"]]
  # data options
  lavopts[["std.ov"]] <- options[["std.ov"]]
  lavopts[["missing"]] <- options[["missing"]]
  # estimation options
  lavopts[["estimator"]] <- options[["estimator"]]
  # bootstrap standard errors are computed separately (lavBootstrap in
  # .semComputeResults), so lavaan itself runs with "standard" SEs
  lavopts[["se"]] <- ifelse(options[["se"]] == "bootstrap", "standard", options[["se"]])
  lavopts[["information"]] <- options[["information"]]
  lavopts[["test"]] <- options[["test"]]
  # group.equal options
  equality_constraints <- c(
    options[["eq_loadings"]],
    options[["eq_intercepts"]],
    options[["eq_means"]],
    options[["eq_thresholds"]],
    options[["eq_regressions"]],
    options[["eq_residuals"]],
    options[["eq_residualcovariances"]],
    options[["eq_variances"]],
    options[["eq_lvcovariances"]]
  )
  if (any(equality_constraints)) {
    # logical indexing: the keyword vector below must stay in the same order
    # as the `equality_constraints` entries above
    lavopts[["group.equal"]] <- c("loadings", "intercepts", "means", "thresholds", "regressions", "residuals",
                                  "residual.covariances", "lv.variances", "lv.covariances")[equality_constraints]
  }
  # group.partial options
  # split params on newlines, commas or semicolons, then translate any
  # variable names they contain
  splitted <- strsplit(options[["group.partial"]], "[\\n,;]+", perl = TRUE)[[1]]
  lavopts[["group.partial"]] <- vapply(splitted, .semTranslateModel, dataset = dataset, "")
  # group variable
  if (options[["groupingVariable"]] != "") {
    lavopts[["group"]] <- .v(options[["groupingVariable"]])
  }
  # sampling weights
  if (options[["sampling.weights"]] != "") {
    lavopts[["sampling.weights"]] <- .v(options[["sampling.weights"]])
  }
  return(lavopts)
}
# Translate variable names in the model syntax to JASP column-name syntax
# (via .v), handling double-quoted, single-quoted, and bare occurrences.
.semTranslateModel <- function(syntax, dataset) {
  usedvars <- .semGetUsedVars(syntax, colnames(dataset))
  if (length(usedvars) == 0) {
    return(syntax)
  }
  # Replace longer names first so that a variable that is a substring of
  # another variable does not clobber the longer name.
  usedvars <- usedvars[order(nchar(usedvars), decreasing = TRUE)]
  with.s.quotes <- paste0("\\b'", usedvars, "'\\b")
  with.d.quotes <- paste0('\\b"', usedvars, '"\\b')
  new.names <- .v(usedvars)
  # Three passes, preserving the original order of replacement:
  # double-quoted names, then single-quoted names, then bare names.
  for (i in seq_along(usedvars)) {
    syntax <- gsub(with.d.quotes[i], new.names[i], syntax)
  }
  for (i in seq_along(usedvars)) {
    syntax <- gsub(with.s.quotes[i], new.names[i], syntax)
  }
  for (i in seq_along(usedvars)) {
    syntax <- gsub(paste0("\\b", usedvars[i], "\\b"), new.names[i], syntax)
  }
  return(syntax)
}
# output functions
# Build the "Model fit" table: information criteria, baseline chi-square test,
# and (for multiple models) chi-square difference tests via lavTestLRT.
.semFitTab <- function(jaspResults, modelContainer, dataset, options, ready) {
  # table already built (cached) — nothing to do
  if (!is.null(modelContainer[["fittab"]])) return()
  fittab <- createJaspTable(title = gettext("Model fit"))
  fittab$dependOn("models")
  fittab$position <- 0
  fittab$addColumnInfo(name = "Model", title = "", type = "string" )
  fittab$addColumnInfo(name = "AIC", title = gettext("AIC"), type = "number" )
  fittab$addColumnInfo(name = "BIC", title = gettext("BIC"), type = "number" )
  fittab$addColumnInfo(name = "N", title = gettext("n"), type = "integer")
  fittab$addColumnInfo(name = "Chisq", title = gettext("χ²"), type = "number" ,
                       overtitle = gettext("Baseline test"))
  fittab$addColumnInfo(name = "Df", title = gettext("df"), type = "integer",
                       overtitle = gettext("Baseline test"))
  fittab$addColumnInfo(name = "PrChisq", title = gettext("p"), type = "pvalue",
                       overtitle = gettext("Baseline test"))
  fittab$addColumnInfo(name = "dchisq", title = gettext("Δχ²"), type = "number" ,
                       overtitle = gettext("Difference test"))
  fittab$addColumnInfo(name = "ddf", title = gettext("Δdf"), type = "integer",
                       overtitle = gettext("Difference test"))
  fittab$addColumnInfo(name = "dPrChisq", title = gettext("p"), type = "pvalue" ,
                       overtitle = gettext("Difference test"))
  modelContainer[["fittab"]] <- fittab
  # show the empty table skeleton until data and models are available
  if (!ready) return()
  # add data to the table!
  semResults <- .semComputeResults(modelContainer, dataset, options)
  if (modelContainer$getError()) return()
  if (length(semResults) == 1) {
    # single model: drop the first row of the LRT result
    lrt <- .withWarnings(lavaan::lavTestLRT(semResults[[1]])[-1, ])
    rownames(lrt$value) <- options[["models"]][[1]][["modelName"]]
    Ns <- lavaan::lavInspect(semResults[[1]], "ntotal")
  } else {
    Ns <- vapply(semResults, lavaan::lavInspect, 0, what = "ntotal")
    lrt_args <- semResults
    names(lrt_args) <- "object" # (the first result is object, the others ...)
    lrt_args[["model.names"]] <- vapply(options[["models"]], getElement, name = "modelName", "")
    lrt <- .withWarnings(do.call(lavaan::lavTestLRT, lrt_args))
    # the first model has nothing to be compared against, so blank out its
    # difference-test cells
    lrt$value[1,5:7] <- NA
  }
  fittab[["Model"]] <- rownames(lrt$value)
  fittab[["AIC"]] <- lrt$value[["AIC"]]
  fittab[["BIC"]] <- lrt$value[["BIC"]]
  fittab[["N"]] <- Ns
  fittab[["Chisq"]] <- lrt$value[["Chisq"]]
  fittab[["Df"]] <- lrt$value[["Df"]]
  fittab[["PrChisq"]] <- pchisq(q = lrt$value[["Chisq"]], df = lrt$value[["Df"]], lower.tail = FALSE)
  fittab[["dchisq"]] <- lrt$value[["Chisq diff"]]
  fittab[["ddf"]] <- lrt$value[["Df diff"]]
  fittab[["dPrChisq"]] <- lrt$value[["Pr(>Chisq)"]]
  # add warning footnote
  if (!is.null(lrt$warnings)) {
    fittab$addFootnote(gsub("lavaan WARNING: ", "", lrt$warnings[[1]]$message))
  }
  # add test statistic correction footnote
  test <- lavaan::lavInspect(semResults[[1]], "options")[["test"]]
  if (test != "standard") {
    # map the lavaan test option to a human-readable description
    LUT <- tibble::tribble(
      ~option, ~name,
      "Satorra.Bentler", gettext("Satorra-Bentler scaled test-statistic"),
      "Yuan.Bentler", gettext("Yuan-Bentler scaled test-statistic"),
      "Yuan.Bentler.Mplus", gettext("Yuan-Bentler (Mplus) scaled test-statistic"),
      "mean.var.adjusted", gettext("mean and variance adjusted test-statistic"),
      "Satterthwaite", gettext("mean and variance adjusted test-statistic"),
      "scaled.shifted", gettext("scaled and shifted test-statistic"),
      "Bollen.Stine", gettext("bootstrap (Bollen-Stine) probability value"),
      "bootstrap", gettext("bootstrap (Bollen-Stine) probability value"),
      "boot", gettext("bootstrap (Bollen-Stine) probability value")
    )
    testname <- LUT[test == tolower(LUT$option), "name"][[1]]
    ftext <- gettextf("Model tests based on %s.", testname)
    fittab$addFootnote(message = ftext)
  }
}
.semParameters <- function(modelContainer, dataset, options, ready) {
  # skip when the parameter container already exists (cached output)
  if (!is.null(modelContainer[["params"]])) {
    return()
  }
  params <- createJaspContainer(gettext("Parameter estimates"))
  params$position <- 1
  params$dependOn(c("ciWidth", "bootCItype", "std", "models"))
  modelContainer[["params"]] <- params
  nModels <- length(options[["models"]])
  if (nModels < 2) {
    # single model: tables go directly into the container, no model name
    fit <- modelContainer[["results"]][["object"]][[1]]
    .semParameterTables(fit, NULL, params, options, ready)
  } else {
    # multiple models: one named sub-container per model
    for (i in seq_len(nModels)) {
      fit <- modelContainer[["results"]][["object"]][[i]]
      modelname <- options[["models"]][[i]][["modelName"]]
      .semParameterTables(fit, modelname, params, options, ready)
    }
  }
}
.semParameterTables <- function(fit, modelname, parentContainer, options, ready) {
if (is.null(modelname)) {
pecont <- parentContainer
} else {
pecont <- createJaspContainer(modelname, initCollapsed = TRUE)
}
# Measurement model
indtab <- createJaspTable(title = gettext("Factor Loadings"))
if (options[["groupingVariable"]] != "")
indtab$addColumnInfo(name = "group", title = gettext("Group"), type = "string", combine = TRUE)
indtab$addColumnInfo(name = "lhs", title = gettext("Latent"), type = "string", combine = TRUE)
indtab$addColumnInfo(name = "rhs", title = gettext("Indicator"), type = "string")
indtab$addColumnInfo(name = "label", title = "", type = "string")
indtab$addColumnInfo(name = "est", title = gettext("Estimate"), type = "number", format = "sf:4;dp:3")
indtab$addColumnInfo(name = "se", title = gettext("Std. Error"), type = "number", format = "sf:4;dp:3")
indtab$addColumnInfo(name = "z", title = gettext("z-value"), type = "number", format = "sf:4;dp:3")
indtab$addColumnInfo(name = "pvalue", title = gettext("p"), type = "number", format = "dp:3;p:.001")
indtab$addColumnInfo(name = "ci.lower", title = gettext("Lower"), type = "number", format = "sf:4;dp:3",
overtitle = gettextf("%s%% Confidence Interval", options$ciWidth * 100))
indtab$addColumnInfo(name = "ci.upper", title = gettext("Upper"), type = "number", format = "sf:4;dp:3",
overtitle = gettextf("%s%% Confidence Interval", options$ciWidth * 100))
if (options[["std"]]) {
indtab$addColumnInfo(name = "std.all", title = gettext("All"), type = "number", format = "sf:4;dp:3",
overtitle = gettext("Standardized"))
indtab$addColumnInfo(name = "std.lv", title = gettext("LV"), type = "number", format = "sf:4;dp:3",
overtitle = gettext("Standardized"))
indtab$addColumnInfo(name = "std.nox", title = gettext("Endo"), type = "number", format = "sf:4;dp:3",
overtitle = gettext("Standardized"))
}
pecont[["ind"]] <- indtab
# Structural Model
regtab <- createJaspTable(title = gettext("Regression coefficients"))
if (options[["groupingVariable"]] != "")
regtab$addColumnInfo(name = "group", title = gettext("Group"), type = "string", combine = TRUE)
regtab$addColumnInfo(name = "rhs", title = gettext("Predictor"), type = "string", combine = TRUE)
regtab$addColumnInfo(name = "lhs", title = gettext("Outcome"), type = "string")
regtab$addColumnInfo(name = "label", title = "", type = "string")
regtab$addColumnInfo(name = "est", title = gettext("Estimate"), type = "number", format = "sf:4;dp:3")
regtab$addColumnInfo(name = "se", title = gettext("Std. Error"), type = "number", format = "sf:4;dp:3")
regtab$addColumnInfo(name = "z", title = gettext("z-value"), type = "number", format = "sf:4;dp:3")
regtab$addColumnInfo(name = "pvalue", title = gettext("p"), type = "number", format = "dp:3;p:.001")
regtab$addColumnInfo(name = "ci.lower", title = gettext("Lower"), type = "number", format = "sf:4;dp:3",
overtitle = gettextf("%s%% Confidence Interval", options$ciWidth * 100))
regtab$addColumnInfo(name = "ci.upper", title = gettext("Upper"), type = "number", format = "sf:4;dp:3",
overtitle = gettextf("%s%% Confidence Interval", options$ciWidth * 100))
if (options[["std"]]) {
regtab$addColumnInfo(name = "std.all", title = gettext("All"), type = "number", format = "sf:4;dp:3",
overtitle = gettext("Standardized"))
regtab$addColumnInfo(name = "std.lv", title = gettext("LV"), type = "number", format = "sf:4;dp:3",
overtitle = gettext("Standardized"))
regtab$addColumnInfo(name = "std.nox", title = gettext("Endo"), type = "number", format = "sf:4;dp:3",
overtitle = gettext("Standardized"))
}
pecont[["reg"]] <- regtab
# Latent variances
lvartab <- createJaspTable(title = gettext("Factor variances"))
if (options[["groupingVariable"]] != "")
lvartab$addColumnInfo(name = "group", title = gettext("Group"), type = "string", combine = TRUE)
lvartab$addColumnInfo(name = "lhs", title = gettext("Variable"), type = "string")
lvartab$addColumnInfo(name = "label", title = "", type = "string")
lvartab$addColumnInfo(name = "est", title = gettext("Estimate"), type = "number", format = "sf:4;dp:3")
lvartab$addColumnInfo(name = "se", title = gettext("Std. Error"), type = "number", format = "sf:4;dp:3")
lvartab$addColumnInfo(name = "z", title = gettext("z-value"), type = "number", format = "sf:4;dp:3")
lvartab$addColumnInfo(name = "pvalue", title = gettext("p"), type = "number", format = "dp:3;p:.001")
lvartab$addColumnInfo(name = "ci.lower", title = gettext("Lower"), type = "number", format = "sf:4;dp:3",
overtitle = gettextf("%s%% Confidence Interval", options$ciWidth * 100))
lvartab$addColumnInfo(name = "ci.upper", title = gettext("Upper"), type = "number", format = "sf:4;dp:3",
overtitle = gettextf("%s%% Confidence Interval", options$ciWidth * 100))
if (options[["std"]]) {
lvartab$addColumnInfo(name = "std.all", title = gettext("All"), type = "number", format = "sf:4;dp:3",
overtitle = gettext("Standardized"))
lvartab$addColumnInfo(name = "std.lv", title = gettext("LV"), type = "number", format = "sf:4;dp:3",
overtitle = gettext("Standardized"))
lvartab$addColumnInfo(name = "std.nox", title = gettext("Endo"), type = "number", format = "sf:4;dp:3",
overtitle = gettext("Standardized"))
}
pecont[["lvar"]] <- lvartab
# Latent covariances
lcovtab <- createJaspTable(title = gettext("Factor covariances"))
if (options[["groupingVariable"]] != "")
lcovtab$addColumnInfo(name = "group", title = gettext("Group"), type = "string", combine = TRUE)
lcovtab$addColumnInfo(name = "lhs", title = gettext("Variables"), type = "string")
lcovtab$addColumnInfo(name = "label", title = "", type = "string")
lcovtab$addColumnInfo(name = "est", title = gettext("Estimate"), type = "number", format = "sf:4;dp:3")
lcovtab$addColumnInfo(name = "se", title = gettext("Std. Error"), type = "number", format = "sf:4;dp:3")
lcovtab$addColumnInfo(name = "z", title = gettext("z-value"), type = "number", format = "sf:4;dp:3")
lcovtab$addColumnInfo(name = "pvalue", title = gettext("p"), type = "number", format = "dp:3;p:.001")
lcovtab$addColumnInfo(name = "ci.lower", title = gettext("Lower"), type = "number", format = "sf:4;dp:3",
overtitle = gettextf("%s%% Confidence Interval", options$ciWidth * 100))
lcovtab$addColumnInfo(name = "ci.upper", title = gettext("Upper"), type = "number", format = "sf:4;dp:3",
overtitle = gettextf("%s%% Confidence Interval", options$ciWidth * 100))
if (options[["std"]]) {
lcovtab$addColumnInfo(name = "std.all", title = gettext("All"), type = "number", format = "sf:4;dp:3",
overtitle = gettext("Standardized"))
lcovtab$addColumnInfo(name = "std.lv", title = gettext("LV"), type = "number", format = "sf:4;dp:3",
overtitle = gettext("Standardized"))
lcovtab$addColumnInfo(name = "std.nox", title = gettext("Endo"), type = "number", format = "sf:4;dp:3",
overtitle = gettext("Standardized"))
}
pecont[["lcov"]] <- lcovtab
# Residual variances
vartab <- createJaspTable(title = gettext("Residual variances"))
if (options[["groupingVariable"]] != "")
vartab$addColumnInfo(name = "group", title = gettext("Group"), type = "string", combine = TRUE)
vartab$addColumnInfo(name = "lhs", title = gettext("Variable"), type = "string")
vartab$addColumnInfo(name = "label", title = "", type = "string")
vartab$addColumnInfo(name = "est", title = gettext("Estimate"), type = "number", format = "sf:4;dp:3")
vartab$addColumnInfo(name = "se", title = gettext("Std. Error"), type = "number", format = "sf:4;dp:3")
vartab$addColumnInfo(name = "z", title = gettext("z-value"), type = "number", format = "sf:4;dp:3")
vartab$addColumnInfo(name = "pvalue", title = gettext("p"), type = "number", format = "dp:3;p:.001")
vartab$addColumnInfo(name = "ci.lower", title = gettext("Lower"), type = "number", format = "sf:4;dp:3",
overtitle = gettextf("%s%% Confidence Interval", options$ciWidth * 100))
vartab$addColumnInfo(name = "ci.upper", title = gettext("Upper"), type = "number", format = "sf:4;dp:3",
overtitle = gettextf("%s%% Confidence Interval", options$ciWidth * 100))
if (options[["std"]]) {
vartab$addColumnInfo(name = "std.all", title = gettext("All"), type = "number", format = "sf:4;dp:3",
overtitle = gettext("Standardized"))
vartab$addColumnInfo(name = "std.lv", title = gettext("LV"), type = "number", format = "sf:4;dp:3",
overtitle = gettext("Standardized"))
vartab$addColumnInfo(name = "std.nox", title = gettext("Endo"), type = "number", format = "sf:4;dp:3",
overtitle = gettext("Standardized"))
}
pecont[["var"]] <- vartab
# Residual covariances
covtab <- createJaspTable(title = gettext("Residual covariances"))
if (options[["groupingVariable"]] != "")
covtab$addColumnInfo(name = "group", title = gettext("Group"), type = "string", combine = TRUE)
covtab$addColumnInfo(name = "lhs", title = gettext("Variables"), type = "string")
covtab$addColumnInfo(name = "label", title = "", type = "string")
covtab$addColumnInfo(name = "est", title = gettext("Estimate"), type = "number", format = "sf:4;dp:3")
covtab$addColumnInfo(name = "se", title = gettext("Std. Error"), type = "number", format = "sf:4;dp:3")
covtab$addColumnInfo(name = "z", title = gettext("z-value"), type = "number", format = "sf:4;dp:3")
covtab$addColumnInfo(name = "pvalue", title = gettext("p"), type = "number", format = "dp:3;p:.001")
covtab$addColumnInfo(name = "ci.lower", title = gettext("Lower"), type = "number", format = "sf:4;dp:3",
overtitle = gettextf("%s%% Confidence Interval", options$ciWidth * 100))
covtab$addColumnInfo(name = "ci.upper", title = gettext("Upper"), type = "number", format = "sf:4;dp:3",
overtitle = gettextf("%s%% Confidence Interval", options$ciWidth * 100))
if (options[["std"]]) {
covtab$addColumnInfo(name = "std.all", title = gettext("All"), type = "number", format = "sf:4;dp:3",
overtitle = gettext("Standardized"))
covtab$addColumnInfo(name = "std.lv", title = gettext("LV"), type = "number", format = "sf:4;dp:3",
overtitle = gettext("Standardized"))
covtab$addColumnInfo(name = "std.nox", title = gettext("Endo"), type = "number", format = "sf:4;dp:3",
overtitle = gettext("Standardized"))
}
pecont[["cov"]] <- covtab
# Means
if (options[["meanstructure"]]) {
mutab <- createJaspTable(title = gettext("Means"))
if (options[["groupingVariable"]] != "")
mutab$addColumnInfo(name = "group", title = gettext("Group"), type = "string", combine = TRUE)
mutab$addColumnInfo(name = "lhs", title = gettext("Variable"), type = "string")
mutab$addColumnInfo(name = "label", title = "", type = "string")
mutab$addColumnInfo(name = "est", title = gettext("Estimate"), type = "number", format = "sf:4;dp:3")
mutab$addColumnInfo(name = "se", title = gettext("Std. Error"), type = "number", format = "sf:4;dp:3")
mutab$addColumnInfo(name = "z", title = gettext("z-value"), type = "number", format = "sf:4;dp:3")
mutab$addColumnInfo(name = "pvalue", title = gettext("p"), type = "number", format = "dp:3;p:.001")
mutab$addColumnInfo(name = "ci.lower", title = gettext("Lower"), type = "number", format = "sf:4;dp:3",
overtitle = gettextf("%s%% Confidence Interval", options$ciWidth * 100))
mutab$addColumnInfo(name = "ci.upper", title = gettext("Upper"), type = "number", format = "sf:4;dp:3",
overtitle = gettextf("%s%% Confidence Interval", options$ciWidth * 100))
if (options[["std"]]) {
mutab$addColumnInfo(name = "std.all", title = gettext("All"), type = "number", format = "sf:4;dp:3",
overtitle = gettext("Standardized"))
mutab$addColumnInfo(name = "std.lv", title = gettext("LV"), type = "number", format = "sf:4;dp:3",
overtitle = gettext("Standardized"))
mutab$addColumnInfo(name = "std.nox", title = gettext("Endo"), type = "number", format = "sf:4;dp:3",
overtitle = gettext("Standardized"))
}
pecont[["mu"]] <- mutab
}
deftab <- createJaspTable(title = gettext("Defined parameters"))
deftab$addColumnInfo(name = "lhs", title = gettext("Name"), type = "string")
deftab$addColumnInfo(name = "est", title = gettext("Estimate"), type = "number", format = "sf:4;dp:3")
deftab$addColumnInfo(name = "se", title = gettext("Std. Error"), type = "number", format = "sf:4;dp:3")
deftab$addColumnInfo(name = "z", title = gettext("z-value"), type = "number", format = "sf:4;dp:3")
deftab$addColumnInfo(name = "pvalue", title = gettext("p"), type = "number", format = "dp:3;p:.001")
deftab$addColumnInfo(name = "ci.lower", title = gettext("Lower"), type = "number", format = "sf:4;dp:3",
overtitle = gettextf("%s%% Confidence Interval", options$ciWidth * 100))
deftab$addColumnInfo(name = "ci.upper", title = gettext("Upper"), type = "number", format = "sf:4;dp:3",
overtitle = gettextf("%s%% Confidence Interval", options$ciWidth * 100))
if (options[["std"]]) {
deftab$addColumnInfo(name = "std.all", title = gettext("All"), type = "number", format = "sf:4;dp:3",
overtitle = gettext("Standardized"))
deftab$addColumnInfo(name = "std.lv", title = gettext("LV"), type = "number", format = "sf:4;dp:3",
overtitle = gettext("Standardized"))
deftab$addColumnInfo(name = "std.nox", title = gettext("Endo"), type = "number", format = "sf:4;dp:3",
overtitle = gettext("Standardized"))
}
pecont[["def"]] <- deftab
if (!is.null(modelname)) parentContainer[[modelname]] <- pecont
if (!ready || !inherits(fit, "lavaan")) return()
# fill tables with values
lvnames <- lavaan::lavNames(fit, "lv")
ovnames <- lavaan::lavNames(fit, "ov")
pe <- lavaan::parameterestimates(fit, standardized = TRUE, level = options[["ciWidth"]],
boot.ci.type = options[["bootCItype"]])
pe <- lavaan::lavMatrixRepresentation(lavaan::lav_partable_complete(pe))
if (options[["groupingVariable"]] != "") {
pe[pe[["op"]] != ":=", "groupname"] <- lavaan::lavInspect(fit, "group.label")[pe[["group"]]]
} else {
pe[["group"]] <- 0
}
# Measurement model
pe_ind <- pe[pe$op == "=~",]
pe_ind <- pe_ind[order(pe_ind[["group"]], pe_ind[["lhs"]]),]
if (nrow(pe_ind) == 0) pecont[["ind"]] <- NULL # remove if no estimates
if (options[["groupingVariable"]] != "")
indtab[["group"]] <- pe_ind[["groupname"]]
indtab[["rhs"]] <- .unv(pe_ind[["rhs"]])
indtab[["lhs"]] <- .unv(pe_ind[["lhs"]])
indtab[["label"]] <- pe_ind[["label"]]
indtab[["est"]] <- pe_ind[["est"]]
indtab[["se"]] <- pe_ind[["se"]]
indtab[["z"]] <- pe_ind[["z"]]
indtab[["pvalue"]] <- pe_ind[["pvalue"]]
indtab[["ci.lower"]] <- pe_ind[["ci.lower"]]
indtab[["ci.upper"]] <- pe_ind[["ci.upper"]]
if (options[["std"]]) {
indtab[["std.all"]] <- pe_ind[["std.all"]]
indtab[["std.lv"]] <- pe_ind[["std.lv"]]
indtab[["std.nox"]] <- pe_ind[["std.nox"]]
}
# Structural model
pe_reg <- pe[pe$op == "~",]
pe_reg <- pe_reg[order(pe_reg[["group"]], pe_reg[["lhs"]]),]
if (nrow(pe_reg) == 0) pecont[["reg"]] <- NULL # remove if no estimates
if (options[["groupingVariable"]] != "")
regtab[["group"]] <- pe_reg[["groupname"]]
regtab[["rhs"]] <- .unv(pe_reg[["rhs"]])
regtab[["lhs"]] <- .unv(pe_reg[["lhs"]])
regtab[["label"]] <- pe_reg[["label"]]
regtab[["est"]] <- pe_reg[["est"]]
regtab[["se"]] <- pe_reg[["se"]]
regtab[["z"]] <- pe_reg[["z"]]
regtab[["pvalue"]] <- pe_reg[["pvalue"]]
regtab[["ci.lower"]] <- pe_reg[["ci.lower"]]
regtab[["ci.upper"]] <- pe_reg[["ci.upper"]]
if (options[["std"]]) {
regtab[["std.all"]] <- pe_reg[["std.all"]]
regtab[["std.lv"]] <- pe_reg[["std.lv"]]
regtab[["std.nox"]] <- pe_reg[["std.nox"]]
}
# Latent variances
pe_lvar <- pe[pe$op == "~~" & pe$lhs %in% lvnames & pe$lhs == pe$rhs,]
if (nrow(pe_lvar) == 0) pecont[["lvar"]] <- NULL # remove if no estimates
if (options[["groupingVariable"]] != "")
lvartab[["group"]] <- pe_lvar[["groupname"]]
lvartab[["rhs"]] <- .unv(pe_lvar[["rhs"]])
lvartab[["lhs"]] <- .unv(pe_lvar[["lhs"]])
lvartab[["label"]] <- pe_lvar[["label"]]
lvartab[["est"]] <- pe_lvar[["est"]]
lvartab[["se"]] <- pe_lvar[["se"]]
lvartab[["z"]] <- pe_lvar[["z"]]
lvartab[["pvalue"]] <- pe_lvar[["pvalue"]]
lvartab[["ci.lower"]] <- pe_lvar[["ci.lower"]]
lvartab[["ci.upper"]] <- pe_lvar[["ci.upper"]]
if (options[["std"]]) {
lvartab[["std.all"]] <- pe_lvar[["std.all"]]
lvartab[["std.lv"]] <- pe_lvar[["std.lv"]]
lvartab[["std.nox"]] <- pe_lvar[["std.nox"]]
}
# Latent covariances
pe_lcov <- pe[pe$op == "~~" & pe$lhs %in% lvnames & pe$rhs %in% lvnames & pe$lhs != pe$rhs,]
if (nrow(pe_lcov) == 0) pecont[["lcov"]] <- NULL # remove if no estimates
if (options[["groupingVariable"]] != "")
lcovtab[["group"]] <- pe_lcov[["groupname"]]
lcovtab[["lhs"]] <- paste(.unv(pe_lcov[["lhs"]]), "-", .unv(pe_lcov[["rhs"]]))
lcovtab[["label"]] <- pe_lcov[["label"]]
lcovtab[["est"]] <- pe_lcov[["est"]]
lcovtab[["se"]] <- pe_lcov[["se"]]
lcovtab[["z"]] <- pe_lcov[["z"]]
lcovtab[["pvalue"]] <- pe_lcov[["pvalue"]]
lcovtab[["ci.lower"]] <- pe_lcov[["ci.lower"]]
lcovtab[["ci.upper"]] <- pe_lcov[["ci.upper"]]
if (options[["std"]]) {
lcovtab[["std.all"]] <- pe_lcov[["std.all"]]
lcovtab[["std.lv"]] <- pe_lcov[["std.lv"]]
lcovtab[["std.nox"]] <- pe_lcov[["std.nox"]]
}
# Residual variances
pe_var <- pe[pe$op == "~~" & pe$lhs %in% ovnames & pe$lhs == pe$rhs,]
if (nrow(pe_var) == 0) pecont[["var"]] <- NULL # remove if no estimates
if (options[["groupingVariable"]] != "")
vartab[["group"]] <- pe_var[["groupname"]]
vartab[["rhs"]] <- .unv(pe_var[["rhs"]])
vartab[["lhs"]] <- .unv(pe_var[["lhs"]])
vartab[["label"]] <- pe_var[["label"]]
vartab[["est"]] <- pe_var[["est"]]
vartab[["se"]] <- pe_var[["se"]]
vartab[["z"]] <- pe_var[["z"]]
vartab[["pvalue"]] <- pe_var[["pvalue"]]
vartab[["ci.lower"]] <- pe_var[["ci.lower"]]
vartab[["ci.upper"]] <- pe_var[["ci.upper"]]
if (options[["std"]]) {
vartab[["std.all"]] <- pe_var[["std.all"]]
vartab[["std.lv"]] <- pe_var[["std.lv"]]
vartab[["std.nox"]] <- pe_var[["std.nox"]]
}
# Residual covariances
pe_cov <- pe[pe$op == "~~" & pe$lhs %in% ovnames & pe$rhs %in% ovnames & pe$lhs != pe$rhs,]
if (nrow(pe_cov) == 0) pecont[["cov"]] <- NULL # remove if no estimates
if (options[["groupingVariable"]] != "")
covtab[["group"]] <- pe_cov[["groupname"]]
covtab[["lhs"]] <- paste(.unv(pe_cov[["lhs"]]), "-", .unv(pe_cov[["rhs"]]))
covtab[["label"]] <- pe_cov[["label"]]
covtab[["est"]] <- pe_cov[["est"]]
covtab[["se"]] <- pe_cov[["se"]]
covtab[["z"]] <- pe_cov[["z"]]
covtab[["pvalue"]] <- pe_cov[["pvalue"]]
covtab[["ci.lower"]] <- pe_cov[["ci.lower"]]
covtab[["ci.upper"]] <- pe_cov[["ci.upper"]]
if (options[["std"]]) {
covtab[["std.all"]] <- pe_cov[["std.all"]]
covtab[["std.lv"]] <- pe_cov[["std.lv"]]
covtab[["std.nox"]] <- pe_cov[["std.nox"]]
}
# Means
if (options[["meanstructure"]]) {
pe_mu <- pe[pe$op == "~1",]
if (options[["groupingVariable"]] != "")
mutab[["group"]] <- pe_mu[["groupname"]]
mutab[["lhs"]] <- .unv(pe_mu[["lhs"]])
mutab[["label"]] <- pe_mu[["label"]]
mutab[["est"]] <- pe_mu[["est"]]
mutab[["se"]] <- pe_mu[["se"]]
mutab[["z"]] <- pe_mu[["z"]]
mutab[["pvalue"]] <- pe_mu[["pvalue"]]
mutab[["ci.lower"]] <- pe_mu[["ci.lower"]]
mutab[["ci.upper"]] <- pe_mu[["ci.upper"]]
if (options[["std"]]) {
mutab[["std.all"]] <- pe_mu[["std.all"]]
mutab[["std.lv"]] <- pe_mu[["std.lv"]]
mutab[["std.nox"]] <- pe_mu[["std.nox"]]
}
}
# defined parameters
pe_def <- pe[pe$op == ":=",]
if (nrow(pe_def) == 0) pecont[["def"]] <- NULL # remove if no estimates
deftab[["lhs"]] <- pe_def[["lhs"]]
deftab[["est"]] <- pe_def[["est"]]
deftab[["se"]] <- pe_def[["se"]]
deftab[["z"]] <- pe_def[["z"]]
deftab[["pvalue"]] <- pe_def[["pvalue"]]
deftab[["ci.lower"]] <- pe_def[["ci.lower"]]
deftab[["ci.upper"]] <- pe_def[["ci.upper"]]
if (options[["std"]]) {
deftab[["std.all"]] <- pe_def[["std.all"]]
deftab[["std.lv"]] <- pe_def[["std.lv"]]
deftab[["std.nox"]] <- pe_def[["std.nox"]]
}
}
# Populates the "Additional fit measures" output: three tables (fit indices,
# information criteria, other fit measures). With multiple models each table
# gets one value column per model, titled with the model name.
# Tables are created before the ready check so empty headers render while
# results are still being computed.
.semAdditionalFits <- function(modelContainer, dataset, options, ready) {
  # Skip when the output is disabled or the container already exists (cached).
  if (!options[["outputAdditionalFitMeasures"]] || !is.null(modelContainer[["addfit"]])) return()
  fitms <- createJaspContainer(gettext("Additional fit measures"))
  fitms$dependOn(c("outputAdditionalFitMeasures", "models"))
  fitms$position <- 0.5
  # Fit indices table: single "Value" column for one model, otherwise one
  # column per model.
  fitms[["indices"]] <- fitin <- createJaspTable(gettext("Fit indices"))
  fitin$addColumnInfo(name = "index", title = gettext("Index"), type = "string")
  if (length(options[["models"]]) < 2) {
    fitin$addColumnInfo(name = "value", title = gettext("Value"), type = "number", format = "sf:4;dp:3")
  } else {
    for (i in seq_along(options[["models"]])) {
      fitin$addColumnInfo(name = paste0("value_", i), title = options[["models"]][[i]][["modelName"]], type = "number",
                          format = "sf:4;dp:3")
    }
  }
  fitin$setExpectedSize(rows = 1, cols = 2)
  # Information criteria table (same one-column-per-model layout).
  fitms[["incrits"]] <- fitic <- createJaspTable(gettext("Information criteria"))
  fitic$addColumnInfo(name = "index", title = "", type = "string")
  if (length(options[["models"]]) < 2) {
    fitic$addColumnInfo(name = "value", title = gettext("Value"), type = "number", format = "sf:4;dp:3")
  } else {
    for (i in seq_along(options[["models"]])) {
      fitic$addColumnInfo(name = paste0("value_", i), title = options[["models"]][[i]][["modelName"]], type = "number",
                          format = "sf:4;dp:3")
    }
  }
  fitic$setExpectedSize(rows = 1, cols = 2)
  # Other fit measures table (RMSEA, SRMR, Hoelter's N, etc.).
  fitms[["others"]] <- fitot <- createJaspTable(gettext("Other fit measures"))
  fitot$addColumnInfo(name = "index", title = gettext("Metric"), type = "string")
  if (length(options[["models"]]) < 2) {
    fitot$addColumnInfo(name = "value", title = gettext("Value"), type = "number", format = "sf:4;dp:3")
  } else {
    for (i in seq_along(options[["models"]])) {
      fitot$addColumnInfo(name = paste0("value_", i), title = options[["models"]][[i]][["modelName"]], type = "number",
                          format = "sf:4;dp:3")
    }
  }
  fitot$setExpectedSize(rows = 1, cols = 2)
  modelContainer[["addfit"]] <- fitms
  # From here on we fill the tables; requires fitted models and no errors.
  if (!ready || modelContainer$getError()) return()
  # One named vector of lavaan fit measures per fitted model.
  fmli <- lapply(modelContainer[["results"]][["object"]], lavaan::fitmeasures)
  # Fit indices rows; order must match the fitmeasures keys selected below.
  fitin[["index"]] <- c(
    gettext("Comparative Fit Index (CFI)"),
    gettext("Tucker-Lewis Index (TLI)"),
    gettext("Bentler-Bonett Non-normed Fit Index (NNFI)"),
    gettext("Bentler-Bonett Normed Fit Index (NFI)"),
    gettext("Parsimony Normed Fit Index (PNFI)"),
    gettext("Bollen's Relative Fit Index (RFI)"),
    gettext("Bollen's Incremental Fit Index (IFI)"),
    gettext("Relative Noncentrality Index (RNI)")
  )
  if (length(options[["models"]]) == 1) {
    fitin[["value"]] <- fmli[[1]][c("cfi", "tli", "nnfi", "nfi", "pnfi", "rfi", "ifi", "rni")]
  } else {
    for (i in seq_along(options[["models"]])) {
      fitin[[paste0("value_", i)]] <- fmli[[i]][c("cfi", "tli", "nnfi", "nfi", "pnfi", "rfi", "ifi", "rni")]
    }
  }
  # Information criteria rows ("bic2" is lavaan's sample-size adjusted BIC).
  fitic[["index"]] <- c(
    gettext("Log-likelihood"),
    gettext("Number of free parameters"),
    gettext("Akaike (AIC)"),
    gettext("Bayesian (BIC)"),
    gettext("Sample-size adjusted Bayesian (SSABIC)")
  )
  if (length(options[["models"]]) == 1) {
    fitic[["value"]] <- fmli[[1]][c("logl", "npar", "aic", "bic", "bic2")]
  } else {
    for (i in seq_along(options[["models"]])) {
      fitic[[paste0("value_", i)]] <- fmli[[i]][c("logl", "npar", "aic", "bic", "bic2")]
    }
  }
  # Other fit measure rows; "\u03B1" is the Greek alpha in Hoelter's N labels.
  fitot[["index"]] <- c(
    gettext("Root mean square error of approximation (RMSEA)"),
    gettextf("RMSEA 90%% CI lower bound"),
    gettextf("RMSEA 90%% CI upper bound"),
    gettext("RMSEA p-value"),
    gettext("Standardized root mean square residual (SRMR)"),
    gettextf("Hoelter's critical N (%s = .05)","\u03B1"),
    gettextf("Hoelter's critical N (%s = .01)","\u03B1"),
    gettext("Goodness of fit index (GFI)"),
    gettext("McDonald fit index (MFI)"),
    gettext("Expected cross validation index (ECVI)")
  )
  if (length(options[["models"]]) == 1) {
    fitot[["value"]] <- fmli[[1]][c("rmsea", "rmsea.ci.lower", "rmsea.ci.upper", "rmsea.pvalue",
                                    "srmr", "cn_05", "cn_01", "gfi", "mfi", "ecvi")]
  } else {
    for (i in seq_along(options[["models"]])) {
      fitot[[paste0("value_", i)]] <- fmli[[i]][c("rmsea", "rmsea.ci.lower", "rmsea.ci.upper", "rmsea.pvalue",
                                                  "srmr", "cn_05", "cn_01", "gfi", "mfi", "ecvi")]
    }
  }
}
# Populates the R-squared table: explained variance per endogenous variable,
# with one column per model and (with a grouping variable) one row block per
# group. Different models may contain different endogenous variables, so
# values are aligned by variable name before filling the table.
#
# @param modelContainer JASP container; fitted lavaan objects are read from
#   modelContainer[["results"]][["object"]].
# @param dataset unused here; kept for the uniform .sem* helper signature.
# @param options analysis options; reads "outputRSquared", "models",
#   "groupingVariable".
# @param ready logical; when FALSE only the empty table skeleton is created.
.semRsquared <- function(modelContainer, dataset, options, ready) {
  if (!options[["outputRSquared"]] || !is.null(modelContainer[["rsquared"]])) return()
  # init table
  tabr2 <- createJaspTable(gettext("R-Squared"))
  if (options[["groupingVariable"]] != "")
    tabr2$addColumnInfo(name = "__grp__", title = "", type = "string", combine = TRUE)
  tabr2$addColumnInfo(name = "__var__", title = "", type = "string")
  if (length(options[["models"]]) < 2) {
    tabr2$addColumnInfo(name = "rsq", title = "R\u00B2", type = "number", format = "sf:4;dp:3")
  } else {
    for (i in seq_along(options[["models"]])) {
      tabr2$addColumnInfo(name = paste0("rsq_", i), title = options[["models"]][[i]][["modelName"]],
                          overtitle = "R\u00B2", type = "number", format = "sf:4;dp:3")
    }
  }
  tabr2$dependOn(c("outputRSquared", "models"))
  tabr2$position <- .75
  modelContainer[["rsquared"]] <- tabr2
  if (!ready || modelContainer$getError()) return()
  # compute data and fill table
  if (options[["groupingVariable"]] == "") {
    if (length(options[["models"]]) < 2) {
      r2res <- lavaan::inspect(modelContainer[["results"]][["object"]][[1]], "r2")
      tabr2[["__var__"]] <- .unv(names(r2res))
      tabr2[["rsq"]] <- r2res
    } else {
      # determine the union of variable names over all models
      r2li <- lapply(modelContainer[["results"]][["object"]], lavaan::inspect, what = "r2")
      # generate df with these names
      r2df <- data.frame("varname__" = unique(unlist(lapply(r2li, names))))
      tabr2[["__var__"]] <- .unv(unique(unlist(lapply(r2li, names))))
      for (i in seq_along(r2li)) {
        # fill matching vars from model with df; unmatched rows stay NA
        r2df[match(names(r2li[[i]]), r2df[["varname__"]]), i + 1] <- r2li[[i]]
        # add column to table
        tabr2[[paste0("rsq_", i)]] <- r2df[[i + 1]]
      }
    }
  } else {
    if (length(options[["models"]]) < 2) {
      # r2res is a list with one element per group
      r2res <- lavaan::inspect(modelContainer[["results"]][["object"]][[1]], "r2")
      tabr2[["__grp__"]] <- rep(names(r2res), vapply(r2res, length, 0))
      tabr2[["__var__"]] <- .unv(unlist(lapply(r2res, names)))
      tabr2[["rsq"]] <- unlist(r2res)
    } else {
      # here is the most difficult case with multiple groups and multiple models
      # create a list with r2 results per model. each element is a list with ngroup elements
      r2li <- lapply(modelContainer[["results"]][["object"]], lavaan::inspect, what = "r2")
      # now comes the difficult part: determine unique variable names in each group
      # for each group, find all variable names in each model
      unique_per_group <- lapply(seq_along(r2li[[1]]), function(grp) {
        all_names <- lapply(r2li, function(r2res) {
          # get names for each model
          names(r2res[[grp]])
        })
        # find the unique variable names
        unique(unlist(all_names))
      })
      # generate df with these names
      r2df <- data.frame(
        "grpname__" = rep(names(r2li[[1]]), vapply(unique_per_group, length, 0)),
        "varname__" = unlist(unique_per_group),
        stringsAsFactors = FALSE
      )
      for (mod_idx in seq_along(r2li)) {
        for (grpname in names(r2li[[1]])) {
          # find correct rows in r2df for each model and group in r2li
          grp_idx <- which(r2df[["grpname__"]] == grpname)
          # complex code because varnames in r2res can be in different order
          row_idx <- grp_idx[match(names(r2li[[mod_idx]][[grpname]]), r2df[grp_idx, "varname__"])]
          # fill r2df with r2 results
          r2df[row_idx, mod_idx + 2] <- r2li[[mod_idx]][[grpname]]
        }
      }
      # fill jasp table with data
      tabr2[["__grp__"]] <- r2df[["grpname__"]]
      tabr2[["__var__"]] <- .unv(r2df[["varname__"]])
      for (i in seq_along(r2li)) tabr2[[paste0("rsq_", i)]] <- r2df[[i + 2]]
    }
  }
}
# Adds a table with Mardia's multivariate skewness and kurtosis coefficients
# for the observed variables used by the fitted model(s). Skewness is tested
# with a chi-square statistic, kurtosis with a z statistic.
#
# @param modelContainer JASP container holding fitted lavaan objects.
# @param dataset the analysis data; must be numeric in all used columns.
# @param options reads "outputMardiasCoefficients" and "models".
# @param ready logical; when FALSE only the empty table skeleton is created.
.semMardiasCoefficient <- function(modelContainer, dataset, options, ready) {
  # BUGFIX: the cache guard previously checked the key "semMardiasTable",
  # while the table is stored below under "mardiasTable", so the guard never
  # matched and the table was rebuilt on every call. Check the key that is
  # actually assigned.
  if (!options[["outputMardiasCoefficients"]] || !is.null(modelContainer[["mardiasTable"]])) return()
  mardiatab <- createJaspTable(title = gettext("Mardia's coefficients"))
  mardiatab$position <- .2
  mardiatab$addColumnInfo(name = "Type", title = "", type = "string")
  mardiatab$addColumnInfo(name = "Coefficient", title = gettext("Coefficient"), type = "number")
  mardiatab$addColumnInfo(name = "z", title = gettext("z"), type = "number")
  mardiatab$addColumnInfo(name = "Chisq", title = gettext("χ²"), type = "number")
  mardiatab$addColumnInfo(name = "DF", title = gettext("df"), type = "integer")
  mardiatab$addColumnInfo(name = "pvalue", title = gettext("p"), type = "pvalue")
  mardiatab$dependOn(c("outputMardiasCoefficients", "models"))
  modelContainer[["mardiasTable"]] <- mardiatab
  if (!ready || modelContainer$getError()) return()
  # Pool observed-variable names over all fitted models.
  varNames <- unique(unlist(lapply(modelContainer[["results"]][["object"]], lavaan::lavaanNames, type = "ov")))
  if (length(options[["models"]]) > 1)
    mardiatab$addFootnote(
      gettext("Multivariate skewness and kurtosis calculated for observed variables from all models.")
    )
  # Mardia's coefficients are only defined for numeric data; vapply gives a
  # type-stable check (unlike sapply).
  if (!all(vapply(dataset[, varNames, drop = FALSE], is.numeric, logical(1L)))) {
    mardiatab$setError(gettext("Not all used variables are numeric. Mardia's coefficients not available."))
    return()
  }
  # NOTE(review): mardiaSkew/mardiaKurtosis are unexported semTools helpers,
  # hence the ::: access.
  mardiaSkew <- unname(semTools:::mardiaSkew(dataset[, varNames]))
  mardiaKurtosis <- unname(semTools:::mardiaKurtosis(dataset[, varNames]))
  # Skewness row: [coefficient, chi-square, df, p-value].
  mardiatab$addRows(
    data.frame(Type = gettext("Skewness"),
               Coefficient = mardiaSkew[1],
               z = NA,
               Chisq = mardiaSkew[2],
               DF = mardiaSkew[3],
               pvalue = mardiaSkew[4])
  )
  # Kurtosis row: [coefficient, z, p-value].
  mardiatab$addRows(
    data.frame(Type = gettext("Kurtosis"),
               Coefficient = mardiaKurtosis[1],
               z = mardiaKurtosis[2],
               Chisq = NA,
               DF = NA,
               pvalue = mardiaKurtosis[3])
  )
  return()
}
# Dispatcher for the covariance-table output: creates the "Covariance tables"
# container once, then delegates one .semCovTables call per fitted model
# (a single unnamed call when there is only one model).
.semCov <- function(modelContainer, dataset, options, ready) {
  anyCovOutput <- options[["outputObservedCovariances"]] ||
    options[["outputImpliedCovariances"]] ||
    options[["outputResidualCovariances"]]
  if (!anyCovOutput || !is.null(modelContainer[["covars"]]))
    return()
  covContainer <- createJaspContainer(gettext("Covariance tables"))
  covContainer$position <- 3
  covContainer$dependOn(c("outputObservedCovariances", "outputImpliedCovariances",
                          "outputResidualCovariances", "outputStandardizedResiduals", "models"))
  modelContainer[["covars"]] <- covContainer
  fits <- modelContainer[["results"]][["object"]]
  models <- options[["models"]]
  if (length(models) < 2) {
    .semCovTables(fits[[1]], NULL, covContainer, options, ready)
  } else {
    for (idx in seq_along(models)) {
      .semCovTables(fits[[idx]], models[[idx]][["modelName"]], covContainer, options, ready)
    }
  }
}
# Builds and fills up to four covariance matrices (observed, implied, residual,
# standardized residual) for a single fitted model. Without a grouping
# variable each output is one table; with groups each output becomes a
# sub-container holding one table per group level. When modelname is NULL the
# output goes directly into parentContainer, otherwise into a collapsed
# sub-container named after the model.
# Note: tables/containers are created first (so headers render early) and
# filled after the ready check; the locals (octab/ictab/... resp.
# occont/iccont/...) created in the first half are reused in the second half,
# so the two halves must stay branch-for-branch in sync.
.semCovTables <- function(fit, modelname, parentContainer, options, ready) {
  if (is.null(modelname)) {
    cocont <- parentContainer
  } else {
    cocont <- createJaspContainer(modelname, initCollapsed = TRUE)
  }
  if (options[["groupingVariable"]] == "") {
    # without groups, these are tables
    if (options[["outputObservedCovariances"]]) {
      octab <- createJaspTable("Observed covariance matrix")
      octab$dependOn("outputObservedCovariances")
      octab$position <- 1
      cocont[["observed"]] <- octab
    }
    if (options[["outputImpliedCovariances"]]) {
      ictab <- createJaspTable("Implied covariance matrix")
      ictab$dependOn("outputImpliedCovariances")
      ictab$position <- 2
      cocont[["implied"]] <- ictab
    }
    if (options[["outputResidualCovariances"]]) {
      rctab <- createJaspTable("Residual covariance matrix")
      rctab$dependOn("outputResidualCovariances")
      rctab$position <- 3
      cocont[["residual"]] <- rctab
    }
    if (options[["outputStandardizedResiduals"]]) {
      srtab <- createJaspTable("Standardized residuals matrix")
      srtab$dependOn("outputStandardizedResiduals")
      srtab$position <- 4
      cocont[["stdres"]] <- srtab
    }
  } else {
    # with multiple groups these become containers (one table per group later)
    if (options[["outputObservedCovariances"]]) {
      occont <- createJaspContainer("Observed covariance matrix", initCollapsed = TRUE)
      occont$dependOn("outputObservedCovariances")
      occont$position <- 1
      cocont[["observed"]] <- occont
    }
    if (options[["outputImpliedCovariances"]]) {
      iccont <- createJaspContainer("Implied covariance matrix", initCollapsed = TRUE)
      iccont$dependOn("outputImpliedCovariances")
      iccont$position <- 2
      cocont[["implied"]] <- iccont
    }
    if (options[["outputResidualCovariances"]]) {
      rccont <- createJaspContainer("Residual covariance matrix", initCollapsed = TRUE)
      rccont$dependOn("outputResidualCovariances")
      rccont$position <- 3
      cocont[["residual"]] <- rccont
    }
    if (options[["outputStandardizedResiduals"]]) {
      srcont <- createJaspContainer("Standardized residuals matrix", initCollapsed = TRUE)
      srcont$dependOn("outputStandardizedResiduals")
      srcont$position <- 4
      cocont[["stdres"]] <- srcont
    }
  }
  if (!ready || !inherits(fit, "lavaan")) return()
  if (options[["groupingVariable"]] == "") {
    # without groups, just fill the tables
    if (options[["outputObservedCovariances"]]) {
      # observed (sample) covariance; only the lower triangle is shown
      ov <- lavaan::inspect(fit, "sampstat")
      oc <- ov$cov
      oc[upper.tri(oc)] <- NA
      for (i in 1:ncol(oc)) {
        nm <- colnames(oc)[i]
        octab$addColumnInfo(nm, title = .unv(nm), type = "number", format = "sf:4;dp:3;p:.001")
      }
      octab$addRows(oc, rowNames = colnames(oc))
    }
    if (options[["outputImpliedCovariances"]]) {
      # model-implied covariance
      fv <- lavaan::fitted.values(fit)
      ic <- fv$cov
      ic[upper.tri(ic)] <- NA
      for (i in 1:ncol(ic)) {
        nm <- colnames(ic)[i]
        ictab$addColumnInfo(nm, title = .unv(nm), type = "number", format = "sf:4;dp:3;p:.001")
      }
      ictab$addRows(ic, rowNames = colnames(ic))
    }
    if (options[["outputResidualCovariances"]]) {
      # residual covariance (observed minus implied)
      rv <- lavaan::residuals(fit)
      rc <- rv$cov
      rc[upper.tri(rc)] <- NA
      for (i in 1:ncol(rc)) {
        nm <- colnames(rc)[i]
        rctab$addColumnInfo(nm, title = .unv(nm), type = "number", format = "sf:4;dp:3;p:.001")
      }
      rctab$addRows(rc, rowNames = colnames(rc))
    }
    if (options[["outputStandardizedResiduals"]]) {
      # standardized residual covariance
      sv <- lavaan::residuals(fit, type = "standardized")
      sr <- sv$cov
      sr[upper.tri(sr)] <- NA
      for (i in 1:ncol(sr)) {
        nm <- colnames(sr)[i]
        srtab$addColumnInfo(nm, title = .unv(nm), type = "number", format = "sf:4;dp:3;p:.001")
      }
      srtab$addRows(sr, rowNames = colnames(sr))
    }
  } else {
    # with groups, create one table per group level and fill it
    if (options[["outputObservedCovariances"]]) {
      # observed (sample) covariance per group
      ov <- lavaan::inspect(fit, "sampstat")
      level_names <- names(ov)
      for (i in 1:length(ov)) {
        oc <- ov[[i]]$cov
        oc[upper.tri(oc)] <- NA
        occont[[level_names[i]]] <- createJaspTable(level_names[i])
        for (j in 1:ncol(oc)) {
          nm <- colnames(oc)[j]
          occont[[level_names[i]]]$addColumnInfo(nm, title = .unv(nm), type = "number", format = "sf:4;dp:3;p:.001")
        }
        occont[[level_names[i]]]$addRows(oc, rowNames = colnames(oc))
      }
    }
    if (options[["outputImpliedCovariances"]]) {
      # model-implied covariance per group
      fv <- lavaan::fitted.values(fit)
      level_names <- names(fv)
      for (i in 1:length(fv)) {
        ic <- fv[[i]]$cov
        ic[upper.tri(ic)] <- NA
        iccont[[level_names[i]]] <- createJaspTable(level_names[i])
        for (j in 1:ncol(ic)) {
          nm <- colnames(ic)[j]
          iccont[[level_names[i]]]$addColumnInfo(nm, title = .unv(nm), type = "number", format = "sf:4;dp:3;p:.001")
        }
        iccont[[level_names[i]]]$addRows(ic, rowNames = colnames(ic))
      }
    }
    if (options[["outputResidualCovariances"]]) {
      # residual covariance per group
      rv <- lavaan::residuals(fit)
      level_names <- names(rv)
      for (i in 1:length(rv)) {
        rc <- rv[[i]]$cov
        rc[upper.tri(rc)] <- NA
        rccont[[level_names[i]]] <- createJaspTable(level_names[i])
        for (j in 1:ncol(rc)) {
          nm <- colnames(rc)[j]
          rccont[[level_names[i]]]$addColumnInfo(nm, title = .unv(nm), type = "number", format = "sf:4;dp:3;p:.001")
        }
        rccont[[level_names[i]]]$addRows(rc, rowNames = colnames(rc))
      }
    }
    if (options[["outputStandardizedResiduals"]]) {
      # standardized residual covariance per group
      sv <- lavaan::residuals(fit, type = "standardized")
      level_names <- names(sv)
      for (i in 1:length(sv)) {
        sr <- sv[[i]]$cov
        sr[upper.tri(sr)] <- NA
        srcont[[level_names[i]]] <- createJaspTable(level_names[i])
        for (j in 1:ncol(sr)) {
          nm <- colnames(sr)[j]
          srcont[[level_names[i]]]$addColumnInfo(nm, title = .unv(nm), type = "number", format = "sf:4;dp:3;p:.001")
        }
        srcont[[level_names[i]]]$addRows(sr, rowNames = colnames(sr))
      }
    }
  }
  if (!is.null(modelname)) {
    parentContainer[[modelname]] <- cocont
  }
  return()
}
# Dispatcher for the modification-indices output: creates the container once,
# then delegates one .semMITable call per fitted model (a single unnamed call
# when there is only one model).
.semMI <- function(modelContainer, dataset, options, ready) {
  if (!options[["outputModificationIndices"]] || !is.null(modelContainer[["modindices"]]))
    return()
  miContainer <- createJaspContainer(gettext("Modification indices"))
  miContainer$position <- 4
  miContainer$dependOn(c("outputModificationIndices", "miHideLow", "miThreshold", "models"))
  modelContainer[["modindices"]] <- miContainer
  fits <- modelContainer[["results"]][["object"]]
  models <- options[["models"]]
  if (length(models) < 2) {
    .semMITable(fits[[1]], NULL, miContainer, options, ready)
  } else {
    for (idx in seq_along(models)) {
      .semMITable(fits[[idx]], models[[idx]][["modelName"]], miContainer, options, ready)
    }
  }
}
# Builds and fills the modification indices table for a single fitted model.
# When modelname is NULL the table goes directly into parentContainer;
# otherwise it is wrapped in a collapsed sub-container named after the model.
#
# @param fit fitted lavaan object (anything else leaves the table empty).
# @param modelname display name of the model, or NULL for a single model.
# @param parentContainer JASP container receiving the table/sub-container.
# @param options reads "groupingVariable", "miHideLow", "miThreshold".
# @param ready logical; when FALSE only the empty table skeleton is created.
.semMITable <- function(fit, modelname, parentContainer, options, ready) {
  if (is.null(modelname)) {
    micont <- parentContainer
  } else {
    micont <- createJaspContainer(modelname, initCollapsed = TRUE)
  }
  semModIndicesTable <- createJaspTable(title = gettext("Modification Indices"))
  semModIndicesTable$addColumnInfo(name = "lhs", title = "", type = "string")
  semModIndicesTable$addColumnInfo(name = "op", title = "", type = "string")
  semModIndicesTable$addColumnInfo(name = "rhs", title = "", type = "string")
  if (options[["groupingVariable"]] != "")
    semModIndicesTable$addColumnInfo(name = "group", title = gettext("group"), type = "string")
  semModIndicesTable$addColumnInfo(name = "mi", title = gettext("mi"), type = "number")
  semModIndicesTable$addColumnInfo(name = "epc", title = gettext("epc"), type = "number")
  semModIndicesTable$addColumnInfo(name = "sepc.lv", title = gettext("sepc (lv)"), type = "number")
  semModIndicesTable$addColumnInfo(name = "sepc.all", title = gettext("sepc (all)"), type = "number")
  semModIndicesTable$addColumnInfo(name = "sepc.nox", title = gettext("sepc (nox)"), type = "number")
  semModIndicesTable$showSpecifiedColumnsOnly <- TRUE
  micont[["table"]] <- semModIndicesTable
  if (!ready || !inherits(fit, "lavaan")) return()
  # Extract modification indices. modificationIndices() is exported by lavaan,
  # so use :: rather than reaching into the internal namespace with :::.
  semModIndResult <- lavaan::modificationIndices(fit)
  # Remove rows for which no index could be computed:
  semModIndResult <- semModIndResult[!is.na(semModIndResult$mi), , drop = FALSE]
  # Sort by modification index, largest first:
  semModIndResult <- semModIndResult[order(semModIndResult$mi, decreasing = TRUE), , drop = FALSE]
  # Optionally hide indices at or below the user-specified threshold:
  if (isTRUE(options$miHideLow)) {
    semModIndResult <- semModIndResult[semModIndResult$mi > options$miThreshold, , drop = FALSE]
  }
  # Map the numeric group index to the group label for display.
  if (options[["groupingVariable"]] != "")
    semModIndResult[["group"]] <- lavaan::lavInspect(fit, "group.label")[semModIndResult[["group"]]]
  semModIndicesTable$setData(lapply(semModIndResult, .unv))
  if (!is.null(modelname)) {
    parentContainer[[modelname]] <- micont
  }
  return()
}
#' Add the path-diagram output container and create one diagram per model.
.semPathPlot <- function(modelContainer, dataset, options, ready) {
  skip <- !options[["outputPathPlot"]] || !ready || !is.null(modelContainer[["plot"]])
  if (skip) return()

  pathContainer <- createJaspContainer(gettext("Path diagram"))
  pathContainer$position <- 7
  pathContainer$dependOn(c("outputPathPlot", "pathPlotPar", "pathPlotLegend", "models"))
  modelContainer[["plot"]] <- pathContainer

  fits   <- modelContainer[["results"]][["object"]]
  models <- options[["models"]]
  if (length(models) < 2) {
    # Single model: one untitled diagram directly in the container.
    .semCreatePathPlot(fits[[1]], NULL, pathContainer, options, ready)
  } else {
    for (idx in seq_along(models)) {
      .semCreatePathPlot(fits[[idx]], models[[idx]][["modelName"]], pathContainer, options, ready)
    }
  }
}
#' Create a path diagram (one plot, or one plot per group) for a fitted model.
#'
#' @param fit A fitted lavaan object; anything else (e.g. a fit error) leaves
#'   an empty placeholder.
#' @param modelname Title of the plot; NULL falls back to "Path diagram".
#' @param parentContainer Jasp container the plot (or plot container) goes in.
#' @param options Analysis options list.
#' @param ready Whether the analysis inputs are complete.
.semCreatePathPlot <- function(fit, modelname, parentContainer, options, ready) {
  if (is.null(modelname)) {
    modelname <- gettext("Path diagram")
  }
  # With a grouping variable there is one diagram per group, so create a
  # (collapsed) container of plots instead of a single plot.
  if (options[["groupingVariable"]] == "") {
    plt <- createJaspPlot(title = modelname, width = 600, height = 400)
  } else {
    plt <- createJaspContainer(title = modelname, initCollapsed = TRUE)
  }
  plt$dependOn(c("outputPathPlot", "pathPlotPar", "pathPlotLegend"))
  parentContainer[[modelname]] <- plt
  if (!ready || !inherits(fit, "lavaan")) return()
  # Models estimated with ordinal (declared-ordered) variables cannot be drawn.
  if (length(lavaan::lavInspect(fit, "ordered")) > 0) {
    plt$setError(gettext("Model plot not available with ordinal variables"))
    return()
  }
  # create a qgraph object using semplot
  po <- .lavToPlotObj(fit)
  pp <- .suppressGrDevice(semPlot::semPaths(
    object = po,
    layout = "tree2",
    intercepts = FALSE,
    reorder = FALSE,
    # Edge labels: parameter estimates when requested, otherwise parameter names.
    whatLabels = ifelse(options[["pathPlotPar"]], "par", "name"),
    edge.color = "black",
    color = list(lat = "#EAEAEA", man = "#EAEAEA", int = "#FFFFFF"),
    title = FALSE,
    legend = options[["pathPlotLegend"]],
    legend.mode = "names",
    legend.cex = 0.6,
    label.cex = 1.3,
    edge.label.cex = 0.9,
    nodeNames = decodeColNames(po@Vars$name),
    nCharNodes = 3,
    rotation = 2,
    ask = FALSE
  ))
  if (options[["groupingVariable"]] == "") {
    plt$plotObject <- pp
  } else {
    # Multi-group fit: semPaths returned one qgraph per group; wrap each in
    # its own Jasp plot named after the group label.
    level_names <- lavaan::lavInspect(fit, "group.label")
    for (i in seq_along(level_names)) {
      plt[[level_names[i]]] <- createJaspPlot(title = level_names[i], width = 600, height = 400)
      plt[[level_names[i]]]$plotObject <- pp[[i]]
    }
  }
}
|
38329f5f76883b1d9027d0954bd575d1eb319138
|
620e89c282f4379c694b40a75faeee8bef7a7913
|
/man/isotree.subset.trees.Rd
|
22f2a7e0567e6ae012eaad070a36c1765772a31c
|
[
"BSD-2-Clause"
] |
permissive
|
david-cortes/isotree
|
7f33092398389dd71bc58ba1406c69ea22475bad
|
edbdd569b36e2942b006c8f710d6dcc0f0ca3033
|
refs/heads/master
| 2023-08-07T16:11:38.305331
| 2023-07-28T20:16:28
| 2023-07-28T20:16:28
| 217,133,055
| 157
| 39
|
BSD-2-Clause
| 2022-07-27T18:16:07
| 2019-10-23T19:04:02
|
C++
|
UTF-8
|
R
| false
| true
| 717
|
rd
|
isotree.subset.trees.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/isoforest.R
\name{isotree.subset.trees}
\alias{isotree.subset.trees}
\title{Subset trees of a given model}
\usage{
isotree.subset.trees(model, trees_take)
}
\arguments{
\item{model}{An `isolation_forest` model object.}
\item{trees_take}{Indices of the trees of `model` to copy over to a new model,
as an integer vector.
Must be integers, with numbering starting at one.}
}
\value{
A new isolation forest model object, containing only the subset of trees
from this `model` that was specified under `trees_take`.
}
\description{
Creates a new isolation forest model containing only selected trees of a
given isolation forest model object.
}
|
95d858ead6368062075b6ad74d1dd59711f8417b
|
80a0d8483a6df9635012d4ceb1683207a6e77921
|
/man/DiDiSTATIS_Summary_Stats_Table.Rd
|
d483519930b42e0d8ab129f729236b684656a5f3
|
[] |
no_license
|
michaelkriegsman/DiDiSTATIS
|
e11f9d21fa9ff18cf6b5daea7460e1830792f077
|
c2a6e6f84af7dd9718992d3c02b0b646b1f3e90e
|
refs/heads/master
| 2021-07-13T02:50:23.138682
| 2020-05-13T20:32:34
| 2020-05-13T20:32:34
| 130,097,579
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 479
|
rd
|
DiDiSTATIS_Summary_Stats_Table.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/DiDiSTATIS_Summary_Stats_Table.R
\name{DiDiSTATIS_Summary_Stats_Table}
\alias{DiDiSTATIS_Summary_Stats_Table}
\title{Print a table of summary stats}
\usage{
DiDiSTATIS_Summary_Stats_Table(res_DiDiSTATIS, main = NULL)
}
\arguments{
\item{res_DiDiSTATIS}{DiDiSTATIS output with all inference}
\item{main}{Table title}
}
\value{
A table of summary stats
}
\description{
Print a table of summary stats
}
|
a5dc393edb80005611840ce3897b23d19f6f0dbb
|
58accfda666d580c22762e9df34e7524a0771b6a
|
/ASLO/figure_making/making_new_plots.R
|
b377ed5ddb21f283fc8de874d62b2cbb1afa1a06
|
[] |
no_license
|
mmh1133/CMOP
|
39135cb534b4092550a3b963b6badef6d2fd56f7
|
f2e3d679ec7e8c4e27b34a9ece6a405d4ac20cef
|
refs/heads/master
| 2020-04-16T02:17:26.090747
| 2017-04-20T22:16:22
| 2017-04-20T22:16:22
| 23,399,631
| 0
| 1
| null | 2014-11-14T00:32:30
| 2014-08-27T17:53:24
|
R
|
UTF-8
|
R
| false
| false
| 9,951
|
r
|
making_new_plots.R
|
# making_new_plots.R -- CMOP_6 cruise figures: crypto abundance, salinity,
# division rate and PAR, plus lab-experiment model-vs-cell-count comparison.
#
# NOTE(review): this file contained unresolved git merge-conflict markers
# (<<<<<<< / ======= / >>>>>>>), which made the script unparseable. The
# conflicts have been resolved in favor of the most recent branch content
# (commit 3fa2a63 where present, otherwise b92b220), keeping the
# "/Users/francois/..." data paths for consistency with the unconditional
# read.csv() call in the salinity setup.
library(popcycle)
set.evt.location("/Volumes/seaflow/CMOP_6")
set.project.location("~/CMOP_2013_f2")
set.cruise.id("CMOP_6")
cruise <- "CMOP_6"
library(rgl)
library(ggplot2)
library(zoo)
library(plotrix)

###############################################################
#### all the setup for plotting everything you ever wanted ####
###############################################################

#### setting up abundance data ####
stat <- get.stat.table() # to load the aggregate statistics
stat$time <- as.POSIXct(stat$time, format="%FT%T", tz='GMT')
# subsetting files #
pre.crypto1 <- subset(stat, pop == 'crypto')
id <- which(pre.crypto1$flow_rate < 2400) # drop files that have low flow rate
pre.crypto2 <- pre.crypto1[-id,]
crypto <- subset(pre.crypto2, time > as.POSIXct("2013-09-23 22:50:00") & time < as.POSIXct("2013-09-26 00:50:00"))
# rolling mean abundance (24-point window, scaled to daily) #
pre.crypto2$daily.mean <- rollapply(data=pre.crypto2$abundance, width=24, FUN=mean, na.rm=T, fill=NA)*24

#### setting up salinity ####
pre.flu <- read.csv("/Users/francois/CMOP/auxillary_data/salinityCMOP_6")
pre.flu2 <- as.data.frame(pre.flu, row.names=NULL)
pre.flu2$time <- as.POSIXct(strptime(pre.flu2$time.YYYY.MM.DD.hh.mm.ss.PST., "%Y/%m/%d %H:%M:%S"), tz="GMT")
flu <- subset(pre.flu2, time > as.POSIXct("2013-09-23 22:50:00") & time < as.POSIXct("2013-09-26 00:50:00"))

#### setting up binned data ####
yay <- read.csv("/Users/francois/CMOP/CMOP_field/crypto_HD_CMOP_6.binned.csv")
yay$daily.GRmean <- rollapply(data=yay$h.dr.mean, width=24, FUN=mean, na.rm=T, fill=NA)*24
yay$daily.GRsd <- rollapply(data=yay$h.dr.sd, width=24, FUN=mean, na.rm=T, fill=NA)*24

#### setting up PAR data ####
in.dir <- out.dir <- "/Users/francois/CMOP/CMOP_field"
Par.path <- paste0(in.dir, "/Par_", cruise)
Par <- read.csv(Par.path, sep=",")
Par$time <- as.POSIXct(Par$time, format="%Y/%m/%d %H:%M:%S", tz="GMT")
Par$num.time <- as.numeric(Par$time)
Par2 <- subset(Par, time > as.POSIXct("2013-09-10 16:50:00") & time < as.POSIXct("2013-10-03 23:58:00"))

# Daily PAR maxima over the 23-day window (days with too few records are
# skipped with an "error" message).
Par4 <- as.data.frame(matrix(data=NA, nrow=23, ncol=2))
colnames(Par4) <- c("par.max", "time")
start <- as.POSIXct("2013-09-10 16:50:00")
for (i in 1:23) {
  print(i)
  end <- start + 86400
  sub <- subset(Par2, Par2$time > start & Par2$time < end)
  if (nrow(sub) > 46) {
    Par4$par.max[i] <- max(sub$par, na.rm=T)
  } else print(paste("error"))
  Par4$time[i] <- sub[which(sub$par == max(sub$par)), 'time']
  start <- end
}
Par4$time2 <- as.POSIXct(Par4$time, origin="1970-01-01", tz='GMT')

##########################################################################################################################

########################
#### abundance plot ####
########################
par(mai=c(1,1.5,1,1))
plot(pre.crypto2$time, pre.crypto2$abundance, lwd=2, pch=16, xlab="", ylab="abundance (10^6 cells/L)", cex.lab=2, ylim=c(0,1))
## axis break so the plot doesn't look empty:
gap.plot(pre.crypto2$time, pre.crypto2$abundance, gap = c(5,17), lwd=2, pch=16, xlab="time", ylab="abundance (10^6 cells/L)", ylim=c(0,20))

################################
#### one day abundance plot ####
################################
plot(crypto$time, crypto$abundance, lwd=2, pch=16, xlab="", ylab="abundance (10^6 cells/L)", cex.lab=1.5)

#####################################
#### salinity vs. abundance plot ####
#####################################
par(mai=c(1,1.5,1,1))
plot(crypto$time, crypto$abundance, type="n", ylab="abundance (10^6 cells/L)", xlab="", cex.lab=1.5)
points(smooth.spline(as.POSIXct(crypto$time, origin="1970-01-01", tz='GMT'), crypto$abundance, spar=0.5), lwd=2, pch=16, xlab="", ylab="", type="l", cex=5)
par(new=T)
plot(flu$time, flu$water_salinity, xlab="", ylab="", axes=F, type="n")
points(smooth.spline(as.POSIXct(flu$time, origin="1970-01-01", tz='GMT'), flu$water_salinity, spar=0.5), lwd=2, col="cyan4", pch=16, xlab="", ylab="", axes=F)
#type="l", cex=2, lty=2
axis(4)
mtext("salinity", side=4, line=3, cex=1.5)
legend(1380100000, 0.35, c("crypto abundance", "salinity"), lty=c(1,1), lwd=c(2.5,2.5), col=c("darkred", "darkblue"))
#legend not working probably due to time issue

###########################
#### new sal vs. abund ####
###########################
plot(crypto$time, crypto$abundance, ylab="abundance (10^6 cells/L)", xlab="", cex.lab=1.5, pch=16)
points(smooth.spline(as.POSIXct(crypto$time, origin="1970-01-01", tz='GMT'), crypto$abundance, spar=0.5), lwd=2, pch=16, xlab="", ylab="", type="l", cex=5, axes=F)
par(new=T)
plot(flu$time, flu$water_salinity, xlab="", ylab="", axes=F, type="n")
points(smooth.spline(as.POSIXct(flu$time, origin="1970-01-01", tz='GMT'), flu$water_salinity, spar=0.5), lwd=2, col="cyan4", pch=16, xlab="", ylab="", axes=F)
#type="l", cex=2, lty=2
axis(4)
mtext("salinity", side=4, line=3, cex=1.5)
legend(1380100000, 0.35, c("crypto abundance", "salinity"), lty=c(1,1), lwd=c(2.5,2.5), col=c("darkred", "darkblue"))
#legend not working probably due to time issue

#######################
#### div rate plot ####
#######################
par(mai=c(1,1.5,1,1))
plotCI(as.POSIXct(yay$h.time, origin="1970-01-01", tz='GMT'), yay$daily.GRmean, uiw= yay$daily.GRsd, sfrac=0, pch=16, xlab="", ylab="mean daily division rate", cex.lab=1.5)
## IMPORTANT NOTE: I still can't figure out why the binned file is still the old one?
## the new div rate should be under 2.5 max
## can you recommit the new file to git when you find it?

#####################################
#### div rate vs. abundance plot ####
#####################################
par(mai=c(1,1.5,1,1))
plotCI(as.POSIXct(yay$h.time, origin="1970-01-01", tz='GMT'), yay$daily.GRmean, uiw= yay$daily.GRsd, sfrac=0, pch=16, xlab="", ylab="mean daily division rate", cex.lab=1.5)
#ylim=c(0,20)
par(new=T)
plot(pre.crypto2$time, pre.crypto2$abundance, lwd=2, pch=16, xlab="", ylab="", cex.lab=2, axes=F, cex=.75, col="darkred", ylim=c(0,1))
axis(4)
mtext("abundance (10^6 cells/L)", side=4, line=3, cex=1.5)
par(mai=c(1,1.5,1,1))
plotCI(as.POSIXct(yay$h.time, origin="1970-01-01", tz='GMT'), yay$daily.GRmean, uiw= yay$daily.GRsd, sfrac=0, pch=16, xlab="", ylab="mean daily division rate", cex.lab=1.5)
#ylim=c(0,20)
par(new=T)
plot(pre.crypto2$time, pre.crypto2$daily.mean, lwd=2, pch=16, xlab="", ylab="", cex.lab=2, axes=F, cex=.75, col="darkred")
axis(4)
mtext("mean daily abundance (10^6 cells/L)", side=4, line=3, cex=1.5)

###############################
#### div rate vs. PAR plot ####
###############################
par(mai=c(1,1,1,1))
plotCI(as.POSIXct(yay$h.time, origin="1970-01-01", tz='GMT'), yay$daily.GRmean, uiw= yay$daily.GRsd, sfrac=0, pch=16, xlab="", ylab="mean daily division rate", cex.main=2, cex.lab=1.5, axes=F)
axis(2)
mtext("mean daily division rate", side=2, line=3, cex=1.5)
par(new=T)
plot(Par4$time2, Par4$par.max, col="darkblue", pch=16, axes=F, type="o", xlab="", ylab="", cex.lab=1.5)
axis(4)
mtext("PAR", side=4, line=3, cex=1.5)
plot(Par2$time, Par2$par, col="darkblue", pch=16, xlab="", cex.lab=1.5, ylab="PAR")

#####################################################################################################

##########################
#### non field data!! ####
##########################
## setup ##
library(lmodel2)
home <- "/Users/francois/CMOP/"
out.dir <- paste0(home, "Rhodo_labExperiment/")
m <- read.csv(paste0(out.dir, "model_output-V2.csv"))
cc <- read.csv(paste0(out.dir, "RHODO_div-rate.csv"))[-1,]

##################################
#### div rate of model vs. cc ####
##################################
par(pty='m')
plotCI(as.POSIXct(cc$time, origin="1970-01-01"), cc$div, cc$div.se, ylim=c(0,0.05), sfrac=0, lwd=2, pch=16, cex=1, ylab=NA, xlab=NA)
plotCI(m$time, m$div.ave, m$div.sd, col=2, add=T, sfrac=0, lwd=2, pch=16, cex=1)
mtext(substitute(paste("Division (h"^{-1}, ")")), side=2, line=3, cex=1)
mtext("time", side=1, line=3, cex=1)
|
736f04266a3a25ee8ecc570299db328c0b6b204d
|
c98196475185bc4a99d92bd7fc25042ce9394661
|
/R/rstar_figure.R
|
0458ed56c154535a6a834954c264ae942d785def
|
[] |
no_license
|
traitecoevo/competition_kernels
|
b69e86e836daed00614b84ae1ec4daf82c8e6fad
|
c6370be27df27735c139eccac02404b6aa72966f
|
refs/heads/master
| 2023-03-27T10:26:42.490901
| 2021-03-25T18:34:55
| 2021-03-25T18:37:53
| 17,126,686
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,650
|
r
|
rstar_figure.R
|
## NOTE: If changing parameters, be sure to update (manually) the
## values in appendix sec:Rstar.

# Build the four R* scenarios used in the figure and run the invasion
# analysis (via dat_rstar1) for each: two scenarios with fixed consumption
# matrices (equal vs unequal consumption rates) and two where the parameters
# are built from the trade-off function with equal vs unequal resource supply.
dat_rstar <- function() {
  # Resident trait pairs for each scenario.
  x1 <- c(0.6, 0.5)
  x2 <- c(0.7, 0.9)
  # Consumption-rate vectors for the fixed-matrix scenarios.
  C1 <- c(.2, .2) * 10
  C2 <- c(.3, .7)
  x1b <- c(0.6, 0.5)
  x2b <- c(0.9, 0.7)
  p1 <- rstar_parameters(rstar_mat_2_tradeoff, matrix(C1, nrow=2), S=0.5)
  p2 <- rstar_parameters(rstar_mat_2_tradeoff, matrix(C2, nrow=2), S=0.5)
  # NOTE(review): p1b/p2b pass the trade-off *function* as the second argument
  # where p1/p2 pass a consumption matrix -- presumably rstar_parameters()
  # accepts either form; confirm against its definition.
  p1b <- rstar_parameters(rstar_mat_2_tradeoff, rstar_mat_2_tradeoff, S=0.5)
  p2b <- rstar_parameters(rstar_mat_2_tradeoff, rstar_mat_2_tradeoff, S=c(0.7, 0.3))
  list(dat_rstar1(p1, x1),
       dat_rstar1(p2, x2),
       dat_rstar1(p1b, x1b),
       dat_rstar1(p2b, x2b))
}
# Run the invasion analysis once per resident trait value and collect the
# pieces needed for plotting into one flat list. The invader trait axis,
# growth rate, and carrying capacity are taken from the first run; resident
# densities, invader fitness, and competition coefficients are kept per
# resident.
dat_rstar1 <- function(p, x_resident) {
  run_one <- function(trait) rstar_competition(matrix(trait, 1, 1), p)
  runs <- lapply(x_resident, run_one)
  first  <- runs[[1]]
  second <- runs[[2]]
  list(p  = p,
       x  = drop(first$x_invade),
       r  = first$r_invade,
       K  = first$K_invade,
       x1 = x_resident[[1]],
       x2 = x_resident[[2]],
       N1 = first$N_resident,
       N2 = second$N_resident,
       w1 = first$w_invade,
       w2 = second$w_invade,
       a1 = first$alpha,
       a2 = second$alpha)
}
# Draw the 2x2 panel figure: competition coefficient (alpha) against invader
# trait for the first two scenarios from dat_rstar(). Rows: equal vs unequal
# resources; columns: the two resident trait values. Dashed reference lines
# mark alpha = 1 and the resident's own trait.
fig_rstar <- function() {
  dat <- dat_rstar()
  dat1 <- dat[[1]]
  dat2 <- dat[[2]]
  # NOTE(review): `xx` appears unused below -- possibly left over.
  xx <- seq(0, 1, length.out=6)
  ylim_alpha <- c(.45, 1.75)
  par(mfrow=c(2, 2), mar=rep(1, 4), oma=c(3, 3, 1, 1))
  # Panel 1: scenario 1, resident x1.
  plot(dat1$x, dat1$a1, type="l", ylim=ylim_alpha, las=1, xaxt="n")
  axis(1, labels=FALSE)
  abline(h=1.0, v=dat1$x1, lty=2, col="darkgrey")
  points(dat1$x1, 1.0, pch=19)
  add_black_bar(dat1$x, dat1$w1)
  label_panel(1)
  # Shared outer axis labels for the whole 2x2 figure.
  mtext(expression("Competition (" * alpha * ")"), 2, 1.6, outer=TRUE)
  mtext("Trait value", 1, 1.8, outer=TRUE)
  # mtext("Away from attractor", 3, 0.5, xpd=NA, cex=.8)
  # Panel 2: scenario 1, resident x2.
  plot(dat1$x, dat1$a2, type="l", ylim=ylim_alpha, xaxt="n", yaxt="n")
  axis(1, labels=FALSE)
  axis(2, labels=FALSE)
  abline(h=1.0, v=dat1$x2, lty=2, col="darkgrey")
  points(dat1$x2, 1.0, pch=19)
  label_panel(2)
  # mtext("At attractor", 3, 0.5, xpd=NA, cex=.8)
  mtext("Equal resources", 4, 0.5, xpd=NA, cex=.8)
  # Panel 3: scenario 2, resident x1.
  plot(dat2$x, dat2$a1, type="l", ylim=ylim_alpha, las=1)
  abline(h=1.0, v=dat2$x1, lty=2, col="darkgrey")
  points(dat2$x1, 1.0, pch=19)
  add_black_bar(dat2$x, dat2$w1)
  label_panel(3)
  # Panel 4: scenario 2, resident x2.
  plot(dat2$x, dat2$a2, type="l", ylim=ylim_alpha, yaxt="n")
  axis(2, labels=FALSE)
  abline(h=1.0, v=dat2$x2, lty=2, col="darkgrey")
  points(dat2$x2, 1.0, pch=19)
  label_panel(4)
  mtext("Unequal resources", 4, 0.5, xpd=NA, cex=.8)
}
# Plot the decomposed quantities (trait axis, growth, carrying capacity,
# resident traits/densities, invader fitness, competition coefficients) for
# the i-th scenario produced by dat_rstar().
fig_rstar_components <- function(i) {
  scenario <- dat_rstar()[[i]]
  with(scenario,
       plot_components(x, r, K, x1, x2, N1, N2, w1, w2, a1, a2))
}
|
ea2ed1d3750692f9e8bdf0ff92760a312a7f30ac
|
ff95b3fadf3c9525be307c8ea8982e41d38a7dce
|
/plot2.R
|
6733015972f1b0754be7d299fbb7dd7a8dd895ee
|
[] |
no_license
|
Tyler-Axdorff/ExData_Plotting1
|
8cea9ebcf3402ecd86ec81b34459ba908934eb62
|
3eded60e1c0780461d5ca25119ce10433094c551
|
refs/heads/master
| 2021-01-15T09:18:31.290595
| 2015-05-10T20:14:47
| 2015-05-10T20:14:47
| 35,384,107
| 0
| 0
| null | 2015-05-10T19:08:07
| 2015-05-10T19:08:06
| null |
UTF-8
|
R
| false
| false
| 1,073
|
r
|
plot2.R
|
# plot2.R -- line plot of Global Active Power over 2007-02-01/02, saved to
# plot2.png (480 x 480).

# Load the full dataset; fields are ';'-separated and "?" marks missing values.
power <- read.csv("./household_power_consumption.txt",
                  header = TRUE, sep = ";", na.strings = "?",
                  quote = "\"", stringsAsFactors = FALSE)

# Parse the Date column, then keep only the two target days.
power$Date <- as.Date(power$Date, format = "%d/%m/%Y")
feb <- power[power$Date >= "2007-02-01" & power$Date <= "2007-02-02", ]

# Combine date and time into one POSIXct timestamp so the x-axis is
# continuous rather than binned by day or hour.
feb$DateTime <- as.POSIXct(paste(as.Date(feb$Date), feb$Time))

# Line plot: kilowatts on y, timestamp on x, no x-axis label, no title.
plot(feb$Global_active_power ~ feb$DateTime, type = "l",
     ylab = "Global Active Power (kilowatts)", xlab = "")

# Copy the on-screen plot to 'plot2.png' and close the device.
dev.copy(png, file = "plot2.png", height = 480, width = 480)
dev.off()
|
f1f9f705650d33f2b9c46005a5a4e884031c5069
|
e67f3901197c81d982db034d42ddde4f3d3c703f
|
/src/processing/states/MI.R
|
9c1ced04c364007de9ea1df30cc7a314ffe5440c
|
[] |
no_license
|
sneakers-the-rat/openpolicing
|
29d3ab03a3fa9e323660521318da48b15ac515af
|
97f58158eca31c56fd3b35083d7cbf1ac6949b66
|
refs/heads/master
| 2020-05-01T04:42:59.217759
| 2019-03-23T11:46:51
| 2019-03-23T11:46:51
| 177,282,043
| 0
| 0
| null | 2019-03-23T11:44:32
| 2019-03-23T11:44:32
| null |
UTF-8
|
R
| false
| false
| 3,106
|
r
|
MI.R
|
# Processing code for Michigan
#
# Reads the raw Michigan State Police stop data (one row per violation),
# collapses it to one row per stop, and maps the columns onto the
# project-wide standardized schema before writing the cleaned state file.

# Set-up
this_state <- 'MI'
change_path(this_state)

# Read in and combine data
print(sprintf("[%s] reading in the data", this_state))
colnames <- read_csv("colnames.csv", col_names='col')
d <- read_csv("data.csv", col_names=colnames$col, na=c("NA","","NULL")) %>%
  # Separate out date and time
  separate(TicketDate, c("date","time"), " ")

# Value dictionaries: raw race codes -> labels. The *_clean vector collapses
# "Native American" to "Other" and maps "Unknown" to NA.
race_keys <- c("A", "B", "H", "I", "U", "W")
race_vals <- c("Asian", "Black", "Hispanic", "Native American", "Unknown", "White")
race_vals_clean <- c("Asian", "Black", "Hispanic", "Other", NA, "White")

# Do a group_by to remove duplicate stops because each row corresponds not to
# a stop but to a violation reason: outcome flags take the max over the
# stop's rows and descriptions are concatenated with ';;'.
print(sprintf("Prior to removing duplicates, %i rows.", nrow(d)))
d = group_by(d, PrimaryOfficerID, date, time, VehicleID, CountyCode, UponStreet, Department, Race, ArrestNum) %>%
  summarise(Felony = max(Felony),
            Misdemeanor = max(Misdemeanor),
            CivilInfraction = max(CivilInfraction),
            Warning = max(Warning),
            Description = paste0(Description, collapse = ';;')) %>%
  ungroup()
print(sprintf("After removing duplicates, %i rows.", nrow(d)))

# Rename and extract columns
print(sprintf("[%s] extracting columns", this_state))
d$state <- this_state
d$stop_date <- make_date(d$date)
d$stop_time <- strftime(strptime(d$time, "%H:%M:%S"), format='%H:%M')
d$id <- make_row_id(d)
d$location_raw <- d$CountyCode
counties_clean <- normalize_county(d)
d$county_name <- counties_clean$county_name
d$county_fips <- counties_clean$fips
d$fine_grained_location <- d$UponStreet
d$state_patrol <- TRUE
d$police_department <- d$Department
d$driver_gender <- NA # not included
d$driver_age_raw <- NA # not included
d$driver_age <- NA # not included
# There is one weird race value to scrub
d$Race <- ifelse(d$Race %in% race_keys, d$Race, NA)
# NOTE(review): `map` here appears to be a project-local key->value lookup
# helper (three-argument form), not purrr::map -- confirm against shared utils.
d$driver_race_raw <- map(d$Race, race_keys, race_vals)
d$driver_race <- map(d$Race, race_keys, race_vals_clean)
d$violation_raw <- d$Description
d$violation <- normalize_violation_multiple(d, d$Description, clean=TRUE, sep = ';;')
d$search_conducted <- NA # not included
d$search_type_raw <- NA # not included
d$search_type <- NA # not included
d$contraband_found <- NA # not included
# stop outcome is the most severe outcome of the stop, consistent with other states.
d$stop_outcome <- ifelse(!is.na(d$ArrestNum), 'Arrest',
                  ifelse(d$Felony == 1, 'Felony',
                  ifelse(d$Misdemeanor == 1, 'Misdemeanor',
                  ifelse(d$CivilInfraction == 1, 'Infraction',
                  ifelse(d$Warning == 1, 'Warning', NA)))))
d$is_arrested <- !is.na(d$ArrestNum)
# Extra fields
d$officer_id <- d$PrimaryOfficerID
# Close-up
write_cleaned_state(d, extra_cols=c('officer_id'))
change_path(NA)
|
44d19116fae883b81bab0a5fcc43e928f414cb63
|
d2f94260646f8563f4b20c216a11e15a62c9363a
|
/LOOCV.R
|
7fcb00805ca47fc1bd6a58fd8805911cba9ec4a4
|
[] |
no_license
|
KDDing/DSGCR
|
e75be5a5f0d06df09a9a03e75e7bf8e312c3a1d8
|
56609698b954e576acb97b5a14641c1050df4733
|
refs/heads/master
| 2020-05-27T08:01:23.111564
| 2019-05-25T08:05:13
| 2019-05-25T08:05:13
| 188,538,271
| 4
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,196
|
r
|
LOOCV.R
|
# LOOCV.R -- leave-one-out / K-fold cross-validation of the Laplacian-
# regularized drug-side-effect predictor (paths "..."/"***.txt" are
# placeholders to be filled in by the user).
#
# Fixes applied in review:
#  * Removed `LDC <- Lapla_nor(SDC)`: `SDC` is never defined anywhere in this
#    script and `LDC` was never used, so the line could only raise
#    "object 'SDC' not found" at run time.
#  * Renamed the score matrix from `F` (which masks the logical constant
#    FALSE) to `scoreMat`.
#  * seq_len() for loop ranges; dropped unused locals (`t_p`,
#    `tep_pos_set_all`, redundant outer `RDS_v <- RDS`).
rm(list=ls())
setwd("...")
# Load the relational / similarity matrices.
RDS <- read.table("***.txt")
SDA1 <- read.table("***.txt")
SDA2 <- read.table("***.txt")
SDA3 <- read.table("***.txt")
RDG <- read.table("***.txt")
RGG <- read.table("***.txt")
RGG <- as.matrix(RGG)
RDS <- as.matrix(RDS)
SDA1 <- as.matrix(SDA1)
SDA2 <- as.matrix(SDA2)
SDA3 <- as.matrix(SDA3)
SDA <- (SDA1 + SDA2 + SDA3) / 3
setwd("...")
source("BMA.R")
source("Lapla.R")
source("CrossValidation.R")
source("Sim_Rank.R")
# Gene-gene similarity averaged over three rank cut-offs.
SGG1 <- Sim_rank(RGG, 2)
SGG2 <- Sim_rank(RGG, 3)
SGG3 <- Sim_rank(RGG, 4)
SGG <- (1/3) * (SGG1 + SGG2 + SGG3)
S_ge_geSet <- Cal_ge_geSet(SGG, t(RDG))
SDT <- Cal_drug_tar(S_ge_geSet, t(RDG))
runs <- 1
drug_num <- dim(RDS)[1]
p_in_all <- P_positive(RDS)
# K folds == number of positive pairs -> leave-one-out.
K <- dim(p_in_all)[1]
num_te <- floor(dim(p_in_all)[1] / K)
TPR_ALL_N <- matrix(nrow = (drug_num*(drug_num-1)/2 - (dim(p_in_all)[1] - num_te)) + 1, ncol = runs * floor(K))
FPR_ALL_N <- matrix(nrow = (drug_num*(drug_num-1)/2 - (dim(p_in_all)[1] - num_te)) + 1, ncol = runs * floor(K))
PRE_ALL_N <- matrix(nrow = (drug_num*(drug_num-1)/2 - (dim(p_in_all)[1] - num_te)) + 1, ncol = runs * floor(K))
# Regularization weights for the annotation, side-effect, and target graphs.
lamdaA <- 0.1
lamdaS <- 0.3
lamdaT <- 0.01
for (r in seq_len(runs)) {
  tep_pos_set <- sample(dim(p_in_all)[1], dim(p_in_all)[1])
  for (i in seq_len(floor(K))) {
    RDS_v <- RDS
    # Mask the held-out positive pairs for this fold (symmetric matrix).
    for (j in ((i-1)*num_te + 1):(i*num_te)) {
      RDS_v[p_in_all[tep_pos_set[j], 1], p_in_all[tep_pos_set[j], 2]] <- 0
      RDS_v[p_in_all[tep_pos_set[j], 2], p_in_all[tep_pos_set[j], 1]] <- 0
    }
    LDT <- Lapla_nor(SDT)
    LDS <- Lapla_nor(RDS_v)
    LDA <- Lapla_nor(SDA)
    I <- diag(drug_num)
    # Closed-form solution of the graph-regularized objective, symmetrized.
    scoreMat <- RDS_v %*% solve(I + lamdaA*(I - LDA) + lamdaS*(I - LDS) + lamdaT*(I - LDT))
    scoreMat <- (scoreMat + t(scoreMat)) * 0.5
    data_ROC_n <- Get_Test_Score(scoreMat, RDS_v, RDS)
    FTP <- Get_fpr_tpr_pre(data_ROC_n)
    TPR_ALL_N[, ((r-1)*floor(K) + i)] <- FTP$tpr_n
    FPR_ALL_N[, ((r-1)*floor(K) + i)] <- FTP$fpr_n
    PRE_ALL_N[, ((r-1)*floor(K) + i)] <- FTP$pre_n
  }
}
# Average the ROC / precision curves over all folds and runs.
tpr_p <- rowMeans(TPR_ALL_N)
fpr_p <- rowMeans(FPR_ALL_N)
pre_p <- rowMeans(PRE_ALL_N)
library("caTools")
AUC <- trapz(fpr_p, tpr_p)
|
d9d523cbf0e7858b274d979a438a0fd8234457b9
|
d1f231d793617a69ae96b1bc6942657fa3d5fca4
|
/cachematrix.R
|
21ea3cf5d1e333ddd43b6a6ce1bf2ac2d7d1a99c
|
[] |
no_license
|
tycai000/ProgrammingAssignment2
|
6263f694292a469b87393ebba247e7c4e4b7147a
|
b9056eee28fa22a3565d75581bdd14f5e068b305
|
refs/heads/master
| 2020-04-05T18:57:12.587988
| 2014-12-16T05:18:01
| 2014-12-16T05:18:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,196
|
r
|
cachematrix.R
|
#Two functions used to solve and cache matrices in order to speed up processing of matrix data
# Create a matrix wrapper that can cache its inverse.
#
# Returns a list of four accessors closing over the matrix `x` and its
# cached inverse:
#   set(y)        -- replace the matrix and invalidate the cache
#   get()         -- return the stored matrix
#   setinverse(s) -- store a computed inverse (used by cacheSolve)
#   getinverse()  -- return the cached inverse, or NULL if not yet computed
makeCacheMatrix <- function(x = matrix()) {
  cachedInverse <- NULL
  list(
    set = function(y) {
      # Replacing the matrix invalidates any previously cached inverse.
      x <<- y
      cachedInverse <<- NULL
    },
    get = function() x,
    setinverse = function(solve) cachedInverse <<- solve,
    getinverse = function() cachedInverse
  )
}
# Return the inverse of the matrix wrapped by makeCacheMatrix().
#
# On a cache hit the stored inverse is returned immediately (after an
# informational message). On a miss the inverse is computed with solve(),
# written back into the cache via setinverse(), and returned. Any extra
# arguments are forwarded to solve().
cacheSolve <- function(x, ...) {
  inv <- x$getinverse()
  if (is.null(inv)) {
    # Cache miss: compute the inverse, memoise it, fall through to return.
    inv <- solve(x$get(), ...)
    x$setinverse(inv)
  } else {
    message("getting cached data")
  }
  inv
}
|
6fd13f7a16946609ce0c7ab28226e1d1f7d43193
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/oppr/examples/add_rsymphony_solver.Rd.R
|
2cc651ebee4847b30b450205fccbafb4ae26eba9
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 614
|
r
|
add_rsymphony_solver.Rd.R
|
# Example script extracted from the oppr package documentation for
# add_rsymphony_solver(): build a project-prioritization problem and solve
# it with the SYMPHONY mixed-integer solver via the Rsymphony package.
library(oppr)
### Name: add_rsymphony_solver
### Title: Add a SYMPHONY solver with 'Rsymphony'
### Aliases: add_rsymphony_solver
### ** Examples
## No test:
# load simulated project/feature/action data shipped with oppr
data(sim_projects, sim_features, sim_actions)
# build problem with Rsymphony solver: maximise feature richness subject to
# a budget of 200, with binary fund/don't-fund decisions per action
p <- problem(sim_projects, sim_actions, sim_features,
             "name", "success", "name", "cost", "name") %>%
  add_max_richness_objective(budget = 200) %>%
  add_binary_decisions() %>%
  add_rsymphony_solver()
# print problem
print(p)
# solve problem
s <- solve(p)
# print solution
print(s)
# plot solution
plot(p, s)
## End(No test)
|
874f38510e29548eb04e62b0c58c29d033cc4372
|
b502e9912eb38e05124717dcde6781e142f62d59
|
/pkg/man/lmSelect.Rd
|
325316420e287c3f30ea9a092b2f2d517663eaaa
|
[] |
no_license
|
Anhmike/lmSubsets.R
|
d019d8b19355a257b4e6c25c9e1b73beeb0a3ce8
|
55787ffaaa66f1c76d97c688923b65501c7b71f7
|
refs/heads/master
| 2022-11-10T10:26:37.936155
| 2020-06-26T14:55:57
| 2020-06-26T14:55:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,269
|
rd
|
lmSelect.Rd
|
\name{lmSelect}
\alias{lmSelect}
\alias{lmSelect.default}
\alias{lmSelect.matrix}
\alias{lmSelect.lmSubsets}
\alias{lmSelect_fit}
\alias{lmSubsets_select}
\alias{print.lmSelect}
\alias{plot.lmSelect}
\alias{summary.lmSelect}
\alias{print.summary.lmSelect}
\title{Best-Subset Regression}
\description{Best-subset regression for ordinary linear models.}
\usage{
lmSelect(formula, \ldots)
\method{lmSelect}{default}(formula, data, subset, weights, na.action, model = TRUE,
x = FALSE, y = FALSE, contrasts = NULL, offset, \ldots)
\method{lmSelect}{matrix}(formula, y, intercept = TRUE, \ldots)
\method{lmSelect}{lmSubsets}(formula, penalty = "BIC", \ldots)
lmSelect_fit(x, y, weights = NULL, offset = NULL, include = NULL,
exclude = NULL, penalty = "BIC", tolerance = 0,
nbest = 1, \ldots, pradius = NULL)
}
\arguments{
\item{formula, data, subset, weights, na.action, model, x, y, contrasts,
offset}{Standard formula interface.}
\item{intercept}{Include intercept.}
\item{include, exclude}{Force regressors in or out.}
\item{penalty}{Penalty per parameter.}
\item{tolerance}{Approximation tolerance.}
\item{nbest}{Number of best subsets.}
\item{\dots}{Forwarded to \code{lmSelect_fit}.}
\item{pradius}{Preordering radius.}
}
\details{
The \code{lmSelect} generic provides a convenient interface for best
variable-subset selection in linear regression: The \code{nbest} best
-- according to an information criterion of the AIC family -- subset
models are returned.
The information criterion is specified with the \code{penalty}
parameter. Accepted values are \code{"AIC"}, \code{"BIC"}, or a
\code{numeric} value representing the penalty per model parameter (see
\code{\link[stats]{AIC}}).
A custom selection criterion may be specified by passing an R function
as the \code{penalty} argument. The expected signature is
\code{function(size, rss)}, where \code{size} is the number of
predictors (including intercept, if any), and \code{rss} the residual
sum of squares. The function must be non-decreasing in both
parameters.
A low-level matrix interface is provided by \code{lmSelect_fit}.
See \code{\link{lmSubsets}} for further information.
}
\value{
An object of class \code{"lmSelect"}, i.e., a list with the following
components:
\item{nobs, nvar}{Number of observations, of variables.}
\item{intercept}{\code{TRUE} if model has intercept term;
\code{FALSE} otherwise.}
\item{include, exclude}{Included, excluded variables.}
\item{size}{Subset sizes.}
\item{tolerance}{Approximation tolerance.}
\item{nbest}{Number of best subsets.}
\item{submodel}{Submodel information.}
\item{subset}{Selected variables.}
Further components include \code{call}, \code{na.action},
\code{weights}, \code{offset}, \code{contrasts}, \code{xlevels},
\code{terms}, \code{mf}, \code{x}, and \code{y}. See
\code{\link[stats]{lm}} for more information.
}
\references{
Hofmann M, Gatu C, Kontoghiorghes EJ, Colubi A, Zeileis A (2020).
lmSubsets: Exact Variable-Subset Selection in Linear Regression for
R. \emph{Journal of Statistical Software}. \bold{93}, 1--21.
doi:10.18637/jss.v093.i03.
}
\seealso{\code{\link{lmSubsets}}, \code{\link{summary}},
\link{methods}.}
\examples{
## load data (with logs for relative potentials)
data("AirPollution", package = "lmSubsets")
###################
## basic usage ##
###################
## fit 20 best subsets (BIC)
lm_best <- lmSelect(mortality ~ ., data = AirPollution, nbest = 20)
lm_best
## equivalent to:
\dontrun{
lm_all <- lmSubsets(mortality ~ ., data = AirPollution, nbest = 20)
lm_best <- lmSelect(lm_all)
}
## summary statistics
summary(lm_best)
## visualize
plot(lm_best)
########################
## custom criterion ##
########################
## the same as above, but with a custom criterion:
M <- nrow(AirPollution)
ll <- function (rss) {
-M/2 * (log(2 * pi) - log(M) + log(rss) + 1)
}
aic <- function (size, rss, k = 2) {
-2 * ll(rss) + k * (size + 1)
}
bic <- function (size, rss) {
aic(size, rss, k = log(M))
}
lm_cust <- lmSelect(mortality ~ ., data = AirPollution,
penalty = bic, nbest = 20)
lm_cust
}
\keyword{regression}
|
7ae16ff00a20683232c51296cb5c724280c637dd
|
a17c62648baee18fffec862e1d11ad5848936495
|
/sagebrush/3sage_functions.R
|
5309baf74fd1b82fbcc7ca284bf37d877580b480
|
[] |
no_license
|
DataFusion18/Tree_Distribution_Database
|
7598ef0624f805d26ab2e3471ab066ac266da115
|
58de8e8ee4d9084f7574378fa183aa385aa8fb32
|
refs/heads/master
| 2020-04-20T05:53:48.643436
| 2018-06-01T15:01:02
| 2018-06-01T15:01:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,275
|
r
|
3sage_functions.R
|
##3sage_functions.R
####################################################################################################
## pca_subset() chooses the variables through PCA; you define the number of variables
##
## Iteratively thins the candidate predictor set: builds several resampled
## training sets (presences plus absences stratified along the first two
## principal components of the environmental data), fits one randomForest per
## resample in parallel, averages variable importances, drops the weakest
## `todel` variables, and repeats until `nvars` variables remain. At that
## point the fitted forests are saved to disk and a
## list(var.layers = <comma-separated layer indices>, nReps = <replicates>)
## is returned.
##
## NOTE(review): several free variables are resolved from the calling
## environment -- SDMNAME, nm.extra, model_algorithm, model.outdir, occ,
## bgdata, and optionally nStandardDeviations / addNOObs. Confirm these are
## defined before calling.
####################################################################################################
pca_subset <- function(cdata, firstvar='AHM', sname=SDMNAME, nvars, stddev=1){
  ##firstvar='AHM' for the first AdaptWest raster
  ##firstvar='bio_1' for the first WorldClim (Bioclim) raster
  ##firstvar='XXX' for the first OakCons raster
  # Append a timestamped header to the per-species fit log.
  outname <- paste0(sname, '_RFfits_')
  sink(file.path('SDM_output', paste0(outname, nm.extra, '.txt')), append=TRUE)
  cat(as.character(Sys.time()), '\n', sname, '\n', model_algorithm, ' model\n'); sink()
  # firstvar='bio_1'
  require(randomForest)
  spname = gsub('_', ' ', sname)
  spname = gsub('var','var.',spname)
  spname = gsub('subsp','subsp.',spname)
  # spyes = cdata$SPCD
  # cat ('Number of present observations: ',sum(spyes),'\n')
  # fv = match('mat',colnames(cdata))
  # Predictor columns run from `firstvar` through the last column of cdata.
  fv = match(firstvar, colnames(cdata))
  cols=fv:ncol(cdata)
  cdata <- cdata[complete.cases(cdata[,cols]),]
  spyes <- cdata$sppres
  prcdata=prcomp(cdata[,cols])$x[,1:2] # first 2 pcs, considering first 19 cols, all observations.
  rownames(prcdata)=rownames(cdata)
  # Environmental envelope of the presence points, padded on each side by
  # `stddev` standard deviations of the presence data.
  sdin = apply(cdata[spyes==1, cols], 2, sd)
  env = apply(cdata[spyes==1, cols], 2, range)
  # find() returns the name of the environment on the search path holding
  # nStandardDeviations, or NA if no such object exists.
  nStd = find('nStandardDeviations')[1]
  if (is.na(nStd)) nStd = 'no'
  # cat ('nStd=', nStd,'\n'); flush.console()
  sink(file.path('SDM_output', paste0(outname, nm.extra, '.txt')), append=TRUE)
  cat ('nStd=', nStd,'\n'); sink(); flush.console()
  if (nStd == '.GlobalEnv') if (!is.null(nStandardDeviations)) sdin=sdin*nStandardDeviations
  env[1,]=env[1,]-sdin*stddev
  env[2,]=env[2,]+sdin*stddev
  # Flag, per observation and per variable, whether the value lies inside the
  # padded presence envelope.
  tags = matrix(data=FALSE,nrow=nrow(cdata),ncol=length(cols))
  colnames(tags)=colnames(cdata)[cols]
  rownames(tags)=rownames(cdata)
  for (var in colnames(tags)) {tags[, var] = cdata[,var] >= env[1,var] & cdata[,var] <= env[2,var]}
  envBoth=apply(tags,1,all) ##for within both PC1 and PC2
  # Absence observations that nevertheless fall inside the presence envelope.
  envNotPresent = rownames(cdata)[envBoth & spyes==0]
  InEnvSPNO = length(envNotPresent)
  Nyes = sum(spyes)
  # Nyes = nrow(cdata)
  # Target resample size: 5x the number of presences.
  totsamp = 2*2.5*Nyes
  size = totsamp*.4  # NOTE(review): `size` appears unused; sizNot is recomputed inside the loop
  # Stratify the points OUTSIDE the envelope into ~10 rank bins along each of
  # the first two principal components.
  prcdata=prcdata[!envBoth,]
  div=ceiling(nrow(prcdata)/10)
  PC1 = factor(floor(rank(prcdata[,1])/(div+.001)))
  names(PC1)=NULL
  ns=length(levels(PC1))
  sz1=floor((totsamp*.2*.5)/ns) # 20% for this method, half on first pc
  PC2 = factor(floor(rank(prcdata[,2])/(div+.001)))
  names(PC2)=NULL
  ns=length(levels(PC2))
  sz2=floor((totsamp*.2*.5)/ns) # 20% for this method, half on second pc
  # Number of forest replicates, scaled by how many in-envelope absences
  # exist, clamped to [10, 30]; reduced to 5 for very large presence sets.
  nForests=round(InEnvSPNO/(2*Nyes))
  if (nForests > 30) nForests=30
  if (nForests < 10) nForests=10
  if (Nyes > 5000) nForests = 5
  obsSamp = vector('list', nForests)
  names(obsSamp)=paste('S', 1:nForests, sep='')
  ysp <- obsSamp
  forests <- obsSamp
  repl.occs <- obsSamp
  # Optionally append extra absence ("no") observations from a global
  # addNOObs data frame, if one exists on the search path.
  addNO = find('addNOObs')[1]
  if (is.na(addNO)) addNO = 'no'
  # cat ('addNO=', addNO,'\n'); flush.console()
  sink(file.path('SDM_output', paste0(outname, nm.extra, '.txt')), append=TRUE)
  cat ('addNO=', addNO,'\n'); sink(); flush.console()
  if (addNO != 'no')
  {
    norows=(nrow(cdata)+1):(nrow(cdata)+nrow(addNOObs))
    if (Nyes < nrow(addNOObs)) norows=sample(norows, Nyes)
    keepcols=intersect(colnames(cdata),colnames(addNOObs))
    cdata=rbind(cdata[,keepcols], addNOObs[,keepcols])
    # fv = match('mat',colnames(cdata))
    fv = match(firstvar, colnames(cdata))
    cols=fv:ncol(cdata)
  }
  sink(file.path('SDM_output', paste0(outname, nm.extra, '.txt')), append=TRUE)
  cat ('presence pts: ', Nyes,'\n', 'absence pts: ', nrow(bgdata), '\n\nOut-of-bag error:\n'); sink(); flush.console()
  yesrows = (1:nrow(cdata))[spyes==1]
  # n <- names(obsSamp)[1]
  # Build one training sample per replicate: all presences (included twice),
  # a PC-stratified draw of out-of-envelope absences, and a draw of
  # in-envelope absences.
  for (n in names(obsSamp))
  {
    cat ('n=',n,'\n'); flush.console()
    sampPC1 = tapply(rownames(prcdata), PC1, sample, size=sz1)
    sampPC2 = tapply(rownames(prcdata), PC2, sample, size=sz2)
    allSampOut=unique(c(unlist(sampPC1), unlist(sampPC2)))
    sizNot = totsamp*.4
    sampNot = if (length(envNotPresent) > sizNot) sample(envNotPresent, sizNot) else envNotPresent
    samp = c(allSampOut, sampNot)
    isamp = match(samp, rownames(cdata))
    repl.occ <- occ[rownames(occ@data) %in% c(yesrows, samp),]
    rows=c(yesrows, isamp, yesrows) # yes rows are in twice
    if (addNO != 'no') rows=c(rows, if (Nyes < nrow(addNOObs)) sample(norows, Nyes) else norows)
    obsSamp[[n]] = cdata[rows, cols]
    ysp[[n]] = as.factor(cdata$sppres[rows])
    repl.occs[[n]] <- repl.occ
  }
  vars = names(cdata)[cols]
  ##todel is the number of predictor variables to remove each time; Crookston chose 4 to cut 35 variables down to 18 or 8
  # todel=2
  todel=3
  # todel=4
  # require(multicore)
  require(parallel)
  # Backward elimination: fit the replicate forests in parallel, average the
  # MeanDecreaseAccuracy importances across replicates, drop the `todel`
  # weakest variables, and repeat until exactly `nvars` remain.
  while (length(vars) > 1)
  {
    for (n in names(obsSamp))
    {
      # One forked randomForest fit per replicate; mccollect() below waits
      # for and gathers all of them.
      mcparallel(randomForest(y=ysp[[n]], x=obsSamp[[n]][,vars], ntree=100, importance=TRUE,
                              proximity=FALSE, norm.votes=FALSE))
    }
    forests = mccollect()
    if (length(vars) == nvars) {
      # Reached the requested variable count: persist all artifacts and
      # return the 1-based layer indices (comma-separated) plus the number
      # of replicates.
      names(forests) <- names(obsSamp)
      save(forests, vars, cdata, ysp, obsSamp, repl.occs, sname, file=file.path(model.outdir, sname, paste0(sname, '_forests', nvars, '.', nm.extra,'.RData')))
      # vars.layers <- paste( which(names(cdata[,cols]) %in% vars), collapse=',' )
      # loadList <- list(paste( which(names(cdata[,cols]) %in% vars), collapse=',' ), nForests)
      # names(loadList) <- c('vars.layers', 'nReps')
      return(list(var.layers=paste(which(names(cdata[,cols]) %in% vars), collapse=','), nReps=nForests))
      # return(loadList)
    }
    gi = unlist(lapply(forests, function (x) { cbind(importance(x)[,'MeanDecreaseAccuracy'])} ))
    dim(gi)=c(length(vars), length(obsSamp))
    colnames(gi)=names(obsSamp)
    rownames(gi)=vars
    top = sort(apply(gi,1,mean), decreasing=TRUE, index.return=TRUE)$ix
    meanOOB = mean(unlist(lapply(forests, function (x) tail(x$err.rate[,'OOB'],1))))
    sink(file.path('SDM_output', paste0(outname, nm.extra, '.txt')), append=TRUE)
    cat ('meanOOB=', meanOOB, ' delete=', vars[top[(length(vars)-todel+1):length(vars)]], '\n'); sink(); flush.console()
    vars = vars[top[1:(length(vars)-todel)]]
    if (length(vars) > 17+todel){todel <- todel} else todel= max(1, todel-1)
  }
  names(forests)=names(obsSamp)
  # return(paste( which(names(cdata[,cols]) %in% vars), collapse=',' ))
}
####################################################################################################
## other functions
####################################################################################################
## mean.list : calculate the mean of a list of matrices
# Element-wise mean of a list of conformable matrices.
#
# S3 method so that mean(x) works when x is a list of matrices. All
# matrices must be the same size; dimnames are taken from the first
# element. Extra arguments (e.g. na.rm = TRUE) are forwarded to rowMeans().
#
# Fix: both stop() messages previously had mangled quoting
# (stop(''x' must ...')), which is a parse error in R; they now use
# double-quoted strings.
mean.list <- function (x, ...)
{
    # vapply is type-stable, unlike sapply, so a non-matrix element cannot
    # silently change the result's shape.
    if (!all(vapply(x, is.matrix, logical(1))))
        stop("'x' must be a list containing matrices")
    dims <- vapply(x, dim, integer(2))
    n <- dims[1, 1]
    p <- dims[2, 1]
    if (!all(n == dims[1, ]) || !all(p == dims[2, ]))
        stop("the matrices must have the same dimensions")
    # Flatten each matrix into one column, then average across list elements.
    mat <- matrix(unlist(x), n * p, length(x))
    mm <- matrix(rowMeans(mat, ...), n, p)
    dimnames(mm) <- dimnames(x[[1]])
    mm
}
####################################################################################################
# x <- file.path(r_locc, paste0(p.m.cur.reps.filename, '.t', th.v*100, '.tif'))
# pypath='/Library/Frameworks/Python.framework/Versions/2.7/Programs/gdal_polygonize.py'
#
# gdal_polygonizeR <- function(x, outshape=NULL, gdalformat = 'ESRI Shapefile',
# pypath='/Library/Frameworks/GDAL.framework/Versions/1.9/Programs/gdal_polygonize.py', readpoly=TRUE, quiet=TRUE) { #, overshp=TRUE
# if (is.null(pypath)) {
# pypath <- Sys.which('gdal_polygonize.py')
# }
# if (!file.exists(pypath)) stop('Can't find gdal_polygonize.py on your system.')
# owd <- getwd()
# on.exit(setwd(owd))
# # setwd(dirname(pypath))
# if (!is.null(outshape)) {
# outshape <- sub('\\.shp$', '', outshape)
# f.exists <- file.exists(paste(outshape, c('shp', 'shx', 'dbf'), sep='.'))
# if (any(f.exists))
# stop(sprintf('File already exists: %s',
# toString(paste(outshape, c('shp', 'shx', 'dbf'),
# sep='.')[f.exists])), call.=FALSE)
# } else outshape <- tempfile()
# if (is(x, 'Raster')) {
# require(raster)
# writeRaster(r, {f <- tempfile(fileext='.tif')})
# rastpath <- normalizePath(f)
# } else if (is.character(x)) {
# rastpath <- normalizePath(x)
# } else stop('x must be a file path (character string), or a Raster object.')
# system2('python', args=(sprintf(''%1$s' '%2$s' -f '%3$s' '%4$s.shp'',
# pypath, rastpath, gdalformat, outshape)))
# # system2('python', args=(sprintf(''%1$s' '%2$s' -f '%3$s' '%4$s.shp'',
# # pypath, rastpath, gdalformat, outshape)))
# if (readpoly) {
# shp <- readOGR(dirname(outshape), layer = basename(outshape), verbose=!quiet)
# return(shp)
# }
# return(NULL)
# }
# # x <- in.raster
# # r <- in.raster
# gdal_polygonizeR <- function(x=in.raster, outshape=NULL, gdalformat = 'ESRI Shapefile',
# pypath='/Library/Frameworks/GDAL.framework/Versions/1.9/Programs/gdal_polygonize.py', readpoly=TRUE, quiet=TRUE) { #, overshp=TRUE
# if (is.null(pypath)) {
# pypath <- Sys.which('gdal_polygonize.py')
# }
# if (!file.exists(pypath)) stop('Can't find gdal_polygonize.py on your system.')
# owd <- getwd()
# on.exit(setwd(owd))
# # setwd(dirname(pypath))
# if (!is.null(outshape)) {
# outshape <- sub('\\.shp$', '', outshape)
# f.exists <- file.exists(paste(outshape, c('shp', 'shx', 'dbf'), sep='.'))
# if (any(f.exists))
# stop(sprintf('File already exists: %s',
# toString(paste(outshape, c('shp', 'shx', 'dbf'),
# sep='.')[f.exists])), call.=FALSE)
# } else outshape <- tempfile()
# # if (is(x, 'Raster')) {
# # require(raster)
# # writeRaster(r, {f <- tempfile(fileext='.tif')})
# # # writeRaster(x, {f <- tempfile(fileext='.asc')})
# # rastpath <- normalizePath(f)
# # } else if (is.character(x)) {
# # rastpath <- normalizePath(x)
# # } else stop('x must be a file path (character string), or a Raster object.')
# system2('python', args=(sprintf("'%1$s' '%2$s' -f '%3$s' '%4$s.shp'",
# pypath, rastpath, gdalformat, outshape)))
# # system2('python', args=(sprintf("'%1$s' '%2$s' -f '%3$s' '%4$s.shp'",
# # pypath, rastpath, gdalformat, outshape)))
# if (readpoly) {
# shp <- readOGR(dirname(outshape), layer = basename(outshape), verbose=!quiet)
# return(shp)
# }
# return(NULL)
# }
# ##the fxn to call
# thresh_raster2poly <- function(in.raster, out.dir=s_loc, outshp){
# # out.dir <- file.path(f_out, 'poly_models_thresh', dir.nm)
# if (!file.exists(out.dir)){dir.create(out.dir, recursive=TRUE)}
# # in.raster <- file.path(f_out, 'raster_models', eachPath)
# # out.nm <- gsub(pattern='.tif', replacement='', base.nm)
# # outshp <- file.path(f_out, 'poly_models_thresh', dir.nm, out.nm)
#
# gdal_polygonizeR(in.raster, outshape=outshp, readpoly=FALSE)
#
# }
####################################################################################################
# ##kfold the presence and absence occurrences
# kfold_set <- function(pocc, nocc, k.folds=5, proj2use='+proj=longlat +ellps=WGS84 +towgs84=0,0,0,0,0,0,0 +no_defs'){
# test_per <- (1/k.folds)*100
# nr <- nrow(pocc) #; nr
# s <- sample(nr, 1/k.folds * nr)
# ptrain <- pocc[-s, ]
# ptest <- pocc[s, ]
#
# nr <- nrow(nocc) #; nr
# s <- sample(nr, 1/k.folds * nr)
# atrain <- nocc[-s, ]
# atest <- nocc[s, ]
#
# train <- rbind(ptrain, atrain)
# test <- rbind(ptest, atest)
# # names(occ2)
# occ.train <- SpatialPointsDataFrame(cbind(train1$x_coord, train1$y_coord), train)
# occ.test <- SpatialPointsDataFrame(cbind(test1$x_coord, test1$y_coord), test)
# # occ <- SpatialPointsDataFrame(cbind(occ.c2$x_coord, occ.c2$y_coord), occ.c2)
# proj4string(occ.train) <- proj2use
# proj4string(occ.test) <- proj2use
#
# return(list(occ.train, occ.test))
# }
|
6192f242c5966357c160a7f7d5ae0fb451340e42
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/bamlss/examples/predict.bamlss.Rd.R
|
5ae98f0e73aedd49aae859d019ad511498d48f10
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,802
|
r
|
predict.bamlss.Rd.R
|
# Example script extracted from the bamlss package documentation for
# predict.bamlss(). The example body that follows is kept inside a
# "Not run" comment block (##D lines), so loading the package is the
# only live statement here.
library(bamlss)
### Name: predict.bamlss
### Title: BAMLSS Prediction
### Aliases: predict.bamlss
### Keywords: regression models
### ** Examples
## Not run:
##D ## Generate some data.
##D d <- GAMart()
##D
##D ## Model formula.
##D f <- list(
##D num ~ s(x1) + s(x2) + s(x3) + te(lon,lat),
##D sigma ~ s(x1) + s(x2) + s(x3) + te(lon,lat)
##D )
##D
##D ## Estimate model.
##D b <- bamlss(f, data = d)
##D
##D ## Predictions.
##D p <- predict(b)
##D str(b)
##D
##D ## Prediction for "mu" model and term "s(x2)".
##D p <- predict(b, model = "mu", term = "s(x2)")
##D
##D ## Plot effect
##D plot2d(p ~ x2, data = d)
##D
##D ## Same for "sigma" model.
##D p <- predict(b, model = "sigma", term = "s(x2)")
##D plot2d(p ~ x2, data = d)
##D
##D ## Prediction for "mu" model and term "s(x1)" + "s(x2)"
##D ## without intercept.
##D p <- predict(b, model = "mu", term = c("s(x1)", "s(x2)"),
##D intercept = FALSE)
##D
##D ## Prediction based on quantiles.
##D p <- predict(b, model = "mu", term = "s(x2)", FUN = c95)
##D plot2d(p ~ x2, data = d)
##D
##D ## Extract samples of predictor for "s(x2)".
##D p <- predict(b, model = "mu", term = "s(x2)",
##D intercept = FALSE, FUN = function(x) { x })
##D print(dim(p))
##D plot2d(p ~ x2, data = d, col.lines = rgb(0.1, 0.1, 0.1, alpha = 0.1))
##D
##D ## Or using specific combinations of terms.
##D p <- predict(b, model = "mu", term = c("s(x2)", "te(lon,lat)"),
##D intercept = FALSE, FUN = function(x) { x })
##D head(p)
##D
##D ## Prediction using new data.
##D ## Only need x3 data when predicting
##D ## for s(x3).
##D nd <- data.frame("x3" = seq(0, 1, length = 100))
##D nd <- cbind(nd, predict(b, newdata = nd, term = "s(x3)"))
##D print(head(nd))
##D plot2d(mu ~ x3, data = nd)
##D plot2d(sigma ~ x3, data = nd)
## End(Not run)
|
6aaa505d094eac570efc2b093062eeee765916b0
|
78d4e36526c16fdb5940c2e9de2e6c60dadfcc70
|
/man/lm.loss_x_y.Rd
|
01dce54334753fb47bfe57e734995f204b6aa3eb
|
[] |
no_license
|
markushuff/PsychHelperFunctions
|
b7d5e39ea8d77ff5522e155c0e01d816dcdcf7ab
|
00373633ff40eaf102ca580f0232bbccd9d0f940
|
refs/heads/master
| 2022-09-21T20:36:48.629152
| 2022-09-08T08:54:01
| 2022-09-08T08:54:01
| 47,286,370
| 2
| 2
| null | null | null | null |
UTF-8
|
R
| false
| true
| 536
|
rd
|
lm.loss_x_y.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/lm.loss_x_y.R
\name{lm.loss_x_y}
\alias{lm.loss_x_y}
\title{Estimate the parameters of a linear regression (intercept, slope, error standard deviation)
with maximum likelihood estimation}
\usage{
lm.loss_x_y(par)
}
\arguments{
\item{par}{parameter triple (slope, intercept, error standard deviation)}
}
\value{
deviance
}
\description{
Maximum likelihood estimation: Linear regression with one predictor
}
\references{
https://rpubs.com/YaRrr/MLTutorial
}
|
4d6ff7d1484970b9257f9d51958136ee97a7cf0d
|
80ba5c4ec283b5628c95510a344822039bf48712
|
/Machine Learning A-Z New/Part 3 - Classification/Section 15 - K-Nearest Neighbors (K-NN)/knn_MSL.R
|
81414a85af5e96ed31ade0e3d0bd6a80852abfd4
|
[] |
no_license
|
remsanjiv/Machine_Learning
|
bfea8581038364b44a26793f9a75da7b35d4a67e
|
8ee8b4188ac1adfe694b79b061a3971aae1f8e5d
|
refs/heads/master
| 2022-01-29T10:38:34.375245
| 2019-06-23T00:15:46
| 2019-06-23T00:15:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,327
|
r
|
knn_MSL.R
|
# Logistic Regression Intuition
# Lecture 85 https://www.udemy.com/machinelearning/learn/lecture/6270024
# K-Nearest Neighbors Intuition - sort of a clustering idea where we group to a datapoints neighbors
# Lecture 99 https://www.udemy.com/machinelearning/learn/lecture/5714404
getwd() # check Working directory
# Importing the dataset (expects Social_Network_Ads.csv in the working directory)
dataset = read.csv('Social_Network_Ads.csv')
# lets look
dataset
# User.ID Gender Age EstimatedSalary Purchased
# 1 15624510 Male 19 19000 0
# 2 15810944 Male 35 20000 0
# we are after the age and salary and the y/n purchased
# so in R that's columns 3-5
dataset = dataset[3:5]
# Encoding the target feature as factor
dataset$Purchased = factor(dataset$Purchased, levels = c(0, 1))
# Splitting the dataset into the Training set and Test set
# install.packages('caTools')
library(caTools)
set.seed(123)  # fixed seed so the train/test split is reproducible
split = sample.split(dataset$Purchased, SplitRatio = 0.75)
training_set = subset(dataset, split == TRUE)
test_set = subset(dataset, split == FALSE)
# Feature Scaling - for classification it's better to do feature scalling
# additionally we have variables where the units are not the same
# NOTE(review): the training and test sets are scaled independently here;
# the conventional approach is to reuse the training-set center/scale on
# the test set -- confirm this is intentional for the tutorial.
training_set[-3] = scale(training_set[-3])
test_set[-3] = scale(test_set[-3])
# ============================================================================================
# Fitting classifier to the Training set
# ============================================================================================
# lecture 94 https://www.udemy.com/machinelearning/learn/lecture/5684978
# we'll use the glm function; glm is used to fit generalized linear models,
# specified by giving a symbolic description of the linear predictor and a
# description of the error distribution.
# formula is Dependent variable and ~ then Independent variable, in this case all so '.'
# classifier = # here we'll apply a dif modeling tool depending on what we need
# Step 1 import necessary model and/or functions
# Step 2 create our object
# Step 3 fit object to our data
# Step 4 predicting ALL IN ONE!!
library(class)
?knn
# note we are removing the known dependent variable in train and test
# in this example we are training on the training set
# knn() "fits" and predicts in a single call: for each test row it finds
# the k nearest training rows and classifies by majority vote.
y_pred = knn(train = training_set[, -3], # training set without the Dependent Variable
             ## -3 means remove the 3rd column
             test = test_set[, -3],
             cl = training_set[, 3], # here we are providing the Truth of the
             ## training set which is where the model will learn
             k = 5,
             prob = TRUE)
# play with column stuff
train12 = training_set[,1:2] # provides same as [, -3]
# the [,] box is basically [rows,columns]
# from HELP - KNN
# k-nearest neighbour classification for test set from training set.
# For each row of the test set, the k nearest (in Euclidean distance) training
# set vectors are found, and the classification is decided by majority vote,
# with ties broken at random. If there are ties for the kth nearest vector, all
# candidates are included in the vote.
# Usage knn(train, test, cl, k = 1, l = 0, prob = FALSE, use.all = TRUE)
# Arguments
# train - matrix or data frame of training set cases.
# test - matrix or data frame of test set cases. A vector will be interpreted
## as a row vector for a single case.
# cl - factor of true classifications of training set
# k - number of neighbours considered.
# prob - If this is true, the proportion of the votes for the winning class
## are returned as attribute prob.
# ==========================================================================================
# create classifier above
# ============================================================================================
# Predicting the Test set results WRAPPED INTO ABOVE
# K-NN R Lecture 102 https://www.udemy.com/machinelearning/learn/lecture/5736648
# Making the Confusion Matrix
# Lecture 95 https://www.udemy.com/machinelearning/learn/lecture/5685396
# Cross-tabulate the ground truth (test_set column 3, the Purchased factor)
# against the predicted class labels in y_pred.
cm = table(test_set[, 3], y_pred)
cm
# > cm
#    y_pred
#     0  1
#   0 59  5
#   1  6 30
#
# FIX: the original script also computed
#   cm = table(test_set[, 3], y_pred > 0.5)
# which was copied from the logistic-regression template. knn() returns a
# factor of hard class labels, not probabilities, so `y_pred > 0.5` is not
# meaningful for factors (R warns and yields NA), producing an empty table.
# The plain table above is already the correct confusion matrix for K-NN.
# FOR RMD FILE TO INCLUDE CM EXPLANATION
# ```{r pressure, echo=FALSE, fig.cap="A caption", out.width = '100%'}
# knitr::include_graphics("/Users/markloessi/Machine_Learning/Confusion_Matrix_Explained.png")
#```
# Visualising the Training set results
#
# for K-NN we will need to change some things
#
# install.packages('ElemStatLearn')
# NOTE(review): ElemStatLearn has been archived from CRAN; it may need to
# be installed from the CRAN archive for this section to run.
library(ElemStatLearn)
# think of this bit as a declaration
set = training_set
# this section creates the background region red/green. It does that by the 'by' which you
# can think of as the steps in python, so each 0.01 is interpreted as 0 or 1 and is either
# green or red. The -1 and +1 give us the space around the edges so the dots are not jammed
X1 = seq(min(set[, 1]) - 1, max(set[, 1]) + 1, by = 0.01)
X2 = seq(min(set[, 2]) - 1, max(set[, 2]) + 1, by = 0.01)
grid_set = expand.grid(X1, X2)
# just giving a name
colnames(grid_set) = c('Age', 'EstimatedSalary')
# this is the MAGIC
# here we use the classifier to predict the result of each of each of the pixel bits noted above
#
# this piece here gets changed
#
# take out => prob_set = predict(classifier, type = 'response', newdata = grid_set)
# change => y_grid = ifelse(prob_set > 0.5, 1, 0) to use the mess from above ;)
# For K-NN the class of every grid point is predicted directly by knn():
y_grid = knn(train = training_set[, -3],
             test = grid_set, # and we want to use the grid here
             cl = training_set[, 3],
             k = 5,
             prob = TRUE)
# that's the end of the background
# now we plat the actual data
plot(set[, -3],
     main = 'K-NN (Training set)',
     xlab = 'Age', ylab = 'Estimated Salary',
     xlim = range(X1), ylim = range(X2)) # this bit creates the limits to the values plotted
# this is also a part of the MAGIC as it creates the line between green and red
contour(X1, X2, matrix(as.numeric(y_grid), length(X1), length(X2)), add = TRUE)
# here we run through all the y_pred data and use ifelse to color the dots
# note the dots are the real data, the background is the pixel by pixel determination of y/n
# graph the dots on top of the background give you the image
points(grid_set, pch = '.', col = ifelse(y_grid == 1, 'springgreen3', 'tomato'))
points(set, pch = 21, bg = ifelse(set[, 3] == 1, 'green4', 'red3'))
# Visualising the Test set results (same procedure; the dots come from
# test_set while the background is still predicted from the training data)
library(ElemStatLearn)
set = test_set
X1 = seq(min(set[, 1]) - 1, max(set[, 1]) + 1, by = 0.01)
X2 = seq(min(set[, 2]) - 1, max(set[, 2]) + 1, by = 0.01)
grid_set = expand.grid(X1, X2)
colnames(grid_set) = c('Age', 'EstimatedSalary')
y_grid = knn(train = training_set[, -3],
             test = grid_set,
             cl = training_set[, 3],
             k = 5,
             prob = TRUE)
plot(set[, -3],
     main = 'K-NN (Test set)',
     xlab = 'Age', ylab = 'Estimated Salary',
     xlim = range(X1), ylim = range(X2))
contour(X1, X2, matrix(as.numeric(y_grid), length(X1), length(X2)), add = TRUE)
points(grid_set, pch = '.', col = ifelse(y_grid == 1, 'springgreen3', 'tomato'))
points(set, pch = 21, bg = ifelse(set[, 3] == 1, 'green4', 'red3'))
|
6bfbea84eea50ee05b5d3d2c1fb6653dba4e3e66
|
3732780f0c98f8ff8b13805355c17fcfe3d25170
|
/man/g2plot.Rd
|
3591116722f7c4224a7d061e527dc58042658c6b
|
[] |
no_license
|
GOUYONGCHAO/g2r
|
6a616bc265c5f64c422af6db996528cb3a1f6657
|
4427d2db4f2c8c83867b233fe89ca95fd884b32b
|
refs/heads/master
| 2021-07-19T00:02:16.020711
| 2021-03-16T14:27:35
| 2021-03-16T14:27:35
| 243,919,615
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 303
|
rd
|
g2plot.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/g2plot.R
\name{g2plot}
\alias{g2plot}
\title{g2plot main function}
\usage{
g2plot(data, width = NULL, height = NULL, elementId = NULL, ...)
}
\arguments{
\item{data}{data you will be plot}
}
\description{
Creates a g2plot chart widget from the supplied data; \code{width},
\code{height} and \code{elementId} control the htmlwidget container.
}
|
cab2f87fbe9689f8b5ed0b30cdc6ef8830cca4de
|
feabcc19c0457cdd946433dd0869d0b4b9885384
|
/man/multinomial.Rd
|
3df7be1f417e5077cb3e3d85512fa3030f229c13
|
[] |
no_license
|
cran/lava
|
e9dd8f8dcdceb987b8a27e62a2b1663b3b060891
|
b731197dbd9edb76987ccacf94dd95c6a54e4504
|
refs/heads/master
| 2023-03-05T00:01:23.232939
| 2023-02-27T07:12:30
| 2023-02-27T07:12:30
| 17,697,007
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,999
|
rd
|
multinomial.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/multinomial.R
\name{multinomial}
\alias{multinomial}
\alias{kappa.multinomial}
\alias{kappa.table}
\alias{gkgamma}
\title{Estimate probabilities in contingency table}
\usage{
multinomial(
x,
data = parent.frame(),
marginal = FALSE,
transform,
vcov = TRUE,
IC = TRUE,
...
)
}
\arguments{
\item{x}{Formula (or matrix or data.frame with observations, 1 or 2 columns)}
\item{data}{Optional data.frame}
\item{marginal}{If TRUE the marginals are estimated}
\item{transform}{Optional transformation of parameters (e.g., logit)}
\item{vcov}{Calculate asymptotic variance (default TRUE)}
\item{IC}{Return IC decomposition (default TRUE)}
\item{...}{Additional arguments to lower-level functions}
}
\description{
Estimate probabilities in contingency table
}
\examples{
set.seed(1)
breaks <- c(-Inf,-1,0,Inf)
m <- lvm(); covariance(m,pairwise=TRUE) <- ~y1+y2+y3+y4
d <- transform(sim(m,5e2),
z1=cut(y1,breaks=breaks),
z2=cut(y2,breaks=breaks),
z3=cut(y3,breaks=breaks),
z4=cut(y4,breaks=breaks))
multinomial(d[,5])
(a1 <- multinomial(d[,5:6]))
(K1 <- kappa(a1)) ## Cohen's kappa
K2 <- kappa(d[,7:8])
## Testing difference K1-K2:
estimate(merge(K1,K2,id=TRUE),diff)
estimate(merge(K1,K2,id=FALSE),diff) ## Wrong std.err ignoring dependence
sqrt(vcov(K1)+vcov(K2))
## Average of the two kappas:
estimate(merge(K1,K2,id=TRUE),function(x) mean(x))
estimate(merge(K1,K2,id=FALSE),function(x) mean(x)) ## Independence
##'
## Goodman-Kruskal's gamma
m2 <- lvm(); covariance(m2) <- y1~y2
breaks1 <- c(-Inf,-1,0,Inf)
breaks2 <- c(-Inf,0,Inf)
d2 <- transform(sim(m2,5e2),
z1=cut(y1,breaks=breaks1),
z2=cut(y2,breaks=breaks2))
(g1 <- gkgamma(d2[,3:4]))
## same as
\dontrun{
gkgamma(table(d2[,3:4]))
gkgamma(multinomial(d2[,3:4]))
}
##partial gamma
d2$x <- rbinom(nrow(d2),2,0.5)
gkgamma(z1~z2|x,data=d2)
}
\author{
Klaus K. Holst
}
|
18af75afbbb2eeaef8c44f2ca15d8eedb703e042
|
ba9c2741339f66bfd24c6dda7cd40b30919a0984
|
/chapter_3/analysis_scripts/002_format_plink_egwas.R
|
4fe113440979c4c4e52d670bf3906f4856252073
|
[
"Unlicense"
] |
permissive
|
cjfiscus/2022_Fiscus_Dissertation
|
928b38d55135e125ace47088eddad4615613cc00
|
25110d7120d52d94c99616ebd4eed40da5aed3bf
|
refs/heads/main
| 2023-04-17T19:35:12.737005
| 2022-09-05T20:26:09
| 2022-09-05T20:26:09
| 532,716,601
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,987
|
r
|
002_format_plink_egwas.R
|
#!/usr/bin/env Rscript
# prep eGWAS: build the environmental-GWAS input files for cowpea.
# Reads the IITA passport sheet, keeps georeferenced accessions that were not
# market-collected or breeding material, extracts 19 WorldClim bioclim values
# at each accession's coordinates, and writes a PLINK-style .fam file using
# the climate variables as phenotypes.
# cjfiscus
# 2021-12-07
# edited 2022-07-09
library(pacman)
p_load(readxl, raster, rgdal)
# import data and slice out relevant info (name, coordinates, provenance)
df<-read_excel("../data/IITA-Cowpea collection.xls", sheet="Accession passport data")
df<-as.data.frame(df)
df<-df[,c("Accession name", "Latitude", "Longitude", "Collecting/acquisition source", "Biological status of accession")]
## subset to accessions we have (sample list, one name per row in column V1)
acc<-read.table("../data/samples.txt")
df<-df[df$`Accession name` %in% acc$V1,]
## remove market collected
# NOTE(review): the filter value is stored in a variable named "rm", which
# shadows base::rm(); it works, but a different name would be safer.
rm<-"Market or shop"
df1<-df[!df$`Collecting/acquisition source` %in% rm,]
rm<-"Breeding / Research material"
df1<-df1[!df1$`Biological status of accession` %in% rm,]
df1<-df1[,1:3]     # keep accession name + coordinates only
df1<-na.omit(df1)  # drop accessions without coordinates
df1<-df1[,c("Accession name", "Longitude", "Latitude")]  # lon/lat order for raster::extract()
# compile wc data: stack every .tif in ../data and sample it at each point
files<-list.files(path="../data", pattern=".tif", full.names=T)
rasterStack<-stack(files)
row.names(df1)<-df1$`Accession name`
df1$`Accession name`<-NULL
# NOTE(review): cbind()-ing the character row names coerces every extracted
# value to character; downstream consumers must re-convert to numeric.
worldclimData<-as.data.frame(cbind(row.names(df1), extract(rasterStack, df1)))
names(worldclimData)<-c("ID", "BIO1", "BIO10", "BIO11", "BIO12", "BIO13", "BIO14",
                        "BIO15", "BIO16", "BIO17", "BIO18", "BIO19", "BIO2",
                        "BIO3", "BIO4", "BIO5", "BIO6", "BIO7", "BIO8", "BIO9")
worldclimData<-worldclimData[,c("ID", paste0("BIO", seq(1,19)))]  # reorder to BIO1..BIO19
## merge with coordinates
names(df)[1]<-"ID"
# NOTE(review): this merge result "m" is never written out and is overwritten
# below -- presumably leftover from an earlier version of the script.
m<-merge(df, worldclimData, by="ID")
write.table(worldclimData, "../data/cowpea_worldclim2_1.txt", sep="\t", quote=F, row.names=F)
#####
# write out data as tfam: two copies of the ID plus three zero placeholder
# columns (.fam-style), followed by the climate phenotypes, row order matching
# samples.txt (accessions without climate data become NA via all.x=T)
names(acc)<-"ID"
m<-merge(acc, worldclimData, by="ID", all.x=T)
m<-m[match(acc$ID, m$ID),]
info<-as.data.frame(cbind(m$ID, m$ID, 0, 0, 0))
m<-as.data.frame(cbind(info, m[,2:ncol(m)]))
write.table(m, "../data/cowpea_envgwas.fam", sep=" ", quote=F, row.names=F, col.names=F)
## export climate vars (phenotype column names, in .fam column order)
vars<-names(m)[6:ncol(m)]
write.table(vars, "../data/worldclim_vars.txt", sep="\t", quote=F, row.names=F, col.names=F)
|
c388ea434f6f57a3559a48847a8e6693265ae473
|
1aeef86073188e1298feada1d91ace747438f04a
|
/man/PValue.Rd
|
f506f1f6845bddd7c2aa1ba2945617eebffecba3
|
[
"MIT"
] |
permissive
|
hollina/scul
|
c5ff1a79fec29073a7c56d94318fc65cd0931ac2
|
24c025ff4ead667c3d369739399307785ed019d5
|
refs/heads/master
| 2021-06-11T09:59:36.572123
| 2021-05-18T23:44:49
| 2021-05-18T23:44:49
| 171,319,964
| 21
| 6
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,330
|
rd
|
PValue.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/PValue.R
\name{PValue}
\alias{PValue}
\title{Calculate a simple bounded p-value for the treatment effect reporting the larger end of the bound}
\usage{
PValue(
x.PlaceboPool.full = SCUL.inference$y.placebo.StandardizedDifference.Full,
x.PlaceboPool.CohensD = SCUL.inference$y.placebo.CohensD,
TreatmentBeginsAt = SCUL.input$TreatmentBeginsAt,
OutputFilePath = SCUL.input$OutputFilePath,
CohensD = SCUL.input$CohensDThreshold,
y.actual = SCUL.output$y.actual,
y.scul = SCUL.output$y.scul,
StartTime = SCUL.input$TreatmentBeginsAt,
EndTime = nrow(SCUL.output$y.scul)
)
}
\arguments{
\item{x.PlaceboPool.full}{A (T by L), where L<=J) data frame containing all products that are included in the placebo distribution
Default is SCUL.inference$y.placebo.StandardizedDifference.Full}
\item{x.PlaceboPool.CohensD}{A (1 by L) data frame containing all pre-period Cohen's D fit statistic for each placebo unit.
Default is SCUL.inference$y.placebo.CohensD,}
\item{TreatmentBeginsAt}{An integer indicating which row begins treatment. Default is SCUL.output$TreatmentBeginsAt.}
\item{OutputFilePath}{Output file path. Default is SCUL.input$OutputFilePath.}
\item{CohensD}{A real number greater than 0, indicating the Cohen's D threshold at which
fit is determined to be "poor". The difference is in standard deviation units. Default is SCUL.input$CohensDThreshold.}
\item{y.actual}{The actual (target) data. Default is SCUL.output$y.actual.}
\item{y.scul}{Synthetic data created by SCUL procedure. Default is SCUL.output$y.scul.}
\item{StartTime}{The beginning time period for which the average pseudo treatment effect is calculated.}
\item{EndTime}{The end time period for which the average pseudo treatment effect is calculated.}
}
\value{
list The bounds for the rank based p-value based upon rank of mean absolute value of post-treatment standardized effect against null distribution.
}
\description{
Determines the rank of the mean absolute value of the standardized treatment effect
of the target product relative to the placebo distribution. There are many alternative
ways to use the placebo distribution to calculate a mean value (e.g., mean p-value or another test statistic).
This is a simple example of one such way.
}
|
3c382b70ae753579cc477cee2116c0e8bee57fa1
|
44ee2f0921befbc6883ddb21250f9ceadb9366bc
|
/1. Import_Report_Data.R
|
df993b01e5ab7b50553def1832e422726127cb61
|
[] |
no_license
|
MarinoSanLorenzo/PricingGame
|
cae1c9f20a40e46cc50e98a3b161eec2910fabed
|
de3bfa0dd74bab49eb981e7eb3a9be1e4c7dbc5a
|
refs/heads/master
| 2020-05-23T19:19:48.923505
| 2019-05-15T22:16:25
| 2019-05-15T22:16:25
| 186,910,029
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 366
|
r
|
1. Import_Report_Data.R
|
##############################################
######### IMPORT DATA
###############################################
# Load the pricing-game training data and generate automated EDA reports
# (DataExplorer::create_report) for the claim-count (Nclaim) and
# claim-cost (Cclaim) response variables.
library(readxl)
library(DataExplorer)
# NOTE(review): readxl is loaded but not used in this script.
training_set <- read.csv("C:/Machine Learning/Pricing Game/Pricing Game (UCL-ULB)/pg2019/training_set.csv")
create_report(training_set, y= "Nclaim")
create_report(training_set, y= "Cclaim")
|
6c37d5fb731fb2ee8d9b67e50f8dd78712ba020a
|
7247441629aaa7cf277cbe9e8896b42ebc1d921b
|
/4-data_import.R
|
0a91969de66b33eb893fd3529b4a25bcd3ec85bb
|
[] |
no_license
|
johnros/practicing_R
|
91333d781f33582c23c931b8012875711cc17bbc
|
0f69d2f659ae3747c81591c29221d898bc5400b7
|
refs/heads/master
| 2021-01-10T19:25:07.421332
| 2014-08-24T12:04:36
| 2014-08-24T12:04:36
| 21,349,335
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,528
|
r
|
4-data_import.R
|
# Data-import practice script: worked examples of reading/writing data from
# the web, local files, SAS/SPSS exports, very large files, and MySQL.
# For a complete review see:
# http://cran.r-project.org/doc/manuals/R-data.html
# or
# "Import and Export Manual" in help.start()
# Whenever possible, try using .csv format!
# ------------- On-line files--------------#
web.data<- read.table('http://www-stat.stanford.edu/~tibs/ElemStatLearn/datasets/bone.data',
                      header=T)
# web.data<- read.table(file='webdata.txt')
web.data
head(web.data)
tail(web.data)
View(web.data)
fix(web.data)
?read.table # Get help on importing options
# Note: if the data has a header, use header=TRUE switch of read.table() and read.csv()
# --------------- Local .csv file------------#
# Note:
# R is portable over platforms but *path specification* differs.
# This means import functions are *platform dependent*.
# The directories in this file will need to be adapted to the local machine.
getwd()
setwd('~/Dropbox/Maccabi Workshop/')
women<- read.csv(file='what women want.csv')
# --------------Local txt file ------------#
# Same operation. Different syntax.
women<- read.table(file='C:\\Documents and Settings\\Jonathan\\My Documents\\R Workshop\\what women want.txt')
#-----------Writing Data------------------#
#What is the working directory?
getwd()
#Setting the working directory
setwd('/Documents and Settings/Jonathan/My Documents/Dropbox/Maccabi Workshop/')
write.csv(x=women,
          file='write_women.csv',
          append=F,
          row.names=F
)
?write.table
#---------------.XLS files-------------------#
# Strongly recommended to convert to .csv
# If you still insist see:
# http://cran.r-project.org/doc/manuals/R-data.html#Reading-Excel-spreadsheets
#------------- SAS XPORT files--------------#
#See read.xport in package "foreign"
install.packages('foreign')
library(foreign)
?read.xport
#--------------SPSS .sav files-----------#
#See read.spss in package "foreign"
install.packages('foreign')
library(foreign)
?read.spss
#------------------- MASSIVE files-----------------#
# scan() is faster than read.table() but less convenient:
start<- proc.time() #Initializing stopper
A<- read.table('matrix.txt', header=F)
proc.time()-start #Stopping stopper
dim(A)
start<- proc.time()
A <- matrix(scan("matrix.txt", n = 20*2000), 20, 2000, byrow = TRUE)
proc.time()-start
# On Linux/Mac differences are less notable.
#----------------MySQL-----------------#
# MySQL is the best integrated relational database.
# This is done with package RMySQL
# Here is an example assuming you have a MySQL server setup with a database named "test".
install.packages('RMySQL')
library(RMySQL) # will load package DBI as well
# open a connection to a MySQL database named "test".
con <- dbConnect(dbDriver("MySQL"), dbname = "test")
## list the tables in the database
dbListTables(con)
## load a data frame named "USArrests" into database "test", deleting any existing copy
data(USArrests)
dbWriteTable(con, "arrests", USArrests, overwrite = TRUE)
## get the whole table
dbReadTable(con, "arrests")
## Send SQL query to database
dbGetQuery(con,
           paste("select row_names, Murder from arrests",
                 "where Rape > 30 order by Murder"))
dbRemoveTable(con, "arrests") #Removes the table "arrests" from database "test"
dbDisconnect(con) #Closes the connection to database "test"
# For more information see:
# http://cran.r-project.org/doc/manuals/R-data.html#Relational-databases
#-------------- HTML & XML Parsing -----------#
# Note: installing scrapeR will require libxml.
install.packages('XML')
install.packages('scrapeR')
library(scrapeR)
# [To Be Completed]
# FIX: the bare bracketed placeholder above was previously unquoted and was a
# syntax error that aborted sourcing of the whole file; it is now a comment.
|
27547d981a012a4947e9862b48b82c65d29101ee
|
ef9d62a5f8a7cd39de7b92063bce2f476eb22acf
|
/threshold/natvnurt_stage_jpeg_thresh_Mar5.r
|
2b856d52fca91f6c60052eab47fe3f030536a7ca
|
[] |
no_license
|
dbo99/nurturenature
|
59a8d2df50aa4eac3d447657ca01fef0a3f50b44
|
a4de464bf2611aff0c960f012258e815822ff4d5
|
refs/heads/master
| 2020-06-02T16:42:33.669175
| 2019-08-21T04:54:07
| 2019-08-21T04:54:07
| 191,232,255
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 13,357
|
r
|
natvnurt_stage_jpeg_thresh_Mar5.r
|
###################### one off slides ##############################
# Scratch inputs kept (commented out) from earlier interactive exploration:
#am_fcast <- ymd_hms(fcasts_firstam)
#fcasts_firstam_date <- date(fcasts_firstam)
## plot inputs
#am_fcast <- c("2016-01-13 07:37:00 PST") #fcasts[4]
#p1 <- ggplot(data = filter(dtrmnstc, fcast_i_pst == fcasts[5]), aes(x = fcast_t_pst, y = kcfs)) + geom_line()
#ggplotly(p1)
# NOTE(review): rm(list = ls()) plus setwd() wipes the session and ties the
# script to one machine -- acceptable for a one-off analysis, avoid elsewhere.
rm(list = ls())
{
  setwd("~/R/proj/natvnat/threshold")
  # The sourced setup script presumably defines the objects used below
  # (df_hefs, dtrmnstc, fcasts, notify_levs, the *stage_* thresholds and
  # create_permax_quants_hourly()) -- confirm before running standalone.
  source("vanduzen_ahps_natvnatjan16_12.19.r")
  #am_fcast <- fcasts[1]
  #fcast_start_date <- mdy(c("01-15-2016"))
  #fcast_end_date <- mdy(c("01-17-2016"))
  #fcaststartdate <- fcast_start_date
  #fcastenddate <- fcast_end_date
  #q_type <- 6
  #obs_start_date <- mdy(c("10-23-2014"))
}
# Plot 5-day maximum peak-STAGE exceedance probabilities (feet) for one
# morning forecast: probability bands derived from the HEFS ensemble drawn
# behind the deterministic forecast trace, with monitor/flood reference
# lines and notification-level labels.
#
# Args:
#   df_hefs  - long data frame of ensemble traces (columns used here:
#              fcast_i_pst, fcast_t_pst, mefp_yr, feet)
#   dtrmnstc - deterministic forecast (fcast_i_pst, fcast_t_pst, feet)
#   am_fcast - forecast issuance time (PST) selecting which forecast to plot
#   q_type   - quantile type passed through to create_permax_quants_hourly()
# Returns: a ggplot object.
# NOTE(review): relies on globals from the sourced setup script --
# monitorstage_feet, floodstage_feet, notify_levs and
# create_permax_quants_hourly() -- confirm they are in scope before calling.
plot_s_5daypeakstage_web<- function(df_hefs, dtrmnstc, am_fcast, q_type) {
  # Restrict both data sets to the requested issuance and to the
  # deterministic forecast's time window.
  dtrmnstc <- dtrmnstc %>% filter (fcast_i_pst == am_fcast)
  firstdtrmnstc <- min(dtrmnstc$fcast_t_pst)
  lastdtmrnstc <- max(dtrmnstc$fcast_t_pst)
  df_hefs <- df_hefs %>% filter (fcast_i_pst == am_fcast)
  df_hefs <- df_hefs %>% filter(fcast_t_pst >= firstdtrmnstc)
  df_hefs <- df_hefs %>% filter(fcast_t_pst <= lastdtmrnstc)
  # Midpoint of the window, used to centre the notification-level labels.
  midpoint <- as.POSIXct((as.numeric(firstdtrmnstc) +
                            as.numeric(lastdtmrnstc)) / 2, origin = '1970-01-01') %>% round_date("hours")
  # Quantiles of the window-maximum stage across ensemble members.
  df_qntls_p <- create_permax_quants_hourly(df_hefs, firstdtrmnstc, lastdtmrnstc, "mefp_yr", "feet", q_type)
  p1 <- ggplot() +
    # Raw ensemble traces as thin grey lines.
    geom_line(data = df_hefs, aes(x = fcast_t_pst, y = feet, group = mefp_yr), size = 0.1, color = "gray50", show.legend = F) +
    # Stacked probability bands between successive peak-stage quantiles.
    geom_rect(data = df_qntls_p, aes(xmin=as.POSIXct(firstdtrmnstc), xmax= as.POSIXct(lastdtmrnstc), ymin= `90%`, ymax = `max`,fill='<10%'), alpha = 0.3) +
    geom_rect(data = df_qntls_p, aes(xmin=as.POSIXct(firstdtrmnstc), xmax= as.POSIXct(lastdtmrnstc), ymin= `75%`, ymax = `90%`,fill='10-25%'), alpha = 0.3) +
    geom_rect(data = df_qntls_p, aes(xmin=as.POSIXct(firstdtrmnstc), xmax= as.POSIXct(lastdtmrnstc), ymin= `50%`, ymax = `75%`,fill='25-50%'), alpha = 0.3) +
    geom_rect(data = df_qntls_p, aes(xmin=as.POSIXct(firstdtrmnstc), xmax= as.POSIXct(lastdtmrnstc), ymin= `25%`, ymax = `50%`,fill='50-75%'), alpha = 0.3) +
    geom_rect(data = df_qntls_p, aes(xmin=as.POSIXct(firstdtrmnstc), xmax= as.POSIXct(lastdtmrnstc), ymin= `10%`, ymax = `25%`,fill='75-90%'), alpha = 0.3) +
    geom_rect(data = df_qntls_p, aes(xmin=as.POSIXct(firstdtrmnstc), xmax= as.POSIXct(lastdtmrnstc), ymin= `min`, ymax = `10%`,fill='>=90%'), alpha = 0.3) +
    # Forecast-start marker and monitor/flood stage reference lines.
    geom_vline(xintercept = firstdtrmnstc, linetype = "dashed", color = "grey40", show.legend = F, size = 0.75) +
    geom_hline(yintercept = monitorstage_feet, linetype = "dashed", color = "gray", show.legend = F, size = 1.5) + #deeppink
    geom_hline(yintercept = floodstage_feet, linetype = "dashed", color = "gray", show.legend = F, size = 1.5) + #deeppink4
    # Deterministic forecast drawn on top of the bands.
    geom_line(data = dtrmnstc, aes( x = fcast_t_pst, y = feet,color = "Forecast stage\n(deterministic)" ), size = 1) +
    geom_label(data = filter(notify_levs, unit == "feet"), aes(x = midpoint, y = value, label = paste0(type," (",value_t,")")), show.legend = F, alpha = 0.4) +
    scale_x_datetime(expand = c(0.015, 0.015),
                     date_breaks = "1 day", date_minor_breaks = "12 hours", date_labels = "%b %d\n%H:%M pst") +
    scale_fill_manual(name = "Exceedance Probability\n(ensemble)",
                      values = c(
                        '>=90%' = 'red',
                        '75-90%' = 'yellow',
                        '50-75%' = 'chartreuse4',
                        '25-50%' = 'dodgerblue',
                        '10-25%' = 'blue2',
                        '<10%' = 'purple'),
                      breaks = c(
                        '<10%' ,
                        '10-25%' ,
                        '25-50%' ,
                        '50-75%' ,
                        '75-90%' ,
                        '>=90%' )) +
    scale_color_manual(name = NULL,
                       values= c(
                         "Forecast stage\n(deterministic)" = 'black'),
                       guide = guide_legend(override.aes = list(
                         linetype = c("solid"),
                         shape = c( NA)))) +
    scale_y_continuous(name = "feet") +
    #sec.axis = dup_axis(name = NULL)) +
    labs( x = NULL) + ggtitle("5-Day Maximum Peak Stage Probabilities")
  p1
}
# Render and save the stage-probability figure for each lead time
# (5 days out down to 0-1 days out), one JPEG per morning forecast.
{
  pday5 <- plot_s_5daypeakstage_web(df_hefs, dtrmnstc, fcasts[1], 6) + ggtitle("5 Days Out, Morning Forecast\n5-Day Maximum Peak Stage Probabilities") +
    scale_y_continuous(breaks = c(4,8,12,16))
  pday5
  ggsave("stage_thres_5.jpg", dpi = 300, width = 7.5, height = 4, units = "in")
  pday5 <- plot_s_5daypeakstage_web(df_hefs, dtrmnstc, fcasts[2], 6) + ggtitle("4 Days Out, Morning Forecast\n5-Day Maximum Peak Stage Probabilities")
  pday5
  ggsave("stage_thres_4.jpg", dpi = 300, width = 7.5, height = 4, units = "in")
  pday5 <- plot_s_5daypeakstage_web(df_hefs, dtrmnstc, fcasts[3], 6) + ggtitle("3 Days Out, Morning Forecast\n5-Day Maximum Peak Stage Probabilities")
  pday5
  ggsave("stage_thres_3.jpg", dpi = 300, width = 7.5, height = 4, units = "in")
  pday5 <- plot_s_5daypeakstage_web(df_hefs, dtrmnstc, fcasts[4], 6) + ggtitle("2 Days Out, Morning Forecast\n5-Day Maximum Peak Stage Probabilities")
  pday5
  ggsave("stage_thres_2.jpg", dpi = 300, width = 7.5, height = 4, units = "in")
  pday5 <- plot_s_5daypeakstage_web(df_hefs, dtrmnstc, fcasts[5], 6) + ggtitle("0-1 Days Out, Morning Forecast\n5-Day Maximum Peak Stage Probabilities")
  pday5
  ggsave("stage_thres_1.jpg", dpi = 300, width = 7.5, height = 4, units = "in")
}
##################################################################################################
####################################### Flow #####################################################
##################################################################################################
# Plot 5-day maximum peak-FLOW exceedance probabilities (kcfs) for one
# morning forecast: probability bands derived from the HEFS ensemble drawn
# behind the deterministic forecast trace, with monitor/flood reference
# lines and notification-level labels.
#
# Args:
#   df_hefs  - long data frame of ensemble traces (columns used here:
#              fcast_i_pst, fcast_t_pst, mefp_yr, kcfs)
#   dtrmnstc - deterministic forecast (fcast_i_pst, fcast_t_pst, kcfs)
#   am_fcast - forecast issuance time (PST) selecting which forecast to plot
#   q_type   - quantile type passed through to create_permax_quants_hourly()
# Returns: a ggplot object.
#
# FIX: the original put `alpha = 0.3` INSIDE aes() for all six geom_rect
# layers, mapping a constant to the alpha aesthetic (which creates a spurious
# alpha legend) instead of setting it as a fixed parameter.  The sibling
# stage function passes alpha outside aes(); made consistent here.
# NOTE(review): relies on globals from the sourced setup script --
# monitorstage_kcfs, floodstage_kcfs, notify_levs and
# create_permax_quants_hourly() -- confirm they are in scope before calling.
plot_s_5daypeakflow_web<- function(df_hefs, dtrmnstc, am_fcast, q_type) {
  # Restrict both data sets to the requested issuance and to the
  # deterministic forecast's time window.
  dtrmnstc <- dtrmnstc %>% filter (fcast_i_pst == am_fcast)
  firstdtrmnstc <- min(dtrmnstc$fcast_t_pst)
  lastdtmrnstc <- max(dtrmnstc$fcast_t_pst)
  df_hefs <- df_hefs %>% filter (fcast_i_pst == am_fcast)
  df_hefs <- df_hefs %>% filter(fcast_t_pst >= firstdtrmnstc)
  df_hefs <- df_hefs %>% filter(fcast_t_pst <= lastdtmrnstc)
  # Midpoint of the window, used to centre the notification-level labels.
  midpoint <- as.POSIXct((as.numeric(firstdtrmnstc) +
                            as.numeric(lastdtmrnstc)) / 2, origin = '1970-01-01') %>% round_date("hours")
  # Quantiles of the window-maximum flow across ensemble members.
  df_qntls_p <- create_permax_quants_hourly(df_hefs, firstdtrmnstc, lastdtmrnstc, "mefp_yr", "kcfs", q_type)
  p1 <- ggplot() +
    # Stacked probability bands between successive peak-flow quantiles.
    geom_rect(data = df_qntls_p, aes(xmin=as.POSIXct(firstdtrmnstc), xmax= as.POSIXct(lastdtmrnstc), ymin= `90%`, ymax = `max`,fill='<10%'), alpha = 0.3) +
    geom_rect(data = df_qntls_p, aes(xmin=as.POSIXct(firstdtrmnstc), xmax= as.POSIXct(lastdtmrnstc), ymin= `75%`, ymax = `90%`,fill='10-25%'), alpha = 0.3) +
    geom_rect(data = df_qntls_p, aes(xmin=as.POSIXct(firstdtrmnstc), xmax= as.POSIXct(lastdtmrnstc), ymin= `50%`, ymax = `75%`,fill='25-50%'), alpha = 0.3) +
    geom_rect(data = df_qntls_p, aes(xmin=as.POSIXct(firstdtrmnstc), xmax= as.POSIXct(lastdtmrnstc), ymin= `25%`, ymax = `50%`,fill='50-75%'), alpha = 0.3) +
    geom_rect(data = df_qntls_p, aes(xmin=as.POSIXct(firstdtrmnstc), xmax= as.POSIXct(lastdtmrnstc), ymin= `10%`, ymax = `25%`,fill='75-90%'), alpha = 0.3) +
    geom_rect(data = df_qntls_p, aes(xmin=as.POSIXct(firstdtrmnstc), xmax= as.POSIXct(lastdtmrnstc), ymin= `min`, ymax = `10%`,fill='>=90%'), alpha = 0.3) +
    # Forecast-start marker and monitor/flood flow reference lines.
    geom_vline(xintercept = firstdtrmnstc, linetype = "dashed", color = "grey40", show.legend = F, size = 0.75) +
    geom_hline(yintercept = monitorstage_kcfs, linetype = "dashed", color = "gray", show.legend = F, size = 1.5) +
    geom_hline(yintercept = floodstage_kcfs, linetype = "dashed", color = "gray", show.legend = F, size = 1.5) +
    # Deterministic forecast drawn on top of the bands.
    geom_line(data = dtrmnstc, aes( x = fcast_t_pst, y = kcfs,color = "Forecast flow\n(deterministic)" ), size = 1) +
    geom_label(data = filter(notify_levs, unit == "kcfs"), aes(x = midpoint, y = value, label = paste0(type," (",value_t,")")), show.legend = F, alpha = 0.4) +
    scale_x_datetime(expand = c(0.015, 0.015),
                     date_breaks = "1 day", date_minor_breaks = "12 hours", date_labels = "%b %d\n%H:%M pst") +
    scale_fill_manual(name = "Exceedance Probability\n(ensemble)",
                      values = c(
                        '>=90%' = 'red',
                        '75-90%' = 'yellow',
                        '50-75%' = 'chartreuse4',
                        '25-50%' = 'dodgerblue',
                        '10-25%' = 'blue2',
                        '<10%' = 'purple'),
                      breaks = c(
                        '<10%' ,
                        '10-25%' ,
                        '25-50%' ,
                        '50-75%' ,
                        '75-90%' ,
                        '>=90%' )) +
    scale_color_manual(name = NULL,
                       values= c(
                         "Forecast flow\n(deterministic)" = 'black'),
                       guide = guide_legend(override.aes = list(
                         linetype = c("solid"),
                         shape = c( NA)))) +
    scale_y_continuous(name = "kcfs") + #, sec.axis = dup_axis(name = NULL)) +
    labs( x = NULL) + ggtitle("5-Day Maximum Peak Flow Probabilities")
  p1
}
# Render and save the flow-probability figure for each lead time
# (5 days out down to 0-1 days out), one JPEG per morning forecast.
{
  pday5 <- plot_s_5daypeakflow_web(df_hefs, dtrmnstc, fcasts[1], 6) + ggtitle("5 Days Out, Morning Forecast\n5-Day Maximum Peak Flow Probabilities")
  pday5
  ggsave("flow_thres_5.jpg", dpi = 300, width = 7.5, height = 4, units = "in")
  pday5 <- plot_s_5daypeakflow_web(df_hefs, dtrmnstc, fcasts[2], 6) + ggtitle("4 Days Out, Morning Forecast\n5-Day Maximum Peak Flow Probabilities")
  pday5
  ggsave("flow_thres_4.jpg", dpi = 300, width = 7.5, height = 4, units = "in")
  pday5 <- plot_s_5daypeakflow_web(df_hefs, dtrmnstc, fcasts[3], 6) + ggtitle("3 Days Out, Morning Forecast\n5-Day Maximum Peak Flow Probabilities")
  pday5
  ggsave("flow_thres_3.jpg", dpi = 300, width = 7.5, height = 4, units = "in")
  pday5 <- plot_s_5daypeakflow_web(df_hefs, dtrmnstc, fcasts[4], 6) + ggtitle("2 Days Out, Morning Forecast\n5-Day Maximum Peak Flow Probabilities")
  pday5
  ggsave("flow_thres_2.jpg", dpi = 300, width = 7.5, height = 4, units = "in")
  pday5 <- plot_s_5daypeakflow_web(df_hefs, dtrmnstc, fcasts[5], 6) + ggtitle("0-1 Days Out, Morning Forecast\n5-Day Maximum Peak Flow Probabilities")
  pday5
  ggsave("flow_thres_1.jpg", dpi = 300, width = 7.5, height = 4, units = "in")
}
#########################################
######### Build step by step ############
#########################################
######## just deterministic #########
# Slide-building section: show (1) the deterministic forecast alone, (2) the
# raw ensemble, (3) the per-member peaks, then (4) quantile reference lines.
df_hefs_edu <- df_hefs %>% filter(fcast_i_pst == fcasts[3], fcast_t_pst >= ymd("2016-01-14"), fcast_t_pst <= ymd("2016-01-19"))
dtrmnstc_edu <- dtrmnstc %>% filter(fcast_i_pst == fcasts[3], fcast_t_pst >= ymd("2016-01-14"), fcast_t_pst <= ymd("2016-01-19"))
p_dtrm <- ggplot(dtrmnstc_edu, aes(x = fcast_t_pst, y = feet)) + geom_line()
p_dtrm
ggsave("x.jpg", dpi = 300, width = 7.5, height = 4, units = "in")
p_hefs <- ggplot(df_hefs_edu, aes(x = fcast_t_pst, y = feet, group = mefp_yr, color = factor(mefp_yr))) + geom_line() +
  scale_y_continuous(limits = c(0,25))
p_hefs
ggsave("y.jpg", dpi = 300, width = 7.5, height = 4, units = "in")
# Mark each ensemble member's peak stage (NA everywhere else).
# FIX: this object was assigned as "f_hefs_edu_peak_ft" in the original while
# every later reference uses "df_hefs_edu_peak_ft"; renamed to match.
df_hefs_edu_peak_ft <- df_hefs_edu %>% group_by(mefp_yr) %>% mutate(peak = ifelse(feet ==max(feet), feet, NA))
p_hefs_peak <- ggplot(df_hefs_edu_peak_ft, aes(x = fcast_t_pst, y = peak, group = mefp_yr, color = factor(mefp_yr))) + geom_point() +
  scale_y_continuous(limits = c(0,25))
p_hefs_peak
ggsave("z.jpg", dpi = 300, width = 7.5, height = 4, units = "in")
# FIX: "df_hefs_edu_peak_ft_sum" was referenced below but never defined.
# Reconstructed here as the per-member peak values (one non-NA peak per
# ensemble member) so the quantile summary has data to operate on --
# confirm against the original analysis intent.
df_hefs_edu_peak_ft_sum <- df_hefs_edu_peak_ft %>% filter(!is.na(peak)) %>% ungroup()
df_qntls_p <- df_hefs_edu_peak_ft_sum %>% summarize(
  #`95%`=quantile(peak, probs=0.05),
  `90%`=quantile(peak, probs=0.10),
  `75%`=quantile(peak, probs=0.25),
  `50%`=quantile(peak, probs=0.50),
  `25%`=quantile(peak, probs=0.75),
  `10%`=quantile(peak, probs=0.9))
# `5%`=quantile(peak, probs=0.95))
# NOTE(review): the hline intercepts below are hard-coded rather than taken
# from df_qntls_p -- presumably snapshot values for the slides.
p_hefs_peak_ribs <- p_hefs_peak +
  geom_hline(yintercept = 5.65) +
  geom_hline(yintercept = 8.76) +
  geom_hline(yintercept = 11.37) +
  geom_hline(yintercept = 14.28) +
  geom_hline(yintercept = 18.41)
p_hefs_peak_ribs
ggsave("z2.jpg", dpi = 300, width = 7.5, height = 4, units = "in")
p_dtrm2 <- ggplot(dtrmnstc_edu, aes(x = fcast_t_pst, y = feet)) + geom_line() +
  geom_hline(yintercept = 5.65) +
  geom_hline(yintercept = 8.76) +
  geom_hline(yintercept = 11.37) +
  geom_hline(yintercept = 14.28) +
  geom_hline(yintercept = 18.41) + scale_y_continuous(limits = c(0,25))
p_dtrm2
ggsave("z3.jpg", dpi = 300, width = 7.5, height = 4, units = "in")
|
58a1aa770e611f50554d1b51de154e37689c878e
|
0a906cf8b1b7da2aea87de958e3662870df49727
|
/gdalcubes/inst/testfiles/libgdalcubes_set_threads/libFuzzer_libgdalcubes_set_threads/libgdalcubes_set_threads_valgrind_files/1609874518-test.R
|
a5a40c524847622b7d420cf39665c00c1a28a84d
|
[] |
no_license
|
akhikolla/updated-only-Issues
|
a85c887f0e1aae8a8dc358717d55b21678d04660
|
7d74489dfc7ddfec3955ae7891f15e920cad2e0c
|
refs/heads/master
| 2023-04-13T08:22:15.699449
| 2021-04-21T16:25:35
| 2021-04-21T16:25:35
| 360,232,775
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 110
|
r
|
1609874518-test.R
|
# Auto-generated fuzzer regression input: call the internal gdalcubes thread
# setter with a fixed integer payload and print the structure of the result.
testlist <- list(n = 1056964608L)
result <- do.call(gdalcubes:::libgdalcubes_set_threads,testlist)
str(result)
|
b6b6b747b23242414a26b4b8374bcf041fd3189b
|
67222f69dd1a5b5ced1d28df833a303924dbde35
|
/1. Practice/Other/prc bear.R
|
b7bf5473ad83689b33dd5a7ee7a536867e3abeae
|
[] |
no_license
|
mandarmakhi/DataScience-R-code
|
4f75906507e303fb9b438b99a5eab0a74bcc77f6
|
8c1728b306e53668b1814283da9936503e0554b9
|
refs/heads/master
| 2023-01-19T04:55:11.171455
| 2020-11-28T07:59:55
| 2020-11-28T07:59:55
| 263,417,867
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,558
|
r
|
prc bear.R
|
# Practice script exploring the built-in beaver2 data set
# (body-temperature telemetry; see ?beaver2 for its columns).
#airquality=read.csv
#In R Console extract data set
# NOTE(review): the object is named "airquality" but actually holds beaver2;
# the name is misleading.  Several later lines also reference Ozone/Solar.R,
# which are airquality columns, not beaver2 columns, so those plots will
# operate on NULL -- confirm which data set was intended.
airquality<-datasets::beaver2
head(beaver2) #Run
tail(beaver2) #Run
beaver2$time #Run
#descriptive stats: shows median, mean, min, max
summary(beaver2$time)
summary(beaver2$temp)
summary(beaver2$activ)
#Plot
plot(beaver2$temp)
plot(beaver2$temp,beaver2$time)
plot(beaver2) #Its scatter plot graph
summary(beaver2)
#point and Line
plot(beaver2$Ozone, type='l')   # NOTE(review): Ozone is not a beaver2 column
plot(beaver2$Ozone, xlab='ozone Concentration',
     ylab='no of instances', main='ozone level in NY city',
     col='red')
#Horizontal bar plot
barplot(beaver2$Ozone, main='Ozone concenteration in air'
        ,xlab='ozone level', col='red', horiz=FALSE)
#histogram
hist(beaver2$Solar.R)   # NOTE(review): Solar.R is not a beaver2 column
hist(beaver2$Solar.R,main='solar radiation value in air'
     ,xlab='solar rad',col='red')
#Box plot
boxplot(beaver2$Solar.R)
#multiple box plot
boxplot(beaver2[,1:4],main="Multiple", col = 'blue')
#multiple graphs in one window (4x4 grid)
par(mfrow=c(4,4),mar=c(2,5,2,1),las=0,bty='n')
plot(beaver2$temp)
plot(beaver2$temp,beaver2$time)
plot(beaver2) #Its scatter plot graph
summary(beaver2)
plot(beaver2$temp, type='l')
plot(beaver2$tempe, xlab='temp Concentration',   # NOTE(review): "tempe" is likely a typo for "temp"
     ylab='no of instances', main='ozone level in NY city',
     col='red')
barplot(beaver2$Ozone, main='Ozone concenteration in air'
        ,xlab='ozone level', col='red', horiz=FALSE)
hist(beaver2$Solar.R)
hist(beaver2$Solar.R,main='solar radiation value in air'
     ,xlab='solar rad',col='red')
boxplot(beaver2$Solar.R)
boxplot(beaver2[,1:4],main="Multiple", col = 'blue')
|
a617388059213e19ea4d048cd66d485c579598c5
|
71ebbd1395dbe39c89742d42138080d45324afa4
|
/Day 7/Day 7.R
|
68de93f8792681201ffe991121269a995eb9d7a4
|
[] |
no_license
|
Kendrick-Onyango/100DaysCodeChallenge
|
b937c4d9e062f174ad60683ba459c4e9335d3bbc
|
734dffa3f48d40af8f910b974f3dc3ba7ad22f4b
|
refs/heads/master
| 2023-02-21T07:18:46.528088
| 2021-01-26T14:21:37
| 2021-01-26T14:21:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,552
|
r
|
Day 7.R
|
#Evaluating Scoring Models
# Day 7: fit and evaluate scoring models -- regression RMSE on the cricket
# data, then a logistic spam classifier evaluated with double-density plots,
# an ROC curve and AUC.
# FIX: in the original, the DoubleDensityPlot/ROCPlot/calcAUC calls appeared
# BEFORE spamTest and its `pred` column were created (the script itself said
# "call in Day 5 work first").  Statements are reordered here so the script
# runs cleanly top to bottom; no statement was added or removed.
rm(list = ls(all=TRUE))
cat('\014')
# Please have an up to date version of R (3.5.*, or newer)
# Answer "no" to:
# Do you want to install from sources the packages which need compilation?
update.packages(ask = FALSE, checkBuilt = TRUE)
pkgs <- c(
  "arules", "bitops", "caTools", "cdata", "data.table", "DBI",
  "dbplyr", "DiagrammeR", "dplyr", "e1071", "fpc", "ggplot2",
  "glmnet", "glmnetUtils", "gridExtra", "hexbin", "kernlab",
  "igraph", "knitr", "lime", "lubridate", "magrittr", "MASS",
  "mgcv", "pander", "plotly", "pwr", "randomForest", "readr",
  "readxl", "rmarkdown", "rpart", "rpart.plot", "RPostgres",
  "rqdatatable", "rquery", "RSQLite", "scales", "sigr", "sqldf",
  "tidypredict", "text2vec", "tidyr", "vtreat", "wrapr", "WVPlots",
  "xgboost", "xts", "webshot", "zeallot", "zoo")
install.packages(
  pkgs,
  dependencies = c("Depends", "Imports", "LinkingTo"))
# ---- Regression: predict temperature from cricket chirp rate ----
#fit a regression model and make predictions
crickets <- read.csv('https://raw.githubusercontent.com/WinVector/PDSwR2/master/cricketchirps/crickets.csv', sep=',', header = T)
View(crickets)
str(crickets)
cricket_model <- lm(temperatureF ~ chirp_rate, data = crickets)
summary(cricket_model)
crickets$temp_pred <- predict(cricket_model, newdata = crickets)
#RMSE tells us by how much is the predicted temprature OFF??
error_sq <- (crickets$temp_pred - crickets$temperatureF)^2
(RMSE <- sqrt(mean(error_sq)))
# ---- Spam classifier (Day 5 work): must run BEFORE the evaluation plots ----
#read raw data and name the columns
spamD <- read.table('http://archive.ics.uci.edu/ml/machine-learning-databases/spambase/spambase.data', sep = ',', header = FALSE)
spamCols <- c(
  'word.freq.make', 'word.freq.address', 'word.freq.all',
  'word.freq.3d', 'word.freq.our', 'word.freq.over', 'word.freq.remove',
  'word.freq.internet', 'word.freq.order', 'word.freq.mail',
  'word.freq.receive', 'word.freq.will', 'word.freq.people',
  'word.freq.report', 'word.freq.addresses', 'word.freq.free',
  'word.freq.business', 'word.freq.email', 'word.freq.you',
  'word.freq.credit', 'word.freq.your', 'word.freq.font',
  'word.freq.000', 'word.freq.money', 'word.freq.hp', 'word.freq.hpl',
  'word.freq.george', 'word.freq.650', 'word.freq.lab',
  'word.freq.labs', 'word.freq.telnet', 'word.freq.857',
  'word.freq.data', 'word.freq.415', 'word.freq.85',
  'word.freq.technology', 'word.freq.1999', 'word.freq.parts',
  'word.freq.pm', 'word.freq.direct', 'word.freq.cs',
  'word.freq.meeting', 'word.freq.original', 'word.freq.project',
  'word.freq.re', 'word.freq.edu', 'word.freq.table',
  'word.freq.conference', 'char.freq.semi', 'char.freq.lparen',
  'char.freq.lbrack', 'char.freq.bang', 'char.freq.dollar',
  'char.freq.hash', 'capital.run.length.average',
  'capital.run.length.longest', 'capital.run.length.total',
  'spam'
)
colnames(spamD) <- spamCols
spamD$spam <- as.factor(ifelse(spamD$spam>0.5, 'spam', 'non-spam'))
set.seed(18012020)
spamD$rgroup <- floor(100*runif(dim(spamD)[[1]]))
write.table(spamD, file='spamD.tsv',quote = F, sep = '\t', row.names = F)
#### Classification problems - Multicategory | Two-category Classification
#read data into R
spamD <- read.table('spamD.tsv', header= TRUE, sep = '\t')
#partition data into training and test datasets
spamTrain <- subset(spamD,spamD$rgroup >= 10)
spamTest <- subset(spamD, spamD$rgroup < 10)
#Create a formula that describes the model
spamVars <- setdiff(colnames(spamD), list('rgroup','spam'))
spamFormula <- as.formula(paste('spam == "spam"',
                                paste(spamVars, collapse = '+'), sep = '~'))
#Fit the logistic regression model
spamModel <- glm(spamFormula, family = binomial(link = 'logit'),
                 data = spamTrain, maxit = 100)
#Make predictions on the training and test sets
spamTrain$pred <- predict(spamModel, newdata = spamTrain,
                          type = 'response')
spamTest$pred <- predict(spamModel, newdata = spamTest,
                         type = 'response')
# ---- Evaluate the probability model ----
#double density plots - evaluating probability models
#install rquery and wrapr first (download from CRAN and install locally)
install.packages("WVPlots")
library(WVPlots)
DoubleDensityPlot(spamTest,
                  xvar = 'pred',
                  truthVar = 'spam',
                  title = 'Distribution of scores for spam filter')
#Receiver Operating Characteristic curve
ROCPlot(spamTest,
        xvar = 'pred',
        truthVar = 'spam',
        truthTarget = 'spam',
        title = 'spam filter test performance')
library(sigr)
calcAUC(spamTest$pred, spamTest$spam == 'spam')
|
0626a6b3419f558e1bcfc8efc472ed2280337573
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/nandb/examples/number_timeseries_folder.Rd.R
|
302554f29453835db726dfd3806a53408e7f47f2
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 471
|
r
|
number_timeseries_folder.Rd.R
|
# Auto-extracted example for nandb::number_timeseries_folder. The example
# body is wrapped in "Not run" (##D lines) because it writes TIFF files to
# the temp directory; nothing executes beyond loading the package.
library(nandb)
### Name: number_timeseries_folder
### Title: Number time-series calculations for every image in a folder.
### Aliases: number_timeseries_folder
### ** Examples
## Not run:
##D setwd(tempdir())
##D img <- ijtiff::read_tif(system.file('extdata', '50.tif', package = 'nandb'))
##D ijtiff::write_tif(img, 'img1.tif')
##D ijtiff::write_tif(img, 'img2.tif')
##D number_timeseries_folder(def = "n", thresh = "Huang", frames_per_set = 20)
## End(Not run)
|
699865ff1566009703c7f7ba3aa1ed8711119aff
|
4951e7c534f334c22d498bbc7035c5e93c5b928d
|
/sourcecode/member-order/struct-type-table.R
|
6882c32049e3f55a99374ef0ec6aa7e127a01a58
|
[] |
no_license
|
Derek-Jones/ESEUR-code-data
|
140f9cf41b2bcc512bbb2e04bcd81b5f82eef3e1
|
2f42f3fb6e46d273a3803db21e7e70eed2c8c09c
|
refs/heads/master
| 2023-04-04T21:32:13.160607
| 2023-03-20T19:19:51
| 2023-03-20T19:19:51
| 49,327,508
| 420
| 50
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,558
|
r
|
struct-type-table.R
|
#
# struct-type-table.R, 2 Jan 18
# Data from:
# ???
# Derek Jones
#
# Example from:
# Empirical Software Engineering using R
# Derek M. Jones
# Load ESEUR project configuration (defines ESEUR_dir) and the ascii
# table-formatting package used at the end of the script.
source("ESEUR_config.r")
library("ascii")
# C struct member-type grouping data; columns:
# num members,type seq,occurrences,num grouped
# 4,1 1 2,239,185
type_comb=read.csv(paste0(ESEUR_dir, "sourcecode/member-order/C-type-comb.csv.xz"),
as.is=TRUE)
# Probability that a member sequence with the grouping pattern `type_seq`
# occurs at random among all distinct permutations of its member types.
# `type_seq` is a space-separated string of per-type member counts,
# e.g. "1 1 2" means three types with 1, 1 and 2 members respectively.
grouped_prob=function(type_seq)
{
counts <- as.integer(unlist(strsplit(type_seq, split=" ")))
# factorial(total)/prod(factorial(count_i)) is the multinomial number of
# distinct orderings; factorial(number of types) of those are fully grouped.
distinct_orderings <- factorial(sum(counts))/prod(factorial(counts))
return(factorial(length(counts))/distinct_orderings)
}
# Random-grouping probability for each row's type sequence.
# vapply() over seq_len(nrow(...)) instead of sapply(1:nrow(...)): safe when
# the table is empty (1:0 would yield c(1, 0)) and guarantees a numeric
# vector result rather than a possibly-list sapply return.
type_comb$rand_prob=vapply(seq_len(nrow(type_comb)),
function(X) grouped_prob(as.vector(type_comb$type.seq[X])),
numeric(1))
# Observed proportion of structs whose same-type members were grouped.
type_comb$actual_prob=type_comb$num.grouped/type_comb$occurrences
# Upper-tail binomial probability of seeing at least num.grouped grouped
# structs in `occurrences` trials under the random probability rand_prob.
# -1 because lower.tail=FALSE tests X > x (rather than X >= x)
type_comb$seq_prob=pbinom(type_comb$num.grouped-1, type_comb$occurrences,
type_comb$rand_prob, lower.tail=FALSE)
# Assemble the table printed in the book; &zs;/&ze; appear to be markup
# delimiters consumed by the book build — confirm against the build scripts.
book_table=with(type_comb, data.frame(num.members, type.seq, occurrences,
num.grouped, rand_prob,
paste0("&zs;", signif(seq_prob, 3), "&ze;")))
names(book_table)=c("Total members", "Type sequence", "structs seen",
"Grouped occurrences", "Random probability",
"Occurrence probability")
print(ascii(head(book_table), include.rownames=FALSE,
format=c("d", "s", "d", "d", "f", "s")))
|
4ccef7d15a3bb5f803d6a715bcd84b806b5be759
|
13792528e8e0e5fdbca9bd553086e796f9c78f25
|
/code/scripts/training-testing-sets.R
|
77e859103302a0a9d0f091aaf89f252fd336d455
|
[
"CC-BY-4.0",
"Apache-2.0"
] |
permissive
|
xiaoqian-zhu/stat159-fall2016-project2
|
efcac8c19100735c44b5bf5964a18f4e97b9151c
|
d5f626b0a07ba13ad56d97811a153cf4dad10f79
|
refs/heads/master
| 2021-01-12T12:33:46.176095
| 2016-11-05T07:06:23
| 2016-11-05T07:06:23
| 72,565,193
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 503
|
r
|
training-testing-sets.R
|
# Training and Testing Sets: build a reproducible 300/100 train/test split
# of the scaled credit data and save both sets to an .RData file.

# Load the pre-scaled data and drop the row-index column written by write.csv.
scaled_credit <- read.csv('../../data/datasets/scaled-credit.csv', header = TRUE)
scaled_credit$X <- NULL

# Draw 300 of the 400 rows without replacement; the fixed seed makes the
# split reproducible across runs.
set.seed(0)
train_idx <- sample(1:400, 300, replace = FALSE)

# Sampled rows form the training set, the remainder the test set.
train_set <- scaled_credit[train_idx, ]
test_set <- scaled_credit[-train_idx, ]

# Persist both sets for downstream model scripts.
save(train_set, test_set, file = "../../data/output/train-test-sets.RData")
|
e0e021626a53697adfcb89d99ee10828c57666e7
|
4603a0ae8dc16f871d35c7738e123918ff4ba326
|
/plot3.R
|
49ba475e9d9edeb4a583d2e9b5ff1bde62da6b27
|
[] |
no_license
|
kenjimatsuzawa/ExData_Plotting2
|
5c93fa39ac71c5fd4e967c3150644e9dc709dfa6
|
233459711a9348c74f2fb94808132f2ad4a37b11
|
refs/heads/master
| 2023-06-27T06:39:12.763941
| 2021-07-26T06:25:44
| 2021-07-26T06:25:44
| 389,433,600
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 757
|
r
|
plot3.R
|
## Plot total PM2.5 emissions in Baltimore City (fips == "24510") by year,
## one line per source type, and write the figure to plot3.png.

## Reading the .rds inputs may take a few seconds.
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")

library(dplyr)
library(ggplot2)
library(reshape2)

## Keep only Baltimore City records, then total emissions per type and year.
baltimore_nei <- filter(NEI, fips == "24510")
type_year_totals <- with(baltimore_nei, tapply(Emissions, list(type, year), sum))

## Send the plot to a 480x480 PNG device.
png("plot3.png", width = 480, height = 480)

## Melt the type-by-year matrix into long form for ggplot.
long_totals <- melt(type_year_totals)
names(long_totals) <- c("type", "year", "Emission")
emission_plot <- ggplot(long_totals, aes(x = year, y = Emission, color = type)) +
  geom_line() +
  labs(title = "Emission by type in Baltimore", y = "Emission (ton)")
plot(emission_plot)

## Close the PNG device to flush the file.
dev.off()
|
6f5d20a101dc64914bc8e7326104eeda27041a53
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/pairwiseCI/examples/sodium.Rd.R
|
902914871e5cd1419d8b97b0a250512817d440d4
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 224
|
r
|
sodium.Rd.R
|
# Auto-extracted example for the pairwiseCI `sodium` dataset: load the data
# and show sodium content by treatment group as a boxplot.
library(pairwiseCI)
### Name: sodium
### Title: Sodium contents in transgenic and isogenic corn
### Aliases: sodium
### Keywords: datasets
### ** Examples
data(sodium)
boxplot(Sodiumcontent ~Treatment, data=sodium)
|
9208503520f6ac405c65e5281c74ede3f8d3ece5
|
562bec580adc7d612d0e9a3d0be2f30237396367
|
/milestoneReport/nGrams.R
|
f263dd8659bab11f34e37c735fb43ebe4cfae239
|
[] |
no_license
|
bms63/Coursera_Data_Sci_Capstone
|
3f21ac1941abeac212c8957a1be24d776d922e5f
|
157317b147efecb5e2f128ce59ddb5c9fb36dfeb
|
refs/heads/master
| 2021-04-09T17:36:56.322379
| 2018-03-23T20:43:12
| 2018-03-23T20:43:12
| 125,875,128
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,412
|
r
|
nGrams.R
|
library(RWeka);library(rJava);library(tm)
final <- readRDS("./SampleCorpus.RData")

# Tokenizer delimiters shared by every n-gram size below.
ngram_delims <- " \\r\\n\\t.,;:\"()?!"

# Tokenize `corpus` into n-grams of size `n`, tabulate token frequencies,
# sort descending, optionally keep only the `top` most frequent rows, and
# return a data frame with columns String / Count.
# Replaces four copy-pasted blocks that differed only in `min`/`max` and
# the optional top-10 truncation for unigrams.
build_ngram_table <- function(corpus, n, top = NULL) {
  tokens <- NGramTokenizer(corpus, Weka_control(min = n, max = n,
                                                delimiters = ngram_delims))
  freq <- data.frame(table(tokens))
  freq <- freq[order(freq$Freq, decreasing = TRUE), ]
  if (!is.null(top)) freq <- freq[seq_len(top), ]
  colnames(freq) <- c("String", "Count")
  freq
}

# Unigram (only the 10 most frequent terms are kept, as before)
unigram <- build_ngram_table(final, 1, top = 10)
saveRDS(unigram, file = "./unigram.RData")
# Bigram
bigram <- build_ngram_table(final, 2)
saveRDS(bigram, file = "./bigram.RData")
# Trigram
trigram <- build_ngram_table(final, 3)
saveRDS(trigram, file = "./trigram.RData")
# Fourgram
fourgram <- build_ngram_table(final, 4)
saveRDS(fourgram, file = "./fourgram.RData")
|
739d2d45aec1465301db7180e88c561965322137
|
93005dac4be25d7fb42cc09a08ab303439c32c3c
|
/man/gzAzimuth.Rd
|
c5552065eca1e67b6d04807b9ddb691563b92d64
|
[] |
no_license
|
cran/maptools
|
826901e049878087bdc9deb1b2e8076b43d5f79b
|
729e48b3254b5d8ccb85ca4bbbb32bbd7d07b026
|
refs/heads/master
| 2023-07-24T12:12:31.900627
| 2023-07-18T19:10:02
| 2023-07-18T20:30:36
| 17,697,281
| 1
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,370
|
rd
|
gzAzimuth.Rd
|
\name{gzAzimuth}
\alias{gzAzimuth}
\alias{trackAzimuth}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{Find azimuth for geographical coordinates}
\description{
The function finds azimuth values for geographical coordinates given as decimal degrees from the \code{from} coordinates to the \code{to} coordinate. In function \code{trackAzimuth}, the azimuth values are found between successive rows of the input coordinate matrix.
}
\usage{
gzAzimuth(from, to, type = "snyder_sphere")
trackAzimuth(track, type="snyder_sphere")
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{from}{a two column matrix of geographical coordinates given as decimal degrees (longitude first)}
\item{track}{a two column matrix of geographical coordinates given as decimal degrees (longitude first)}
\item{to}{a one row, two column matrix or two element vector of geographical coordinates given as decimal degrees (longitude first)}
\item{type}{default is \code{"snyder_sphere"}, otherwise \code{"abdali"}; the results should be identical with slightly less trigonometry in \code{"abdali"}}
}
\details{
The azimuth is calculated on the sphere, using the formulae given by Snyder (1987, p. 30) and Abdali (1997, p. 17). The examples use data taken from Abdali (p. 17--18). There is a very interesting discussion of the centrality of azimuth-finding in the development of mathematics and mathematical geography in Abdali's paper. Among others, al-Khwarizmi was an important contributor. As Abdali puts it, "This is a veritable who's who of medieval science" (p. 3).
}
\value{
values in decimal degrees - zero is North - of the azimuth from the \code{from} coordinates to the \code{to} coordinate.
}
\references{Snyder JP (1987) Map projections - a working manual, USGS Professional Paper 1395; Abdali SK (1997) "The Correct Qibla", formerly at http://patriot.net/users/abdali/ftp/qibla.pdf}
\author{Roger Bivand, with contributions by Sebastian Luque}
\examples{
name <- c("Mecca", "Anchorage", "Washington")
long <- c(39.823333, -149.883333, -77.0166667)
lat <- c(21.423333, 61.2166667, 38.9)
x <- cbind(long, lat)
row.names(x) <- name
crib <- c(-9.098363, 56.575960)
r1 <- gzAzimuth(x[2:3,], x[1,])
r1
all.equal(r1, crib)
r2 <- gzAzimuth(x[2:3,], x[1,], type="abdali")
r2
all.equal(r2, crib)
trackAzimuth(x)
}
\keyword{spatial}
|
8eb988378b2b2b0eebb7866e9e345c1b453ca31f
|
e13beecf3ee11cbe5ffcd2916bbdc13f22e70748
|
/loading data.R
|
47a33eb89c793105cfb662a45cd3460e9d3aedbd
|
[] |
no_license
|
ljpollack/BWS_Disturbance
|
0d9101a1f7d31cbac943a3a7bb50c97a373ad121
|
db021704d1cb7332e2d7e4b4fc56d22a44d5f322
|
refs/heads/master
| 2020-03-17T16:33:57.750037
| 2018-05-29T19:10:20
| 2018-05-29T19:10:20
| 133,753,536
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,125
|
r
|
loading data.R
|
#Brown-Black widow web competition script
# NOTE(review): rm(list = ls()) wipes the caller's entire workspace when this
# script is sourced; consider removing it from shared scripts.
rm(list = ls())
# Libraries below were used in earlier analyses and are kept for reference.
#library(lme4)
#library(bbmle)
#library(dplyr)
#library(rptR)
#library(MASS)
#library(boot)
#library(beepr)
#library(lsmeans)
#library(piecewiseSEM)
#library(MuMIn)
#library(data.table)
#working directory already set, below code did not work-###
#setwd(Users/leapollack/Documents/gitHub/BWS_Disturbance)
# Read raw trial data; empty strings in the CSV become NA.
bb<-read.csv(file="web_comp_data.csv",header=TRUE,na.strings=c(""))
#=================================Loading and formating the data=============================
#The dataset as it's formatted needs some work. There are currently two observations
#per trial (resident and intruder). Need to make it so that each trial is one observation:
# reshape to wide format keyed by filename: the `resident` flag becomes the
# .1 (resident) / .0 (intruder) suffix on each measurement column.
go<-reshape(bb,
idvar="filename",
timevar="resident",
direction="wide")
#subset out the relevant columns (resident .1 columns plus the intruder .0
#columns needed for the contest analysis)
bb<-subset(go,select=c(filename,focalID.1,blackwidow.0,trial.1,weight.1,
egg_present.1,mated.1,contest_ID.1,contest_black.1,
contest_weight.1,contest_mated.1,structural.1,
gumfooted.1,bodyshakes.1,abdomenpulse.1,contact.1,
time_to_contact.1,leg_touch.1,chelicerae_touch.1,
silk_spray.1,silk_attempt.1,kerfuffle.1,retreat.1,
time_to_retreat.1,left_web.1,
victory.1,vic_tie.1,bodyshakes.0,abdomenpulse.0,retreat.0,time_to_retreat.0,
left_web.0,time_to_leave_web.0
))
#rename so the variable names are easy to work with: copy each suffixed
#reshape column into a cleanly named duplicate. The mapping is applied in
#the original assignment order, so the resulting column order of bb (and the
#retained .1/.0 source columns) is unchanged.
widow_rename_map <- c(
  focalID                 = "focalID.1",
  trial                   = "trial.1",
  weight                  = "weight.1",
  egg_present             = "egg_present.1",
  mated                   = "mated.1",
  contest_ID              = "contest_ID.1",
  contest_black           = "contest_black.1",
  contest_weight          = "contest_weight.1",
  contest_mated           = "contest_mated.1",
  structural              = "structural.1",
  gumfooted               = "gumfooted.1",
  bodyshakes              = "bodyshakes.1",
  abdomenpulse            = "abdomenpulse.1",
  contest_abdomenpulse    = "abdomenpulse.0",
  contest_bodyshake       = "bodyshakes.0",
  contact                 = "contact.1",
  time_to_contact         = "time_to_contact.1",
  leg_touch               = "leg_touch.1",
  chelicerae_touch        = "chelicerae_touch.1",
  silk_spray              = "silk_spray.1",
  silk_attempt            = "silk_attempt.1",
  kerfuffle               = "kerfuffle.1",
  retreat                 = "retreat.1",
  contest_retreat         = "retreat.0",
  time_to_retreat         = "time_to_retreat.1",
  contest_time_to_retreat = "time_to_retreat.0",
  left_web                = "left_web.1",
  contest_left_web        = "left_web.0",
  time_to_leave_web       = "time_to_leave_web.0",
  victory                 = "victory.1",
  vic_tie                 = "vic_tie.1"
)
for (new_name in names(widow_rename_map)) {
  bb[[new_name]] <- bb[[widow_rename_map[[new_name]]]]
}
#Create new column called duration of trial: trials where the intruder never
#left the web have NA recorded; treat those as running the full 600 s.
bb$time_to_leave_web[is.na(bb$time_to_leave_web)] <- 600
bb$trial_duration<-bb$time_to_leave_web
#Change IDs into factors, create variable for weight differences, and body shakes as binomial
bb$weight.diff <- bb$weight - bb$contest_weight
bb$contest_ID<-as.factor(bb$contest_ID)
bb$focalID<-as.factor(bb$focalID)
bb$vic_tie<-as.factor(bb$vic_tie)
bb$bs.binom <- ifelse(bb$bodyshakes==0,0,1)
bb$contest_bs.binom<-ifelse(bb$contest_bodyshake==0,0,1)
bb$contest_black<-as.factor(bb$contest_black)
#Make "NA" all victory values that are a tie (this creates a column of just winning and losing
#and a column for winning, losing, and tying)
bb$victory <- ifelse(bb$vic_tie==0,NA,bb$victory)
#There are trials that ended in a victory or loss, however we don't have the duration of the trial.
#Currently their listed as "600" which is inaccurate. So need to replace as NA.
#if duration equals 600 and victory is a 0 or 1, make NA
bb$trial_duration<-ifelse((bb$trial_duration==600)&(bb$vic_tie!=0),NA,bb$trial_duration)
# BUG FIX: the original script repeated the two "duration of trial" lines
# from the top of this section here, which overwrote trial_duration with
# time_to_leave_web and silently undid the NA replacement on the line above.
# The duplicate lines were removed so decided trials with an unrecorded
# duration correctly remain NA.
# Final analysis data set: one row per trial, keeping only the renamed and
# derived columns built above.
widow_inv<-subset(bb,select=c(filename,focalID,contest_black,trial,weight,
egg_present,mated,contest_ID,bs.binom,contest_bs.binom,
contest_weight,contest_mated,structural,
gumfooted,bodyshakes,abdomenpulse,contact,
leg_touch,chelicerae_touch,weight.diff,
silk_spray,silk_attempt,kerfuffle,retreat,
time_to_retreat,left_web,
contest_abdomenpulse,contest_bodyshake,
time_to_contact,
contest_retreat, contest_time_to_retreat,
contest_left_web,time_to_leave_web,
victory,vic_tie,trial_duration))
#===========================================================================================================
#graph outcomes (win-tie-lose) for resident as a function of number of structural lines
#view table (interactive: opens the data viewer; harmless when run non-interactively)
View(widow_inv)
#reorder 0,1,2 (tie, win, lose) to be 2,0,1 (lose, tie, win)
widow_inv$vic_tie <- factor(widow_inv$vic_tie , levels=c("2","0","1"))
#boxplot the outcomes
boxplot(structural~vic_tie, data=widow_inv, names=c("lose","tie","win"), horizontal=TRUE, main="Contest Outcome", xlab = "Number of structural lines", ylab = "Outcome")
|
c9fe6023f56f8d6cbd4892801ee4a174154ae864
|
4b457d64d1e40b33010f9a4674a06131b223fcb4
|
/man/getCalibrationInfo.Rd
|
450001c40fa88733d6d8f01d8d307beb0a822db8
|
[] |
no_license
|
roliveros-ramos/calibrar
|
96d5727f895dbf80f6881d744f6b515a304315d6
|
90b38e0bb0ca76021edc81d6b6f53b81cd2683a8
|
refs/heads/master
| 2023-07-20T00:12:59.140976
| 2023-07-07T14:26:36
| 2023-07-07T14:26:36
| 20,023,815
| 12
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,047
|
rd
|
getCalibrationInfo.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/calibrar-DEPRECATED.R
\name{getCalibrationInfo}
\alias{getCalibrationInfo}
\title{Get information to run a calibration using the \code{calibrar} package.}
\usage{
getCalibrationInfo(
path,
file = "calibrationInfo.csv",
stringsAsFactors = FALSE,
...
)
}
\arguments{
\item{path}{The path to look for the file.}
\item{file}{The file with the calibration information, see details.}
\item{stringsAsFactors}{To be passed to \code{read.csv}.}
\item{\dots}{Additional arguments to \code{read.csv} function.}
}
\value{
A data.frame with the information for the calibration of a
model, to be used with the \code{\link{createObjectiveFunction}}
and \code{\link{getObservedData}}.
}
\description{
A wrapper for \code{read.csv} checking column names and data types
for the table with the calibration information.
}
\seealso{
\code{\link{createObjectiveFunction}}, \code{\link{getObservedData}}.
}
\author{
Ricardo Oliveros-Ramos
}
|
6a017e5e2d2758222bcaaef62b1eca2dd1444834
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/mvLSW/examples/rmvLSW.Rd.R
|
df429b97aa3b906d7b2424b0f8f7eddc3fba8fd1
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,180
|
r
|
rmvLSW.Rd.R
|
# Auto-extracted example for mvLSW::rmvLSW / simulate.mvLSW: build a 3-channel
# evolutionary wavelet spectrum and draw sample paths from it.
library(mvLSW)
### Name: rmvLSW
### Title: Sample a Multivariate Locally Stationary Wavelet Process
### Aliases: rmvLSW simulate.mvLSW
### Keywords: rmvLSW, simulate.mvLSW
### ** Examples
## Define evolutionary wavelet spectrum, structure only on level 2
## (dimensions: 3 channels x 3 channels x 8 levels x 256 time points)
Spec <- array(0, dim = c(3, 3, 8, 256))
Spec[1, 1, 2, ] <- 10
Spec[2, 2, 2, ] <- c(rep(5, 64), rep(0.6, 64), rep(5, 128))
Spec[3, 3, 2, ] <- c(rep(2, 128), rep(8, 128))
Spec[2, 1, 2, ] <- Spec[1, 2, 2, ] <- punif(1:256, 65, 192)
Spec[3, 1, 2, ] <- Spec[1, 3, 2, ] <- c(rep(-1, 128), rep(5, 128))
Spec[3, 2, 2, ] <- Spec[2, 3, 2, ] <- -0.5
## Define Haar wavelet function and create mvLSW object
EWS <- as.mvLSW(x = Spec, filter.number = 1, family = "DaubExPhase",
min.eig.val = NA)
plot(EWS, style = 2, info = 2)
## Sample with Gaussian innovations (seed fixed for reproducibility)
set.seed(10)
X <- rmvLSW(Spectrum = EWS)
plot(X)
## Alternatively:
X1 <- simulate(object = EWS)
plot(X1)
## Define smoother wavelet function and create mvLSW object
EWS2 <- as.mvLSW(x = Spec, filter.number = 10, family = "DaubExPhase")
## Sample with logistic innovations
set.seed(10)
X2 <- rmvLSW(Spectrum = EWS2, noiseFN = rlogis, scale = sqrt(3)/pi)
plot(X2)
|
d2b0a1ce4d04540f11d7c9f6272993a7eacd6c8f
|
b0f7d5d2489e761646c6363d2958e9d3a1b75747
|
/Analytics Edge/Unit4_Assignment.R
|
3715a8f8fd0277b1b6031cded59488bb61be22d7
|
[] |
no_license
|
krishnakalyan3/Edge
|
4cd5cb55677ed662dda4d4acdf7fba4c7e813735
|
ad070911bd36c26ff7c612b4adc4150a53579676
|
refs/heads/master
| 2021-01-21T04:46:50.714837
| 2016-07-03T10:48:34
| 2016-07-03T10:48:34
| 53,034,825
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,714
|
r
|
Unit4_Assignment.R
|
# Analytics Edge Unit 4 assignment: linear regression and CART models of
# US state life expectancy (datasets::state.x77).
library(rpart)        # BUG FIX: rpart() is used below but was never loaded
library(rpart.plot)   # BUG FIX: prp() likewise requires rpart.plot

data(state)
statedata = data.frame(state.x77)
str(statedata)
attach(statedata)

# Full linear model with seven predictors
model1 = lm(Life.Exp ~ Population+Income+Illiteracy+Murder + HS.Grad + Frost+
Area)
summary(model1)
pred = predict(model1,statedata)
sse = sum((pred-statedata$Life.Exp)^2)
sse

# Reduced linear model
model2 = lm( Life.Exp ~Population +Murder+ Frost+ HS.Grad )
summary(model2)
sse2 = sum(model2$residuals^2)
sse2

# Cart Model
model3 = rpart(Life.Exp ~ Population+Income+ Illiteracy+ Murder+
HS.Grad+ Frost+ Area)
prp(model3)
pre3 =predict(model3,statedata)
sse3 = sum((pre3-statedata$Life.Exp)^2)
sse3

# BUG FIX: was `model4 = model3 = rpart(...)`, which silently clobbered model3
model4 = rpart(Life.Exp ~ ., statedata,minbucket = 5)
prp(model4)
# Larger why? 2.4
pre4 =predict(model4,statedata)
sse4 = sum((pre4-statedata$Life.Exp)^2)
sse4

model5 = rpart(Life.Exp ~ Area,minbucket =1)
prp(model5)
sse5 = sum((predict(model5,statedata)-statedata$Life.Exp)^2)
sse5

# Cross-validated cp tuning via caret
library(caret)
library(e1071)
set.seed(111)
cartGrid = expand.grid(.cp = seq(0.01, 0.5, 0.01) )
# BUG FIX: was `cp.grid`, an object that is never defined (would error);
# print the grid that was actually created.
cartGrid
tr.contrl = trainControl(method ="cv", number =10)
tr = train(Life.Exp ~ ., statedata,
method ="rpart", trControl = tr.contrl, tuneGrid = cartGrid)
best.tree = tr$finalModel
best.tree

# 3.2
# BUG FIX: was `model6 = model3 = rpart(...)`; the chained assignment again
# overwrote model3 for no reason.
model6 = rpart(Life.Exp ~ ., statedata,minbucket = 5,cp = 0.12)
prp(model6)
model6
sse6 = sum((predict(model6,statedata)-statedata$Life.Exp)^2)
sse6

set.seed(111)
train(Life.Exp ~ Area, data=statedata, method="rpart", trControl = tr.contrl, tuneGrid = cartGrid )
# cp = 0.02
model7 = rpart(Life.Exp ~ Area, data=statedata, cp=0.02)
model7
prp(model7)
sse7 = sum((predict(model7,statedata)-statedata$Life.Exp)^2)
sse7

set.seed(111)
CARTmodel5 = rpart(Life.Exp ~ Area, data=statedata, cp=0.02)
prp(CARTmodel5)
|
5fae3762e082b7459f2ada33fc70fe244481fee2
|
2b51d5c7b103ae838bf532b7b16b2c32c5726a72
|
/cachematrix.R
|
83c1c9bf6cf5ced2370f0e7d4491f4aa2b054666
|
[] |
no_license
|
cjinlee/clee_Rprogram
|
2accff4d36a014df4271b9baeb4dafbb5c40ce45
|
fc59b3e0b0b0763e01a60a9034b07b96efbd8507
|
refs/heads/master
| 2016-08-11T16:40:01.756976
| 2016-03-18T15:37:04
| 2016-03-18T15:37:04
| 54,212,204
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,276
|
r
|
cachematrix.R
|
## Put comments here that give an overall description of what your
## functions do
## In makeCacheMatrix function, we can set or get a matrix.
## In cacheSolve function, we can calculate inverse of a matrix.
## But if the inverse has already been founded,
## it returns the inverse from cache without computing again.
## Write a short comment describing this function
## Build a matrix wrapper that can cache the matrix inverse.
##
## Returns a list of four accessor functions closing over the matrix `x`
## and its cached inverse:
##   set(y)      : replace the stored matrix and invalidate the cache
##   get()       : return the stored matrix
##   setinv(inv) : store a computed inverse in the cache
##   getinv()    : return the cached inverse, or NULL if none is cached
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  set <- function(y) {
    x <<- y
    cached_inverse <<- NULL  # a new matrix invalidates the old inverse
  }
  get <- function() {
    x
  }
  setinv <- function(inv) {
    cached_inverse <<- inv
  }
  getinv <- function() {
    cached_inverse
  }
  list(set = set, get = get, setinv = setinv, getinv = getinv)
}
## Return the inverse of the matrix held in `x` (an object produced by
## makeCacheMatrix). The inverse is computed with solve() at most once:
## subsequent calls announce the cache hit and return the cached copy.
## Extra arguments in `...` are forwarded to solve().
cacheSolve <- function(x, ...) {
  inverse <- x$getinv()
  if (is.null(inverse)) {
    # Cache miss: compute, store, and return the inverse.
    inverse <- solve(x$get(), ...)
    x$setinv(inverse)
  } else {
    message("getting cached data")
  }
  inverse
}
|
b75822453adc94ec9a9c0fb31ffcf6149d5b0e40
|
1f978f98990e0158a100f75a69a28895470d0dbe
|
/analysis/simfunctions.R
|
b901c05f90e90d39d557b464d47f37ef69d48094
|
[] |
no_license
|
nick-fournier/ped-transit-priority
|
6bb43ff23cb560fc1b76fcf50f26927363cde145
|
c2b488383163384d5ae43081c1e3f1157262143b
|
refs/heads/master
| 2021-11-07T02:21:19.112904
| 2021-10-29T18:10:56
| 2021-10-29T18:10:56
| 243,087,231
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,506
|
r
|
simfunctions.R
|
# NOTE(review): all functions in this file read model parameters from the
# global environment (R, lambda_b, lambda_c, delta, q_c, k_c, k_j, v_m, v_w,
# t_s, s, q_T, beta); they must be defined before these functions are called.
#Flow across network at distance r from center
fun.qr_a <- function(r) ((14*R*lambda_b) / (15*delta)) + (lambda_c/(2*sqrt(2)*r*delta))*(R^2 - r^2)
#Flow around perimeter road
fun.qg_p <- function(g) lambda_b*g*(R - g)*(R^2 - g^2) / R^2
#(2*lambda_b*g*(6*R*(1 + sqrt(2)) - 11*g)*(R^2 - g^2)) / ((R-2)*delta*R^2) #old
#Traffic flow, parabolic (flow as a function of density k)
fun.qk <- function(k) (q_c*(k*(2*k_c - k)))/(k_c^2)
#Traffic density, parabolic (inverse of fun.qk on the uncongested branch)
fun.kq <- function(q) k_c*(1-sqrt(1-(q/q_c)))
#Average flow experienced
fun.qbar <- function(r) ((14*R*lambda_b)/(15*delta)) + (lambda_c/(8*delta*(R-r))) * (2*(R^2)*log(R/r) - R^2 + r^2)
#Base traffic travel time per unit distance at radius r, under one of three
#fundamental-diagram shapes selected by `method`.
fun.ttr <- function(r, method = "paraexp") {
q = fun.qbar(r)
switch(method,
"bipara" = {
# Bi-parabolic: parabolic below capacity q_c, second parabola above
if(q < q_c)
k_c*(1 - sqrt(1 - (q/q_c))) / q
else
( k_c + sqrt( (-2*(q - 2*q_c)*(k_c - k_j) * (k_j - k_c) + 2*q*(k_c - k_j)^2 ) / (2*q_c - q)) ) / q
},
"paraexp" = {
# Parabolic-exponential: parabolic below capacity, power-law above
if(q < q_c)
k_c*(1 - sqrt(1 - (q/q_c))) / q
else
(k_c/q_c)*(q/q_c)^20
},
"bilinear" = {
# Bi-linear: free-flow speed below capacity, linear congestion above
if(q < q_c)
1/v_m
else
- (k_j - ((2*k_j - k_c) / (2*q_c))*(2*q_c - q)) / (q - 2*q_c)
})
}
#Optimal ped zone size: quintic polynomial in g whose root gives the
#optimal pedestrian-zone radius (the commented A..H lines document the
#polynomial coefficients of the expanded form below).
fun.gamma_int <- function(g) {
# A = 11/R^2
# B = -6*(1+sqrt(2))/R
# C = -11
# D = (48*(1+sqrt(2))*R*lambda_b + lambda_c*(R-2))/(8*lambda_b)
# E = -14*R*(R-2)/30
# H = (-lambda_c*(R-2)*R^2) / (8*lambda_b)
#
# A*g^5 + B*g^4 + C*g^3 + D*g^2 + E*g + H
lambda_b*((1/R^2)*(g^5) - (1/R)*(g^4) - (g^3)) +
(lambda_b*R + (lambda_c/(4*delta)))*(g^2) -
((14*R*lambda_b)/(15*delta))*g -
(lambda_c*(R^2)/(4*delta))
}
#Optimal transit zone size as function of mode choice P_D; returns 0 when
#nobody drives (P <= 0) to avoid division by zero in the closed form.
fun.tau <- function(P) {
if(P > 0)
(56*P*lambda_b - 60*delta*q_T + sqrt( (56*P*lambda_b - 60*delta*q_T)^2 + 900*(R^2)*(P*lambda_c)^2))/(30*P*lambda_c)
else
0
}
#### DISTANCES
#Average driving distance for a pedestrian zone of radius g
fun.L_D <- function(g) (R - g)*(14*lambda_b + 10*lambda_c) / (15*(lambda_b + lambda_c))
#Average transit distance in mixed traffic (outside the priority zone tau)
fun.L_TM <- function(tau) (R - tau)*(14*lambda_b + 10*lambda_c) / (15*(lambda_b + lambda_c))
#Average transit priority distance (inside the priority zone tau)
fun.L_TP <- function(tau) tau*(14*lambda_b + 10*lambda_c) / (15*(lambda_b + lambda_c))
#Average walk distance within the pedestrian zone
fun.L_W <- function(g)
# old
#((R^2)*(g^3)*(10*lambda_b + 3*lambda_c - 3) - 6*lambda_b*g^5 + 3*g*R^4) /
#(3*(R^2)*(g^2)*(2*lambda_b + lambda_c - 1) - 3*lambda_b*g^4 + 3*R^4)
#New
(-32*(g^5)*lambda_b + 60*(g^3)*(R^2)*lambda_b + 5*(g^3)*(R^2)*lambda_c + 15*g*(R^4)*lambda_c) /
(15*((4*(g^2)*(R^2)*lambda_b - 2*(g^4)*lambda_b + 2*(R^4)*lambda_c)))
#### TRAVEL TIMES
#Average driving travel time (distance times per-unit travel time)
fun.tt_Dbar <- function(g) {
# (R-g)*(14*lambda_b+10*lambda_c)/(15*(lambda_b + lambda_c))
fun.L_D(g)*fun.ttr(g)
}
#Average mixed-traffic transit travel time (traffic time plus stop delays)
fun.tt_TMbar<- function(tau) {
# (R-tau)*(14*lambda_b+10*lambda_c)/(15*(lambda_b + lambda_c))
fun.L_TM(tau)*(fun.ttr(tau) + (1/v_m) + (t_s/s))
}
#Transit priority travel time: free-flow time plus dwell time per stop
fun.tt_TPbar <- function(tau) {
lbar <- fun.L_TP(tau) # ((tau*(14*lambda_b + 10*lambda_c))/(15*(lambda_b + lambda_c)))
(lbar/v_m) + (lbar/s)*t_s
}
#Average travel time within ped zone (walking only; `maxwalk` is currently
#unused because the in-zone transit option below is disabled)
fun.tt_Wbar <- function(g, maxwalk = 0.5) {
lbar <- fun.L_W(g)
WT = lbar/v_w
#Alternative allowing transit inside the ped zone; disabled because it
#destabilized the results
# TT = lbar/v_m + (lbar/s)*t_s + (maxwalk/(2*v_w))
tt <- WT
return(tt)
}
#Driving probability as function of tau (inverse of fun.tau)
fun.Ptau <- function(tau) {
-(60*delta*tau*q_T)/(15*lambda_c*(tau^2) - 56*tau*lambda_b - 15*lambda_c*R^2)
}
#Logit choice model for driving, given travel-time difference tdiff
fun.PD <- function(tdiff) 1/(1 + exp(beta*tdiff))
#Average total drive time plus walking
fun.tt_drive <- function(g) fun.tt_Dbar(g) + fun.tt_Wbar(g)
#Average total transit time with priority
fun.tt_transit <- function(tau) fun.tt_TPbar(tau) + fun.tt_TMbar(tau)
#Total average travel time, weighting drive and transit by the driving
#share Ptau; `bounded` caps the share at 1
fun.tt_total <- function(g,tau,bounded = F) {
Ptau = fun.Ptau(tau)
if(bounded == T) {
Ptau = ifelse(Ptau > 1, 1, Ptau)
}
Ptau*(fun.tt_Dbar(g) + fun.tt_Wbar(g)) + (1-Ptau)*(fun.tt_TPbar(tau) + fun.tt_TMbar(tau))
}
#Travel time difference [ transit - driving]
fun.ttdiff <- function(g,tau) { (fun.tt_Dbar(g) + fun.tt_Wbar(g)) - (fun.tt_TPbar(tau) + fun.tt_TMbar(tau)) }
|
d172a1094d596791bbb19dd5f0168029da7bccf7
|
b8233ffee6997c68f6e1ef57fbf040da5a4134b9
|
/Practica2/faces_lr/printRes.R
|
d44d106297010c1c2726058a67a8d8c2e3efaa2d
|
[] |
no_license
|
palonso22/Machine-Learning
|
77189e79d12f19595c913d98e98240483b302bc3
|
674a9ada34b016511727c4b9f811acbdf76b2409
|
refs/heads/master
| 2022-11-17T12:10:46.993534
| 2020-07-18T15:12:20
| 2020-07-18T15:12:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 323
|
r
|
printRes.R
|
# Plot per-epoch classification error for the faces_lr experiment.
# NOTE(review): the absolute setwd() ties this script to one machine;
# prefer relative paths.
setwd("/home/pablo/Desktop/Facultad/5to\ año/Machine-Learning/Practica2/faces_lr/")
# Tab-separated log with no header; columns 5 and 7 are plotted below
# (presumably train and test classification error — confirm against the
# code that writes faces_lr.mse).
datos <- read.table("faces_lr.mse",sep = "\t",header = FALSE)
epocas <- 1:nrow(datos)
plot(epocas,datos[,5],col="red",type="o",xlab="Épocas",ylab="Error de clasificación",ylim=c(0,1))
points(epocas,datos[,7],col="yellow",type="o")
|
b17cc7956ee984426ad4abf408fc9136a0a432fb
|
3059b99e4aa4b3ddbdf19ab0570ad322c5604a88
|
/Quizii_V7/Globals.R
|
e0d47c5f609aad10ed3e9389feccbcbd2045d1a8
|
[] |
no_license
|
witusj/Quizii
|
f75f540d8b1c41c9c5b5c22043b7860c8541be18
|
ca053f1cdb576d32941c1dddbc6ea4b9ccec39d8
|
refs/heads/master
| 2020-06-04T20:05:51.638894
| 2014-09-23T12:03:43
| 2014-09-23T12:03:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 394
|
r
|
Globals.R
|
# Load the question bank and pre-filter the Financial Accounting /
# Vermogensbehoefte questions into multiple-choice and open subsets.
questions <- read.csv(file='Vragen.csv', colClasses = 'character', header = TRUE)
# Shared topic filter, factored out of the two subset expressions below.
in_topic <- questions$Onderwerp == 'Financial Accounting' &
  questions$Subcategorie == 'Vermogensbehoefte'
questionsMC <- questions[in_topic & questions$Type == 'MC', ]
questionsOpen <- questions[in_topic & questions$Type == 'Open', ]
|
4209f983d4d4795058c259dde0dfb2d64d367391
|
8e127d47ba1ade5e098cf03561b519da1eca9e85
|
/tests/testthat/test_outputStream.R
|
d0f611b7047540a6ff6268e4d88acf6c2d993a64
|
[] |
permissive
|
allekai/R-streamgenerator
|
6bff4cd3cd3fe18764e0ab79e93d9aaf794e7a81
|
65a67337364103d46681172e95a0b62fcf3372b7
|
refs/heads/master
| 2021-04-15T03:41:07.917081
| 2018-11-02T15:11:17
| 2018-11-02T15:11:17
| 126,463,944
| 0
| 0
|
BSD-3-Clause
| 2019-09-30T13:23:16
| 2018-03-23T09:38:16
|
R
|
UTF-8
|
R
| false
| false
| 55
|
r
|
test_outputStream.R
|
# Placeholder testthat file: loads the package and declares the context;
# no expectations have been written yet.
library(streamgenerator)
context("Test Output Stream")
|
a9b80534e1e11bc67101f685e8e02477087de564
|
d133105fe8d3ffad2ca3a76c3a7fa32c5042fca7
|
/cachematrix.R
|
f798f937e5ac4799ac1e6226b20de2afedd21a7e
|
[] |
no_license
|
gyanjoshi/ProgrammingAssignment2
|
83e1fdc458e9194e5d7acc8d5df44a35d820ffce
|
3e5561f72fe422bd33ab1fddd9432d8c32d2762e
|
refs/heads/master
| 2020-11-26T02:42:38.000162
| 2019-12-20T17:02:27
| 2019-12-20T17:02:27
| 228,941,294
| 0
| 0
| null | 2019-12-18T23:59:57
| 2019-12-18T23:59:56
| null |
UTF-8
|
R
| false
| false
| 1,791
|
r
|
cachematrix.R
|
## makeCacheMatrix creates a special object, which caches the inverse of a matrix
##
## This function will create a list of functions nedded to create cache of Inverse of matrix.
## x is a matrix, which is square invertible matrix.
## i is calculated inverse of the matrix x
## set function sets the matrix object x of the calling function to be its argument y
## .. and inverse (i), which is accesible in the calling function to be NULL initially.
## get function is just opposite, i.e. it returns the matrix object x
## setInverse function sets the value of i (which is accessible to calling environment) as the inverse matrix, which is passed as argument
## getInverse returns value of i which is inverse of the matrix.
## Construct a cache-aware wrapper around matrix `x`.
##
## The returned list exposes:
##   set(y)             : store a new matrix and drop any cached inverse
##   get()              : return the stored matrix
##   setInverse(inverse): record a computed inverse in the cache
##   getInverse()       : return the cached inverse (NULL until computed)
makeCacheMatrix <- function(x = matrix()) {
  cached_inv <- NULL
  set <- function(y) {
    # Replacing the matrix invalidates any previously cached inverse.
    x <<- y
    cached_inv <<- NULL
  }
  get <- function() x
  setInverse <- function(inverse) cached_inv <<- inverse
  getInverse <- function() cached_inv
  list(set = set,
       get = get,
       setInverse = setInverse,
       getInverse = getInverse)
}
## cacheSolve: return the inverse of the special matrix object `x` created
## by makeCacheMatrix. The first call computes the inverse with solve() and
## stores it in the object's cache; later calls return the cached value,
## announcing the hit with a message. `...` is passed on to solve().
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  inv <- x$getInverse()
  if (is.null(inv)) {
    # Cache miss: compute and remember the inverse before returning it.
    inv <- solve(x$get(), ...)
    x$setInverse(inv)
    return(inv)
  }
  message("getting cached data")
  inv
}
|
d07c1e561320d9e3ce64404a469215aa8ed32864
|
82c6118ca4b0d28c8162975c73cadc598350c1f4
|
/plot_analyse.r
|
88ba4946dab0a446ca42d38dee2a6454440b0550
|
[] |
no_license
|
yishuo/Text_Social_Mining
|
72604d33a85a67551e4134b2779fa5719a88bd8f
|
ba92b72041d218490e7832bf61c4c799db6cc83e
|
refs/heads/master
| 2021-01-10T01:06:23.939719
| 2016-02-01T21:49:19
| 2016-02-01T21:49:19
| 50,871,736
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,896
|
r
|
plot_analyse.r
|
# NOTE(review): absolute Windows paths and JAVA_HOME tie this script to one
# machine; consider relative paths.
Sys.setenv(JAVA_HOME='D:/Program Files (x86)/jdk1.7/jre')
library(XLConnect)
library(igraph)
# Adjacency matrices read from CSV. NOTE(review): the name `table` shadows
# base::table; rename if base::table is ever needed later.
table <- read.csv("C:/Users/lv/Desktop/Data mining/R_Projet/table.csv")
tableno <- read.csv("C:/Users/lv/Desktop/Data mining/R_Projet/tableno.csv")
# Edge list for the directed graph; 56 rows are assumed to match the number
# of 1-entries in `table` — TODO confirm against the data.
tableArc <- diag(0, nrow=56, ncol=2)
index <- 1
# iterate over source vertices (rows); graph has 20 vertices (hard-coded)
for (i in seq(from=1,to=20)){
# iterate over target vertices (columns)
for(n in seq(from=1, to=20)){
if(table[i,n]==1){
tableArc[index,1] <- i
tableArc[index,2] <- n
index<-index+1
}
}
}
# Edge list for the undirected graph (upper triangle only, so each edge is
# recorded once); 28 rows assumed to match the edge count — TODO confirm.
tableArcno <- diag(0, nrow=28, ncol=2)
indexno <- 1
# iterate over source vertices (rows)
for (i in seq(from=1,to=20)){
# only columns above the diagonal, to avoid duplicate undirected edges
for(n in seq(from=i+1, to=20)){
if(tableno[i,n]==1){
tableArcno[indexno,1] <- i
tableArcno[indexno,2] <- n
indexno<-indexno+1
}
}
}
tableArcno
#go2 <- graph(c(tableArc),n=20)
# Build igraph objects; graph() expects edges column-wise, hence t().
go2 <- graph(t(tableArc))
gno2 <- graph(t(tableArcno),directed = FALSE)
tkplot(go2) # directed graph
tkplot(gno2) # undirected graph
#the number of vertices
V(go2)
#the number of edges
E(go2)
#the degree of every vertice
degree(go2)
#the distribution of degrees of vertices
degree.distribution(go2)
#the shortest path of every vertices(the number of stop vertices)
shortest.paths(go2)
get.shortest.paths(go2,2)
#the similarity between vertices
similarity.jaccard(go2)
#the centrality
closeness(go2)
#the betweeness
betweenness(go2)
edge.betweenness(go2)
#the connexity
is.connected(go2)
clusters(go2)
no.clusters(go2)
#the communitty detection
wtcgo2 <- walktrap.community(go2)
ebc <- edge.betweenness.community(go2, directed = TRUE, edge.betweenness = TRUE, merges = TRUE, bridges = TRUE)
#display the communities
wtcgo2$membership
#modularity of the associated partition
modularity(wtcgo2)
modularity(go2,membership(wtcgo2))
#graphs generators
g <- erdos.renyi.game(1000, 1/1000)
g <- barabasi.game(10000)
#g <- graph.ring(10)as.directed(g, "mutual")
|
adbc426453e7dccc7a2e6bf622221d1f843b8d7f
|
c6c9b0c0ee14337e2d46176bba525c0d85decae6
|
/ki_simulation.R
|
5f0efd8cbe873ebbb6f8c9eae12cee24fd54bad1
|
[] |
no_license
|
benearnthof/FFFT
|
dcde3cc0727a9d6a896c2575433750473bffb142
|
4881f744bb83f9187299caf75e9d09f2ad06bbae
|
refs/heads/master
| 2023-02-09T20:27:04.229070
| 2021-01-04T15:57:14
| 2021-01-04T15:57:14
| 261,377,588
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,602
|
r
|
ki_simulation.R
|
# Confidence intervals for the simulation: if the "KI?" checkbox is ticked
# we simply run the simulation again with the "optimal" and "worst case"
# parameters. The fitdistrplus package fits parameters to censored data and
# can also produce confidence intervals (KIs) while doing so.
#
library(fitdistrplus)
?fitdistcens
# fitdistcens() needs the data in a 'censdata' data frame with columns
# named 'left' and 'right', describing each observed value as an interval:
#   left : NA for left-censored observations, the left bound for
#          interval-censored observations, or the observed value for
#          non-censored observations
#   right: NA for right-censored observations, the right bound for
#          interval-censored observations, or the observed value for
#          non-censored observations
# get_censdata() below builds such a frame from failure and suspension data.
# NOTE(review): the scan() paths point to a network share ("I:\\...") and
# will only resolve on the original author's machine.
failures <- scan("I:\\airworthiness.p\\01_Mitarbeiter\\Benedikt Arnthof\\WeibullApp\\Failure1.csv")
suspensions <- scan("I:\\airworthiness.p\\01_Mitarbeiter\\Benedikt Arnthof\\WeibullApp\\Suspensions_Iststand.csv")
# Build a 'censdata' frame for fitdistrplus::fitdistcens() from failure
# times (exact observations: left == right) and suspension times
# (right-censored: right == NA).
#   fal - numeric vector of failure times
#   sus - numeric vector of suspension (right-censored) times
# Returns a data.frame with columns 'left' and 'right'.
# Fix: the original ended in `ret <- data.frame(...)`, so the value was
# returned invisibly; returning the data frame directly makes the result
# print at top level as expected.
get_censdata <- function(fal, sus) {
  data.frame(left = c(fal, sus), right = c(fal, rep(NA, times = length(sus))))
}
# Fit a Weibull distribution to the censored data and explore two routes
# to confidence bounds: bootstrap via fitdistrplus, and the built-in
# bounds of WeibullR.
test <- get_censdata(failures, suspensions)
res <- fitdistcens(test, distr = "weibull")
# One manual bootstrap resample of the censored rows
bootstrapsample1 <- test[sample(nrow(test), replace = TRUE),]
bootres <- fitdistcens(bootstrapsample1, dist = "weibull")
# one can then use the quantile method for an object of class "fitdistcens"
# to return bootstrap confidence intervals
quantile(res)
boot <- bootdistcens(res)
plot(boot)
summary(boot)
# The WeibullR package calculates confidence bounds by default.
?WeibullR::LRbounds()
mframe <- mleframe(failures, s = suspensions)
LRbounds(mframe)
WeibullR::MRRw2p(failures, suspensions, bounds = TRUE, show = TRUE)
# use MRRw2p to calculate bounds, then use b50 life bounds as parameter for KI
?MRRw2p
# NOTE(review): 'test' is reassigned here to an already-subset value, yet the
# next line indexes test[[2]] again as if it were the full result list —
# verify which of these two lines reflects the intended extraction.
test <- MRRw2p(failures, suspensions, bounds = TRUE)[[2]][17,]
dta <- test[[2]]
params <- dta[17, ]
params
# the structure of dta is always the same since we use the quickfit method of weibullR
# the params in the 17th row are the lower and upper bounds
# do this quick estimation and save the expected errors in corresponding vectors
# then return them and or add them to the plot as lines. simple as that
#
|
0b35719f4d4aea43388ff12f6bd84a7bbbe2b47a
|
241c99c7dbd3eba623d72ffdfdaa1a652be1bafb
|
/R/NP-DanielTrend.R
|
1ddb468498d7b1c35914a0cd68491fe263ad8c33
|
[] |
no_license
|
JacintoCC/rNPBST
|
29b64f3230f5fc905f358f65e2b176e29c517324
|
633e7d2ba56707c721c4e6f6e1cfa3c67a79c703
|
refs/heads/master
| 2022-06-07T22:32:56.036084
| 2022-04-12T15:31:45
| 2022-04-12T15:31:45
| 80,525,887
| 11
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,991
|
r
|
NP-DanielTrend.R
|
#' @title Daniel Trend test for bivariated samples
#'
#' @export
#' @description Performs the Daniel Trend test (a Spearman-rank based
#'   trend test) on a two-column matrix.
#' @param matrix Matrix of data
#' @examples
#' x <- 1:10
#' m <- matrix(c(x, 2*x+rnorm(length(x))), ncol = 2)
#' danielTrend.test(m)
#' @return A list with pvalues for alternative hypothesis, statistics,
#' method and data name
danielTrend.test <- function(matrix){
  # Validate that the input is a proper bivariate sample
  checkBivariateConditions(matrix)
  n <- nrow(matrix)

  # Spearman-type statistic from the ranks of the two columns
  ranks.x <- rank(matrix[, 1])
  ranks.y <- rank(matrix[, 2])
  sumD <- sum((ranks.x - ranks.y)^2)
  R <- 1 - 6 * sumD / (n * (n * n - 1))

  # Tabulated p-value: exact for n <= 10, quantile-approximated up to n = 30
  if (n <= 10) {
    pvalue <- computeExactProbability(SpearmanExactTable, n, R)
  } else if (n <= 30) {
    pvalue <- computeAproximatedProbability(SpearmanQuantileTable, n, R)
  }

  # Asymptotic normal approximation, valid for any n
  Z <- R * sqrt(n - 1)
  positive.dependence.pvalue <- 1 - stats::pnorm(Z)
  negative.dependence.pvalue <- stats::pnorm(Z)
  no.dependence.pvalue <- 2 * min(positive.dependence.pvalue,
                                  negative.dependence.pvalue)

  statistic <- c(D = sumD, R = R, Z = Z)
  # The tabulated p-value is only prepended when it exists (n <= 30)
  pvalues <- c("Positive Dependence pvalue" = positive.dependence.pvalue,
               "Negative Dependence pvalue" = negative.dependence.pvalue,
               "No Dependence pvalue" = no.dependence.pvalue)
  if (n <= 30) {
    pvalues <- c("pvalue" = pvalue, pvalues)
  }

  list(data.name = deparse(substitute(matrix)),
       statistic = statistic, p.value = pvalues,
       method = "Daniel Trend")
}
|
afe34064a98ce666b559132717071f91745a7b13
|
cfb424fefe1cf296c16923e21bd025d14fd306ae
|
/R/eqSS.R
|
34af41cf965a3db8d96ccced0328e725164c1890
|
[] |
no_license
|
aalfons/laeken
|
50f196461589ff83c804abdab2c58eb332cf763b
|
31db96b2f190aa200d5a9bf3cde1b59517d0b56b
|
refs/heads/master
| 2021-11-24T01:18:01.259305
| 2021-11-23T12:47:48
| 2021-11-23T12:47:48
| 23,109,818
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,669
|
r
|
eqSS.R
|
# ---------------------------------------
# Author: Andreas Alfons
# Vienna University of Technology
# ---------------------------------------
# TODO: error handling
#' Equivalized household size
#'
#' Compute the equivalized household size according to the modified OECD scale
#' adopted in 1994.
#'
#' @param hid if \code{data=NULL}, a vector containing the household ID.
#' Otherwise a character string specifying the column of \code{data} that
#' contains the household ID.
#' @param age if \code{data=NULL}, a vector containing the age of the
#' individuals. Otherwise a character string specifying the column of
#' \code{data} that contains the age.
#' @param year if \code{data=NULL}, a vector containing the year of the survey.
#' Otherwise a character string specifying the column of \code{data} that
#' contains the year.
#' @param data a \code{data.frame} containing EU-SILC survey data, or
#' \code{NULL}.
#'
#' @return A numeric vector containing the equivalized household size for every
#' observation in \code{data}.
#'
#' @author Andreas Alfons
#'
#' @seealso \code{\link{eqInc}}
#'
#' @references Working group on Statistics on Income and Living Conditions
#' (2004) Common cross-sectional EU indicators based on EU-SILC; the gender pay
#' gap. \emph{EU-SILC 131-rev/04}, Eurostat.
#'
#' @keywords survey
#'
#' @examples
#' data(eusilc)
#'
#' # calculate equivalized household size
#' eqSS <- eqSS("db030", "age", data=eusilc)
#'
#' # combine with household ID and household size
#' tmp <- cbind(eusilc[, c("db030", "hsize")], eqSS)
#'
#' # show the first 8 rows
#' head(tmp, 8)
#'
#' @export
eqSS <- function(hid, age, year = NULL, data = NULL) {
  ## initializations: normalise the two calling conventions (raw vectors vs
  ## column names of 'data') so that 'data' holds only the (year,) hid
  ## columns and 'age' is a plain vector aligned with its rows
  if(is.null(data)) {
    data <- data.frame(hid=hid)
    hid <- "hid"
    if(!is.null(year)) {
      data <- cbind(year=year, data)
      year <- "year"
    }
  } else {
    age <- data[, age]
    data <- data[, c(year, hid), drop=FALSE]
  }
  ## Bug fix: coerce the grouping columns to factors so that tabulating the
  ## subset of members aged >= 14 keeps zero counts for households without
  ## such members. With non-factor columns, table() on the subset drops
  ## those households entirely and 'hm14p' gets misaligned with 'tmp'.
  data[] <- lapply(data, factor)
  ## calculations
  i <- if(is.null(year)) 2 else 3                 # column holding the counts
  tmp <- as.data.frame(table(data))               # household size per (year,) hid
  hm14p <- as.data.frame(table(data[age >= 14, , drop = FALSE]))[, i] # at least 14 years
  hm13m <- tmp[, i] - hm14p                       # younger than 14
  ## modified OECD scale (1994): 1 for the first member aged >= 14,
  ## 0.5 for each further member >= 14, 0.3 per member younger than 14
  tmp[, i] <- 1 + 0.5*(hm14p-1) + 0.3*hm13m
  names(tmp) <- c(year, hid, ".eqSS")
  data <- cbind(data, .ID=seq_len(nrow(data)))    # remember original row order
  data <- merge(data, tmp, sort=FALSE)            # attach eqSS per household
  ## restore the original order and extract the eqSS vector
  data$.eqSS[order(data$.ID)]
}
|
e067a6be3da166ba962c464ed76d087b40a4cc13
|
34b2fa58e5093355db849964f500e202b2bcac3f
|
/plot3.R
|
d7896ff3805a310a9569db0e75e4f2c0aa52465e
|
[] |
no_license
|
gvillemsr/ExData_Plotting1
|
3e4558859f08e0f8fb211f9e2d9a00d1d34dfe70
|
da08f2cd17d57dfc7052c7a7f159706a8fc0ba6c
|
refs/heads/master
| 2021-01-18T06:19:21.523840
| 2015-09-10T20:05:52
| 2015-09-10T20:05:52
| 20,627,117
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 875
|
r
|
plot3.R
|
# plot3.R: draw the three sub-metering series of the UCI "Household Power
# Consumption" data for 2007-02-01/02 into plot3.png.
# skip=66636/nrows=2880 selects exactly those two days of minute data;
# because skip discards the header row, the column names are re-read below.
eleccons<-read.table("household_power_consumption.txt",
header=TRUE, sep=";",skip=66636, nrows=2880)
#Just reading the two days worth of data
summary(eleccons)
# Re-read just the header line to recover the real column names
tit<-read.table("household_power_consumption.txt", header=TRUE, sep=";", nrows=1)
get.names<-names(tit)
names(eleccons)<-get.names
summary(eleccons)
# Combine Date + Time into a timestamp column (becomes column 10)
eleccons$t <- strptime(paste(eleccons$Date, eleccons$Time), "%d/%m/%Y %H:%M:%S")
summary(eleccons)
# Axis ranges: x from the timestamps, y from Sub_metering_1 (column 7,
# which has the largest values of the three series)
trange<-range(eleccons$t)
yrange<-range(eleccons[,7])
png(file = "plot3.png", width=480,height=480)
# Empty frame first, then one line per sub-metering series (columns 7-9)
plot(trange,yrange,type="n",xlab="",ylab="Electrical sub metering")
lines(eleccons[,10],eleccons[,7],col="black")
lines(eleccons[,10],eleccons[,8],col="red")
lines(eleccons[,10],eleccons[,9],col="blue")
legend("topright", lty = 1, col = c("black", "red","blue"),
legend = c("Sub_Metering_1", "Sub_Metering_2","Sub_Metering_3"))
dev.off()
|
e28719377e6d19bc289a20e802489bf46f6e11bd
|
715853d1dd2e10d62d28b1b9d725853d7fd51929
|
/R/csortb-package.r
|
8662eb2f007c649623db0b987e39afb4bfad2672
|
[] |
no_license
|
mryap/rtbdata
|
acaf87431177fbeedcf5186788a745e3b34d630b
|
51a8fa511b416267c5cc53ea192fa4b3e27f49be
|
refs/heads/master
| 2021-04-25T22:08:08.132315
| 2017-11-04T16:56:27
| 2017-11-04T16:56:27
| 109,512,808
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 261
|
r
|
csortb-package.r
|
#' csortb.
#'
#' Residential Tenancies Board (RTB) Average Monthly Rent Report by
#' Property Type, Location, Number of Bedrooms and Year,
#' 2008 to 2016, hosted by Ireland's Central Statistics Office (CSO).
#'
#' Package-level documentation stub; the NULL below is the conventional
#' roxygen2 target for package docs.
#' @name csortb
#' @docType package
NULL
|
c0551a9ef956f21f445bd3fa275ea2cddd9c40ac
|
e64a9c27e7c89fad358cb86861796316e3f143c0
|
/01-R_basic/01-data_structure/07-data_frame.R
|
6b9ab06f70ad1acd5754b320625fa70a8bfb5b46
|
[] |
no_license
|
gcl-sg/rstudio-demo
|
129abacc9a37672c5b7aee5abf1e6de40d968d79
|
2eae606bcf5079f4358870a91e7606844fc02e47
|
refs/heads/master
| 2021-01-20T03:34:59.478479
| 2017-05-26T08:03:25
| 2017-05-26T08:03:25
| 89,558,368
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 487
|
r
|
07-data_frame.R
|
# data frame
# Stores tabular data.
# Can be viewed as a list whose elements all have the same length:
#   - each element represents one column
#   - the length of each element is the number of rows
#   - element types may differ between columns
df <- data.frame(id = c(1, 2, 3, 4), name = c("a", "b", "c", "d"), gender = c(TRUE, TRUE, FALSE, FALSE))
l <- list(id = c(1, 2, 3, 4), name = c("a", "b", "c", "d"), gender = c(TRUE, TRUE, FALSE, FALSE))
nrow(df)
ncol(df)
# data.matrix() coerces every column to numeric and returns a matrix
df2 <- data.frame(id = c(1,2,3,4), score = c(80,86,90, 100))
data.matrix(df2)
|
3cc6cd2692bac54f353bf6ef9aba8c01de2f0e87
|
317360cc997480b99ee0b5f29d4f682d5c5493e5
|
/R/random_forest.R
|
bf41dbc440c9ab96e4b977144ca61a06e86921e4
|
[] |
no_license
|
StevenBoys/Ensemblelearn
|
408fcd88b533bbcf4e57de6518a10dd8cf5bee7c
|
db4c1da875020970597d38f607f5af0e34a70f8b
|
refs/heads/master
| 2020-09-05T12:10:11.829363
| 2019-12-08T04:26:06
| 2019-12-08T04:26:06
| 220,099,511
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,613
|
r
|
random_forest.R
|
#' Fit a weak learner (a CART regression tree) for a regression task
#'
#' @param x - input independent variables x for the training
#' @param y - input dependent variable y for the training
#'
#' @return The trained Decision Tree model based on the input data.
#' @export
#'
#' @examples
#' x <- matrix(rnorm(4000), 200, 20)
#' beta <- rnorm(5)
#' y <- x[, 1:length(beta)] %*% beta + rnorm(200)
#' dt_reg(x, y)
dt_reg <- function(x, y) {
  # method = "anova" requests a regression (least-squares) tree
  rpart(y ~ x, method = "anova")
}
#' Train one weak model of a Random Forest on a random subset of the features
#'
#' @param data - list of data that fweak needs (elements \code{x} and \code{y})
#' @param fweak - function that generates an estimate from a weak model based on input, its default value is dt_reg
#' @param fea_len - the number of features sampled each time; defaults to half the columns of \code{data$x}
#'
#' @return A trained model function whose input is the independent variables;
#'   it re-applies the same feature subset before predicting.
#' @export
#'
#' @examples
#' x <- matrix(rnorm(4000), 200, 20)
#' beta <- rnorm(5)
#' y <- x[, 1:length(beta)] %*% beta + rnorm(200)
#' data <- list(x = x, y = y)
#' randomforest_fit1(data)
randomforest_fit1 <- function(data, fweak = dt_reg, fea_len = NULL){
  # Default to half of the available features when no subsample size is given
  if(is.null(fea_len)){
    fea_len <- floor(ncol(data$x)/2)
  }else if(floor(fea_len) <= 0){
    # Check the compatibility of fea_len
    stop("The value of the floor of fea_len should be positive.")
  }
  # Check the compatibility of x.
  if(ncol(data$x) < 2){
    stop("The number of features on x should be bigger than 1.")
  }
  # Sample (without replacement) the feature columns this weak learner sees.
  # (Was `replace = F`; spell out FALSE — T/F are reassignable.)
  index_resample <- sample(seq_len(ncol(data$x)), fea_len, replace = FALSE)
  # Restrict the training data to the sampled columns
  data$x <- data$x[, index_resample]
  # Fit the weak model on the resampled data (was `T`; spelled out as TRUE)
  rpart_mod <- fit_model(fweak, TRUE, data)
  # The returned closure re-applies the same column subset at predict time
  model_train <- function(x){
    predict(rpart_mod, x[, index_resample])
  }
  # Return the trained function
  model_train
}
#' Ensemble learning via Random Forest
#'
#' @param data - list of data that fweak needs (elements \code{x} and \code{y})
#' @param model_num - the number of weak models to train and combine
#' @param fweak - function that generates an estimate from a weak model based on input
#'
#' @return A list of
#' \item{fitted_value}{ - fitted value on the training dataset based on the trained model}
#' \item{model_train}{ - a list of trained weak models}
#' @export
#'
#' @examples
#' x <- matrix(rnorm(4000), 200, 20)
#' beta <- rnorm(5)
#' y <- x[, 1:length(beta)] %*% beta + rnorm(200)
#' data <- list(x = x, y = y)
#' model_num <- 100
#' Randomforest(data, model_num)
Randomforest <- function(data, model_num, fweak = dt_reg){
  # Check the compatibility of model_num
  if(model_num <= 0){
    stop("The value of model_num should be positive.")
  }
  # Train model_num weak learners, each on its own random feature subset
  # (lapply replaces the preallocate-then-fill loop)
  model_train <- lapply(seq_len(model_num), function(k) {
    randomforest_fit1(data = data, fweak = fweak)
  })
  # Predictions of every weak learner on the training data
  # (was `parallel = T`; spelled out as TRUE)
  data$x <- as.data.frame(data$x)
  multi_est <- prediction(data$x, model_train, parallel = TRUE)
  # Equal-weight combination of the weak predictions
  comb_out <- Comb_parallel(multi_est, rep(1, model_num))
  # Return the fitted values on training data and the list of weak models
  list(fitted_values = comb_out, model_train = model_train)
}
|
13c02687e70da83641808c1561a645089799df7d
|
76be26397cc5cec4902b7a001abf9eaf1095df19
|
/R/install_packages.R
|
0357c6ec0abcdb2d4fa8768e26806a9cc91c3471
|
[] |
no_license
|
pfistfl/mlr-benchmark-docker
|
632ef31ad7a60c069d8d19ec44774533593f3d4d
|
3578281fccf6a60320883f316825e4b52a018d9a
|
refs/heads/master
| 2020-03-24T23:46:58.889600
| 2018-08-01T14:26:43
| 2018-08-01T14:26:43
| 143,154,002
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,055
|
r
|
install_packages.R
|
## Setting up the Benchmark
# 1)
# Install required R-Packages for the benchmark (add yours here)
# install.packages("devtools")
# install latest mlr version
install.packages("devtools")
# Required for benchmark
install.packages("farff")
# install.packages("mlrMBO")
# install.packages("DiceKriging")
# install.packages("randomForest")
# install.packages("parallelMap")
# install.packages("batchtools")
# install.packages("stringi")
# install.packages("dplyr")
# install.packages("brew")
# # Semimetric packages
# install.packages("quadprog")
# NOTE(review): install_version()/install_git() below come from devtools,
# so library(devtools) is needed before uncommenting them.
# install_version("proxy", version = "0.4-16")
# install_version("fdasrvf", version = "1.6.0")
# install.packages("dtw")
# install.packages("rucrdtw")
# # classiFunc
# install_git("git://github.com/pfistfl/classiFunc.git", branch = "fix_metricChoices_names")
# # install current fda_lte branch
# install_git("git://github.com/pfistfl/mlr.git", branch = "classiFunc")
# 2) Edit the file path in local_gitignore so it points to the data folder
# 3) Edit main.R and all other files according to requirements
890487f760847b0795132ff493873c767a3aff59
|
9c6e923123835d093e0a32d05014dbd1c4d21d7d
|
/R/getTrainHistory.character.R
|
e21936d017dbcf4da39bbb54a72e16dfd28a888d
|
[] |
no_license
|
MarkusBonsch/mxLSTM
|
bb80ac8e683ee9c5bbd03444827051bbef49af87
|
7a4caa4171815a284bb7a8c01096e01f61a00d6f
|
refs/heads/master
| 2021-09-09T10:28:22.704051
| 2018-03-15T06:50:53
| 2018-03-15T06:50:53
| 110,008,435
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 817
|
r
|
getTrainHistory.character.R
|
#' @title getTrainHistory.character
#' @description Loads a stored model from disk and returns its logged
#'   performance metrics.
#' @param model the path to a model that has been saved for example with \code{\link{saveLstmModel}}
#' @return data.frame with train and eval performance
#' @export getTrainHistory.character
#'
getTrainHistory.character <- function(model){
  model.dir <- model
  # The path must be an existing directory ...
  if (!dir.exists(model.dir)) {
    stop("Input is not a valid directory")
  }
  # ... containing the marker file written when the model was saved
  type.file <- file.path(model.dir, "modelType.txt")
  if (!file.exists(type.file)) {
    stop("Input directory is not a valid model directory")
  }
  # Dispatch on the stored model type; only mxLSTM models are supported
  model.type <- readLines(type.file)
  if (model.type != "mxLSTM") {
    stop("modelType not supported")
  }
  loaded <- loadLstmModel(model.dir)
  return(getTrainHistory(loaded))
}
|
cb7e66b1dc3eb8104eda77b8696047f9338ebb0e
|
cb2e9f97913785f7d79bf1c6f6fea93298b19cf2
|
/man/SegmentCGH.Rd
|
50c1850f693061996f0663edc93bd61ed557b186
|
[] |
no_license
|
brian-bot/rCGH
|
5aa6d08ea708f94c8e70e90a6549b919637819d8
|
4c27a21d4d84a5a45168369281ab6d5fe85df705
|
refs/heads/master
| 2016-09-07T01:52:14.147611
| 2015-02-03T19:50:43
| 2015-02-03T19:50:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,495
|
rd
|
SegmentCGH.Rd
|
\name{SegmentCGH}
\alias{SegmentCGH}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
%% ~~function to do ... ~~
}
\description{
%% ~~ A concise (1-5 lines) description of what the function does. ~~
}
\usage{
SegmentCGH(object, Smooth = TRUE, UndoSD = NULL, minMarks = 8)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{object}{
%% ~~Describe \code{object} here~~
}
\item{Smooth}{
%% ~~Describe \code{Smooth} here~~
}
\item{UndoSD}{
%% ~~Describe \code{UndoSD} here~~
}
\item{minMarks}{
%% ~~Describe \code{minMarks} here~~
}
}
\details{
%% ~~ If necessary, more details than the description above ~~
}
\value{
%% ~Describe the value returned
%% If it is a LIST, use
%% \item{comp1 }{Description of 'comp1'}
%% \item{comp2 }{Description of 'comp2'}
%% ...
}
\references{
%% ~put references to the literature/web site here ~
}
\author{
%% ~~who you are~~
}
\note{
%% ~~further notes~~
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
}
\examples{
##---- Should be DIRECTLY executable !! ----
##-- ==> Define data, use random,
##-- or do help(data=index) for the standard data sets.
## The function is currently defined as
function (object, Smooth = TRUE, UndoSD = NULL, minMarks = 8)
{
cnSet <- getCNset(object)
cnSet <- cnSet[order(cnSet$ChrNum, cnSet$ChrStart), ]
params = getParam(object)
if (is.null(UndoSD))
        params$UndoSD <- 0.25 + sqrt(max(getParam(object)$SigmaSq))
else params$UndoSD <- UndoSD
L2R <- cnSet$Log2Ratio
Chr <- cnSet$ChrNum
Pos <- cnSet$ChrStart
sampleName <- getInfo(object, "sampleName")
if (is.na(sampleName))
sampleName <- "sample_x"
cat("Computing standard segmentation...\n")
segTable <- .computeSegmentation(L2R, Chr, Pos, sampleName,
params, Smooth)
segTable <- .smoothSeg(segTable, minMarks)
segTable <- .computeMedSegm(segTable, L2R)
segTable <- .mergeLevels(segTable)
probeValues.left <- .probeSegValue(segTable, use.medians = TRUE)
cat("Number of segments:", nrow(segTable), "\n")
object@param <- params
object@segTable = segTable
object@cnSet = cbind.data.frame(cnSet, Segm = probeValues.left)
return(object)
}
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ ~kwd1 }
\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
|
9a7e56e106913b91a23481b56f09b78f743a77ba
|
9668d4d06dec8b4b1349961168a811806f0ca8b5
|
/plot2.R
|
c39949a855336e08e40a9ec1be41f96834b6264a
|
[] |
no_license
|
pcharala/ExData_Plotting1
|
9406318a5a7bbbf3841c9d8e95e4b1c97f7c572a
|
708e97cf4dd235c4b9a361321273eace10c7c7bb
|
refs/heads/master
| 2021-01-24T02:38:41.235360
| 2014-06-06T22:01:57
| 2014-06-06T22:01:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 523
|
r
|
plot2.R
|
# load necessary subset of 'Household Power Consumption' data:
# read the whole file, then keep only the rows for 2007-02-01/02
# (NOTE(review): header=T and typ='l' rely on T/partial matching of
# 'type'; spelled-out forms would be safer)
data <- read.table(file='household_power_consumption.txt',header=T,sep=";",quote="",na.strings="?")
idx <- ((data$Date == '1/2/2007') | (data$Date == '2/2/2007'))
data <- data[idx,]
# open png device
png(file='plot2.png', bg = 'transparent')
# draw plot: Global_active_power against the combined Date+Time timestamp
datetime <- strptime(paste(data$Date,data$Time),format='%d/%m/%Y %H:%M:%S')
plot(datetime,data$Global_active_power,typ='l', xlab ='', ylab = 'Global Active Power (kilowatts)')
# close png device
dev.off()
|
fb000a9bd1a5a584052c6569064861e9348d6d43
|
f9d607dd79d2e3a1da2b32a319726cbf4ef36f80
|
/man/update_quiz.Rd
|
e8450e72cf3d6a45512a66336ed2b68d465a2f60
|
[
"MIT"
] |
permissive
|
stibu81/WordBox
|
4652f7e0177a4ed421e5eccc0bd3d97727135509
|
f9336ca69d6128caaaf7f9fc980f86b63f9ca0f2
|
refs/heads/master
| 2022-12-26T05:01:37.754675
| 2022-12-10T12:07:10
| 2022-12-10T12:07:10
| 205,691,037
| 6
| 0
|
NOASSERTION
| 2020-11-01T17:04:31
| 2019-09-01T14:51:42
|
R
|
UTF-8
|
R
| false
| true
| 1,180
|
rd
|
update_quiz.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mark_words.R
\name{update_quiz}
\alias{update_quiz}
\title{Update Quiz After Question Was Answered}
\usage{
update_quiz(question, quiz, wl, success)
}
\arguments{
\item{question}{the \code{\link{wordquestion}} object that
has been quizzed.}
\item{quiz}{the \code{\link{wordquiz}} from which the question
was taken and which will be modified.}
\item{wl}{the \code{\link{wordlist}}, on which the quiz is
based.}
\item{success}{logical indicating whether the question was
answered correctly or not.}
}
\description{
After a question from a quiz was answered, the
\code{wordquiz} object must be updated. This function
performs the update depending on the success of the answer
and the type of the quiz.
}
\details{
If a wrong answer was given, the \code{wordquiz} object
is returned unchanged. If the answer was correct, the
word that has been quizzed is removed from the quiz.
If the quiz type is \code{"newwords"}, a word is only
removed from the quiz once it has reached a count of 2.
Also, when a word is removed, the weights are adapted,
such that an additional word is included into the quiz.
}
|
c8b380eac96719f04b6228966dd8db9bd8f21207
|
b68f34133c24ce1bff6f5de2aa9da15ad839a2b7
|
/rjmcmc/rjmcmc.R
|
3eb2d58674f0c590999130399eb3ab759207cae0
|
[] |
no_license
|
bob-skowron/SU19-Independent-Study
|
16d7bbd3fda3bf3fa424696faa7e7a3ce42181b7
|
54f4bcf986d798c95ab229e4e2548961f13c2592
|
refs/heads/master
| 2020-04-25T01:54:07.740363
| 2019-08-04T12:33:54
| 2019-08-04T12:33:54
| 172,421,661
| 0
| 1
| null | 2019-07-18T22:34:57
| 2019-02-25T02:41:50
|
Python
|
UTF-8
|
R
| false
| false
| 7,285
|
r
|
rjmcmc.R
|
###########################################################################################
# Author: Nick Syring
# Contact: nasyrin@gmail.com, https://github.com/nasyring
# Title: R codes for Bayesian dirichlet/gaussian regression, posterior sampling via RJ-MCMC
###########################################################################################
# Directions for use:
#
# 1. Install and load the packages below: MCMCpack.
# 2. Run all the codes. Output from a previous run (seed set at 54321) is at the bottom.
#
# Necessary packages
library(MCMCpack)
# Fixed seed so results match the sample output pasted at the bottom of the file
set.seed(54321)
# True regression function: fixed "true" weights of 0.2, 0.4 and 0.4 on
# columns 1, 3 and 5 of the design matrix (all other columns inactive).
mean.fun <- function(X) {
  weights <- c(0.2, 0.4, 0.4)
  weights[1] * X[, 1] + weights[2] * X[, 3] + weights[3] * X[, 5]
}
# The design matrix is a set of fake returns: n x size matrix of i.i.d.
# normal draws with mean 0.08 and sd 0.02.
x.sample <- function(n, size=6){
  draws <- rnorm(n * size, 0.08, .02)
  matrix(draws, nrow = n, ncol = size)
}
# Likelihood ratio, used in MCMC sampling: ratio of the Gaussian
# likelihood of Y under coefficient vector betas2 to that under betas1.
# 'sigma' (the residual standard deviation) is read from the enclosing
# environment — it is defined further down in this script.
lr_func <- function(X,Y,betas1,betas2){
  mu.old <- X %*% matrix(betas1, length(betas1), 1)
  mu.new <- X %*% matrix(betas2, length(betas2), 1)
  ll.old <- sum(dnorm(Y, mu.old, sigma, log = TRUE))
  ll.new <- sum(dnorm(Y, mu.new, sigma, log = TRUE))
  exp(ll.new - ll.old)
}
# Metropolis Hastings steps for beta, given selection
# Proposes a new weight vector for the currently-included covariates via
# 'rprop' and accepts it with the usual MH ratio (likelihood ratio times a
# proposal-density correction). Weights of excluded covariates (zeros) are
# left untouched. 'sigma' is read globally via lr_func().
mh_beta <- function( dprop, rprop, X,Y,betas) {
beta_prop <- betas
# split current weights into excluded (zero) and included components
where.zeroes <- which(betas==0)
len.zero <- length(where.zeroes)
if(len.zero>0){
betas.nonzero <-betas[-where.zeroes]
}else {
betas.nonzero <-betas
}
# draw a Dirichlet proposal for the included weights only
u <- rprop(betas.nonzero)
if(len.zero>0){
beta_prop[-where.zeroes]<-u
}else {
beta_prop<-u
}
# log acceptance ratio: log likelihood ratio ...
r <- log(lr_func(X,Y,betas,beta_prop))
# ... plus the proposal-density correction for the asymmetric proposal.
# NOTE(review): beta_prop[-where.zeroes] equals u at this point, so the two
# correction terms cancel exactly; the reverse density was probably meant to
# be evaluated at the *current* weights (betas.nonzero) — verify.
if(len.zero>0){
r <-r+log(dprop(beta_prop[-where.zeroes] , u))-log(dprop(u, beta_prop[-where.zeroes]))
}else {
r <-r+log(dprop(beta_prop , u))-log(dprop(u, beta_prop))
}
R <- min(exp(r), 1)
# accept only if every proposed weight stays above the 0.001 floor
if(runif(1) <= R & min(beta_prop)>.001) {
betas <- beta_prop
}
return(betas)
}
# MH proposal distribution: a Dirichlet centred on the current weights,
# with concentration parameters floored at 0.01 for numerical stability.
# dprop evaluates the proposal density at x; rprop draws one sample.
dprop <- function(x,theta){
  conc <- ifelse(theta < .01, .01, theta)
  ddirichlet(x, 1 * conc)
}
rprop <- function(theta){
  conc <- ifelse(theta < .01, .01, theta)
  rdirichlet(1, 1 * conc)
}
#Looping rx Sweeps through the RJ-MCMC sampler
# Load returns: column 1 is the response, columns 2-25 the asset returns.
crsp.data <- read.csv("../data/1985-1991/spx-combined-data.csv", header = TRUE)
Y <- crsp.data[,1]
X <- as.matrix(crsp.data[,2:25])
colnames(X) <- NULL
incCash <- FALSE
# response samples
# Optional all-zero "cash" column appended when incCash is TRUE
cashAsset <- matrix(0, nrow(X),1)
if(incCash){
X <- cbind(X, cashAsset)
}
# Sampler length: n.MCMC kept draws after n.burn burn-in sweeps
n.MCMC <- 30000
n.burn <- 5000
n<-nrow(X)
#size <- ncol(X)
size<-ncol(X)
sigma <- .01
# Returns rescaled to percent; the commented alternatives generate
# synthetic data via x.sample()/mean.fun() instead
x.samples <- X*100 #x.sample(n,size)
# x.samples <- X*100
y.sample <- Y*100 #mean.fun(x.samples) + rnorm(n,0,sigma)
rx = n.MCMC + n.burn
betas_results = matrix(0,rx,size)
# Construction of an RJ-MCMC Sweep
# Initial parameter values
I = rep(TRUE,size)# initial selection
betas = rdirichlet(1,rep(30,size)) # initial beta
# Counters for accepted birth / death / relocate moves
bir = 0
dea = 0
rel = 0
# Main RJ-MCMC loop: each sweep does (1) a Metropolis-within-Gibbs update of
# the included weights and (2) one trans-dimensional move (birth = add a
# covariate, death = remove one, relocate = swap two positions). 'I' tracks
# which covariates are included; zero entries of 'betas' mark exclusion.
for(kx in 1:(n.burn+n.MCMC)){
betas<-round(betas,5)
# Metropolis-within Gibbs updates to Beta
betas <- mh_beta( dprop, rprop, x.samples,y.sample,betas)
# Birth, Death, and Relocate moves: probabilities from a truncated Poisson
# prior on the number of included covariates
mu <- 3 #if small then penalty on inclusion
b <- (1/3)*ifelse(sum(I)<size,1,0)*dpois(sum(I)+1,mu)/dpois(sum(I),mu)
d <- (1/3)*ifelse(sum(I)>1,1,0)*dpois(sum(I)-1,mu)/dpois(sum(I),mu)
r <- max(1-b-d,0)
move <- rmultinom(1,1,c(b,d,r))
# Birth
if(move[1] == 1){
#determine new covariate and reset beta
# NOTE(review): jump.prob is computed before betas.new is modified, so
# lr_func(betas, betas.new) is always 1 at that point — verify this is
# the intended ordering.
where.zero <- which(betas==0)
betas.new<-betas
if(length(where.zero)>0){
betas.nonzero <- betas[-where.zero]
u <- runif(1, 0, min(1,sum(I)*min(betas.nonzero)))
new.loc <- ifelse(sum(!I)==1,which(!I==TRUE),sample((1:size)[!I],1))
jump.prob <- b*(1/(size-sum(I)))*(1/(min(1,sum(I)*min(betas.nonzero))))*lr_func(x.samples,y.sample,betas,betas.new)
betas.new[-where.zero] <- betas[-where.zero]-u/sum(I)
}else {
u <- runif(1, 0, min(1,sum(I)*min(betas)))
new.loc <- ifelse(sum(!I)==1,which(!I==TRUE),sample((1:size)[!I],1))
jump.prob <- b*(1/(size-sum(I)))*(1/(min(1,sum(I)*min(betas))))*lr_func(x.samples,y.sample,betas,betas.new)
betas.new <- betas-u/sum(I)
}
betas.new[new.loc]<-u
accept <- runif(1)<jump.prob
if(accept & min(betas>.001)){
betas <- betas.new
I[new.loc]<-TRUE
bir<-bir+1
}
}
# Death
if(move[2] == 1){
#determine location of dying covariate and reset beta
# The removed weight is redistributed evenly over the surviving covariates
death.loc <- ifelse(sum(I)==1,which(I==TRUE),sample((1:size)[I],1))
betas.new <- betas
where.zero <- which(betas==0)
if(length(where.zero)>0){
betas.new[-where.zero] <- betas[-where.zero]+betas[death.loc]*(1/(sum(I)-1))
}else {
betas.new <- betas+betas[death.loc]*(1/(sum(I)-1))
}
betas.new[death.loc]<-0
jump.prob <- min(d*(1/(size-sum(I)+1))*(1/(min(1,(sum(I)-1)*min(betas.new[-death.loc])))),1)*lr_func(x.samples,y.sample,betas,betas.new)
accept <- runif(1)<jump.prob
if(accept & min(betas>.001)){
betas<-betas.new
I[death.loc]<-FALSE
dea<-dea+1
}
}
# Relocate
if(move[3] == 1){
#determine locations to swap
swap <- sample((1:size),2)
betas.temp <- betas
betas.temp[swap[1]]<-betas.temp[swap[2]]
betas.temp[swap[2]]<-betas[swap[1]]
betas.new <- betas.temp
jump.prob <- r*(1/size)*(1/(size-1))*2*lr_func(x.samples,y.sample,betas,betas.new)
accept <- runif(1)<jump.prob
if(accept){
betas <- betas.new
I.temp <- I
I.temp[swap[1]]<-I.temp[swap[2]]
I.temp[swap[2]]<-I[swap[1]]
I <- I.temp
rel<-rel+1
}
}
# Record the state of this sweep and print a progress line
betas_results[kx,] = betas
print(c(kx, dea, bir, rel, round(betas,3)))
#kx=kx+1
}
# Posterior results
colMeans(betas_results[(n.burn+1):(n.burn+n.MCMC),])
[1] 0.190360000 0.002166285 0.279306968 0.132237146 0.391993032 0.003946569 # note the average should nearly sum to 1
# basic regression
summary(lm(y.sample~x.samples-1))
Call:
lm(formula = y.sample ~ x.samples - 1)
Residuals:
Min 1Q Median 3Q Max
-0.0253110 -0.0064324 0.0000671 0.0059664 0.0220677
Coefficients:
Estimate Std. Error t value Pr(>|t|)
x.samples1 0.18046 0.04349 4.149 7.32e-05 ***
x.samples2 -0.03733 0.05587 -0.668 0.506
x.samples3 0.44219 0.04367 10.125 < 2e-16 ***
x.samples4 0.02360 0.04671 0.505 0.615
x.samples5 0.41385 0.04491 9.216 8.56e-15 ***
x.samples6 -0.03813 0.04868 -0.783 0.435
---
Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
Residual standard error: 0.009886 on 94 degrees of freedom
Multiple R-squared: 0.9851, Adjusted R-squared: 0.9842
F-statistic: 1038 on 6 and 94 DF, p-value: < 2.2e-16
# need to think about how to compare these a bit more...
|
8ff66624deca364f42c2c98c079f98a896718aaa
|
4a90a8d09f1e4b8c79d3982e0f235c4d064e74c1
|
/R/plot-add-run.R
|
2a7e729ba2c1d145442d8e048c8b3f540402d6bc
|
[] |
no_license
|
bsnouffer/atlantistools
|
3696d68837ac61256305e68a5a2ce49ca9cf9e86
|
3bcdb27696f9b16ce76869371658e41ddce1d374
|
refs/heads/master
| 2021-01-15T11:14:33.996939
| 2016-08-31T21:13:51
| 2016-08-31T21:13:51
| 67,061,727
| 0
| 0
| null | 2016-08-31T18:09:12
| 2016-08-31T18:09:11
| null |
UTF-8
|
R
| false
| false
| 1,376
|
r
|
plot-add-run.R
|
#' Low level plotting function to apply color coding to plots based on run variable!
#'
#' This function can be used to add colour coding to plotting routines without color specification
#' e.g. \code{plot_ts()}, \code{plot_calibrate()} or \code{plot_physics()}. The colorcoding is
#' based on the column run so you need to apply \code{combine_runs} to your data first!
#' Please note that \code{plot_ts()} and \code{plot_calibrate()} only work if they are created
#' with non-age based data.
#'
#' @param plot ggplot2 object.
#' @return ggplot2 plot.
#' @export
#' @family low-level-plot functions
#'
#' @examples
#' dummy <- preprocess_setas
#' # Change output of dummy simulation!
#' dummy$biomass$atoutput <- dummy$biomass$atoutput * 1.5
#' dfs <- combine_runs(list(preprocess_setas, dummy), runs = c("setas", "dummy"))
#' plot <- plot_ts(dfs$biomass)
#' plot <- plot_add_run(plot)
#' plot
plot_add_run <- function(plot) {
  # Validate the object type first: the original checked plot$data before
  # confirming `plot` is a ggplot, which gives a confusing error (or a
  # misleading "run not found") for non-ggplot input.
  if (!ggplot2::is.ggplot(plot)) stop("Plot is no ggplot2 object.")
  # Check input
  # Otherwise the error first occurs when the plot is drawn!
  if (!("run" %in% names(plot$data))) stop("Variable run not found in plot!")
  # Warn (but do not abort) if a colour mapping already exists; adding the
  # run mapping below will override it.
  if ("colour" %in% names(plot$mapping)) {
    warning("Color coding in plot already present. You probably don't want to overwrite this with model 'run'!")
  }
  # Map the 'run' column (added by combine_runs) to colour.
  plot <- plot + ggplot2::aes_(colour = ~run)
  return(plot)
}
|
d1ffef3c99b3626562677f2232479941c02463bf
|
0cf0b5ff1a7e1b2913472ebd198059df0d77e028
|
/Exam.r
|
6790fe1f2786e72ac104e0c3d2836b1dec669bc2
|
[
"MIT"
] |
permissive
|
CSUBioinformatics1801/R_Bioinformatics_ZYZ
|
ab913182ba5253c43ad1b7fb90da042554dcff83
|
2b5eb49522aecf4bb5cf6e15f945e4c9e6b54cf2
|
refs/heads/main
| 2023-07-16T18:11:57.592795
| 2021-08-22T04:02:41
| 2021-08-22T04:02:41
| 358,190,411
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,666
|
r
|
Exam.r
|
# Exam script: compare gene expression between control and diabetic neurone
# samples, compute per-gene fold changes and t-test p-values, draw a volcano
# plot, and run a GO (Biological Process) enrichment on the differentially
# expressed genes.
# NOTE(review): assumes `metadata` and `genematrix` objects already exist in
# the workspace (the read.csv call below is commented out) -- confirm how
# they are loaded before running.
cepath = "C:/Users/stu/Desktop/exam"
if (getwd() != cepath) setwd(cepath)
# -------------1-----------------------
#temp=read.csv2(file = "metadata.csv",header=TRUE,seq="\t")
pattern_str_C = "Control Neurones"
pattern_str_D = "Diabetic Neurones"
library(stringr)
# Flag samples whose organism annotation matches each group pattern.
judge_Cneu = !is.na(str_extract(pattern = pattern_str_C, metadata$Sample_organism))
judge_Dneu = !is.na(str_extract(pattern = pattern_str_D, metadata$Sample_organism))
C_sample_names = metadata$Sample_status[judge_Cneu]
D_sample_names = metadata$Sample_status[judge_Dneu]
C_sample = genematrix[C_sample_names]
D_sample = genematrix[D_sample_names]
# Fix: combine the two groups column-wise. The original used rbind(), which
# stacks rows and breaks the gene-per-row layout required by the ENSEMBL
# rownames assigned below.
T1_result = cbind(D_sample, C_sample)
rownames(T1_result) = genematrix$ENSEMBL
# Fix: save() writes a binary RData file regardless of the extension; use
# write.csv() so the output really is a CSV.
write.csv(T1_result, file = "C_D.csv")
# -------------2-----------------------
# Per-gene group means, fold change (diabetic / control) and two-sample
# equal-variance t-test p-values.
C_avg = apply(C_sample, 1, mean)
D_avg = apply(D_sample, 1, mean)
Fold_change = D_avg / C_avg
# Preallocated via vapply instead of growing a vector with append().
p_list = vapply(seq_len(nrow(C_sample)), function(i) {
  t.test(C_sample[i, ], D_sample[i, ], var.equal = TRUE)$p.value
}, numeric(1))
T2_result = data.frame('Control_avg' = C_avg, 'Diabetes_avg' = D_avg,
                       'Fold_change' = Fold_change, 'p_list' = p_list)
rownames(T2_result) = genematrix$ENSEMBL
# Fix: write.csv instead of save() (see above).
write.csv(T2_result, file = "T2_result.csv")
# -------------3-----------------------
# Map ENSEMBL ids to gene symbols. Fix: bitr() comes from clusterProfiler,
# which the original only loaded in section 5 -- load it (and the
# annotation database) before first use.
if (!requireNamespace("clusterProfiler", quietly = TRUE))
  install.packages("clusterProfiler")
library(clusterProfiler)
if (!requireNamespace("org.Hs.eg.db", quietly = TRUE))
  BiocManager::install("org.Hs.eg.db")
library(org.Hs.eg.db)
symbol_ID = bitr(genematrix$ENSEMBL, fromType = "ENSEMBL", toType = "SYMBOL",
                 OrgDb = org.Hs.eg.db, drop = FALSE)
T2_result$SYMBOL = symbol_ID$SYMBOL
# -------------4-----------------------
T2_result$Fold_change = log2(T2_result$Fold_change)
library(ggplot2)
cut_off_pvalue = 0.05
# Fix: use a positive threshold. The original log2(1/1.2) is negative, so
# abs(Fold_change) >= cut_off_logFC was always TRUE and the fold-change
# filter never actually applied (here and in the label subset below).
cut_off_logFC = log2(1.2)
# Classify genes: Up/Down when significant AND past the fold-change cutoff.
T2_result$change = ifelse(T2_result$p_list < cut_off_pvalue & abs(T2_result$Fold_change) >= cut_off_logFC,
                          ifelse(T2_result$Fold_change > 0, 'Up', 'Down'),
                          'Stable')
# annotate gene labels (only for non-stable genes)
T2_result$delabel <- NA
library(ggrepel)
for (i in seq_len(nrow(T2_result))) {
  # Fix: the original compared the numeric Fold_change column to "Stable"
  # (always TRUE after coercion); the intended test is on `change`.
  if (T2_result$change[i] != "Stable") {
    T2_result$delabel[i] =
      str_extract_all(T2_result$SYMBOL[i], "([A-Z0-9]+)")[[1]][1]
  }
}
# draw volcano graph
p = ggplot(
  data = T2_result, aes(x = Fold_change, y = -log10(p_list), col = change)) +
  geom_point(alpha = 0.4, size = 2.5) +
  # set color
  scale_color_manual(values = c("blue", "grey", "red")) +
  theme_minimal() +
  # add annotation
  geom_label_repel(
    data = subset(T2_result, T2_result$p_list < 0.05 & abs(T2_result$Fold_change) >= cut_off_logFC),
    aes(label = delabel),
    size = 4,
    box.padding = unit(0.35, "lines"),
    point.padding = unit(0.3, "lines")) +
  # draw lines
  geom_vline(xintercept = c(-cut_off_logFC, cut_off_logFC), lty = 4, col = "grey", lwd = 0.8) +
  geom_hline(yintercept = -log10(cut_off_pvalue), lty = 4, col = "grey", lwd = 0.8) +
  # draw labels
  labs(x = "log2(fold change)",
       y = "-log10(p-value)") +
  theme_bw() +
  theme(plot.title = element_text(hjust = 0.5),
        legend.position = "right",
        legend.title = element_blank())
# save file
ggsave(p, filename = 'result.jpg', dpi = 300, width = 12, height = 9)
# -------------5-----------------------
# GO Biological Process enrichment on the differentially expressed genes.
dif_gene = T2_result$SYMBOL[T2_result$change != "Stable"]
ego = enrichGO(gene = dif_gene,
               OrgDb = 'org.Hs.eg.db',
               keyType = "SYMBOL",
               ont = 'BP',
               universe = T2_result$SYMBOL,
               pvalueCutoff = 1,
               pAdjustMethod = "none",
               minGSSize = 10,
               maxGSSize = 20,
               readable = FALSE) # bug here: readable=TRUE errors; should be reported to the clusterProfiler developers
if (!requireNamespace("GOplot", quietly = TRUE))
  install.packages("GOplot")
library(GOplot)
if (!requireNamespace("enrichplot", quietly = TRUE))
  install.packages("enrichplot")
library(enrichplot)
library(DOSE)
library(ggnewscale)
# Enrichment visualisations.
g1 = dotplot(ego, showCategory = 30)
ggsave(g1, filename = "dotplot.png", dpi = 600, width = 12, height = 9)
g2 = heatplot(ego) # no fold change here or generate randomly
ggsave(g2, filename = "heatplot.png", dpi = 600, width = 12, height = 9)
g3 = cnetplot(ego, categorySize = "pvalue", circular = TRUE, colorEdge = TRUE)
# Fix: the original saved g2 again here instead of the cnetplot (g3).
ggsave(g3, filename = "cneplot.png", dpi = 600, width = 12, height = 9)
g4 = emapplot(ego)
# cut redundancy
ego2 = simplify(ego, cutoff = 0.7, by = "p.adjust", select_fun = min)
|
7f5bb022a3bb5110fcf4aa7de95fcba3b748d8e7
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/pollen/examples/pollen_season.Rd.R
|
f63893625c6f074aa0e73e2d4ef3da5ad30e9288
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 534
|
r
|
pollen_season.Rd.R
|
library(pollen)
### Name: pollen_season
### Title: A Pollen Season Function
### Aliases: pollen_season
### Keywords: pollen pollen, season
### ** Examples
# Example pollen counts shipped with the package.
data(pollen_count)
# Season boundaries for birch pollen at the 'Oz' site (95% method).
oz_counts <- subset(pollen_count, site == 'Oz')
pollen_season(value = oz_counts$birch, date = oz_counts$date, method = "95")
# Same computation for alder pollen at the 'Atlantis' site.
atlantis_counts <- subset(pollen_count, site == 'Atlantis')
pollen_season(value = atlantis_counts$alder, date = atlantis_counts$date, method = "95")
# Per-site hazel seasons, combined into one data frame keyed by site.
library('purrr')
pollen_count %>% split(., .$site) %>%
  map_df(~pollen_season(value=.$hazel, date=.$date, method="95"), .id="site")
|
5822691e7b1b05d835a75309eb5b6c0b7223f0af
|
f8eb55c15aec611480ede47d4e15e5a6e472b4fa
|
/analysis/xxxx_rebalance_during_dd.R
|
b206803afeec0c6b94f0450219016e1b2d2c5132
|
[] |
no_license
|
nmaggiulli/of-dollars-and-data
|
a4fa71d6a21ce5dc346f7558179080b8e459aaca
|
ae2501dfc0b72d292314c179c83d18d6d4a66ec3
|
refs/heads/master
| 2023-08-17T03:39:03.133003
| 2023-08-11T02:08:32
| 2023-08-11T02:08:32
| 77,659,168
| 397
| 32
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,678
|
r
|
xxxx_rebalance_during_dd.R
|
cat("\014") # Clear your console
# NOTE(review): rm(list = ls()) and setwd() at the top of a script are
# destructive for anyone sourcing it interactively; kept per project style.
rm(list = ls()) #clear your environment
########################## Load in header file ######################## #
setwd("~/git/of_dollars_and_data")
# header.R is expected to define importdir/exportdir and shared helpers
# such as drawdown_path() and of_dollars_and_data_theme -- TODO confirm.
source(file.path(paste0(getwd(),"/header.R")))
########################## Load in Libraries ########################## #
library(scales)
library(readxl)
library(lubridate)
library(zoo)
library(ggrepel)
library(tidyverse)
# Output folder for this analysis (created if missing).
folder_name <- "xxxx_rebalance_during_dd"
out_path <- paste0(exportdir, folder_name)
dir.create(file.path(paste0(out_path)), showWarnings = FALSE)
########################## Start Program Here ######################### #
#Bring in raw data
# Monthly S&P 500 and bond returns; the first 7 lines of the CSV are
# preamble (skip = 7), and data before Nov 1926 is discarded.
raw <- read.csv(paste0(importdir, "/xxxx_bond_stock_rets/bond_stock_monthly_rets.csv"),
skip = 7) %>%
rename(date = X,
ret_sp500 = USD,
ret_bond = USD.1) %>%
select(-`X.1`) %>%
filter(!is.na(ret_sp500)) %>%
mutate(date = as.Date(date, format = "%m/%d/%Y")) %>%
filter(date >= "1926-11-01")
# Do Growth of dollar
# Build cumulative growth-of-$1 indices for stocks and bonds, row by row.
for(i in 1:nrow(raw)){
if(i == 1){
raw[i, "index_sp500"] <- 1
raw[i, "index_bond"] <- 1
} else{
raw[i, "index_sp500"] <- raw[(i-1), "index_sp500"] * (1 + raw[i, "ret_sp500"])
raw[i, "index_bond"] <- raw[(i-1), "index_bond"] * (1 + raw[i, "ret_bond"])
}
}
# Get all years
all_years <- sort(unique(year(raw$date)))
# Result accumulator (build_data below fills and returns its own copy).
final_results <- data.frame()
# Simulate, for each feasible start year, two stock/bond portfolios over an
# n_years horizon:
#   * baseline: rebalanced back to target weights every June
#   * dd:       rebalanced only when the S&P 500 drawdown first breaches
#               dd_limit within an episode (episode resets at a new high)
# and record the final wealth premium of the drawdown-rebalanced portfolio.
#
# Args:
#   n_years:  horizon in years for each simulation window
#   dd_limit: drawdown trigger as a negative fraction (e.g. -0.2 for -20%)
#   w_sp500:  target stock weight; bond weight is 1 - w_sp500
# Returns: a data frame with one summary row per start year.
# Depends on objects defined earlier in this script: raw, all_years, and
# the drawdown_path() helper sourced from header.R.
build_data <- function(n_years, dd_limit, w_sp500){
  # Fix: accumulate locally instead of silently copying the global (empty)
  # final_results data frame into the function's scope.
  final_results <- data.frame()
  # Only start years with a full n_years window available are simulated.
  n_sims <- length(all_years) - n_years
  tmp_all_years <- all_years[1:n_sims]
  counter <- 1
  for(y in tmp_all_years){
    print(y)
    # Monthly data for the window [Nov of year y, Nov of year y + n_years].
    tmp <- raw %>%
      filter(date >= paste0(y, "-11-01"),
             date <= paste0(y+n_years, "-11-01"))
    # S&P 500 drawdown path over the window (pct <= 0).
    dd_sp500 <- tmp %>%
      select(date, index_sp500) %>%
      drawdown_path() %>%
      rename(dd_sp500 = pct)
    tmp <- tmp %>%
      left_join(dd_sp500)
    dd_rebal <- 0  # 1 while inside an already-triggered drawdown episode
    for(i in seq_len(nrow(tmp))){
      mt <- month(tmp[i, "date"])
      curr_dd <- tmp[i, "dd_sp500"]
      if(i == 1){
        # Both portfolios start at target weights on $1.
        tmp[i, "rebal_sp500"] <- w_sp500
        tmp[i, "rebal_bond"] <- (1-w_sp500)
        tmp[i, "dd_rebal_sp500"] <- w_sp500
        tmp[i, "dd_rebal_bond"] <- (1-w_sp500)
        tmp[i, "dd_rebalance"] <- 0
      } else{
        if(curr_dd < dd_limit & dd_rebal == 0){
          # Drawdown breach: rebalance the dd portfolio back to targets.
          tmp[i, "dd_rebalance"] <- 1
          tmp[i, "dd_rebal_sp500"] <- tmp[(i-1), "port_dd"] * w_sp500 * (1 + tmp[i, "ret_sp500"])
          tmp[i, "dd_rebal_bond"] <- tmp[(i-1), "port_dd"] * (1-w_sp500) * (1 + tmp[i, "ret_bond"])
          # Set dd rebalance timer to 1 until a new all-time high.
          dd_rebal <- 1
        } else{
          tmp[i, "dd_rebalance"] <- 0
          tmp[i, "dd_rebal_sp500"] <- tmp[(i-1), "dd_rebal_sp500"] * (1 + tmp[i, "ret_sp500"])
          tmp[i, "dd_rebal_bond"] <- tmp[(i-1), "dd_rebal_bond"] * (1 + tmp[i, "ret_bond"])
        }
        if(mt == 6){
          # Annual June rebalance for the baseline portfolio.
          tmp[i, "rebal_sp500"] <- tmp[(i-1), "port_rebal"] * w_sp500 * (1 + tmp[i, "ret_sp500"])
          tmp[i, "rebal_bond"] <- tmp[(i-1), "port_rebal"] * (1-w_sp500) * (1 + tmp[i, "ret_bond"])
        } else{
          tmp[i, "rebal_sp500"] <- tmp[(i-1), "rebal_sp500"] * (1 + tmp[i, "ret_sp500"])
          tmp[i, "rebal_bond"] <- tmp[(i-1), "rebal_bond"] * (1 + tmp[i, "ret_bond"])
        }
      }
      # Wait for a new all-time high (drawdown back to 0) before re-arming.
      if(curr_dd == 0){
        dd_rebal <- 0
      }
      tmp[i, "port_rebal"] <- tmp[i, "rebal_sp500"] + tmp[i, "rebal_bond"]
      tmp[i, "port_dd"] <- tmp[i, "dd_rebal_sp500"] + tmp[i, "dd_rebal_bond"]
      tmp[i, "dd_premium"] <- tmp[i, "port_dd"]/tmp[i, "port_rebal"] - 1
    }
    # One summary row per start year.
    final_results[counter, "start_year"] <- y
    final_results[counter, "n_years"] <- n_years
    final_results[counter, "dd_limit"] <- dd_limit
    final_results[counter, "w_sp500"] <- w_sp500
    final_results[counter, "final_dd_premium"] <- tmp[nrow(tmp), "dd_premium"]
    final_results[counter, "n_dd_rebals"] <- sum(tmp$dd_rebalance, na.rm = TRUE)
    counter <- counter + 1
  }
  return(final_results)
}
# Run the simulation for 10- and 20-year horizons, each with -20% and -30%
# drawdown triggers, on an 80/20 portfolio.
n_yr <- 10
final_results_10yr <- build_data(n_yr, -0.2, 0.8) %>%
bind_rows(build_data(n_yr, -0.3, 0.8))
n_yr <- 20
final_results_20yr <- build_data(n_yr, -0.2, 0.8) %>%
bind_rows(build_data(n_yr, -0.3, 0.8))
# Stack all runs and relabel the trigger as a readable string.
to_plot <- final_results_10yr %>%
bind_rows(final_results_20yr) %>%
mutate(dd_limit = case_when(
dd_limit == -0.2 ~ "20%+",
dd_limit == -0.3 ~ "30%+",
TRUE ~ "Error"
))
# Summary stats per horizon and trigger.
dd_limit_results <- to_plot %>%
group_by(n_years, dd_limit) %>%
summarise(n_obs = n(),
mean_dd_premium = mean(final_dd_premium),
mean_rebals = mean(n_dd_rebals)) %>%
ungroup()
file_path <- paste0(out_path, "/rebal_by_years.jpeg")
source_string <- paste0("Source: Returns 2.0, (OfDollarsAndData.com)")
note_string <- str_wrap(paste0("Note: Assumes annual rebalance is done each June."),
width = 85)
# Density of the final dd-rebalance premium, faceted by horizon.
# NOTE(review): the x-axis label says "After 10 Years" even though the
# 20-year facet is included -- confirm whether the label should be generic.
plot <- ggplot(to_plot, aes(x = final_dd_premium, fill = as.factor(dd_limit))) +
geom_density(alpha = 0.5) +
facet_wrap(~n_years) +
of_dollars_and_data_theme +
ggtitle(paste0("Drawdown Rebalance Premium by Time")) +
theme(legend.title = element_blank(),
legend.position = "bottom") +
labs(x = "Premium After 10 Years" , y = paste0("Frequency"),
caption = paste0(source_string, "\n", note_string))
# Save the plot
ggsave(file_path, plot, width = 15, height = 12, units = "cm")
# ############################ End ################################## #
|
280ce0878fa6751624891a51cca076e39051afd2
|
c87d57b2a65caa9fed284f80ded4f81b117d846b
|
/Exercise/app.R
|
c07c4d8d09e3556ff9943db9bd0559325044dadb
|
[] |
no_license
|
avijandiran/My-shiny-dashboards
|
7bdb77b114bc92d542049627c6a63f2221a08d9f
|
3ce2059d4510e009bd95d74684461ae0c9b79b94
|
refs/heads/master
| 2020-05-05T06:34:00.910896
| 2019-06-13T09:56:39
| 2019-06-13T09:56:39
| 179,793,508
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,840
|
r
|
app.R
|
library(ggplot2)
library(shiny)
# UI: a title, three filter dropdowns (manufacturer / transmission /
# cylinders, each with an "All" option), the data table, and a verbatim
# output echoing the rows selected in the table.
# `mpg` is the example data set from ggplot2, loaded at the top of the file.
ui <- fluidPage(
titlePanel("Basic DataTable"),
# NOTE(review): this outer fluidRow wraps everything below, including the
# other fluidRow calls -- possibly unintended nesting; confirm the layout.
fluidRow(
# Create a new Row in the UI for selectInputs
fluidRow(
column(4,
selectInput("man",
"Manufacturer:",
c("All",
unique(as.character(mpg$manufacturer))))
),
column(4,
selectInput("trans",
"Transmission:",
c("All",
unique(as.character(mpg$trans))))
),
column(4,
selectInput("cyl",
"Cylinders:",
c("All",
unique(as.character(mpg$cyl))))
)
),
# Create a new row for the table.
fluidRow (
column(12,
DT::dataTableOutput("table"))
),
# Text output showing the user's current row selection.
fluidRow(column(12,
verbatimTextOutput("default")
))))
# Server: filter mpg by the three dropdown inputs, render the filtered
# table, and print whichever rows the user selects in the table.
server <- function(input, output) {
  # Reactive filtered data set. Replaces the original `data1 <<- data`
  # global assignment, which leaked session state into the global
  # environment and broke isolation between concurrent sessions.
  filtered_data <- reactive({
    data <- mpg
    if (input$man != "All") {
      data <- data[data$manufacturer == input$man, ]
    }
    if (input$cyl != "All") {
      data <- data[data$cyl == input$cyl, ]
    }
    if (input$trans != "All") {
      data <- data[data$trans == input$trans, ]
    }
    data
  })

  # Render the filtered table with the same display options as before.
  output$table <- DT::renderDataTable(DT::datatable(
    filtered_data(),
    options = list(
      searching = TRUE,
      autoWidth = TRUE,
      paging = FALSE,
      scrollX = TRUE,
      scrollY = TRUE,
      scrollCollapse = TRUE,
      fixedHeader = TRUE)
  ))

  # Print the currently selected rows; req() waits until a selection exists.
  output$default <- renderPrint({
    req(input$table_rows_selected)
    filtered_data()[input$table_rows_selected, ]
  })
}
shinyApp(ui=ui,server = server)
|
2be24568a8c3c94d758a87a4fb2a16a9e13ba3ef
|
8d119d59dedf7994c9b14a637fc069d3a3e0494c
|
/RPackageSource/man/ModelSummary.Rd
|
6e27f2efd86aa62af76c2fbc159ff560eaa84cf7
|
[] |
no_license
|
CBIIT/R-cometsAnalytics
|
3f77bf818050eefbcef4e5a74a5cdab0e17157dc
|
bc0da393319a7fc2ec53275f9545d24b982faabc
|
refs/heads/master
| 2023-08-24T04:34:16.888409
| 2023-08-04T20:28:26
| 2023-08-04T20:28:26
| 64,139,217
| 8
| 9
| null | 2023-09-13T18:43:04
| 2016-07-25T14:02:36
|
HTML
|
UTF-8
|
R
| false
| true
| 2,660
|
rd
|
ModelSummary.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/outputColumns.R
\name{ModelSummary}
\alias{ModelSummary}
\title{ModelSummary table}
\description{
The ModelSummary data frame contains one row of model
summary results for each exposure/outcome combination.
Depending on the model run and options specified,
all the below names may not appear in the data frame.
\itemize{
\item{\code{adjspec}}{ Original adjustment variables specified}
\item{\code{adjvars}}{ Adjustment variables included in the model}
\item{\code{adjvars.removed}}{ Adjustment variables removed from the model}
\item{\code{adj_uid}}{ Adjustment variable universal ids}
\item{\code{adj.r.squared}}{ Adjusted R-squared}
\item{\code{aic}}{ Akaike information criterion}
\item{\code{bic}}{ Bayesian information criterion}
\item{\code{cohort}}{ String passed into \code{\link{runModel}}}
\item{\code{converged}}{ TRUE or FALSE for model convergence}
\item{\code{deviance}}{ Deviance of the fitted model}
\item{\code{df.null}}{ NULL model degrees of freedom}
\item{\code{df.residual}}{ Residual degrees of freedom}
\item{\code{exposure}}{ Exposure variable}
\item{\code{exposure_uid}}{ Exposure universal id}
\item{\code{exposurespec}}{ Exposure variable}
\item{\code{loglik}}{ Log-likelihood of the fitted model}
\item{\code{message}}{ Error message produced from the modeling function}
\item{\code{model}}{ Model label from \code{\link{getModelData}}}
\item{\code{model_function}}{ Model function used in \code{\link{runModel}}}
\item{\code{model_number}}{ Model number used in \code{\link{runAllModels}}}
\item{\code{nobs}}{ Number of observations used}
\item{\code{null.deviance}}{ Deviance of the NULL model}
\item{\code{outcome}}{ Outcome variable}
\item{\code{outcomespec}}{ Outcome variable}
\item{\code{outcome_uid}}{ Outcome universal id}
\item{\code{run}}{ Run number that can be used to link with the
\code{\link{Effects}} table}
\item{\code{runmode}}{ "Batch" or "Interactive"}
\item{\code{r.squared}}{ R-squared, the fraction of variance explained by the model}
\item{\code{sigma}}{ Square root of the estimated variance of the random error}
\item{\code{stratavar}}{ Stratum variable(s)}
\item{\code{strata}}{ Stratum level(s)}
\item{\code{term}}{ Variable in the model}
\item{\code{wald.pvalue}}{ P-value from the Wald test of the exposure variable.
Note that this test may be a multi-df test if the
exposure is categorical.}
}
}
\details{
Missing values will appear if a model did not converge, produced an error,
or was not run because of too many missing values in the outcome.
}
|
61f25ff26a60be0060f3f1e485c3f1199b445058
|
795be8da46ed5bbb3fe29898bd9e18ac9c068916
|
/AlgaePhyloAntibio.R
|
d5deb6a4f79f7183cb38839332268a9e2e7a895c
|
[] |
no_license
|
antropoteuthis/phylo_erythro
|
e39459d568808939275a998f13e0efe909129654
|
7b92fb61e90b0cfa4689da7fd94056989856c495
|
refs/heads/master
| 2020-03-18T04:16:24.045723
| 2018-07-24T17:27:21
| 2018-07-24T17:27:21
| 134,279,402
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 21,973
|
r
|
AlgaePhyloAntibio.R
|
##Microalgal Resistance Evolution##
# NOTE(review): wiping the workspace and resetting par() at the top of a
# shared analysis script is destructive for interactive users; kept as-is.
rm(list=ls())
par(ask=F)
###Load libraries
library(reshape2)
library(dplyr)
library(tidyverse)
library(stringr)
library(vegan)
library(ape)
library(phangorn)
library(phytools)
library(geiger)
library(phylobase)
library(fields)
library(phylosignal)
library(picante)
library(geomorph)
library(FactoMineR)
library(factoextra)
library(phylopath)
library(Rphylip)
library("arbutus", lib.loc="/Library/Frameworks/R.framework/Versions/3.3/Resources/library")
###Set working directory
setwd("~/Dropbox/Microalgas-antibioticos/")
getwd()
###Load raw data
# Per-concentration erythromycin response data and untreated controls.
read.csv("base_de_datos/erythro_data.csv") -> raw_data
read.csv("base_de_datos/controls.csv") -> controls
rownames(controls) = controls$Species
controls = controls[,-1]
names(controls) = c("cGrowth","cEQY", "cChlA", "cETR")
#Extract the mean effect across concentrations
keyvars = c("Species","dGrowth", "dEQY", "dChlA", "dETR")
d_data = raw_data[,keyvars]
d_byspp = split(d_data, d_data$Species)
# d_means/d_SEs start as a header row of variable names; species rows are
# rbind-ed on below and the header row is dropped afterwards.
d_means = keyvars
d_SEs = keyvars
# Standard error of the mean.
std.err <- function(a){sd(a)/sqrt(length(a))}
# Drop row 12 of N. gaditana -- presumably an outlier; TODO confirm why.
d_byspp$`Nannochloropsis gaditana` <- d_byspp$`Nannochloropsis gaditana`[-12,]
# Per-species column means and SEs across concentrations.
for(i in 1:length(d_byspp)){
d_byspp[[i]] %>% .[,-1] %>% apply(.,2,FUN=mean) %>% c(names(d_byspp[i]),.) %>% rbind(d_means,.) -> d_means
d_byspp[[i]] %>% .[,-1] %>% apply(.,2,FUN=std.err) %>% c(names(d_byspp[i]),.) %>% rbind(d_SEs,.) -> d_SEs
}
# Drop the header row, key rows by species, and convert to numeric frames.
d_means = d_means[-1,]
d_SEs = d_SEs[-1,]
colnames(d_means)[1] = "Species"
colnames(d_SEs)[1] = "Species"
rownames(d_means) = d_means[,1]
rownames(d_SEs) = d_SEs[,1]
d_means = as.data.frame(d_means[,-1])
d_SEs = as.data.frame(d_SEs[,-1])
for(i in 1:ncol(d_means)){
d_means[,i] <- d_means[,i] %>% as.character() %>% as.numeric()
d_SEs[,i] <- d_SEs[,i] %>% as.character() %>% as.numeric()
}
#Extracting the differences between minimal and maximal Erythromycin concentrations
d_change = keyvars
d_changeSEs = keyvars
for(i in 1:length(d_byspp)){
DSP <- d_byspp[[i]]
dSPchange = c(names(d_byspp)[i],1:4)
for(j in 2:5){
# Relative change: mean of the last 4 rows (highest concentrations) minus
# mean of the first 3 rows, scaled by the overall column mean.
dSPchange[j] <- round((mean(DSP[(nrow(DSP)-3):(nrow(DSP)),j])-mean(DSP[1:3,j]))/mean(DSP[,j]),3)
}
d_change <- rbind(d_change, dSPchange)
}
d_change <- d_change[-1,]
# Rename dX columns to mX (change-between-extremes metrics).
colnames(d_change) = str_replace_all(keyvars, "d", "m")
d_change = d_change[,-1]
d_change = as.data.frame(apply(d_change,2, as.numeric))
rownames(d_change) = sort(unique(d_data$Species))
# Endosymbiosis category per species, in the alphabetical order above --
# TODO confirm the coding (1/2/3) against the manuscript.
endosym = c(3,2,2,1,2,2,2,1)
d_all <- cbind(controls,d_means, d_change, endosym)
###Load phylogenies
# For each marker (nuclear concatenation, psbA, chloroplast 16S, 23S):
# read the tree, re-root and drop the outgroup, relabel tips with species
# names, replace selected branch lengths with a tiny value (presumably to
# avoid zero-length branches breaking chronos() -- TODO confirm), make the
# tree ultrametric, and align the trait data / SEs to the tip order.
Nphylo <- read.tree("Fasta/Concatenado_nuclear/extra_species_conca_nuclear")
#plot(Nphylo); nodelabels(frame = "none", font = 0.5); tiplabels(); edgelabels(frame = "none", font = 0.2, offset = 3)
Nphylo <- drop.tip(Nphylo, c(15,13,8,7,3,2,1))
Nphylo = reroot(Nphylo, 8)
Nphylo = drop.tip(Nphylo, 8)
Nphylo$tip.label <- c("Isochrysis galbana", "Dunaliella salina", "Tetraselmis suecica", "Nannochloropsis gaditana", "Cylindrotheca closterium", "Phaeodactylum tricornutum", "Chaetoceros gracilis", "Amphidinium carterae")
Nphylo$edge.length[c(5,11)] = mean(Nphylo$edge.length)/1000
Nultra = chronos(Nphylo)
d_all_N = d_all[match(Nultra$tip.label, rownames(d_all)),]
SE_N = d_SEs[match(Nultra$tip.label, rownames(d_SEs)),]
PSBphylo <- read.tree("Fasta/psbA/psbA_tree_grupo externo")
#plot(PSBphylo); nodelabels(); tiplabels()
PSBphylo = reroot(PSBphylo, 9)
PSBphylo <- drop.tip(PSBphylo, 9)
# The index vector reorders the species names to match this tree's tips.
PSBphylo$tip.label = c("Isochrysis galbana", "Dunaliella salina", "Tetraselmis suecica", "Nannochloropsis gaditana", "Cylindrotheca closterium", "Phaeodactylum tricornutum", "Chaetoceros gracilis", "Amphidinium carterae")[c(7,4,6,5,3,2,1,8)]
PSBphylo$edge.length[c(3,4,6)] = mean(PSBphylo$edge.length)/1000
PSBultra = chronos(PSBphylo)
d_all_PSB = d_all[match(PSBultra$tip.label, rownames(d_all)),]
SE_PSB = d_SEs[match(PSBultra$tip.label, rownames(d_SEs)),]
Ch16Sphylo <- read.tree("Fasta/16S_cloroplastos/16S_tree_grupo_externo")
#plot(Ch16Sphylo); nodelabels(); tiplabels()
Ch16Sphylo <- reroot(Ch16Sphylo, 8)
Ch16Sphylo <- drop.tip(Ch16Sphylo, 8)
Ch16Sphylo$tip.label = c("Isochrysis galbana", "Dunaliella salina", "Tetraselmis suecica", "Nannochloropsis gaditana", "Cylindrotheca closterium", "Phaeodactylum tricornutum", "Chaetoceros gracilis", "Amphidinium carterae")[c(6,5,7,4,1,2,3,8)]
Ch16Sphylo$edge.length[c(4,9)] = mean(Ch16Sphylo$edge.length)/1000
Ch16Sultra = chronos(Ch16Sphylo)
d_all_16S = d_all[match(Ch16Sultra$tip.label, rownames(d_all)),]
SE_16S = d_SEs[match(Ch16Sultra$tip.label, rownames(d_SEs)),]
Ch23Sphylo <- read.tree("Fasta/23s/23s_tree")
#plot(Ch23Sphylo); nodelabels(); tiplabels()
Ch23Sphylo <- reroot(Ch23Sphylo, 7)
Ch23Sphylo <- drop.tip(Ch23Sphylo, 7)
Ch23Sphylo$tip.label = c("Isochrysis galbana", "Dunaliella salina", "Tetraselmis suecica", "Nannochloropsis gaditana", "Cylindrotheca closterium", "Phaeodactylum tricornutum", "Chaetoceros gracilis", "Amphidinium carterae")[c(1,4,3,6,7,5,8,2)]
Ch23Sphylo$edge.length[c(12,4,5,6)] = mean(Ch23Sphylo$edge.length)/1000
Ch23Sultra = chronos(Ch23Sphylo)
d_all_23S = d_all[match(Ch23Sultra$tip.label, rownames(d_all)),]
SE_23S = d_SEs[match(Ch23Sultra$tip.label, rownames(d_SEs)),]
### Pack-up data and trees into iterable lists
# Parallel lists indexed by marker; downstream loops iterate 1:4 over them.
treelist <- list(Nultra, PSBultra, Ch16Sultra, Ch23Sultra)
data_list = list(d_all_N, d_all_PSB, d_all_16S, d_all_23S)
SElist = list(SE_N, SE_PSB, SE_16S, SE_23S)
names(data_list) = c("Nuclear", "PSBA", "16S", "23S")
names(SElist) = c("Nuclear", "PSBA", "16S", "23S")
### Estimate RF distance between trees
# Pairwise tree-distance matrices between the four marker trees:
# Robinson-Foulds symmetric difference and branch score (phangorn::treedist).
RFdiffs = matrix(nrow = 4, ncol = 4)
colnames(RFdiffs) = names(data_list)
rownames(RFdiffs) = names(data_list)
BSdiffs = RFdiffs
for(i in seq_along(treelist)){
  tree_i <- treelist[[i]]
  for(j in seq_along(treelist)){
    # Fix: compute treedist once per pair; the original called it twice
    # (once per matrix entry), doubling the work for identical results.
    dists <- treedist(tree_i, treelist[[j]])
    RFdiffs[i,j] = dists[1]  # Robinson-Foulds symmetric difference
    BSdiffs[i,j] = dists[2]  # branch score difference
  }
}
#write.csv(RFdiffs, "deliverables/Tree_difference/RF_tree_distances.csv")
#write.csv(BSdiffs, "deliverables/Tree_difference/BranchScore_differences.csv")
###Model testing and Phylogenetic Signals
# For each marker tree and each trait column: compute Blomberg's K
# (phylosig), then fit ten continuous-trait models (geiger::fitContinuous,
# including a star-tree BM via lambda = 0 rescaling), rank them by AICc,
# and record the best two plus a likelihood-ratio p-value between them.
AICc_list = list()
for(tree in 1:4){
i_tree <- treelist[[tree]]
# Star phylogeny (lambda = 0) used for the "starBM" model below.
startree <- rescale(i_tree, "lambda", 0)
class(i_tree) = "phylo"
i_dat <- data_list[[tree]]
# Measurement error: zero for the control and endosym columns, observed
# SEs for the d*/m* columns (SElist is reused for both blocks).
i_SE <- data.frame(rep(0, nrow(i_dat)), rep(0, nrow(i_dat)),rep(0, nrow(i_dat)),rep(0, nrow(i_dat)), SElist[[tree]],SElist[[tree]], rep(0, nrow(i_dat)))
names(i_SE)=names(i_dat)
print(names(data_list)[tree])
AIC_treei = matrix(nrow=ncol(i_dat), ncol = 7)
colnames(AIC_treei) = c("Variable", "Tree", "Best_model", "2nd_Best_model", "1_2LoglikRatio_Pvalue", "K", "p_K")
for(c in 1:ncol(i_dat)){
C = i_dat[,c]
names(C) = rownames(i_dat)
Cse = i_SE[,c]
names(Cse) = rownames(i_SE)
# Phylogenetic signal (Blomberg's K) with measurement error.
PSS_c <- phylosig(i_tree, C, se=Cse, test=T)
PSS_c <- c(PSS_c$K, PSS_c$P)
model_matrix = matrix("NA", nrow = 10, ncol = 2)
colnames(model_matrix) = c("aicc","lnL")
row.names(model_matrix) = c("BM", "white", "drift", "EB", "OU", "trend", "delta", "lambda", "kappa", "starBM")
for(j in 1:dim(model_matrix)[1]){
# The last row ("starBM") is BM on the star tree; all others are fit on
# the marker tree with the model named by the row.
if(j==nrow(model_matrix)){
temp_model <- fitContinuous(startree, C, model="BM", SE = Cse)$opt
}
else{
temp_model <- fitContinuous(i_tree, C, model=row.names(model_matrix)[j], SE = Cse)$opt
}
model_matrix[j, "aicc"] <- temp_model$aicc
model_matrix[j, "lnL"] <- temp_model$lnL
}
# Rank models by AICc (matrix held strings, hence the numeric coercion).
model_matrix %>% as.data.frame() -> model_df
model_df$aicc <- as.numeric(as.character(model_df$aicc))
model_df$lnL <- as.numeric(as.character(model_df$lnL))
model_df <- model_df[order(model_df$aicc),]
print(model_df)
# Likelihood-ratio test (1 df) between the best and second-best model.
Pchi <- (2*(model_df$lnL[1] - model_df$lnL[2])) %>% pchisq(df=1, lower.tail = F)
print(Pchi)
string_c <- c(names(i_dat)[c], names(data_list)[tree], rownames(model_df)[1], rownames(model_df)[2], Pchi, PSS_c[1:2])
names(string_c) = colnames(AIC_treei)
AIC_treei[c,] <- string_c
}
AICc_list[[tree]] <- as.data.frame(AIC_treei)
print(AIC_treei)
}
# One combined table across all four marker trees.
fullModelTesting <- rbind(AICc_list[[1]], AICc_list[[2]], AICc_list[[3]], AICc_list[[4]])
#write.csv(fullModelTesting, "deliverables/model_support/BestModels_wSE_wLL.csv")
### Arbutus model adequacy
# Model-adequacy checks (arbutus) for every tree/trait combination whose
# best-supported model was BM, read back from the saved model-support CSV.
supportBM <- read.csv("deliverables/model_support/BestModels_wSE_wLL.csv", row.names = 1) %>% .[which(.$Best_model == "BM"),]
names(treelist) = unique(supportBM$Tree)
madlist=list()
for(tree in 1:length(treelist)){
i_tree <- treelist[[tree]]
class(i_tree) = "phylo"
# Keep only the trait columns that were best fit by BM on this tree.
i_dat <- data_list[[tree]] %>% .[,which(names(.) %in% supportBM$Variable[supportBM$Tree == names(treelist)[tree]])]
MADtable = as.data.frame(matrix(ncol=8, nrow=nrow(supportBM[which(supportBM$Tree==names(treelist)[tree]),])))
# Columns 3:8 are the six arbutus adequacy-statistic p-values.
names(MADtable) = c("Variable", "Tree", "msig", "cvar", "svar", "sasr", "shgt", "dcfd")
for(c in 1:ncol(i_dat)){
MADtable$Variable[c] <- names(i_dat)[c]
MADtable$Tree[c] <- names(treelist)[tree]
C <- i_dat[,c]
names(C)=rownames(i_dat)
# Fit BM, build the unit tree, and compare observed PIC statistics
# against statistics from data simulated under the fitted model.
fitC <- fitContinuous(i_tree, C, model="BM")
UTC <- make_unit_tree(fitC)
picstat_data <- calculate_pic_stat(UTC)
sim <- simulate_char_unit(UTC)
picstat_sim <- calculate_pic_stat(sim)
compare_pic_stat(picstat_data, picstat_sim) %>% .$p.values -> MADtable[,c(3:8)]
}
madlist[[tree]] <- MADtable
}
madBMtable <- rbind(madlist[[1]], madlist[[2]], madlist[[3]], madlist[[4]])
#dGrowth in 23S, EB
# Same adequacy procedure, by hand, for the two traits whose best model on
# the 23S tree was EB (early burst) rather than BM.
dGr23S = d_all_23S$dGrowth
names(dGr23S) = rownames(d_all_23S)
fC_EB_dGr23S = fitContinuous(Ch23Sultra, dGr23S, model="EB")
UTEB_dGr23S = make_unit_tree(fC_EB_dGr23S)
picstatEB_dGr23S = calculate_pic_stat(UTEB_dGr23S)
simcharEB_dGr23S = simulate_char_unit(UTEB_dGr23S)
simpicstatEB_dGr23S = calculate_pic_stat(simcharEB_dGr23S)
compare_pic_stat(picstatEB_dGr23S, simpicstatEB_dGr23S) %T>% plot() %>% .$p.values %>% as.data.frame() -> MAD_EB_dGr23S
#mEQY in 23S, EB
mEQY23S = d_all_23S$mEQY
names(mEQY23S) = rownames(d_all_23S)
fC_EB_mEQY23S = fitContinuous(Ch23Sultra, mEQY23S, model="EB")
UTEB_mEQY23S = make_unit_tree(fC_EB_mEQY23S)
picstatEB_mEQY23S = calculate_pic_stat(UTEB_mEQY23S)
simcharEB_mEQY23S = simulate_char_unit(UTEB_mEQY23S)
simpicstatEB_mEQY23S = calculate_pic_stat(simcharEB_mEQY23S)
comparison = compare_pic_stat(picstatEB_mEQY23S, simpicstatEB_mEQY23S)
compare_pic_stat(picstatEB_mEQY23S, simpicstatEB_mEQY23S) %T>% plot() %>% .$p.values %>% as.data.frame() -> MAD_EB_mEQY23S
MADs = data.frame(MAD_EB_dGr23S, MAD_EB_mEQY23S)
names(MADs) = c("dGrowth", "mEQY")
###Contmaps
# One contMap PDF per tree/trait (blue-to-red colour ramp), skipping traits
# whose best model was white noise ("WN").
# NOTE(review): the model-testing loop labels that model "white", not "WN"
# -- confirm this filter actually excludes anything.
for(tree in 1:4){
i_tree <- treelist[[tree]]
i_dat <- data_list[[tree]]
evol_vars <- which(AICc_list[[tree]]$Best_model != "WN")
i_dat = i_dat[,evol_vars]
print(names(data_list)[tree])
for(i in 1:ncol(i_dat)){
coloring = as.numeric(i_dat[,i])
names(coloring) = rownames(i_dat)
coloring = coloring[!is.na(coloring)]
obj_i <- contMap(i_tree,coloring, plot=F)
# Override the default palette with a blue-to-red ramp.
obj_i$cols[1:length(obj_i$cols)] = colorRampPalette(c("blue","red"), space="Lab")(length(obj_i$cols))
pdf(paste("deliverables/contMaps/", names(data_list)[tree], colnames(i_dat)[i],".pdf",sep=""),width=6,height=10)
plot(obj_i, fsize=0.5)
title(names(i_dat)[i], cex.main = 0.5)
dev.off()
}}
###Phylomorphospace scattergrams with contMaps in the diagonal
# One multi-trait scattergram PDF per tree (phytools::fancyTree).
for(tree in 1:4){
i_tree <- treelist[[tree]]
i_dat <- data_list[[tree]]
evol_vars <- which(AICc_list[[tree]]$Best_model != "WN")
i_dat = i_dat[,evol_vars]
print(names(data_list)[tree])
pdf(paste("deliverables/phylomorphograms/", names(data_list)[tree],".pdf",sep=""),width=24,height=16)
fancyTree(i_tree, type="scattergram",X=as.matrix(i_dat),control=list(spin=FALSE, fsize=0.1), label = 'horizontal')
dev.off()
}
###Phylomorphospaces in focus for 23S
# mGrowth vs mETR phylomorphospace, coloured by dChlA.
vars = c("mGrowth", "mETR", "dChlA")
coloring = as.numeric(d_all_23S[,vars[3]])
names(coloring) = rownames(d_all_23S)
# NOTE(review): i_tree here is whatever the previous loop left behind
# (the 23S tree only because it is iterated last) -- fragile; confirm.
obj_i <- contMap(i_tree,coloring, plot=F)
obj_i$cols[1:length(obj_i$cols)] = colorRampPalette(c("blue","red"), space="Lab")(length(obj_i$cols))
pdf(paste("deliverables/chromophylomorphospaces/23S",vars[1], vars[2], vars[3], ".pdf",sep=""),width=16,height=12)
phylomorphospace(obj_i$tree, d_all_23S[,vars[1:2]], colors=obj_i$cols, label="horizontal", fsize=0.8)
title(main="X: mGrowth . Y: mETR. Color: dChla")
dev.off()
###Phylogenetic Generalized Least-Squares Regression Models
# For every tree and every ordered pair of traits, fit a Brownian-motion PGLS
# (nlme::gls with corBrownian) and store the slope p-value, plus the
# log-likelihood from pgls.Ives. Results are collected per tree and written
# to CSV below; a second copy truncates p-values above 0.05 to NA.
PGLSp_list = list()
logLs_list = list()
PGLStrunc_list = list()
for(tree in 1:4){
i_tree <- treelist[[tree]]
i_dat <- data_list[[tree]]
print(names(data_list)[tree])
#evol_vars <- which(AICc_list[[tree]]$Best_model != "WN")
#i_dat = i_dat[,evol_vars]
# Square matrices of pairwise results, labelled by trait name.
PGLS_pvalues = matrix(nrow=ncol(i_dat), ncol=ncol(i_dat))
colnames(PGLS_pvalues)=rownames(PGLS_pvalues)=colnames(i_dat)
logLs = PGLS_pvalues
for(i in 1:ncol(i_dat)){
CH_I=as.numeric(i_dat[,i])
names(CH_I) = rownames(i_dat)
# NOTE(review): CH_I is NA-filtered but CH_J below is not, so the two
# vectors can differ in length when trait i has missing taxa — verify.
CH_I = CH_I[!is.na(CH_I)]
for(j in 1:ncol(i_dat)){
CH_J=as.numeric(i_dat[,j])
names(CH_J) = rownames(i_dat)
if(!(i==j)){
if(!identical(CH_I,CH_J)){
print(colnames(i_dat)[c(i,j)])
# p-value of the CH_J slope (second row of the coefficient table).
gls(CH_I ~ CH_J, correlation = corBrownian(phy = i_tree), data = as.data.frame(cbind(CH_I,CH_J)), method = "ML") %>% summary() %>% .$tTable %>% as.data.frame() %>% .$p %>% .[2] -> PGLS_pvalues[i,j]
# NOTE(review): rep(CH_I,2)/rep(CH_J,2) duplicates every observation
# before pgls.Ives — confirm this duplication is intentional.
pgls.Ives(i_tree, rep(CH_I,2), rep(CH_J,2)) %>% .$logL -> logLs[i,j]
}
}
}}
# Keep only p-values significant at the 0.05 level in the truncated copy.
PGLS_pvaluesTRUNC = PGLS_pvalues
PGLS_pvaluesTRUNC[PGLS_pvaluesTRUNC>0.05] = NA
PGLSp_list[[tree]]<-PGLS_pvalues
logLs_list[[tree]]<-logLs
PGLStrunc_list[[tree]]<-PGLS_pvaluesTRUNC
}
# Persist the three result sets, one CSV per tree.
for(tree in 1:4){
write.csv(PGLSp_list[[tree]], paste("deliverables/PGLS/PGLSpvalues_", names(data_list)[tree], ".csv", sep=""))
write.csv(logLs_list[[tree]], paste("deliverables/PGLS/PGLSlogLikelihoods_", names(data_list)[tree], ".csv", sep=""))
write.csv(PGLStrunc_list[[tree]], paste("deliverables/PGLS/PGLSpvaluesTRUNC_", names(data_list)[tree], ".csv", sep=""))
}
# PGLS with measurement error (pgls.SEy) between dGrowth and mEQY on the
# 23S ultrametric tree, plus the same fit on a star phylogeny (lambda = 0)
# as a no-phylogeny baseline, and PHYLIP-based independent contrasts.
DG23S = d_all_23S$dGrowth
names(DG23S) = rownames(d_all_23S)
MEQY23S = d_all_23S$mEQY
names(MEQY23S) = rownames(d_all_23S)
DG23S_SE = SE_23S$dGrowth
names(DG23S_SE) = rownames(d_all_23S)
# NOTE(review): this pulls SE_23S$dEQY into a variable named MEQY23S_SE —
# confirm the standard errors should not come from an mEQY column instead.
MEQY23S_SE = SE_23S$dEQY
names(MEQY23S_SE) = rownames(d_all_23S)
pglsDATA = data.frame(DG23S, MEQY23S)
rownames(pglsDATA) = rownames(d_all_23S)
# Star phylogeny: Pagel's lambda rescaled to 0 removes all shared history.
star23S = rescale(Ch23Sultra,"lambda",0)
Me_Dg <- pgls.SEy(model = MEQY23S~DG23S, data = pglsDATA, se=MEQY23S_SE, tree = Ch23Sultra)
Me_Dg_star <- pgls.SEy(model = MEQY23S~DG23S, data = pglsDATA, se=MEQY23S_SE, tree = star23S)
pgls23tree = Ch23Sultra
class(pgls23tree) <- "phylo"
# NOTE(review): hard-coded local PHYLIP install path — not portable.
PIC_meqy_dgrowth = Rcontrast(pgls23tree, pglsDATA, path="~/Downloads/phylip-3.695/exe")
PIC_meqy_dgrowth$Contrasts %>% cor.table()
cor.table(pglsDATA)
###Phenograms with uncertainty
# Draw a phenogram of trait `a` (values for the taxa of data frame `b`) on
# tree `c`, overlaying 51 semi-transparent blue traces that interpolate
# between the 95% CI bounds of the ancestral state reconstruction and the
# point estimates, then the point-estimate phenogram in black on top.
Phenogram <- function(a,b,c){
contTrait <- a
names(contTrait) = rownames(b)
# Drop taxa with missing trait values and prune the tree to match.
contTrait = contTrait[!is.na(contTrait)]
treeI = drop.tip(c,which(!(c$tip.label %in% names(contTrait))))
# Ancestral character estimation with 95% confidence intervals.
A<-fastAnc(treeI,contTrait,CI=TRUE)
# NOTE(review): paintree is computed but never used below; also note it
# indexes c$tip rather than c$tip.label — confirm whether it can be removed.
paintree<-paintSubTree(treeI,node=length(c$tip)+1,"1")
# Two-digit hex alpha values "00","00","01",... used to fade the traces.
trans<-as.character(floor(0:50/2))
trans[as.numeric(trans)<10]<- paste("0", trans[as.numeric(trans)<10],sep="")
for(i in 0:50){
# p goes from 0 (CI bound) toward 1 (point estimate) across iterations.
p<-i/length(trans)
phenogram(treeI,c(contTrait,(1-p)*A$CI95[,1]+p*A$ace), colors=setNames(paste("#0000ff",trans[i+1],sep=""),1), add=i>0, ftype="off")
phenogram(treeI,c(contTrait,(1-p)*A$CI95[,2]+p*A$ace), colors=setNames(paste("#0000ff",trans[i+1],sep=""),1), add=TRUE, ftype="off")
}
# Point-estimate phenogram drawn last, in solid black.
phenogram(treeI,c(contTrait,A$ace),add=TRUE, colors=setNames("black",1), ftype="off")
}
# One uncertainty phenogram PDF per tree and per trait with signal (non-WN).
for(tree in 1:4){
i_tree <- treelist[[tree]]
i_dat <- data_list[[tree]]
evol_vars <- which(AICc_list[[tree]]$Best_model != "WN")
i_dat = i_dat[,evol_vars]
print(names(data_list)[tree])
for(i in 1:ncol(i_dat)){
pdf(paste("deliverables/phenograms/", names(data_list)[tree], names(i_dat)[i], ".pdf",sep=""),width=12,height=12)
Phenogram(i_dat[,i], i_dat, i_tree)
dev.off()
}
}
### Uncertainty estimates
# Summarize ancestral-state uncertainty per trait and tree: mean ancestral
# estimate, mean width of the 95% CI, and a rescaled version of that width.
# Built row-by-row onto a character vector of headers, converted to a data
# frame afterwards.
uncertainty = c("Variable", "Tree", "Mean_ACE", "Mean_CI95_range", "Rel_CI95_uncertainty")
for(tree in 1:4){
i_tree <- treelist[[tree]]
i_dat <- data_list[[tree]]
evol_vars <- which(AICc_list[[tree]]$Best_model != "WN")
i_dat = i_dat[,evol_vars]
for(c in 1:ncol(i_dat)){
var_i <- i_dat[,c]
names(var_i) = rownames(i_dat)
ace_i <- fastAnc(i_tree, var_i, CI=TRUE)
# Width of the 95% CI at each internal node.
CI95r <- apply(ace_i$CI95, 1, function(x){abs(x[1]-x[2])})
mCI95 <- mean(CI95r)
print(mCI95)
# NOTE(review): the offset uses min(abs(...)) over both the trait values
# and the CI widths while the denominator is the trait range — confirm
# this is the intended normalization.
scaled_mCi95 <- (mCI95 - min(abs(c(var_i, CI95r))))/(max(var_i) - min(var_i))
uncertainty <- rbind(uncertainty, c(names(i_dat)[c], names(data_list)[tree], mean(ace_i$ace), mCI95, scaled_mCi95))
}
}
# Promote the first row to column names and coerce to a data frame.
rownames(uncertainty) = 1:nrow(uncertainty)
colnames(uncertainty) = uncertainty[1,]
uncertainty = uncertainty[-1,]
uncertainty <- as.data.frame(uncertainty)
# NOTE(review): sapply(..., as.character) returns a matrix, undoing the
# as.data.frame on the previous line — confirm the intended final class.
uncertainty <- sapply(uncertainty, as.character)
uncertainty[,3:5] <- sapply(uncertainty[,3:5], as.numeric)
#write.csv(uncertainty, "deliverables/model_support/uncertainty.csv")
###PCA ecotoxospace
# Per tree: PCA on the trait columns (dropping columns 1:4 and 13), plot a
# phylomorphospace of PC1 vs PC2 colored by PC3, and test multivariate
# phylogenetic signal (physignal) on the first four PCs.
PCPS_list = list()
for(tree in 1:4){
i_tree <- treelist[[tree]]
i_dat <- data_list[[tree]]
PCA(i_dat[,-c(1:4,13)]) -> Pca
coloring = as.numeric(Pca$ind$coord[,3])
names(coloring) = rownames(Pca$ind$coord)
obj_i <- contMap(i_tree,coloring, plot=F)
obj_i$cols[1:length(obj_i$cols)] = colorRampPalette(c("yellow","purple"), space="Lab")(length(obj_i$cols))
pdf(paste("deliverables/chromophylomorphospaces/PCA_", names(data_list)[tree], ".pdf",sep=""),width=14,height=10)
# NOTE(review): the explained-variance percentages in the axis labels and
# title are hard-coded (45/24/23%) and may not match each tree's PCA.
phylomorphospace(obj_i$tree, Pca$ind$coord[,1:2], colors=obj_i$cols, label="horizontal", fsize=0.8, xlab = "PC1 (45%)", ylab = "PC2 (24%)")
title(paste("Color = PC3 (23%) --- Tree: ", names(data_list)[tree], sep = ""))
dev.off()
PCdat_i <- Pca$ind$coord[,1:4]
print(names(data_list)[tree])
physignal(PCdat_i, i_tree) -> PCPSi
c(PCPSi$phy.signal, PCPSi$pvalue) -> PCPS_list[[tree]]
}
# Assemble the per-tree K statistics and p-values into one table and save.
names(PCPS_list) = names(data_list)
as.data.frame(rbind(PCPS_list[[1]], PCPS_list[[2]], PCPS_list[[3]], PCPS_list[[4]])) -> PCPS_table
names(PCPS_table) = c("multivariate_K", "p_value")
rownames(PCPS_table) = names(data_list)
write.csv(PCPS_table, "deliverables/PCA/physignal.csv")
# PCA on the full dataset (d_all): variable contributions to the first three
# axes and two biplots (axes 1-2 and 4-3), colored by contribution.
PCA(d_all[,-c(1:4,13)]) -> Pca
Pca %>% fviz_contrib(choice="var", axes=1, sort.val="desc")
Pca %>% fviz_contrib(choice="var", axes=2, sort.val="desc")
Pca %>% fviz_contrib(choice="var", axes=3, sort.val="desc")
Pca %>% fviz_pca_biplot( col.var="contrib", gradient.cols = c("#00AFBB", "#E7B800", "#FC4E07"), repel = TRUE)
Pca %>% fviz_pca_biplot(axes=c(4,3), col.var="contrib", gradient.cols = c("#00AFBB", "#E7B800", "#FC4E07"), repel = TRUE)
#PhylPCA
# Phylogenetic PCA per tree (phyl.pca on the same trait subset), with a
# biplot whose axis labels carry the explained variance, and per-variable
# loading plots for PC1 and PC2 (sign-flipped with the leading minus).
for(tree in 1:4){
i_tree <- treelist[[tree]]
i_dat <- data_list[[tree]]
ppca_i <- phyl.pca(i_tree, i_dat[,-c(1:4,13)])
ppca_i %>% biplot(main=paste(signif(summary(ppca_i)$importance[2,1]*100,3),"%"), ylab=paste(signif(summary(ppca_i)$importance[2,2]*100,3),"%"), cex = .6, expand =100)
ppca_i$L %>% as.data.frame() %>% ggplot() + geom_point(mapping = aes(x=rownames(ppca_i$L), y=-PC1)) + theme(axis.text.x = element_text(angle = 50, hjust = 1))
ppca_i$L %>% as.data.frame() %>% ggplot() + geom_point(mapping = aes(x=rownames(ppca_i$L), y=-PC2)) + theme(axis.text.x = element_text(angle = 50, hjust = 1))
}
###Best Model parameters
# Refit the best-supported model per trait/tree with fitContinuous, supplying
# measurement error (SE) for the two SE-bearing columns, and collect the
# fitted parameters (sigma^2, z0, EB's a stored in the "alpha" column, and
# the trend slope) into one table written to CSV.
modelpars = c("Variable", "Tree", "model", "sigsq", "z0", "alpha", "slope")
for(tree in 1:4){
i_tree <- treelist[[tree]]
i_dat <- data_list[[tree]]
# SEs: zero everywhere except columns 5 and 6, which use SElist.
i_SE <- data.frame(rep(0, nrow(i_dat)), rep(0, nrow(i_dat)),rep(0, nrow(i_dat)),rep(0, nrow(i_dat)), SElist[[tree]],SElist[[tree]], rep(0, nrow(i_dat)))
names(i_SE)=names(i_dat)
evol_vars <- which(AICc_list[[tree]]$Best_model != "WN")
# NOTE(review): i_dat is subset to evol_vars but i_SE keeps all columns,
# so i_SE[,c] below can point at a different variable than i_dat[,c]
# whenever evol_vars drops columns — verify the column alignment.
i_dat = i_dat[,evol_vars]
for(c in 1:ncol(i_dat)){
model_c = fullModelTesting$Best_model[which(fullModelTesting$Tree == names(data_list)[tree] & fullModelTesting$Variable == names(i_dat)[c])]
if(model_c != "WN" & names(i_dat)[c] != "endosym"){
C = i_dat[,c]
names(C) = rownames(i_dat)
Cse = i_SE[,c]
names(Cse) = rownames(i_SE)
fit_c <- fitContinuous(i_tree, C, model=as.character(model_c), SE = Cse)
# Slope only exists for the "trend" model; 0 otherwise.
if(as.character(model_c)=="trend"){
SLP = fit_c$opt$slope
}
else{SLP = 0}
# For EB, store its rate parameter `a` in the "alpha" column; 0 otherwise.
if(as.character(model_c)=="EB"){
alpha = fit_c$opt$a
}
else{alpha = 0}
modelpars <- rbind(modelpars, c(names(i_dat)[c], names(data_list)[tree], as.character(model_c), fit_c$opt$sigsq, fit_c$opt$z0, alpha, SLP))
}
}
}
# Promote the header row and coerce column types.
rownames(modelpars) = 1:nrow(modelpars)
colnames(modelpars) = modelpars[1,]
modelpars = modelpars[-1,]
modelpars = as.data.frame(modelpars)
modelpars[,1:3] <- sapply(modelpars[,1:3], as.character)
modelpars[,4:7] <- sapply(modelpars[,4:7], as.character)
modelpars[,4:7] <- sapply(modelpars[,4:7], as.numeric)
#modelpars = modelpars[,-6] #to remove alpha if no OU
write.csv(modelpars, "deliverables/model_support/model_parameters_wSE.csv")
##Phylogenetic Path Analysis
# Compare a null model against an "electron mediation" causal chain
# (growth <- ETR <- EQY <- ChlA) per tree, first on the d* variables
# (columns 5:8 and 13), then on the m* variables (columns 9:13).
# NOTE: the model-set name "electon_mediation" (sic) is part of the printed
# output labels, so it is kept as-is here.
for(tree in 1:4){
i_tree <- treelist[[tree]]
i_dat <- data_list[[tree]] %>% .[,c(5:8,13)]
m <- define_model_set(
null = c(),
electon_mediation = c(dGrowth~dETR, dETR~dEQY, dEQY~dChlA),
.common = c()
)
PP <- phylo_path(m, i_dat, i_tree)
#summary(PP) %>% .$CICc %>% min() %>% print()
print(summary(PP))
#plot(best(PP))
#coef_plot(best(PP), error_bar = "se", order_by = "strength", to = "a") + ggplot2::coord_flip()
#avgPP = average(PP, avg_method = "full")
#coef_plot(avgPP, error_bar = "se", order_by = "strength", to = "a") + ggplot2::coord_flip()
}
# Same comparison on the m* (mean) variables.
for(tree in 1:4){
i_tree <- treelist[[tree]]
i_dat <- data_list[[tree]] %>% .[,9:13]
m <- define_model_set(
null = c(),
electon_mediation = c(mGrowth~mETR, mETR~mEQY, mEQY~mChlA)) #,
#.common = c()
#)
PP <- phylo_path(m, i_dat, i_tree)
#summary(PP) %>% .$CICc %>% min() %>% print()
print(summary(PP))
#plot(best(PP))
#coef_plot(best(PP), error_bar = "se", order_by = "strength", to = "a") + ggplot2::coord_flip()
#avgPP = average(PP, avg_method = "full")
#coef_plot(avgPP, error_bar = "se", order_by = "strength", to = "a") + ggplot2::coord_flip()
}
|
9262fb086a79b7ae8300327968a3f2ffed2681d8
|
f4e7f741f3e84259b264b25a831962e02d65e6a6
|
/P10/tarea10.1.R
|
f3291ff33f6014ad8841e9d0214404e6f156d6de
|
[] |
no_license
|
pejli/simulacion
|
73d807942d903990af05870c357a27e73728cfec
|
990e023a2be5e52289a40fea5355ae6771c3cb1e
|
refs/heads/master
| 2021-07-17T21:05:45.726325
| 2018-12-04T06:21:44
| 2018-12-04T06:21:44
| 144,336,591
| 2
| 1
| null | null | null | null |
ISO-8859-2
|
R
| false
| false
| 6,362
|
r
|
tarea10.1.R
|
# LIBRARIES (testit provides assert(); parallel provides the worker cluster)
library(testit)
library(parallel)
# GLOBAL EXPERIMENT PARAMETERS
pm <- 0.05 # per-individual mutation probability
rep <- 50 # crossover events per generation (NOTE: shadows base::rep as a value; rep(...) calls still resolve to the function)
tmax <- 50 # generations per run
init<-200 # population size kept after survival selection
# FUNCTIONS
knapsack <- function(cap, peso, valor) {
# Solve the 0/1 knapsack problem exactly by dynamic programming.
#
# cap:   integer weight capacity of the knapsack.
# peso:  vector of item weights.
# valor: vector of item values (same length as peso).
# Returns the maximum total value achievable without exceeding cap.
n <- length(peso)
pt <- sum(peso)
# Base stopifnot() replaces testit::assert — no external package needed here.
stopifnot(n == length(valor))
vt <- sum(valor)
if (pt < cap) {
# Every item fits simultaneously: the optimum is the total value.
return(vt)
}
# tabla[w + 1, k + 1] = best value using the first k items within weight
# budget w; -Inf marks unreachable states.
filas <- cap + 1
cols <- n + 1
tabla <- matrix(rep(-Inf, filas * cols),
nrow = filas, ncol = cols)
# With zero items the value is zero at every budget (vectorized; the
# original looped over rows to do the same assignment).
tabla[, 1] <- 0
rownames(tabla) <- 0:cap
colnames(tabla) <- c(0, valor)
for (objeto in 1:n) {
for (acum in 1:(cap+1)) { # consider each weight budget (row of the table)
anterior <- acum - peso[objeto]
# Default: skip this item.
tabla[acum, objeto + 1] <- tabla[acum, objeto]
if (anterior > 0) { # the item fits within this budget
tabla[acum, objeto + 1] <- max(tabla[acum, objeto], tabla[anterior, objeto] + valor[objeto])
}
}
}
return(max(tabla))
}
factible <- function(seleccion, pesos, capacidad) {
# TRUE when the combined weight of the selected items fits the capacity.
peso.total <- sum(seleccion * pesos)
peso.total <= capacidad
}
objetivo <- function(seleccion, valores) {
# Fitness of a knapsack solution: total value of the selected items.
aportes <- seleccion * valores
sum(aportes)
}
normalizar <- function(data) {
# Linearly rescale a numeric vector onto [0, 1] (min -> 0, max -> 1).
rango <- range(data)
(data - rango[1]) / (rango[2] - rango[1])
}
generador.pesos <- function(cuantos, min, max) {
# Draw `cuantos` normal deviates, rescale them into [min, max], round to
# integers and return them in ascending order.
brutos <- normalizar(rnorm(cuantos))
sort(round(brutos * (max - min) + min))
}
generador.valores <- function(pesos, min, max) {
# Generate item values correlated with the item weights: each value is a
# normal draw centered on its own weight (random sd in (0,1)), then the
# whole vector is rescaled into [min, max].
# Note: `min`/`max` are parameter names shadowing the base functions; they
# are only used as values here.
n <- length(pesos)
valores <- double(n) # preallocated instead of grown with c()
for (i in 1:n) {
# Bug fix: the mean must track the i-th weight. The original used
# pesos[n] (the largest weight, since pesos arrives sorted) for every
# item, which decoupled each value from its own weight.
media <- pesos[i]
desv <- runif(1)
valores[i] <- rnorm(1, media, desv)
}
valores <- normalizar(valores) * (max - min) + min
return(valores)
}
poblacion.inicial <- function(n, tam) {
# Random initial population: `tam` individuals (rows) of `n` binary genes.
poblacion <- matrix(rep(FALSE, tam * n), nrow = tam, ncol = n)
for (fila in seq_len(tam)) {
# Each gene is 0 or 1 with equal probability.
poblacion[fila, ] <- round(runif(n))
}
as.data.frame(poblacion)
}
mutacion <- function(sol, n) {
# Flip exactly one randomly chosen gene of the solution (0 <-> 1).
punto <- sample(1:n, 1)
copia <- sol
copia[punto] <- (!sol[punto]) * 1
copia
}
reproduccion <- function(x, y, n) {
# Single-point crossover at an interior cut point: returns both offspring
# concatenated into one vector of length 2 * n.
corte <- sample(2:(n-1), 1)
hijo1 <- c(x[1:corte], y[(corte+1):n])
hijo2 <- c(y[1:corte], x[(corte+1):n])
c(hijo1, hijo2)
}
muttacion <- function(i) {
# parSapply worker: with probability pm (global), mutate individual i of the
# global population p; otherwise emit a row of 2s — a sentinel the caller
# filters out, since valid genes are only 0/1.
mutado <- if (runif(1) < pm) mutacion(p[i, ], n) else rep(2, n)
return(as.numeric(mutado))
}
reproduxion <- function(i) {
# parSapply worker: pick two distinct random parents from the global
# population p and return their crossover offspring as a numeric vector of
# length 2 * n. The index i is unused; it only drives the parallel map.
progenitores <- sample(1:tam, 2, replace = FALSE)
cria <- reproduccion(p[progenitores[1], ], p[progenitores[2], ], n)
return(as.numeric(cria))
}
# parSapply worker: fitness of individual i of the global population p.
# obj is exported to the workers as an empty vector, so c(obj, value)
# effectively returns a single value per call.
objetivox<-function(i){
obj <- c(obj, objetivo(p[i,], valores))
return(obj)
}
# parSapply worker: feasibility of individual i of the global population p.
# fact is exported to the workers as an empty vector, so c(fact, value)
# effectively returns a single value per call.
factiblex<-function(i){
fact <- c(fact, factible(p[i,], pesos, capacidad))
return(fact)
}
# RESULT TABLES
# NOTE(review): resultadox is created but never written to below — confirm
# whether it is dead code.
resultados<-data.frame(Réplica=integer(), Objetos=integer(),Tiempo=double(), Método=character())
resultadox<-data.frame(Réplica=integer(), Objetos=integer(),Tiempo=double(), Método=character())
####### PARALLEL EXPERIMENT ####
# Timing comparison of the genetic algorithm run in parallel vs sequentially,
# for three instance sizes and 10 replicas each.
cluster <- makeCluster(detectCores() - 1)
for ( n in c(50,100,200)){
print(n)
pesos <-generador.pesos(n, 15, 80)
valores <- generador.valores(pesos, 10, 500)
capacidad <- round(sum(pesos) * 0.65)
for (replicas in 1:10) {
print(replicas)
###### PARALLEL RUN
inicio<-as.numeric(Sys.time()) # start timing the parallel run
optimo <- knapsack(capacidad, pesos, valores)
p <- poblacion.inicial(n, init)
tam <- dim(p)[1]
assert(tam == init)
mejores <- double()
for (iter in 1:tmax) {
# Drop last generation's score columns before growing the population.
p$obj <- NULL
p$fact <- NULL
# Parallelize mutation; rows of 2s are sentinels for "no mutation".
clusterExport(cluster,c("p" ,"pm","mutacion","n") )
pp<-t(parSapply(cluster,1:tam,muttacion))
pp<-pp[pp[,1] < 2,]
p<-rbind(p,pp)
# Parallelize reproduction (rep crossovers per generation).
clusterExport(cluster,c("p","reproduccion","tam","n"))
ppp<-parSapply(cluster, 1:rep, reproduxion)
pppp<-(matrix(unlist(ppp), ncol=n ))
p<-rbind(p,pppp)
tam <- dim(p)[1]
obj <- double()
fact <- integer()
# Parallelize feasibility evaluation.
clusterExport(cluster,c("pesos","factible","capacidad","p","fact"))
fact<-parSapply(cluster, 1:tam,factiblex)
p<-cbind(p,fact)
# Parallelize objective evaluation.
clusterExport(cluster,c("objetivo","obj","valores","p"))
obj<-parSapply(cluster, 1:tam,objetivox)
p<-cbind(p,obj)
# Survival selection: keep the best `init` rows.
# NOTE(review): here column n+1 is fact and n+2 is obj, so this sorts by
# objective first; the sequential run below binds obj before fact and so
# sorts by feasibility first — the two methods use different selection
# keys. Confirm which ordering is intended.
mantener <- order(-p[, (n + 2)], -p[, (n + 1)])[1:init]
p <- p[mantener,]
tam <- dim(p)[1]
assert(tam == init)
factibles <- p[p$fact == TRUE,]
mejor <- max(factibles$obj)
mejores <- c(mejores, mejor)
}
final<-as.numeric(Sys.time())
resultados<-rbind(resultados, data.frame(Réplica= replicas, Objetos=n,Tiempo=final-inicio, Método="Paralelo"))
########### SEQUENTIAL RUN ##############
inicio <- as.numeric(Sys.time())
p <- poblacion.inicial(n, init)
tam <- dim(p)[1]
assert(tam == init)
mejores <- double()
for (iter in 1:tmax) {
p$obj <- NULL
p$fact <- NULL
for (i in 1:tam) { # each individual may mutate with probability pm
if (runif(1) < pm) {
p <- rbind(p, mutacion(p[i,], n))
}
}
for (i in 1:rep) { # a fixed number of reproductions
padres <- sample(1:tam, 2, replace = FALSE)
hijos <- reproduccion(p[padres[1],], p[padres[2],], n)
p <- rbind(p, hijos[1:n]) # first child
p <- rbind(p, hijos[(n+1):(2*n)]) # second child
}
tam <- dim(p)[1]
obj <- double()
fact <- integer()
for (i in 1:tam) {
obj <- c(obj, objetivo(p[i,], valores))
fact <- c(fact, factible(p[i,], pesos, capacidad))
}
p <- cbind(p, obj)
p <- cbind(p, fact)
# Here column n+1 = obj, n+2 = fact: feasibility-first selection.
mantener <- order(-p[, (n + 2)], -p[, (n + 1)])[1:init]
p <- p[mantener,]
tam <- dim(p)[1]
assert(tam == init)
factibles <- p[p$fact == TRUE,]
mejor <- max(factibles$obj)
mejores <- c(mejores, mejor)
}
final <- as.numeric(Sys.time())
resultados<-rbind(resultados, data.frame(Réplica= replicas, Objetos=n,Tiempo=final-inicio, Método="Secuencial"))
}
}
stopCluster(cluster)
|
7c3bdee370ea083e8229bd3550d6edd371b18100
|
ab3c34ea788e2d35ed22dd24b7cc8ccfbdedc381
|
/R/covid19_Canada.R
|
c1de43f1bba99b673c1a6d4ec11728bdeff2a971
|
[] |
no_license
|
cran/covid19.analytics
|
bd2e2637f4287bddb4836851d0419534fe17b6f8
|
088444f644832744421d02ef4851523f879dd02d
|
refs/heads/master
| 2023-06-24T15:02:46.793254
| 2023-05-25T06:20:09
| 2023-05-25T06:20:09
| 253,551,012
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 15,730
|
r
|
covid19_Canada.R
|
# Data acquisition fns for the "City of Toronto" and CANADA
# Part of the covid19.analytics package
#
# M.Ponce
#######################################################################
covid19.Toronto.data <- function(origin="OD", data.fmt="TS",local.data=FALSE,debrief=FALSE, OLD.fmt=FALSE, acknowledge=FALSE) {
#' Import COVID-19 data for the city of Toronto, ON - Canada.
#'
#' Dispatches to one of the two available providers: the "City of Toronto"
#' website ('city') or "Open Data Toronto" ('OD', the default).
#'
#' @param origin data provider: 'city' for the City of Toronto; any other value selects Open Data Toronto
#' @param data.fmt "TS" for a time series of cumulative cases, or "original" for the data as published
#' @param local.data boolean flag to read from the package's local copy instead of the internet
#' @param debrief boolean, display a summary of the data that was read
#' @param OLD.fmt boolean, read the data assuming the older layout (only used by the 'city' provider)
#' @param acknowledge boolean flag acknowledging the data source; when FALSE, acquisition messages are shown
#'
#' @return a dataframe (or a list in the case of "original") with the latest data reported for the city of Toronto, ON - Canada
#'
#' @export
#'
	if (origin == "city") {
		# Data as published directly by the City of Toronto.
		return(covid19.Toronto_city.data(data.fmt = data.fmt, local.data = local.data,
						debrief = debrief, OLD.fmt = OLD.fmt,
						acknowledge = acknowledge))
	}
	# Any other origin falls through to Open Data Toronto (OLD.fmt does not apply there).
	covid19.Toronto_OD.data(data.fmt = data.fmt, local.data = local.data,
				debrief = debrief, acknowledge = acknowledge)
}
###########################################################################
covid19.Toronto_city.data <- function(data.fmt="TS",local.data=FALSE,debrief=FALSE, OLD.fmt=FALSE, acknowledge=FALSE) {
#' function to import data from the city of Toronto, ON - Canada
#' as reported by the City of Toronto
#' https://www.toronto.ca/home/covid-19/covid-19-pandemic-data/
#'
#' @param data.fmt "TS" for TimeSeries of cumulative cases or "original" for the data as reported in the google-document with multiple sheets
#' @param local.data boolean flag to indicate whether the data will be read from the local repo, in case of connectivity issues or data integrity
#' @param debrief boolean specifying whether information about the read data is going to be displayed in screen
#' @param OLD.fmt boolean flag to specify if the data is being read in an old format
#' @param acknowledge boolean flag to indicate that the user acknowledges where the data is coming from. If FALSE, display data acquisition messages.
#'
#' @return a dataframe (or a list in the case of "original") with the latest data reported for the city of Toronto, ON - Canada
#'
#' @importFrom utils download.file
#' @importFrom readxl excel_sheets read_excel
#'
#' @export
#'
## Diclaimer LOG...
## // Sept. 12, 2021 //
## orig URL:
## https://www.toronto.ca/home/covid-19/covid-19-latest-city-of-toronto-news/covid-19-status-of-cases-in-toronto/
## moved to
## https://www.toronto.ca/home/covid-19/covid-19-pandemic-data/
## From: inst/doc/covid19.analytics.html
## Status: 301
## Message: Moved Permanently
loadLibrary("readxl")
# identify source of the data: either download the xlsx or use the local copy
if (!local.data) {
# Google drive URL, with "City of Toronto" data
city.of.Toronto.data <- "https://drive.google.com/uc?export=download&id=1euhrML0rkV_hHF1thiA0G5vSSeZCqxHY"
# temporary file to retrieve data, does not exist yet ==> mustwork=FALSE to avoid warning message
Tor.xlsx.file <- normalizePath(file.path(tempdir(), "covid19-toronto.xslx"), mustWork=FALSE)
if (!acknowledge) {
header('',paste("Accessing file from...",Tor.xlsx.file))
}
# save excel file
#if (capabilities('libcurl')) {
#	dwnld.method <- 'libcurl'
#} else {
#	stop("curl/libcurl; needed to download data from internet")
#}
# mode='wb' is required: xlsx is a binary format
download.file(city.of.Toronto.data, destfile=Tor.xlsx.file, mode = 'wb' )	#method=dwnld.method)
} else {
# use local data shipped with the package in extdata/
covid19.pckg <- 'covid19.analytics'
if (!acknowledge) {
message("Data being read from *local* repo in the '",covid19.pckg,"' package")
header('~')
}
Tor.xlsx.file <- system.file("extdata","covid19_Toronto.xlsx", package=covid19.pckg, mustWork = TRUE)
}
if (file.exists(Tor.xlsx.file)) {
###~~~~~~
#print(Tor.xlsx.file)
###~~~~~~
# obtain names of sheets
lst.sheets <- excel_sheets(Tor.xlsx.file)
#print(lst.sheets)
# if only "TS" identify corresponding sheet by partial name match
key.wrd <- "Cumulative Cases by Reported"
tgt.sheet <- pmatch(key.wrd,lst.sheets)
#if (is.na(tgt.sheet)) {
#	#key.wrd <- "Cases by Episode Date"
#	#tgt.sheet <- pmatch(key.wrd,lst.sheets)
#	covid19.Toronto.data(data.fmt,local.data=TRUE,debrief, OLD.fmt, acknowledge)
#}
# read data
if (toupper(data.fmt)=="TS") {
# check that the target sheet was found
if (!is.na(tgt.sheet)) {
if (!acknowledge) {
header('',"Reading TimeSeries data...")
}
toronto <- read_excel(Tor.xlsx.file,sheet=tgt.sheet)
} else {
# Fallback: retry recursively against the local repo copy.
message(key.wrd, " NOT FOUND!")
toronto.loc <- covid19.Toronto_city.data(data.fmt,local.data=TRUE,debrief=debrief, OLD.fmt, acknowledge=acknowledge)
return(toronto.loc)
}
} else {
if (!acknowledge) {
header('',"Collecting all data reported...")
}
# "original" format: return every sheet of the workbook as a named list
toronto <- list()
# iterate on each sheet...
for (sht in lst.sheets) {
toronto[[sht]] <- read_excel(Tor.xlsx.file,sheet=sht)
}
}
# clean-up after reading the file only if it isn't the local repo
if (!local.data) file.remove(Tor.xlsx.file)
} else {
if (!local.data) {
# Download failed: retry once against the local repo copy.
warning("Could not access data from 'City of Toronto' source, attempting to reach local repo")
toronto.loc <- covid19.Toronto_city.data(data.fmt=data.fmt,local.data=TRUE,debrief=debrief, OLD.fmt, acknowledge=acknowledge)
return(toronto.loc)
} else {
stop("An error occurred accessing the data for the City of Toronto")
}
}
if (toupper(data.fmt)=="TS") {
## PREVIOUS FORMAT -- cases identified in 3 categories: deaths, active, resolved
if (OLD.fmt) {
# identify columns: long format (date, category, count)
cat.col <- 2
date.col <- 1
nbr.col <- 3
# filter categories
categs <- unique(toronto[[cat.col]])
# sort them alphabetically
categs <- sort(categs)
# check for inconsistencies in data, ie. missing categories
if (length(categs) != 3) {
stop("There supppose to be at least three categories/status within the data!\n This may represent some inconsistency with the datasets please contact the author of the package.")
}
# break into different categories
data.per.categ <- split(toronto, toronto[[cat.col]])
# Convert into TS format: one row per category, dates newest-to-oldest reversed
x <- data.frame()
for (i in categs) {
reported.dates <- rev(unique(as.Date(data.per.categ[[i]][[date.col]])))
x <- rbind(x,rev(data.per.categ[[i]][[nbr.col]]))
}
###########
} else {
###########
# CURRENT FORMAT: wide sheet, one column per category (columns 2:4)
date.col <- 1
categs <- names(toronto[2:4])
# get the dates...
reported.dates <- rev(unique(as.Date(toronto[,date.col][[1]])))
x <- data.frame()
# Convert into TS format
for (i in categs) {
#reported.dates <- rev(unique(as.Date(data.per.categ[[i]][[date.col]])))
#x <- rbind(x,rev(data.per.categ[[i]][[nbr.col]]))
data.per.categ <- toronto[,i]
x <- rbind(x,rev(data.per.categ[[1]]))
}
}
# add category label as the last column
x <- cbind(x, categs)
## OLD WAY!!!! ###
#reported.dates <- rev(as.Date(toronto[[date.col]]))
#reported.cases <- rev(toronto[[nbr.col]])
#
#tor.data <- cbind(data.frame("Canada","Toronto, ON",43.6532,79.3832),
#			rbind(as.integer(reported.cases)) )
##################
# Prefix location columns to mimic the JHU time-series layout.
# NOTE(review): the longitude is stored as positive 79.3832 — Toronto's
# longitude is West (negative); confirm the sign convention used downstream.
tor.data <- cbind(data.frame("Canada","Toronto, ON",43.6532,79.3832), x)
names(tor.data) <- c("Country.Region","Province.City","Lat","Long",
as.character(reported.dates),
"status")
} else {
# ALL DATA
tor.data <- toronto
print(names(tor.data))
}
# debrief...
debriefing(tor.data,debrief)
return(tor.data)
}
###########################################################################
covid19.Toronto_OD.data <- function(data.fmt="TS",local.data=FALSE,debrief=FALSE, acknowledge=FALSE) {
#' function to import data from the city of Toronto, ON - Canada
#' as reported by Open Data Toronto
#' https://open.toronto.ca/dataset/covid-19-cases-in-toronto/
#' This dataset is updated WEEKLY.
#'
#' @param data.fmt "TS" for TimeSeries of cumulative cases or "original" for the data as original reported
#' @param local.data boolean flag to indicate whether the data will be read from the local repo, in case of connectivity issues or data integrity
#' @param debrief boolean specifying whether information about the read data is going to be displayed in screen
#' @param acknowledge boolean flag to indicate that the user acknowledges where the data is coming from. If FALSE, display data acquisition messages.
#'
#' @return a dataframe with the latest data reported by "OpenData Toronto" for the city of Toronto, ON - Canada
#'
#'
#' @export
#'
# read data (per-case records, one row per reported case)
openDataTOR <- covid19.URL_csv.data(local.data, acknowledge,
#srcURL="https://ckan0.cf.opendata.inter.prod-toronto.ca/download_resource/e5bf35bc-e681-43da-b2ce-0242d00922ad?format=csv",
srcURL="https://ckan0.cf.opendata.inter.prod-toronto.ca/dataset/64b54586-6180-4485-83eb-81e8fae3b8fe/resource/fff4ee65-3527-43be-9a8a-cb9401377dbc/download/COVID19%20cases.csv",
srcName="Open Data Toronto",
locFileName="covid19_openData_Toronto.RDS", locVarName="openDataTOR")
# TS data: aggregate per-case records into cumulative counts per outcome/date
if (data.fmt=="TS") {
# identify type of cases and dates reported
# NOTE(review): cases.dates is computed but never used below.
cases.types <- unique(openDataTOR$Outcome)
cases.dates <- unique(openDataTOR$Episode.Date)
df.cum.cases <- data.frame()
# One row per outcome: cumulative sum of daily case counts for that outcome.
for (i in cases.types) {
cum.cases <- cumsum(as.numeric(tapply(openDataTOR$Outcome==i,openDataTOR$Episode.Date,sum)))
names(cum.cases) <- sort(unique(openDataTOR$Episode.Date))
df.cum.cases <- rbind(df.cum.cases,cum.cases)
}
names(df.cum.cases) <- sort(unique(openDataTOR$Episode.Date))
df.cum.cases <- cbind(df.cum.cases, Status=as.character(cases.types), stringsAsFactors=FALSE)
# fix names and conventions
# NOTE(review): the local variable `colnames` shadows base::colnames here.
colnames <- names(df.cum.cases)
# JE SUIS ICI!!!
names(df.cum.cases)[length(colnames)] <- "status"
colnames <- names(df.cum.cases)
# Map the provider's outcome labels onto the package's status names.
df.cum.cases[df.cum.cases$status=="FATAL","status"] <- "Deaths"
df.cum.cases[df.cum.cases$status=="ACTIVE","status"] <- "Active Cases"
df.cum.cases[df.cum.cases$status=="RESOLVED","status"] <- "Recovered Cases"
# Prefix location columns to mimic the JHU time-series layout.
tor.data <- cbind(data.frame("Canada","Toronto, ON",43.6532,79.3832), df.cum.cases)
names(tor.data) <- c("Country.Region","Province.City","Lat","Long",
colnames)
# debriefing...
debriefing(tor.data,debrief)
return(tor.data)
} else {
# ORIGINAL data as reported by OpenData Toronto
# debriefing...
debriefing(openDataTOR,debrief)
return(openDataTOR)
}
}
#######################################################################
covid19.Canada.data <- function(data.fmt="TS",local.data=FALSE,debrief=FALSE, acknowledge=FALSE) {
#' Import Canadian COVID-19 data as reported by Health Canada
#' (https://health-infobase.canada.ca/src/data/covidLive/covid19.csv).
#'
#' @param data.fmt "TS" or "original"; accepted for API symmetry with the other readers but not consumed here
#' @param local.data boolean flag to read from the package's local copy instead of the internet
#' @param debrief boolean; accepted for API symmetry with the other readers but not consumed here
#' @param acknowledge boolean flag acknowledging the data source; when FALSE, acquisition messages are shown
#'
#' @return a dataframe with the latest data reported by Health Canada
#'
#'
#' @export
#'
	# Delegate the download / local-repo fallback logic to the shared reader.
	covid19.URL_csv.data(local.data, acknowledge,
				srcURL="https://health-infobase.canada.ca/src/data/covidLive/covid19.csv",
				srcName="Health Canada",
				locFileName="covid19_HealthCanada_Canada.RDS", locVarName="canada_covid19")
}
#######################################################################
covid19.URL_csv.data <- function(local.data=FALSE, acknowledge=FALSE,
srcURL="",	#"https://ckan0.cf.opendata.inter.prod-toronto.ca/download_resource/e5bf35bc-e681-43da-b2ce-0242d00922ad?format=csv",
srcName="",	#"Open Data Toronto",
locFileName=NA,	#"covid19_openData_Toronto.RDS",
locVarName=NA) {	#"openDataTOR") {
#'
#' function to read CSV from URLs or local replicas
#'
#' @param local.data boolean flag to indicate whether the data will be read from the local repo, in case of connectivity issues or data integrity
#' @param acknowledge boolean flag to indicate that the user acknowledges where the data is coming from. If FALSE, display data acquisition messages.
#' @param srcURL URL from where to obtain the data
#' @param srcName name of the source
#' @param locFileName name of the file to read from local repo
#' @param locVarName name of the variable loaded from local file
#'
#' @return data as oriignally obtained from the URL src
#'
#'
#' @export
#'
###############################
## function for error handling: prints a framed diagnostic for a condition
errorHandling.Msg <- function(condition,target.case) {
header('=')
message("A problem was detected when trying to retrieve the data for the package: ",target.case)
if (grepl("404 Not Found",condition)) {
message("The URL or file was not found! Please contact the developer about this!")
} else {
message("It is possible that your internet connection is down! Please check!")
}
message(condition,'\n')
header('=')
# update problems counter
#pkg.env$problems <- pkg.env$problems + 1
}
###############################
tryCatch( {
# identify source of the data: live URL or local RDS replica
if (!local.data) {
# define URL to get the data
#OpenData.Toronto.CSV <- "https://ckan0.cf.opendata.inter.prod-toronto.ca/download_resource/e5bf35bc-e681-43da-b2ce-0242d00922ad?format=csv"
URL.file.CSV <- srcURL
if (!acknowledge) header('',paste0("Reading data from ",srcName," ..."))
# read data directly from internet
data.from.URL <- read.csv(URL.file.CSV)
return(data.from.URL)
} else if (!is.na(locFileName)) {
if (!acknowledge) header('',paste0("Reading data from *LOCAL REPO* for ",srcName," ..."))
covid19.pckg <- "covid19.analytics"
loc.data.file <- system.file("extdata",locFileName, package=covid19.pckg, mustWork = TRUE)
# load() returns the names of the objects restored into this frame
if (file.exists(loc.data.file)) {
locVarName0 <- load(loc.data.file)
} else {
stop("Local data file ",loc.data.file," associated to ",locFileName," NOT found!")
}
# NOTE(review): this checks whether the *values* of locVarName/locVarName0
# appear among local variable names via ls(); since locVarName itself is a
# local, `locVarName %in% ls()` tests its string value against variable
# names — confirm this guard behaves as intended.
if (!(locVarName %in% ls()) & !(locVarName0 %in% ls()) )
stop("Couldn't load data from local file",loc.data.file)
# NOTE(review): eval(parse(text=...)) fetches the loaded object by name;
# get(locVarName) would be the safer equivalent.
return(eval(parse(text=locVarName)))
}
},
# warning
warning = function(cond) {
errorHandling.Msg(cond,srcURL)
},
# error
error = function(e){
errorHandling.Msg(e,srcURL)
}
)
}
########################################################################
|
2bed8ec4da4be88a8fb0d3c45359a40ad2198544
|
024003232dbcda6aa8e920e5c345d0137e92d07a
|
/man/richards-package.Rd
|
552e1a8d60217ca9aefc589bcca0760626b13a0d
|
[] |
no_license
|
cran/ActuDistns
|
6dc7c284c4250d909266db1407c5fb4f35569f64
|
61b9ec31d6dc6c8800351b59e5655b22812c858f
|
refs/heads/master
| 2021-01-23T03:43:43.416428
| 2012-09-13T00:00:00
| 2012-09-13T00:00:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 850
|
rd
|
richards-package.Rd
|
\name{ActuDistns-package}
\alias{ActuDistns-package}
\alias{ActuDistns}
\docType{package}
\title{
Computes functions for actuarial use
}
\description{
Computes the probability density function, hazard rate function, integrated hazard rate function and the quantile function for 44 commonly used survival models.}
\details{
\tabular{ll}{
Package: \tab ActuDistns\cr
Type: \tab Package\cr
Version: \tab 3.0\cr
Date: \tab 2012-09-13\cr
License: \tab What license is it under?\cr
}
probability density functions, hazard rate functions, integrated hazard rate functions and quantile functions
}
\author{Saralees Nadarajah
Maintainer: Saralees Nadarajah <Saralees.Nadarajah@manchester.ac.uk>
}
\references{
S. Nadarajah, S. A. A. Bakar, A new R package for actuarial survival models, Computational Statistics}
\keyword{ package }
|
9ef75d08293d4d494e3661a7dd68715dc31ea5ac
|
78014f186b458c0dfbd4dd4dc660adfe858d4c62
|
/man/COL.OLD.Rd
|
b11e248dfdaa73a45dcd3d1d998d23781634a7fc
|
[] |
no_license
|
r-spatial/spdep
|
a1a8cebe6c86724bc4431afefc2ba2f06a3eb60c
|
a02d7aa6be8a42e62ba37d2999a35aff06a0ea59
|
refs/heads/main
| 2023-08-21T19:27:09.562669
| 2023-08-07T12:08:51
| 2023-08-07T12:08:51
| 109,153,369
| 109
| 30
| null | 2022-12-10T12:31:36
| 2017-11-01T16:10:35
|
R
|
UTF-8
|
R
| false
| false
| 2,562
|
rd
|
COL.OLD.Rd
|
% Copyright 2001 by Roger S. Bivand
\name{oldcol}
\alias{oldcol}
\alias{COL.OLD}
\alias{COL.nb}
%\alias{coords.OLD}
%\alias{polys.OLD}
\docType{data}
\title{Columbus OH spatial analysis data set - old numbering}
\usage{data(oldcol)}
\description{
The \code{COL.OLD} data frame has 49 rows and 22 columns. The observations are
ordered and numbered as in the original analyses of the data set in the
SpaceStat documentation and in Anselin, L. 1988 Spatial econometrics: methods and models, Dordrecht: Kluwer.
Unit of analysis: 49 neighbourhoods in Columbus, OH, 1980 data.
In addition the data set includes \code{COL.nb}, the neighbours list
as used in Anselin (1988).
}
\format{
This data frame contains the following columns:
\describe{
\item{AREA_PL}{computed by ArcView (agrees with areas of polygons in the \dQuote{columbus} data set}
\item{PERIMETER}{computed by ArcView}
\item{COLUMBUS.}{internal polygon ID (ignore)}
\item{COLUMBUS.I}{another internal polygon ID (ignore)}
\item{POLYID}{yet another polygon ID}
\item{NEIG}{neighborhood id value (1-49);
conforms to id value used in Spatial Econometrics book.}
\item{HOVAL}{housing value (in $1,000)}
\item{INC}{household income (in $1,000)}
\item{CRIME}{residential burglaries and vehicle thefts per thousand
households in the neighborhood}
\item{OPEN}{open space in neighborhood}
\item{PLUMB}{percentage housing units without plumbing}
\item{DISCBD}{distance to CBD}
\item{X}{x coordinate (in arbitrary digitizing units, not polygon coordinates)}
\item{Y}{y coordinate (in arbitrary digitizing units, not polygon coordinates)}
\item{AREA_SS}{neighborhood area (computed by SpaceStat)}
\item{NSA}{north-south dummy (North=1)}
\item{NSB}{north-south dummy (North=1)}
\item{EW}{east-west dummy (East=1)}
\item{CP}{core-periphery dummy (Core=1)}
\item{THOUS}{constant=1,000}
\item{NEIGNO}{NEIG+1,000, alternative neighborhood id value}
\item{PERIM}{polygon perimeter (computed by SpaceStat)}
}
}
\details{
The row names of \code{COL.OLD} and the \code{region.id} attribute of
\code{COL.nb} are set to \code{columbus$NEIGNO}.
}
\source{
Anselin, Luc. 1988. Spatial econometrics: methods and
models. Dordrecht: Kluwer Academic, Table 12.1 p. 189.
}
\note{
All source data files prepared by
Luc Anselin, Spatial Analysis Laboratory, Department of Agricultural and Consumer Economics, University of Illinois, Urbana-Champaign, \url{https://spatial.uchicago.edu/sample-data}.
}
\keyword{datasets}
|
a5b1368cb6331700014b646f3dac9e78303a0e5b
|
d5b07bc99c7aa850abe61bb9ec815e8b3bfc6d61
|
/QID-2426-MVAbluepullover/MVAbluepullover.r
|
6c17c60d42ca52dd1a5d4681c57a5f48d4730c4c
|
[] |
no_license
|
QuantLet/MVA
|
2707e294c7ad735b0e562aafedfb27439c3cc81e
|
c8b6c94ef748af17edad1db65a77e594721aa421
|
refs/heads/master
| 2023-04-13T12:49:52.563437
| 2023-04-04T15:30:05
| 2023-04-04T15:30:05
| 73,212,746
| 18
| 45
| null | 2023-04-04T15:30:06
| 2016-11-08T18:00:17
|
R
|
UTF-8
|
R
| false
| false
| 946
|
r
|
MVAbluepullover.r
|
# clear all variables
# NOTE(review): rm(list = ls()) and attach() below are discouraged in
# reusable code; kept as-is since this is a standalone teaching script.
rm(list = ls(all = TRUE))
graphics.off()
#load data
blue.data = read.table("pullover.dat", header = T)
attach(blue.data)
# generating mu (column means, printed as a column vector) and S
mu = colMeans(blue.data)
mu = t(mu)
(mu = t(mu))
s.unbiased = cov(blue.data) # the result of cov(xy) is the unbiased one
# meaning n/n-1*S
# Rescale the unbiased covariance back to the empirical covariance S.
# NOTE(review): the factor 10/9 hard-codes n = 10 observations.
(covxy = s.unbiased/(10/9))
# partial correlation between 'Sales' and 'Price' given 'Advertisement' and 'Sales Assistans', x1 = Sales, x2 = Price
# z holds the conditioning variables (columns 3 and 4).
z = blue.data[c(3:4)]
data = data.frame(blue.data[, 1], blue.data[, 2], z)
xdata = na.omit(data.frame(data[, c(1, 2)]))
Sxx = cov(xdata, xdata)
xzdata = na.omit(data)
xdata = data.frame(xzdata[, c(1, 2)])
zdata = data.frame(xzdata[, -c(1, 2)])
Sxz = cov(xdata, zdata)
zdata = na.omit(data.frame(data[, -c(1, 2)]))
Szz = cov(zdata, zdata)
# Conditional covariance of (x1, x2) given z: Sxx - Sxz Szz^-1 Szx.
Sxx.z = Sxx - Sxz %*% solve(Szz) %*% t(Sxz)
# Partial correlation = off-diagonal of the conditional correlation matrix.
(rxx.z = cov2cor(Sxx.z)[1, 2])
# correlation matrix
(P.blue.data = cor(blue.data))
|
d041e6045c3932ca596a19fcfde083ce2d6995ca
|
f9c06cd3058b1f99e8c90b38074a80c6073f0aca
|
/getting-and-cleaning-data-course-project/run_analysis.R
|
68f44d78741f87a2d2c7a4b93a14d345abb09e51
|
[] |
no_license
|
bacton/datasciencecoursera
|
f56737516b7e979078dbb75094f24b95f91d6655
|
3ee7a1f6d577f8755508c254d865622f44a8855b
|
refs/heads/master
| 2020-05-29T16:09:06.252685
| 2016-10-23T18:32:10
| 2016-10-23T18:32:10
| 59,430,647
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 21,394
|
r
|
run_analysis.R
|
#' Build a tidy data set from the UCI HAR (smartphone) raw data.
#'
#' Reads the train/test feature tables and the raw inertial-signal files
#' from the working directory, keeps only the mean/std features, derives
#' per-observation mean/SD columns from each inertial-signal file, attaches
#' activity labels and subject ids, combines both modes, and finally
#' summarises every variable by (activity, subject), writing the result to
#' "tidy_data_set.txt".
#'
#' Side effects: reads many files relative to the working directory and
#' writes "tidy_data_set.txt".
#' Returns: the grouped summary data frame (mean of each variable per
#' activity/subject).
#'
#' NOTE: rename_ / mutate_each / summarise_each are deprecated dplyr
#' verbs; they are kept deliberately because the rest of the pipeline was
#' written against that dplyr API.
clean_data_set <- function() {
    library(data.table)
    library(dplyr)
    library(dtplyr)
    # Mapping from the raw inertial-signal file stems (as found under
    # "<mode>/Inertial Signals/<stem>_<mode>.txt") to the prefixes used
    # for the derived per-observation mean/SD columns. The order of this
    # vector determines the column order, matching the original code.
    signal_prefixes <- c(
        total_acc_x = "Total_Accel_x",
        total_acc_y = "Total_Accel_y",
        total_acc_z = "Total_Accel_z",
        body_acc_x  = "Est_Body_Accel_x",
        body_acc_y  = "Est_Body_Accel_y",
        body_acc_z  = "Est_Body_Accel_z",
        body_gyro_x = "Gyro_Velocity_x",
        body_gyro_y = "Gyro_Velocity_y",
        body_gyro_z = "Gyro_Velocity_z"
    )
    # Helper: read one inertial-signal file (one row per observation) and
    # append its per-row mean and standard deviation to `tbl` as
    # "<prefix>_Mean" / "<prefix>_SD".
    add_signal_stats <- function(tbl, path, prefix) {
        signal <- fread(path)
        tbl[[paste0(prefix, "_Mean")]] <- apply(signal, 1, mean)
        tbl[[paste0(prefix, "_SD")]]   <- apply(signal, 1, sd)
        tbl
    }
    # Helper: build the feature table for one mode ("train" or "test"):
    # read X_<mode>.txt, keep only the mean/std features (dropping the
    # derived angle() variables, which are not direct mean/std values),
    # tidy the dot-mangled names produced by col.names, and append the
    # derived inertial-signal statistics.
    load_feature_table <- function(mode, variables) {
        tbl <- read.table(file.path(mode, paste0("X_", mode, ".txt")),
                          header = FALSE, sep = "",
                          col.names = variables)
        tbl <- tbl %>%
            select(-matches('^angle')) %>%
            select(matches('mean|std')) %>%
            # collapse runs of dots and strip trailing dots
            rename_(.dots = setNames(names(.),
                                     gsub("\\.+", ".", names(.)))) %>%
            rename_(.dots = setNames(names(.),
                                     gsub("\\.$", "", names(.))))
        for (stem in names(signal_prefixes)) {
            path <- file.path(mode, "Inertial Signals",
                              paste0(stem, "_", mode, ".txt"))
            tbl <- add_signal_stats(tbl, path, signal_prefixes[[stem]])
        }
        tbl
    }
    # Helper: attach the activity code, activity label and subject id for
    # one mode. The left_join maps each per-observation activity code
    # (column V1 of y_<mode>.txt) to its label (V2 of the mapping).
    add_labels <- function(tbl, mode, activity_label_mapping) {
        codes <- read.table(file.path(mode, paste0("y_", mode, ".txt")),
                            header = FALSE, sep = "")
        mapped <- codes %>%
            left_join(activity_label_mapping)
        subjects <- read.table(file.path(mode,
                                         paste0("subject_", mode, ".txt")),
                               header = FALSE, sep = "")
        tbl %>%
            mutate(Activity_Code = mapped$V1,
                   Activity_Label = mapped$V2,
                   Subject_ID = subjects$V1)
    }
    # Variable names for the columns of X_train.txt / X_test.txt.
    features_table <- read.table("features.txt",
                                 header = FALSE, sep = "",
                                 stringsAsFactors = FALSE)
    variables <- features_table$V2
    # Activity code -> label lookup (V1 = numeric code, V2 = label).
    activity_label_mapping <- read.table("activity_labels.txt",
                                         header = FALSE, sep = "")
    # Assemble both modes, tagging each with its Mode.
    training_table <- load_feature_table("train", variables) %>%
        add_labels("train", activity_label_mapping) %>%
        mutate(Mode = "Train")
    testing_table <- load_feature_table("test", variables) %>%
        add_labels("test", activity_label_mapping) %>%
        mutate(Mode = "Test")
    # Combine both modes, make Mode a factor, drop the redundant numeric
    # activity code (Activity_Label carries the same information), and
    # normalise variable names to lower-case with underscores.
    combined_table <- bind_rows(training_table, testing_table) %>%
        mutate_each(funs(as.factor), Mode) %>%
        select(-Activity_Code) %>%
        rename_(.dots = setNames(names(.),
                                 gsub("\\.", "_", names(.)))) %>%
        rename_(.dots = setNames(names(.), tolower(names(.))))
    # Second tidy data set: the average of each variable for each
    # activity/subject combination (the grouping variables are retained
    # by dplyr even though they are deselected).
    combined_table <- combined_table %>%
        group_by(activity_label, subject_id) %>%
        select(-one_of(c("activity_label", "subject_id", "mode"))) %>%
        summarise_each(funs(mean))
    # Persist the tidy summary (course-project deliverable) and return it.
    write.table(combined_table, "tidy_data_set.txt", row.names = FALSE)
    combined_table
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.