blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
6e81f7850a4886b57ddb2c4b1ca5c929818bc134
|
3cebb834ac72ecf720c9a0a229a2ede27bb1ed3a
|
/cleaning/merge.r
|
745dd298ed05cde9b6e0f53d09416d1cc2e14f46
|
[] |
no_license
|
mpsanchis/airquality-alpha
|
e1e9ff5220dc5312c35686f822892cf734d6b9bf
|
fe4a7296d045462548508b7041b1681fb2d30a91
|
refs/heads/master
| 2021-05-11T14:32:02.394943
| 2018-01-24T11:32:02
| 2018-01-24T11:32:02
| 117,703,615
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,522
|
r
|
merge.r
|
# Joins the observation and model data prepared by "join.R" into a single
# output file: "full_data.csv".
#
# ORIGINAL FILES: 'obs_data.csv' and 'mod_data.csv'

# WORKING DIRECTORY ----------------------------------
# NOTE(review): setwd() ties the script to one machine; prefer running from
# the project root with relative paths.
setwd("~/Documents/R/airquality-alpha")

# LOAD PACKAGES --------------------------------------
library(data.table)

# LOAD DATA ------------------------------------------
# TRUE/FALSE spelled out (T/F are reassignable bindings).
obs_data <- data.table(read.csv('data/obs_data.csv', stringsAsFactors = FALSE))
mod_data <- data.table(read.csv('data/mod_data.csv', stringsAsFactors = FALSE))
stations <- data.table(read.csv('data/stations.csv', stringsAsFactors = FALSE))

# MERGE DATA ------------------------------------------
# Attach the station code and height to the model data by coordinates.
mod_data <- merge(mod_data, stations[, .(code, lat, lon, height)],
                  by = c('lat', 'lon'), all.x = TRUE)

# Split the "date time" stamps into separate date and time columns.
# tstrsplit() is the vectorised data.table replacement for the former
# per-element sapply/strsplit loops; keep = 1:2 retains the first two fields.
# (Assumes every stamp contains a space separator — TODO confirm on the data.)
obs_data[, c('DateBegin', 'TimeBegin') := tstrsplit(DatetimeBegin, ' ', fixed = TRUE, keep = 1:2)]
obs_data[, c('DateEnd', 'TimeEnd') := tstrsplit(DatetimeEnd, ' ', fixed = TRUE, keep = 1:2)]

setnames(obs_data, 'Concentration', 'ConcentrationObs')
# Drop the duplicated pollutant column before joining.
mod_data[, AirPollutant := NULL]

# Join observations onto model records (all.y keeps every model row).
data <- merge(obs_data, mod_data,
              by.x = c('AirQualityStationEoICode', 'DateBegin', 'TimeBegin'),
              by.y = c('code', 'day', 'hour'), all.y = TRUE)

# Replace any partially-merged coordinates with the clean station values.
data[, `:=`(lat = NULL, lon = NULL, height = NULL)]
data <- merge(data, stations[, .(code, lat, lon, height)],
              by.x = 'AirQualityStationEoICode', by.y = 'code', all.x = TRUE)

write.csv(data, 'data/full_data.csv', row.names = FALSE)
|
afd15789468955729d9c35e38dda88514c511048
|
bc43cfc66bf4508f26682b1d9bf0bd29f219a3cc
|
/r_code/results10k005.R
|
c54a5b50a384cdf07a413de187aa7ef480133706
|
[] |
no_license
|
fernote7/thesis
|
0a7b856477de9c5beeb2fedbd98af926d10ef719
|
c79684f4a4c1443ddb79aaa89f151066b99b30cc
|
refs/heads/master
| 2021-03-23T11:35:57.911382
| 2017-12-08T18:20:29
| 2017-12-08T18:20:29
| 81,347,227
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 674
|
r
|
results10k005.R
|
# Loads three simulation result sets and draws one histogram panel per method,
# each with the Heston closed-form reference value marked in red.
# Each .Rda file defines an object named `result`; rename right after loading.
load("./r_code/dados/resulteadi10k.Rda")
resulteadi <- result
load("./r_code/dados/resulteuler10k.Rda")
resulteuler <- result
load("./r_code/dados/resultkj10k.Rda")
resultkj <- result
rm(result)

# Reference value drawn as a vertical line on every panel.
cfhest <- c(14.176)

# One histogram panel: 100 breaks, reference line, frame, small method label.
# Extracted because the same four calls were previously repeated verbatim
# for each of the three methods.
draw_panel <- function(values, label) {
  hist(values, breaks = 100, main = "", col = "grey", las = 1, xlab = label)
  abline(v = cfhest, col = 'red')
  box()
  mtext(label, cex = 0.6)
}

# Three stacked panels, one per simulation method.
par(mfrow = c(3, 1), oma = c(0, 0, 0, 0), mar = c(3, 4, 1, 2))
draw_panel(resulteuler, "Euler")
draw_panel(resultkj, "KJ")
draw_panel(resulteadi, "EA-DI")
|
e9fa838609d8bcaced272a547cfecaeaa0920f58
|
a23b0cca20c9d0d6f0def3c8d4a51d486b690af4
|
/man/RFsim.Rd
|
6c4eaf9b6fe08dc034c39e04ce6bb89e19a4ae41
|
[] |
no_license
|
cran/CompRandFld
|
7cb6c7cd9fdb696f089d019bec0003ad07ec2703
|
6705a1498c0a23fff027e0394d35b152e3e9ddd4
|
refs/heads/master
| 2021-05-15T01:38:00.698452
| 2020-01-10T04:30:45
| 2020-01-10T04:30:45
| 17,678,514
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,809
|
rd
|
RFsim.Rd
|
\name{RFsim}
\alias{RFsim}
\alias{print.RFsim}
\title{Simulation of Gaussian, Binary and Max-stable Random Fields}
\description{
Simulation of spatial and spatio-temporal Gaussian, binary and max-stable random fields.
The function returns one or more replications of a random field for a given covariance model and covariance parameters.
}
\usage{
RFsim(coordx, coordy=NULL, coordt=NULL, corrmodel, distance="Eucl",
grid=FALSE, model='Gaussian', numblock=NULL, param,
replicates=1, threshold=NULL)
}
\arguments{
\item{coordx}{A numeric (\eqn{d \times 2}{d x 2})-matrix (where
\code{d} is the number of spatial sites) giving 2-dimensions of spatial coordinates or a numeric \eqn{d}{d}-dimensional vector giving
1-dimension of spatial coordinates.}
\item{coordy}{A numeric vector giving 1-dimension of
spatial coordinates; \code{coordy} is interpreted only if \code{coordx} is a numeric
vector or \code{grid=TRUE} otherwise it will be ignored. Optional argument, the default is \code{NULL} then \code{coordx} is expected to
be a numeric (\eqn{d \times 2}{d x 2})-matrix.}
\item{coordt}{A numeric vector giving 1-dimension of
temporal coordinates. At the moment implemented only for the
Gaussian case. Optional argument, the default is \code{NULL}
then a spatial random field is expected.}
\item{corrmodel}{String; the name of a correlation model, for the
description see the Section \bold{Details}.}
\item{distance}{String; the name of the spatial distance. The default
is \code{Eucl}, the euclidean distance. See the Section
\bold{Details} of \code{\link{FitComposite}}.}
\item{grid}{Logical; if \code{FALSE} (the default) the data
are interpreted as spatial or spatial-temporal realisations on a set
of non-equispaced spatial sites (irregular grid).}
\item{model}{String; the type of random field and therefore the densities associated to the likelihood
objects. \code{Gaussian} is the default, see the Section
\bold{Details}.}
\item{numblock}{Numeric; the observation size of the underlying random
field. Only in case of max-stable random fields.}
\item{param}{A list of parameter values required in the
simulation procedure of random fields, see \bold{Examples}.}
\item{replicates}{Numeric; a positive integer denoting the number of independent and identically distributed (iid)
replications of a spatial or spatial-temporal random field. Optional argument, the default value is \eqn{1} then
a single realisation is considered.}
\item{threshold}{Numeric; a value indicating a threshold for the
binary random field. Optional in the case that \code{model}
is \code{BinaryGauss}, see the Section \bold{Details}.}
}
\details{
Note that this function is also interfaced to the \bold{R} package \bold{RandomFields},
using fast routines therein developed for the simulation of random fields.
}
\value{
Returns an object of class \code{RFsim}.
An object of class \code{RFsim} is a list containing
at most the following components:
\item{coordx}{A \eqn{d}{d}-dimensional vector of spatial coordinates;}
\item{coordy}{A \eqn{d}{d}-dimensional vector of spatial coordinates;}
\item{coordt}{A \eqn{t}{t}-dimensional vector of temporal coordinates;}
\item{corrmodel}{The correlation model; see \code{\link{Covmatrix}}.}
\item{data}{The vector or matrix or array of data, see
\code{\link{FitComposite}};}
\item{distance}{The type of spatial distance;}
\item{model}{The type of random field, see \code{\link{FitComposite}}.}
\item{numcoord}{The number of spatial coordinates;}
\item{numtime}{The number the temporal realisations of the random field;}
\item{param}{The vector of parameters' estimates;}
\item{randseed}{The seed used for the random simulation;}
\item{replicates}{The number of the iid replications of the random field;}
\item{spacetime}{\code{TRUE} if spatio-temporal and \code{FALSE} if
spatial random field;}
\item{threshold}{The threshold for deriving the binary random field.}
}
\references{
Padoan, S. A. and Bevilacqua, M. (2015). Analysis of Random Fields Using CompRandFld.
\emph{Journal of Statistical Software}, \bold{63}(9), 1--27.
}
\seealso{\code{\link{Covmatrix}}}
\author{Simone Padoan, \email{simone.padoan@unibocconi.it},
\url{http://faculty.unibocconi.it/simonepadoan};
Moreno Bevilacqua, \email{moreno.bevilacqua@uv.cl},
\url{https://sites.google.com/a/uv.cl/moreno-bevilacqua/home}.}
\examples{
library(CompRandFld)
library(RandomFields)
library(mapproj)
library(fields)
################################################################
###
### Example 1. Simulation of a Gaussian random field.
### Gaussian random fields with Whittle-Matern correlation.
### One spatial replication.
###
###
###############################################################
# Define the spatial-coordinates of the points:
x <- runif(500, 0, 2)
y <- runif(500, 0, 2)
set.seed(261)
# Simulation of a spatial Gaussian random field:
data <- RFsim(x, y, corrmodel="matern", param=list(smooth=0.5,
mean=0,sill=1,scale=0.2,nugget=0))$data
################################################################
###
### Example 2. Simulation of a binary random field based on
### the latent Gaussian random field with exponential correlation.
### One spatial replication on a regular grid
###
###
###############################################################
# Define the spatial-coordinates of the points:
x <- seq(0, 1, 0.05)
y <- seq(0, 1, 0.05)
set.seed(251)
# Simulation of a spatial binary random field:
sim <- RFsim(x, y, corrmodel="exponential", grid=TRUE,
model="BinaryGauss", threshold=0,
param=list(nugget=0,mean=0,scale=.1,sill=1))
image(x,y,sim$data,col=terrain.colors(100))
################################################################
###
### Example 3. Simulation of a max-stable random
### extremal-t type with exponential correlation.
### One spatial replication on a regular grid
###
###
###############################################################
set.seed(341)
x <- seq(0, 1, 0.02)
y <- seq(0, 1, 0.02)
# Simulation of a spatial binary random field:
sim <- RFsim(x, y, corrmodel="exponential", grid=TRUE, model="ExtT",
numblock=500, param=list(nugget=0,mean=0,scale=.1,
sill=1,df=5))
image.plot(x,y,log(sim$data))
################################################################
###
### Example 4. Simulation of a Gaussian random field.
### with double exponential correlation.
### One spatio-temporal replication.
###
###
###############################################################
# Define the spatial-coordinates of the points:
x <- seq(0, 1, 0.1)
y <- seq(0, 1, 0.1)
# Define the temporal-coordinates:
times <- seq(1, 3, 1)
#
# Simulation of a spatial Gaussian random field:
sim <- RFsim(x, y, times, corrmodel="exp_exp", grid=TRUE,
param=list(nugget=0,mean=0,scale_s=0.3,
scale_t=0.5,sill=1))$data
# Spatial simulated data at first temporal instant
sim[,,1]
################################################################
###
### Example 5. Simulation of a Gaussian random field
### with exponential correlation on a portion of the earth surface
### One spatial replication.
###
###
###############################################################
lon_region<-c(-40,40)
lat_region<-c(-40,40)
#
lon<-seq(min(lon_region),max(lon_region),2)
lat<-seq(min(lat_region),max(lat_region),2)
#
data<-RFsim(coordx=lon,coordy=lat,corrmodel="exponential",
distance="Geod",grid=TRUE,param=list(nugget=0,mean=0
,scale=8000,sill=1))$data
image.plot(lon,lat,data,xlab="Longitude",ylab="Latitude")
map(database="world",xlim=lon_region,ylim=lat_region,add=TRUE)
}
\keyword{Simulation}
|
7b9bb860497bd8f1567ebaebcd4df192b8708c00
|
af3c3bef72e78cbee524814229539ed9c4b81c68
|
/R/symptoms_scripts/sympt_positive_age_band.R
|
06d11d3afe7a26e1c00f48417c1b0c6130fd4eb4
|
[] |
no_license
|
gabrielburcea/cvindia
|
fbb2092fc3b1d3131064cb085f1b7754d5fe24c5
|
3307704674e4bd407b5fea3bc3d7141b90ee0a80
|
refs/heads/master
| 2023-03-02T15:09:34.665456
| 2021-02-15T10:51:18
| 2021-02-15T10:51:18
| 261,289,679
| 0
| 1
| null | 2020-05-15T15:30:05
| 2020-05-04T20:34:14
|
R
|
UTF-8
|
R
| false
| false
| 3,190
|
r
|
sympt_positive_age_band.R
|
#' sympt_positive_age_band
#'
#' Summarises the share of positive ("Yes") answers per symptom and age band
#' for respondents whose covid test result is not "negative".
#'
#' @param data a dataset that contains ids, covid test status, age band and the symptom columns
#' @param start_date a min date passed in as.Date (shown in the chart title)
#' @param end_date a max date passed in as.Date (shown in the chart title)
#' @param plot_chart if TRUE then give me a chart, if FALSE then give me a table with symptoms across age, with counts and percentages
#'
#' @return a ggplot object when \code{plot_chart = TRUE}, otherwise the summary table arranged by count
#' @export
#'
#' @examples
sympt_positive_age_band <- function(data, start_date = as.Date("2020-04-19"), end_date = as.Date("2020-09-01"), plot_chart = TRUE) {

  # Keep id/test/age columns plus the 17 symptom indicator columns; drop
  # incomplete rows.
  symptoms_cov_age_band <- data %>%
    dplyr::select(id, covid_tested, age_band, chills, cough, diarrhoea, fatigue, headache, loss_smell_taste, muscle_ache, nasal_congestion, nausea_vomiting,
                  shortness_breath, sore_throat, sputum, temperature, loss_appetite, chest_pain, itchy_eyes, joint_pain) %>%
    tidyr::drop_na()

  # Long format: one row per (age band, symptom, test status, yes/no answer).
  # Counts and within-group percentages; only positive answers are kept.
  gather_divided <- symptoms_cov_age_band %>%
    tidyr::pivot_longer(cols = 4:20, names_to = "symptoms", values_to = "yes_no") %>%
    dplyr::filter(age_band != "0-19" & covid_tested != "negative") %>%
    dplyr::group_by(age_band, symptoms, covid_tested, yes_no) %>%
    dplyr::summarise(count = n()) %>%
    dplyr::mutate(percentage = count / sum(count) * 100) %>%
    dplyr::filter(yes_no != "No") %>%
    dplyr::arrange(desc(percentage))

  gather_divided$age_band <- as.factor(gather_divided$age_band)
  gather_divided$symptoms <- as.factor(gather_divided$symptoms)
  gather_divided$covid_tested <- as.factor(gather_divided$covid_tested)
  gather_divided$percentage <- round(gather_divided$percentage, digits = 1)
  gather_divided$yes_no <- NULL

  # FIX(review): start_date/end_date were previously re-assigned to hard-coded
  # values here, silently discarding the caller's arguments. The arguments are
  # now used directly in the chart title.
  title_stub <- "SARS-Covid-19 symptoms across age band\n"
  start_date_title <- format(as.Date(start_date), format = "%d %B %Y")
  end_date_title <- format(as.Date(end_date), format = "%d %B %Y")
  chart_title <- paste0(title_stub, start_date_title, " to ", end_date_title)

  # FIX(review): this chart construction was commented out, so the
  # plot_chart = TRUE branch referenced an undefined object and errored.
  sympt_show_age_band <-
    ggplot2::ggplot(gather_divided, ggplot2::aes(x = reorder(symptoms, - count), count, fill = age_band)) +
    ggplot2::geom_col(ggplot2::aes(colour = age_band), width = 0.9) +
    ggplot2::coord_flip() +
    ggplot2::scale_fill_brewer(palette = 'Reds') +
    ggplot2::theme_bw() +
    ggplot2::labs(title = chart_title,
                  subtitle = "Symptoms across age band",
                  y = "Count", x = "Symptoms", caption = "Source: Your.md Dataset, Global Digital Health") +
    ggplot2::theme(axis.title.y = ggplot2::element_text(margin = ggplot2::margin(t = 0, r = 21, b = 0, l = 0)),
                   plot.title = ggplot2::element_text(size = 10, face = "bold"),
                   plot.subtitle = ggplot2::element_text(size = 9),
                   legend.position = "bottom", legend.box = "horizontal",
                   axis.text.x = ggplot2::element_text(angle = 55, hjust = 1))

  if (plot_chart == TRUE) {
    sympt_show_age_band
  } else {
    gather_divided_numbers <- gather_divided %>%
      dplyr::arrange(desc(count))
    gather_divided_numbers
  }
}
|
e1a87ca5267c6ec253456beea78a8a9644b27905
|
c454bb95b38915faf2fdb4bf55b8bf50b88b251f
|
/man/process_eset.Rd
|
d9fbe473886549a0aa101e48387e0ad712675ea9
|
[] |
no_license
|
frankRuehle/systemsbio
|
3faceeead1f8def591316dc63b7f2dca54461d9e
|
533341be6c314b1216d8ab5920360961af44aa82
|
refs/heads/master
| 2021-07-03T12:41:05.583964
| 2020-09-10T06:43:53
| 2020-09-10T06:43:53
| 61,125,514
| 6
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,226
|
rd
|
process_eset.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/GEX_process_eset.R
\name{process_eset}
\alias{process_eset}
\title{Processing of expression data}
\usage{
process_eset(eset, method_norm = "quantile", transform = "none")
}
\arguments{
\item{eset}{ExpressionSet or ExpressionSetIllumina}
\item{method_norm}{character with normalisation method. Options are "quantile", "qspline", "vsn", "rankInvariant", "median" and "none"}
\item{transform}{character with data transformation method. Options are "none", "log2", "neqc", "rsn" and "vst".}
}
\value{
processed ExpressionSet object
}
\description{
Normalisation, transformation and/or probe quality filtering of expression data.
}
\details{
Expression data is normalised and/or transformed by algorithms dedicated in 'method_norm' and/or 'transform'.
If a column \code{PROBEQUALITY} is available within the feature data as for Illumina expression arrays,
probes assigned a `Bad' or `No match' quality score after normalisation are removed from the dataset.
Lastly, if the input object is of class \code{ExpressionSetIllumina}, it is changed to \code{ExpressionSet}
to avoid later incompatibilities with the 'limma'-package.
}
\author{
Frank Ruehle
}
|
ad25cbea2737b67062978d49d6f3e124842ea20b
|
b39ce967048b4c8846b7d7bfe179780f0860a942
|
/HW2/lan_tian_ps2_task5.R
|
5dc781fc21af34131a99e969c6c2d9177bc05fa9
|
[] |
no_license
|
lantian2012/STAT221
|
dd824ad7471d7a5b94de0b7ed47821b3a138dbaf
|
9a671ee2ba77dc3634832827ff32c8a2586107e5
|
refs/heads/master
| 2016-09-05T15:12:43.952807
| 2014-11-21T18:43:51
| 2014-11-21T18:43:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,573
|
r
|
lan_tian_ps2_task5.R
|
# PSet 2 Task 5 (header previously said "Task 4"; the filename and the
# output path "out/task5_..." both indicate Task 5).
#
# Coverage simulation: draws log-theta from an ASL distribution, simulates
# counts, fits the Poisson-log-normal MCMC, and records 95%/68% interval
# coverage plus posterior means/sds. Results are saved per job id.

# Command line supplies a single job id; default to 1 when absent/malformed.
args <- as.numeric(commandArgs(trailingOnly = TRUE))
if(length(args) != 1) {
args[1] = 1
}
job.id = args[1]
# External helpers: poisson.logn.mcmc(), simYgivenTheta(), isCovered95/68(),
# rASL() are defined in the sourced files.
source("poissonLogN_MCMC.R")
source("Lan_Tian_ps2_functions.R")
source("rASL.R")
# Observation weights, one per unit (first column of weights.txt).
w = read.table('weights.txt')[[1]]
# Map the job id onto one of 4 parameter settings, cycling 1..4.
select = job.id%%4
if (select == 0)
select = 4
# ASL parameter grids (one entry per setting): location x0, skew m, scale b.
x0 = c(1.6, 1.6, 1.6, 1.6)
m = c(0, -0.7, 0.7, 0)
b = c(1.3, 1.3, 1.3, 2.6)
x0 = x0[select]
m = m[select]
b = b[select]
Ntheta = 3 # number of theta vectors drawn
Ny = 40 # number of Y replicates drawn from each theta
J = 1000 # number of units per theta draw
# Preallocate result matrices (rows = units, cols = theta draws or draw*rep).
coverage95 = matrix(nrow = J, ncol = Ntheta)
coverage68 = matrix(nrow = J, ncol = Ntheta)
logtheta = matrix(nrow = J, ncol = Ntheta)
means = matrix(nrow = J, ncol = (Ntheta*Ny))
stds = matrix(nrow = J, ncol = (Ntheta*Ny))
for (nt in 1:Ntheta){
# Draw J log-theta values from the ASL prior for this parameter setting.
logtheta0 = rASL(J, x0, m, b)
Covered95 = matrix(nrow=J, ncol=Ny)
Covered68 = matrix(nrow=J, ncol=Ny)
theta0 = exp(logtheta0)
for (ny in 1:Ny){
# Simulate counts given theta, then fit the MCMC and keep posterior logTheta.
Y = simYgivenTheta(theta0, w, 2)
postlogtheta = poisson.logn.mcmc(Y, w)[["logTheta"]]
# Per-unit indicator: did the interval cover the true log-theta?
Covered95[,ny] = isCovered95(postlogtheta, logtheta0)
Covered68[,ny] = isCovered68(postlogtheta, logtheta0)
# Posterior summaries, stored column-wise per (theta draw, replicate).
means[, (nt-1)*Ny+ny] = rowMeans(postlogtheta)
stds[, (nt-1)*Ny+ny] = apply(postlogtheta, 1, sd)
}
# Empirical coverage rate across the Ny replicates for this theta draw.
coverage95[,nt] = rowSums(Covered95)/Ny
coverage68[,nt] = rowSums(Covered68)/Ny
logtheta[,nt] = logtheta0
}
# logtheta: matrix of true log-theta draws, one column per theta draw.
# coverage95/68: matrices of empirical coverage, one column per theta draw.
save(job.id, logtheta, coverage95, coverage68, means, stds, file=sprintf("out/task5_coverage_%d.rda", job.id))
|
b4bf040db4b2b44d2fe0e42e9d714b89fabb7fe5
|
dffc96de2021301fd93ccdd91bcfd912f95be448
|
/Atividades_1,2/Script_atv1,2.R
|
a3e7a278674a064e79bc0bb1d5a64a4e9ec33d64
|
[] |
no_license
|
CarlosFilgueira/ciencia_colab
|
fe25db0e0c7b752562f23fce8c2275a7a29b1d19
|
40aa3457cce1dae1d0a08ae69d916aa59972d3f3
|
refs/heads/master
| 2023-08-20T10:31:45.789958
| 2021-10-14T05:17:55
| 2021-10-14T05:17:55
| 416,744,453
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,622
|
r
|
Script_atv1,2.R
|
### Collaborative Science course ###
## Activities 01 and 02 ##

## ACTIVITY 01 - Data entry spreadsheets ##

# Load packages
library(tidyverse)
library(taxize)
library(vegan)
library(rgbif)
library(dplyr)

# Build the participant/sample assignment table
data.frame(
  participante = seq(1, 10, 1),
  fichas = c(read.csv("~/github/cursodados/data/iris_mod.csv", header = TRUE) %>%
               distinct("A116, A25, A69, A114, A21, A113, A95, A94, A26, A10, A136, A129, A147, A79, A115") %>%
               pull() %>%
               sample()),
  n = 'amostras'
) %>%
  pivot_wider(
    names_from = "n",
    values_from = "fichas"
  ) %>%
  knitr::kable()
?bind_rows()

# Read each participant's Activity 1 spreadsheet
plan1 <- read.csv("atividade1_carlos_filgueira.csv", header = TRUE)
plan2 <- read.csv("atividade1_CAROLINA-OLIVEIRA-VALE.csv", header = TRUE)
plan3 <- read.csv("atividade1_GABRIEL-DEPIANTTI.csv", header = TRUE)
plan4 <- read.csv("atividade1_GUSTAVO_VIANA.csv", header = TRUE)
plan5 <- read.csv("atividade1_ISABELLA-FERREIRA.csv", header = TRUE)
plan6 <- read.csv("atividade1_marcosdelucena.csv", header = TRUE)
plan7 <- read.csv("atividade1_marinasissini.csv", header = TRUE)
plan8 <- read.csv("atividade1_NILSON-BERRIEL.csv", header = TRUE)
plan9 <- read.csv("atividade1_pedrozau.csv", header = TRUE)
plan10 <- read.csv("atividade1_Vanessa Xavier.csv", header = TRUE)

# bind_rows() unites the spreadsheets. plan4 errored (to be fixed) and is
# excluded — note plan3 is also missing from this call. The remaining
# spreadsheets give 137 obs. of 8 variables.
bdados <- bind_rows(plan1, plan2, plan5, plan6, plan7, plan8, plan9, plan10)

## ACTIVITY 02 - Creating eMOF files ##
iris1 <- read.csv("iris_mod.csv", header = TRUE)
lapply(iris, unique)  # NOTE(review): inspects the built-in `iris`, not `iris1` — confirm which was intended
str(bdados)
str(iris1)
# The instructor-provided data is used for Activity 2, because the unified
# data above is not organised adequately.

## Check taxa in the 'iris1' database against ITIS
species <- iris1 %>%
  distinct(Species) %>%
  pull() %>%
  get_tsn() %>%
  data.frame() %>%
  bind_cols(iris %>%  # NOTE(review): binds the built-in `iris`; likely `iris1` was intended
              distinct(Species))
# Console output from get_tsn(), kept for reference. FIX(review): these lines
# were previously pasted in unquoted and broke parsing of this script.
# == 3 queries ===============
# Retrieving data for taxon 'Iris setosa'
# v Found: Iris setosa
# Retrieving data for taxon 'Iris versicolor'
# v Found: Iris versicolor
# Retrieving data for taxon 'Iris virginica'
# v Found: Iris virginica
# == Results =================
# * Total: 3
# * Found: 3
# * Not Found: 0

## Reformat the data to Darwin Core
iris_1 <- iris1 %>%
  dplyr::mutate(eventID = paste(site, date, sep = "_"), # create indexing fields
                occurrenceID = paste(site, date, amostra, sep = "_")) %>%
  left_join(species %>%
              select(Species, uri)) %>% # add the species' unique identifier
  dplyr::rename(decimalLongitude = lon, # rename fields per DwC
                decimalLatitude = lat,
                eventDate = date,
                scientificName = Species,
                scientificNameID = uri) %>%
  mutate(geodeticDatum = "WGS84", # add complementary fields
         verbatimCoordinateSystem = "decimal degrees",
         georeferenceProtocol = "Random coordinates obtained from Google Earth",
         locality = "Gaspe Peninsula",
         recordedBy = "Edgar Anderson",
         taxonRank = "Species",
         organismQuantityType = "individuals",
         basisOfRecord = "Human observation")

## eMOF spreadsheets
## create eventCore
eventCore <- iris_1 %>%
  select(eventID, eventDate, decimalLongitude, decimalLatitude, locality, site,
         geodeticDatum, verbatimCoordinateSystem, georeferenceProtocol) %>%
  distinct()

## create occurrence
occurrences <- iris_1 %>%
  select(eventID, occurrenceID, scientificName, scientificNameID,
         recordedBy, taxonRank, organismQuantityType, basisOfRecord) %>%
  distinct()

## create measurementsOrFacts
eMOF <- iris_1 %>%
  select(eventID, occurrenceID, recordedBy, Sepal.Length:Petal.Width) %>%
  pivot_longer(cols = Sepal.Length:Petal.Width,
               names_to = "measurementType",
               values_to = "measurementValue") %>%
  mutate(measurementUnit = "cm",
         measurementType = plyr::mapvalues(measurementType,
                                           from = c("Sepal.Length", "Sepal.Width", "Petal.Width", "Petal.Length"),
                                           to = c("sepal length", "sepal width", "petal width", "petal length")))

## Quality control
# check if all eventID matches
setdiff(eventCore$eventID, occurrences$eventID)
#character(0)
setdiff(eventCore$eventID, eMOF$eventID)
#character(0)
setdiff(occurrences$eventID, eMOF$eventID)
#character(0)

# check NA values
eMOF %>%
  filter(is.na(eventID))
# A tibble: 0 x 6
# ... with 6 variables: eventID <chr>, occurrenceID <chr>, recordedBy <chr>,
#   measurementType <chr>, measurementValue <dbl>, measurementUnit <chr>
occurrences %>%
  filter(is.na(eventID))
# Console output, commented out (FIX(review): was pasted in unquoted):
# [1] eventID              occurrenceID         scientificName
# [4] scientificNameID     recordedBy           taxonRank
# [7] organismQuantityType basisOfRecord
# <0 linhas> (ou row.names de comprimento 0)

## Write the matrices as text files
# NOTE(review): rm(list = setdiff(ls(), ...)) wipes the workspace — avoid in
# shared scripts; kept here to preserve the script's original behaviour.
rm(list = setdiff(ls(), c("eventCore", "occurrences", "eMOF")))
files <- list(eventCore, occurrences, eMOF)
data_names <- c("DF_eventCore", "DF_occ", "DF_eMOF")
# FIX(review): the directory was created as "Dwc_Files" but files were written
# to "DwC_Files" — on a case-sensitive filesystem the writes failed. Unified.
dir.create("DwC_Files")
path <- paste0(getwd(), "/", "DwC_Files")  # loop-invariant, hoisted
for (i in seq_along(files)) {
  write.csv(files[[i]], paste0(path, "/", data_names[i], ".csv"))
}
|
491652508ccaaa1730abd0cfaee68aa69fba0255
|
a55a9fe01bc8db5ac8ab4242a5a27a5946d20181
|
/ui.R
|
96984072b8357faeb8f6e77ea0043888e0c897e3
|
[] |
no_license
|
azeroz/ddp1
|
ca42912d5fc91ee62d46010a90a5d5a4db742a12
|
b0de163f7d57048604cee2d49802a5f8715fb726
|
refs/heads/master
| 2021-05-28T21:04:32.936594
| 2015-06-21T23:29:14
| 2015-06-21T23:29:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,108
|
r
|
ui.R
|
# Shiny UI: sidebar layout for a linear-regression explorer on mtcars.
# The selected checkboxes feed the model formula built server-side; the main
# panel shows fit statistics (F, sigma, R^2) and a residual plot.
shinyUI(pageWithSidebar(
headerPanel("Linear Regression with mtcars"),
# Sidebar: description, source link, and predictor-variable checkboxes.
sidebarPanel(
h6(paste("This application uses the mtcars dataset to investigate the effects of",
"multiple variables on the linear regression model. Selected variables",
"below will be included in the formula used to build the linear regression",
"model. Performance of the model is displayed along with a quantile plot.")),
a("https://github.com/marckr/ddp1"),
p(),
# Checkbox group "variable": display label = mtcars column name used server-side.
checkboxGroupInput("variable", "Predictor Variable:",
c("Transmission" = "am",
"Weight" = "wt",
"Cylinders" = "cyl",
"Displacement" = "disp",
"Horsepower" = "hp"))
),
# Main panel: model statistics rendered by matching server outputs.
mainPanel(
div(
strong("F Statistic: "),
textOutput("fstatistic")),
br(),
div(
strong("Sigma: "),
textOutput("sigma")),
br(),
div(
strong("R Squared: "),
textOutput("rsquared")),
plotOutput('residualPlot')
)
))
|
001d14d5b28e5f65ca21cf755629fbdd614c8d26
|
fb717eec1432650b78e989ad1b6969ba37e9a6b6
|
/Rpack/man/profiles.Rd
|
3d4da50d579061d5bbad8f8ad287d6dcac33bc20
|
[] |
no_license
|
traets/IASB
|
116a4c60f3d1213f383d95739448e75dcbe038d4
|
bbba998a358c2a3690585f1fe78d2fe436f8a053
|
refs/heads/master
| 2021-06-16T07:49:17.388140
| 2017-04-24T12:39:35
| 2017-04-24T12:39:35
| 62,038,893
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 746
|
rd
|
profiles.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/generation.R
\name{profiles}
\alias{profiles}
\title{Profiles generation}
\usage{
profiles(lvls, coding, intercept = FALSE)
}
\arguments{
\item{lvls}{A vector which contains for each attribute, the number of levels.}
\item{coding}{Type of coding that needs to be used. See ?contrasts for more information.}
\item{intercept}{Logical argument indicating whether an intercept should be included. The Default is False.}
}
\value{
A list containing 2 matrices, one contains all possible profiles with discrete levels, the other contains the coded version.
}
\description{
Function to generate all possible combinations of attribute levels (i.e. all possible profiles).
}
|
fc653280f02653c88da5b35329cfe5d266e3fb3c
|
54ff210f8313184137e68f4a337c1b0bfd74af56
|
/man/r6_extract_methods.Rd
|
ce6eb77c9e0d998dc2bbc740cee6888b0b2dd849
|
[
"MIT"
] |
permissive
|
petermeissner/db6
|
fdcf3fac8ffe29d36bf983ba14b67f406a86e352
|
fbff81c8c7a319efc8d84127c18d8952d4aa39ac
|
refs/heads/master
| 2020-03-27T01:54:39.952645
| 2018-10-31T15:34:32
| 2018-10-31T15:34:32
| 145,753,015
| 5
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 334
|
rd
|
r6_extract_methods.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/r6_extract_methods.R
\name{r6_extract_methods}
\alias{r6_extract_methods}
\title{r6_extract_methods}
\usage{
r6_extract_methods(r6)
}
\arguments{
\item{r6}{R6 object instance}
}
\description{
Extract public methods from R6 object instances
}
|
de2dcbabdce8ccd88c7f77b961af712b3881756a
|
b22595c1f5903877cf6d6c13ece977f246aa6bdd
|
/Plot2.R
|
d81992c24f2efee48d9af0bc0491e9936123a9d0
|
[] |
no_license
|
mw179/ExData_Plotting1
|
99952db216a4963478c67cdb5a60d5ff5a7b4f9e
|
1dff15eccbe1e5e1f13805e5bd88f6b8641c5ed9
|
refs/heads/master
| 2021-01-22T15:11:21.426085
| 2015-06-07T23:36:38
| 2015-06-07T23:36:38
| 37,034,540
| 0
| 0
| null | 2015-06-07T22:22:48
| 2015-06-07T22:22:48
| null |
UTF-8
|
R
| false
| false
| 584
|
r
|
Plot2.R
|
# Plot 2: Global Active Power (kW) over 2007-02-01..2007-02-02 as a line
# chart, written to plot2.png.
# This is a large dataset, and may take a long time to load.
# FIX(review): '?' marks missing values in this file; without na.strings the
# numeric columns are read as factors and as.numeric() returns level indices
# instead of kilowatt values.
dat <- read.table("household_power_consumption.txt", header = TRUE, sep = ";",
                  na.strings = "?", stringsAsFactors = FALSE)
# Isolate the two target days
dates <- as.Date(dat$Date, "%d/%m/%Y")
start_date <- as.Date("2007-02-01", "%Y-%m-%d")
end_date <- as.Date("2007-02-02", "%Y-%m-%d")
date_range <- dates >= start_date & dates <= end_date
dataset <- dat[date_range, ]
# Build a proper datetime axis: plotting the raw Time column (character)
# would not order the x-axis chronologically across the two days.
datetime <- strptime(paste(dataset$Date, dataset$Time), format = "%d/%m/%Y %H:%M:%S")
# PLOT 2
# FIX(review): the previous plot.new() before png() opened an unwanted extra
# screen device; draw directly on the PNG device instead.
png(filename = "plot2.png", width = 480, height = 480)
plot(datetime, as.numeric(dataset$Global_active_power), type = "l",
     ylab = "Global Active Power (kilowatts)")
title(main = "Plot 2")
dev.off()
|
08fcc40a5e9433af5a8c9943b46a808863644033
|
592a15eb25edb66fff4a938d3a20969aa8d2e3aa
|
/man/NIRpaarorDataSQL.Rd
|
0ceaf2810fb719012403dc900417a9f26de2e3ca
|
[] |
no_license
|
Rapporteket/intensiv
|
a6accf24600e47cded5065a988f1192e5ba62c14
|
544b6b120efa17da041eba1f53a986712f648ec3
|
refs/heads/rel
| 2023-07-19T03:01:43.656550
| 2023-07-12T09:38:42
| 2023-07-12T09:38:42
| 60,067,715
| 0
| 0
| null | 2023-07-06T05:53:31
| 2016-05-31T07:30:48
|
R
|
UTF-8
|
R
| false
| true
| 731
|
rd
|
NIRpaarorDataSQL.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/NIRpaarorDataSQL.R
\name{NIRpaarorDataSQL}
\alias{NIRpaarorDataSQL}
\title{Henter data fra pårørendeskjema registrert for Intensiv og kobler til
noen variabler fra hovedskjema.}
\usage{
NIRpaarorDataSQL(datoFra = "2015-12-01", datoTil = Sys.Date(), medH = 0)
}
\arguments{
\item{datoFra}{Tidligste dato i utvalget (vises alltid i figuren). Standard 2019-01-01.
Registeret inneholder registreringer f.o.m. 2011}
\item{datoTil}{Seneste dato i utvalget (vises alltid i figuren).}
\item{medH}{kobler på variabler fra hovedskjema}
}
\value{
Henter dataramma RegData for Intensivregisteret
}
\description{
Henter data for Intensivregisterets database
}
|
e6111ff3a599ebe915040c4da4ba2011b83ef6f2
|
2b7562a55a2a7176402d832795db563ded09d80d
|
/cachematrix.R
|
370894e0464735b22c1d8bdf10d978f4dad7a94f
|
[] |
no_license
|
Vital95/ProgrammingAssignment2
|
e8369f75fdc5726e260fc5c6f7354391760e9197
|
d3eda4a9c18a03ac057c6c2a65261783db903158
|
refs/heads/master
| 2021-01-19T01:15:16.907475
| 2017-04-04T21:51:26
| 2017-04-04T21:51:26
| 87,233,785
| 0
| 0
| null | 2017-04-04T20:49:52
| 2017-04-04T20:49:52
| null |
UTF-8
|
R
| false
| false
| 1,384
|
r
|
cachematrix.R
|
## This couple of functions allows to efficiently compute inverse matrix
## with ability to retrieve cached data if input is the same
## How to use:
## > matrix
## [,1] [,2]
## [1,] 5 1
## [2,] 4 8
## > newMatrix <- makeCacheMatrix(matrix)
## > solve<- cacheSolve(newMatrix)
## > solve
## [,1] [,2]
## [1,] 0.2222222 -0.02777778
## [2,] -0.1111111 0.13888889
## > class(solve)
## [1] "matrix"
## > solve<- cacheSolve(newMatrix)
## Getting cached data
## > solve
## [,1] [,2]
## [1,] 0.2222222 -0.02777778
## [2,] -0.1111111 0.13888889
## Creates a list with set/get parameters that
## help distinguish if matrix needs to be
## calculated or take data from cache if cache is not empty
makeCacheMatrix <- function(x = matrix()) {
m <- NULL
set <- function(y){
x <<- y
m <<- NULL
}
get <- function() x
setInverse <- function(solve) m <<- solve
getInverse <- function() m
list(set = set, get = get, setInverse = setInverse,
getInverse = getInverse)
}
## Computes inverse of the first matrix and puts data in cache
## return cached data if same matrix have entered again
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
m <- x$getInverse()
if(!is.null(m))
{
message("Getting cached data")
return(m)
}
data <- x$get()
m <- solve(data, ...)
x$setInverse(m)
m
}
|
a01360f53011c90d1602ef198c0097cc5a6282fb
|
c680fe4504ef675532f088c610b0437bfdb7cf6a
|
/cachematrix.R
|
63a2705042d3a47e217acd3c52d1dc9f2cc4ba94
|
[] |
no_license
|
angelalegaspi/ProgrammingAssignment2
|
92397c128e28dce322444b48d1a68f384bf0deb7
|
ad64c0d79910dcecd70c259375212ba0e1defc67
|
refs/heads/master
| 2021-08-29T12:21:44.013715
| 2017-12-13T23:55:38
| 2017-12-14T00:17:05
| 113,756,072
| 0
| 0
| null | 2017-12-10T14:21:10
| 2017-12-10T14:21:10
| null |
UTF-8
|
R
| false
| false
| 885
|
r
|
cachematrix.R
|
##This is a list of functions
makeInverse <- function(x = matrix()) {
i <- NULL #Sets the inverse to NULL
set <- function(y) {
x <<- y ##Saves called matrix outside present environment
i <<- NULL #Reset inverse to null every time called matrix is changed
}
get <- function() {x} ##Returns called matrix
setsolve <- function(solve) {i <<- solve} ##Saves inverse outside present environment
getsolve <- function() {i} ##Returns the Inverse
list(set = set, get = get,
setsolve = setsolve,
getsolve = getsolve)
}
cachesolve <- function(x, ...) {
i <- x$getsolve() #Retrieves the inverse
if(!is.null(i)) {
message("retrieving inverse")
return(i) ##If i is not null then return
}
data <- x$get() #Called matrix is retrieved
i <- solve(data, ...) #Inverse is computed
x$setsolve(i) #and cached in list
i #return to user
}
|
d9449745b66a7e0685dc4f53b70bf6912ef3bd51
|
edf49cec9bf1872d8855113c106b4fc5900bdfed
|
/cachematrix.R
|
3e30e048222fbb37e2e155cd9403e009dd41ca1b
|
[] |
no_license
|
lc19940813/ProgrammingAssignment2
|
5e5ab0290b47512f1d22ec67657a24ccff9c9351
|
a802a64ad36d40a717b807c0dad8a5ab083873f0
|
refs/heads/master
| 2021-01-15T22:00:33.287124
| 2016-02-28T08:02:53
| 2016-02-28T08:02:53
| 52,659,126
| 0
| 0
| null | 2016-02-27T09:19:44
| 2016-02-27T09:19:44
| null |
UTF-8
|
R
| false
| false
| 1,514
|
r
|
cachematrix.R
|
## In this R file, we can reduce the cost of computing the inversion of a matrix repeatedly by storing
## its inverse matrix so the next time we use it by withdrawing it from the cache matrix object we created
## makeCacheMatrix: This function creates a special "matrix" object that can cache its inverse.
makeCacheMatrix <- function(x = matrix()) {
inv_matrix <- NULL
set <- function(y){
x <<- y
inv_matrix <<- NULL
}
get <- function() x
set_inv <- function(inverse_matrix) inv_matrix <- inverse_matrix
get_inv <- function() inv_matrix
list(set = set, get = get, set_inv = set_inv, get_inv = get_inv)
}
## cacheSolve: This function computes the inverse of the special "matrix" returned by makeCacheMatrix above.
## If the inverse has already been calculated (and the matrix has not changed), then the cachesolve should retrieve the inverse from the cache.
cacheSolve <- function(x, ...) {
inv_matrix <- x$get_inv()
if(!is.null(inv_matrix)){
message("getting cached data")
return(inv_matrix)
}
data <-x$get()
inv_matrix <-solve(data)
x$set_inv(inv_matrix)
inv_matrix
## Return a matrix that is the inverse of 'x'
}
## I've created a testing case for this R file. It turns out that all the functions and values works properly.
test_matrix <- matrix(nrow = 3, ncol = 3,data = c(1,1,1,2,3,5,4,9,25))
y <- makeCacheMatrix(test_matrix)
class(y)
result_matrix <- cacheSolve(y)
class(result_matrix)
print(result_matrix)
|
bad914194f50aff92da31a4a13f9b81ea0e2d0a2
|
a6b5611ffa3310eb97b3e7e9c51ea8118f53adb4
|
/Functions/f_predicting_links_HSM.R
|
092655bf288d2abe0de2daad163d47e226ba9592
|
[] |
no_license
|
SalomeLang/link-prediction-social-networks
|
f3b6032a70f1ef9ef57ed8c68911f74250c1897a
|
837dc519d8329aa6d798022cc1d046e363ca2593
|
refs/heads/master
| 2022-07-11T02:34:17.389020
| 2020-05-21T11:30:32
| 2020-05-21T11:30:32
| 265,830,467
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,331
|
r
|
f_predicting_links_HSM.R
|
# Copyright (c) 2015. All rights reserved.
####################################################################################################
# Proj: Link Prediction
# Desc: Predicting links using Hierarchical Structure Model
# Auth: Bublitz Stefan, Gaegauf Luca, Lang Salome, Sutter Pascal
# Date: 2015/12/07
hsm.number.of.samples <- 1000
PredictLinksHSM <- function(net.train){
# =======================================================================================
# Predict links using the Hierarchical Structure Model
# Arguments:
# - net.train: Network, for which links have to be predicted
# Returned values/objects:
# - Returns a matrix, containing prediction values obtained using the Hierarchical
# Structure Model for all links in net.train
# Libraries required:
# - igraph
# =======================================================================================
# Calculate prediction
prediction <- hrg.predict(net.train, num.samples = hsm.number.of.samples)
# Create adjacency matrix to display results
number.nodes <- length(V(net.train))
result <- matrix(0, number.nodes, number.nodes)
# Update predicted edges as bidirectional
result[prediction$edges[, 1:2]] <- prediction$prob
result[prediction$edges[, 2:1]] <- prediction$prob
# Extract unobserved links
result
}
|
a40b3c59470779c705d75d6afd676c774a1b5457
|
a9098f4badaf8222a47fd3864b9adf193de880a2
|
/ui.R
|
60886d9a6c10bb1044fa9f79afb9f77c4451c8f4
|
[] |
no_license
|
majisomnath/DevelopingDataProduct
|
f03a7cfe7cdba014bbb3bb08fc5a891a5809f2ac
|
a2a1edf0022f5b90e691513ba42765bc928f3c06
|
refs/heads/master
| 2021-05-03T16:36:53.254291
| 2018-02-06T10:42:34
| 2018-02-06T10:42:34
| 120,439,611
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,069
|
r
|
ui.R
|
# Call required libraries
library(shiny)
library(ggplot2)
dataset <- diamonds
pageWithSidebar(
# Add header panel with name of the application
headerPanel("Diamond Dataset Analysis"),
sidebarPanel(
# Add text to explain data attributes
h4(' The variables are as follows:'),
h6('1. price : diamond price in USD'),
h6('2. carat : diamond weight'),
h6('3. cut : quality of the cut'),
h6('4. color : diamond color (J-Worst to D-Best)'),
h6('5. clarity : how clear diamond is'),
h6('6. x : length in mm'),
h6('7. y : width in mm'),
h6('8. z : depth in mm'),
h6('9. depth : depth percentage'),
h6('10. table : width of top of diamond'),
br(),
# Some form ofuser input for plotting
h4(' Select different parameters:'),
sliderInput('sampleSize', 'Sample Size', min=1, max=nrow(dataset), value=min(5000, nrow(dataset)), step=100, round=0),
selectInput('x', 'X Axis Measure', names(dataset)),
selectInput('y', 'Y Axis Measure', names(dataset), names(dataset)[[7]]),
selectInput('color', 'Measure Color', c('None', names(dataset)), names(dataset)[[4]]),
selectInput('facet_row', 'Facet Row', c(None='.', names(dataset)), names(dataset)[[2]]),
selectInput('facet_col', 'Facet Column', c(None='.', names(dataset))),
textInput('caption', 'Plot Caption', value='Plot on Diamon Dataset')
),
mainPanel(
# Add text to guide users of this application
h4('Introduction & How to use'),
p("A dataset containing the prices and other attributes of almost 54,000 Diamonds. Using this shiny application we can interactively change different plot attributes and App will plot those. This is very easy and interactive application which gives an idea of Diamond attributes and relation between them."),
br(),
# call plot function
plotOutput('plot')
)
)
|
a6dd085da43d0806a794bcbd86bc6954bf8311c4
|
16c6a136123cefe07cbd1cac3bb893a6a1521f93
|
/generate_trajectories/retrieve_NARR_data.R
|
00b1f0965913fec9216f756d4f91dd66710788c0
|
[] |
no_license
|
bendabel/SEPPR_moisture_source
|
6de600584bcdf7ec2a508e59b9e1731a1168b134
|
180fc503877ca8747d0a633af53ec755495a6746
|
refs/heads/main
| 2023-07-02T19:07:04.833622
| 2021-08-10T22:04:32
| 2021-08-10T22:04:32
| 394,783,707
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 239
|
r
|
retrieve_NARR_data.R
|
#Retrieve desired NARR files from Hysplit ftp server
source("getMet.R")
#Choose years, months, and save location and place in vectors below
years <- 1979:1980
months <- 5:9
path <- "C:/hysplit/NARR/"
getMet(years, months, path)
|
272835eccb4379b93b115e29d2460ace59e834f3
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/simecol/examples/pcuseries.Rd.R
|
15b73378505c5acb47b74c9f205e46eef01b6882
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 312
|
r
|
pcuseries.Rd.R
|
library(simecol)
### Name: pcuseries
### Title: Generate Plackett Bivariate Random Numbers
### Aliases: pcuseries pcu rho2alpha alpha2rho
### Keywords: distribution
### ** Examples
x <- runif(100)
y <- pcu(x, rho = 0.8)
plot(x, y)
cor(x, y)
x <- pcuseries(1000, rho=0.8)
plot(x, type="l")
acf(x)
pacf(x)
|
d0d3afdea4bf66469f7bdd736fa76b01e59431ec
|
50d5966803e3a2bb9b1d3be0a21a53725dac6547
|
/old_files/SR_model_Run_brood_switch_Bayesian.R
|
121faef6bd30f063162efe5f6da772f84f029efe
|
[] |
no_license
|
hhamazaki/Shiny-Apps
|
9a474cbc562001e4427ef8dcc04effd4a753ed5f
|
23bcc89208079bfe264016136b868f196b8c0ee2
|
refs/heads/develop
| 2023-01-20T11:00:15.488378
| 2023-01-18T04:49:15
| 2023-01-18T04:49:15
| 235,636,206
| 0
| 2
| null | 2023-01-18T04:54:08
| 2020-01-22T18:19:00
|
R
|
UTF-8
|
R
| false
| false
| 87,071
|
r
|
SR_model_Run_brood_switch_Bayesian.R
|
#initialize
library(shiny)
library(shinythemes)
library(datasets)
#library(lmtest)
library(reshape2)
#library(mgcv)
library(MCMCpack)
#library(maptools)
#library(nlme)
#library(AICcmodavg)
library(bsplus)
library(coda)
library(R2jags)
library(openxlsx)
#=======================================================================
# UI:
#=======================================================================
ui<-fluidPage(
navbarPage(
theme = shinytheme("cerulean"), id = "tabs",
"Pacific Salmon Escapement Goal Analyses",
#++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# Panel 1: Data Input and Submit
#++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
tabPanel("Data Input",
sidebarPanel(width = 3,
#------------------------------------------------------------------------
# File Inuput
#------------------------------------------------------------------------
# Data Type
selectInput(inputId="dataType","Data Type", choices = c('S-R','Run')),
p("Choose Summarized SR or Run data"),
# Input: Select a file ----
fileInput("file1", "Choose csv / tab File",
multiple = TRUE,
accept = c("text/csv",
"text/comma-separated-values,text/plain",
".csv")),
# Input: Checkbox if file has header ----
checkboxInput("header", "Header", TRUE),
# Input: Select separator ----
radioButtons("sep", "Separator",
choices = c(Comma = ",", Tab = "\t"), selected = ","),
#------------------------------------------------------------------------
# Conditional File Inuput UI
#------------------------------------------------------------------------
#-----------Run data Input UI--------------------------------------------
conditionalPanel(
condition = "input.dataType == 'Run'",
# Input: Checkbox if file has header ----
p("Select First age of run"),
# Input: Select what to display
numericInput("fage", "First Retun Age", value=4,min=1,max=20,step=1),
p('Return Age =FW Age+SW Age + 1')
),# End Conditional Panel
p("Bayesian Simulation Setting"),
numericInput(inputId='n.iter','Simulation Length',value=10000,min=0,step=10000),
numericInput(inputId='n.burnin','Burn-in Length',value=1000,min=0,step = 1000),
numericInput(inputId='n.thin','Thinning',value=10,min=0,step = 1),
numericInput(inputId='n.chain','Number of Chains',value=1,min=1,step = 1),
selectInput('Model',"Select SR Model",choice=list('Ricker','Ricker AR1','Beverton-Holt','Deriso-Shunute'),selected ='Ricker'),
p("Start Bayesian Analyses"),
actionButton("RunBayes","Run")
), # End of Sidebar Panel
# output
#------------------------------------------------------------------------
# Main Panel: Input Data Summary
#------------------------------------------------------------------------
mainPanel(
tabsetPanel(
tabPanel("Data Table",
conditionalPanel(
condition = "input.dataType == 'S-R'",
h4("S-R Data file column orders: Year, Spawner (Escapement), Recruit")
),
conditionalPanel(
condition = "input.dataType == 'Run'",
h4("Run Data file column orders: Year, Escapement, Run,
Run by age (or proportion) from youngest to oldest")
),
dataTableOutput("table")),
tabPanel("Brood Table",
conditionalPanel(
condition = "input.dataType == 'Run'",
dataTableOutput("btable"))
# Horizontal line ----
),
tabPanel("Summary",
verbatimTextOutput('summary'),
plotOutput('hist')
)
) # End tabsetPanel
) # End mainPanel
), # End tabanle
#++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# Panel 2 SR Analyses
#++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
tabPanel("SR Analyses",
sidebarPanel(width = 3,
textInput(inputId='caption',label='Figue Caption Title',value=''),
selectInput(inputId="ui","Axis Dislpay Unit", choices = c(1,10,100,1000,1000000)),
p("Escapement Goal Range"),
numericInput(inputId='egl','Lower Goal',value=0,min=0),
numericInput(inputId='egu','Upper Goal',value=0,min=0),
checkboxInput(inputId="show.eg", "Show Escapement Goal", FALSE),
checkboxInput(inputId="show.points", "show Years", TRUE),
checkboxInput(inputId="show.smsy", "show Smsy", TRUE),
checkboxInput(inputId="show.smax", "show Smax", TRUE),
checkboxInput(inputId="show.int", "show Interval", TRUE),
numericInput(inputId="p.i", "% Interval", value=90,min=0,max=100,step=5),
selectInput(inputId="Li","Interval Type", choices = c('confidence','prediction')),
numericInput("bn", "Number bootstrap replicates", value=10000,min=1000,step=1000)
), # End sidebarPanel
#------------------------------------------------------------------------
# SR Analyses Output
#------------------------------------------------------------------------
mainPanel(tabsetPanel(
#------------------ SR Plot----------------------------------------------
tabPanel("SR Plot",
plotOutput(height='500px',"p"),
# downloadButton("down", label = "Download the plot"),
p(strong("Anova Table")),
verbatimTextOutput('test'),
p(strong("SR Parameters")),
verbatimTextOutput('BayesSum')
# verbatimTextOutput('RS.out')
),
#------------------ Yield Plot-------------------------------------------
tabPanel("Yield Plot",
plotOutput(height='500px','py')
),
#------------------ Time Series -------------------------------------------
tabPanel("Time Series",
plotOutput("srt"),
conditionalPanel(condition = "input.dataType == 'Run'",
plotOutput('runesc')
)
),
#------------------ Residuals ---------------------------------------------
tabPanel("Residuals",
plotOutput("Resid"),
p(strong("Durbin-Watson Serial Correlation Analyses")),
verbatimTextOutput('dwtest')),
#------------------ Bootstrap -----------------------------------------------
tabPanel("Bootstrap",
verbatimTextOutput('bsummary'),
verbatimTextOutput('bquantile'),
# dataTableOutput("boot.R"),
plotOutput("bhist"))
)#End tabsetPanel
)#End mainPanel
),#End tabPanel
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# Panel 3 Escapement Goal Aanalyses
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
navbarMenu("Escapement Goal Analyses",
#------------------------------------------------------------------------
# Smsy Goal Analyses
#------------------------------------------------------------------------
tabPanel("Smsy Goal Analyses",
sidebarPanel(width = 3,
p(strong("Smsy Analyses")),
numericInput("p1", "Min % of MSY", value=90,min=0,max=100,step=5),
numericInput("p3", "% Meeting MSY Target", value=90,min=0,max=100,step=5)
),
mainPanel(
tabsetPanel(
#------------------ Smsy Profile -----------------------------------------------
tabPanel("Smsy Profile",
plotOutput(height='500px','bsmsy'),
verbatimTextOutput("bsmsyt")),
#------------------ Smsy Yield Profile -----------------------------------------
tabPanel("Yield & Recruit Profile",
# splitLayout(cellWidths = c("50%", "50%"),
plotOutput(height='300px','bsmsy.y'),
plotOutput(height='300px','bsmsy.r')
# )
)
)#End tabsetPanel
)#End mainPanel
),#End tabPanel
#------------------------------------------------------------------------
# Smax Goal Analyses
#------------------------------------------------------------------------
tabPanel("Smax Goal Analyses",
sidebarPanel(width = 3,
p(strong("Smax Analyses")),
numericInput("sp", "Min % of Rmax", value=90,min=0,max=100,step=5),
numericInput("sp1", "% Meeting Rmax Target", value=90,min=0,max=100,step=5)
), # End sidebarPanel
mainPanel(
tabsetPanel(
#------------------ Smax Profile -----------------------------------------------
tabPanel("Smax Profile",
plotOutput(height='500px','bsmax1'),
verbatimTextOutput('bsmaxt')
), # End tabrPanel
#------------------ Run Profile -----------------------------------------------
tabPanel("Run Profile",
# splitLayout(cellWidths = c("50%", "50%"),
plotOutput(height='300px',"bsmax.r"),
plotOutput(height='300px',"bsmax")
# )
)# End tabPanel
)#End tabsetPanel
)#End mainPanel
),#End tabPanel
#------------------------------------------------------------------------
# Yield & Recruit Goal Analyses
#------------------------------------------------------------------------
tabPanel("Yield & Recruit Goal Analyses",
sidebarPanel(width = 3,
p(strong("Yield GoalAnalyses")),
numericInput("y1", "Min Mean Yied", value=100000,min=0, step=10000),
numericInput("y1p", "Min % Achieve", value=90,min=0, max=100,step=5),
p(strong("Recruit Goal Analyses")),
numericInput("r1", "Min Mean Recruit", value=100000,min=0, step=10000),
numericInput("r1p", "Min % Achieve", value=90,min=0, max=100,step=5)
), # End sidebarPanel
mainPanel(
tabsetPanel(
#------------------ Yield Goal Profile -----------------------------------------------
tabPanel("Yield Goal Analyses",
# splitLayout(cellWidths = c("50%", "50%"),
plotOutput(height='300px','byield'),
plotOutput(height='300px','byp')
# )
,
splitLayout(cellWidths = c("50%", "50%"),
verbatimTextOutput("byt"),
textOutput("bypt"))),
#------------------ Recruit Goal Profile ----------------------------------------------
tabPanel("Recruit Goal Analyses",
# splitLayout(cellWidths = c("50%", "50%"),
plotOutput(height='300px','breturn'),
plotOutput(height='300px','brp')
# )
,
splitLayout(cellWidths = c("50%", "50%"),
verbatimTextOutput("brt"),
textOutput("brpt"))
)# End tabPanel
)#End tabsetPanel
)#End maiPanel
),#End tabPanel
#------------------------------------------------------------------------
# Custom Escapement Goal Evaluation
#------------------------------------------------------------------------
tabPanel("Escapment Goal Evaluation",
sidebarPanel(width = 3,
p(strong("Select Lower and Upper Escapement Goal")),
numericInput("lg", "Lower Goal", value=50000,min=0, step=1000),
numericInput("ug", "Upper Goal", value=100000,min=0, step=1000),
p("Submit Goal Range for Analyses"),
actionButton("Run","Run"),
# Horizontal line ----
tags$hr(),
numericInput("rg", "Target Recruit", value=200000,min=0, step=1000),
numericInput("yg", "Target Yield", value=100000,min=0, step=1000)
), #End Sidepar Panel
mainPanel(
tabsetPanel(
#------------------ Expected Mean Recruit and Yields ----------------------------------
tabPanel("Expected Mean Recruit & Yields",
plotOutput(height='600px',"bGAf"),
splitLayout(cellWidths = c("50%", "50%"),
p(strong("Recruit and Yields Summary")),
p(strong("Probability of Meeting Target"))),
splitLayout(cellWidths = c("50%", "50%"),
verbatimTextOutput("bGAs"),
verbatimTextOutput("bGAt"))
), #End tab Panel
#------------------ Recruit Annual Recruit and Yields ---------------------------------
tabPanel("Expected Annual Recruit & Yields",
plotOutput(height='600px',"bGASR"),
splitLayout(cellWidths = c("50%", "50%"),
p(strong("Recruit and Yields Summary")),
p(strong("Probability of Meeting Target"))),
splitLayout(cellWidths = c("50%", "50%"),
verbatimTextOutput("bGASRs"),
verbatimTextOutput("bGASRt"))
) #End tab Panel
)#End tabsetPanel
)#End main Panel
)#End tabPanel
),#End nabVarMenu
#++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# Panel 4 Management Strategy Evaluation
#++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
navbarMenu("MSE Analyses",
#------------------------------------------------------------------------
# Simulation model UI
#------------------------------------------------------------------------
tabPanel("Simulation Model",
sidebarPanel(width = 3,
p(strong("Modeling Parameters")),
selectInput(inputId="EGm","Escapement Goal", choices = c('Smsy','Smax')),
sliderInput("EGlu", label = "Escament Goal Range", min = 0, max = 3, value = c(0.8, 1.6),step=0.1),
selectInput(inputId="cmode","Fishery Oepnng above Escapement Goal", choices = c('Lower','Middle','Upper')),
numericInput(inputId="maxH", "Maximum Harvest", value=100000,min=0,step=10000),
sliderInput(inputId="maxHr", "Maximum Surplus Harvest Rate", value=0.5,min=0,max=1,step=0.1),
numericInput(inputId="EGY", "Update Escapement Goal Years", value=6,min=1,step=1)
), #End sidebarPanel
mainPanel(
tabsetPanel(
#------------------------------------------------------------------------
# Simulation Run
#------------------------------------------------------------------------
tabPanel("Simulation Run",
fluidRow(
p(("To compare outcomes of the same startegy, reapeat Simulation and Simuulate")),
p(("To compare outcomes of different startegy, change strategis and click Simulate"))
),
fluidRow(
column(3, actionButton("InitRun","Initialize")),
column(3, actionButton("SimRun","Simulate")),
column(3,actionButton("SimClear","Clear Results"))
),
fluidRow(
plotOutput(height='500px',"runsim"),
verbatimTextOutput("simsum"),
plotOutput("simhist")
)
),
tabPanel("Sim Summary",
plotOutput('altsim.H'),
verbatimTextOutput("altsim.sum")
),
tabPanel("Sim time series",
plotOutput(height='600px',"altsim.N"),
downloadButton("simdownload", "Download")
),
#------------------ Model Parameters ---------------------------------
tabPanel("Model Parameters",
fluidPage(
title = 'Set MSE Simulaiton Initization Parameters',
hr(),
fluidRow(
column(4,
p(strong("Simulation Years")),
sliderInput(inputId="burnin", "Burnin", value=25,min=0,max=100,step=5,round=0),
sliderInput(inputId="train", "Training", value=25,min=0,max=100,step=5,round=0),
sliderInput(inputId="simy", "Management", value=50,min=0,max=100,step=5,round=0)
),
column(4,
p(strong("Errors")),
sliderInput(inputId="spred", "Preseason Run prediction %E", value=20,min=0,max=100,step=5),
sliderInput(inputId="simpH", "Management Imprementation %E", value=10,min=0,max=100,step=5),
sliderInput(inputId="sobsH", "Harvest Observation %E", value=10,min=0,max=100,step=5),
sliderInput(inputId="sobsE", "Escapement Observation %E", value=30,min=0,max=100,step=5),
sliderInput(inputId="Nobage", "Age Comp Sample size", value=100,min=10,max=500,step=10)
),
column(4,
p(strong("Population Errors")),
sliderInput(inputId="phi", "AR1 correlation", value=0.6,min=0,max=1,step=.1),
sliderInput(inputId="D", "Drishelet D", value=50,min=0,max=200,step=10)
)
) # End fluidRow
)# End fluidOPage
) # End tabPanel
)#End tabsetPanel
)#End mainPanel
),#End tabPanel
#------------------ Model Descriptions ---------------------------------
tabPanel("Model Description",
tabsetPanel(
#------------------ Model Structure -----------------------------------
tabPanel("Model Structure",
h2("Model steps"),
p("Management Strategy Evaluation (MSE) simulation model take follwoing steps"),
p("1. Set preseason run forecast: Each year, run size is forecasted"),
p("2. Set preseason harvest target: Based on preseason run size and harvest strategy, harvest target is determined."),
p("3. Implenent harvet: Harvest strategy is implemented"),
p("4. Escapement: Escapement is Actual run minus harvest"),
p("5. Recruitment: Future recuritment is determined by Ricker SR model"),
p("6. Run: Annual run consists of Recuritment and maturity schedule (brood return age proporion"),
p("7. Back to step 1."),
h2("Management stragey"),
p("Management strategy is based on setting escapement goal."),
p("Each year annual harvest and escapement is observed (with error), brood table is built,
and Ricker SR parmeters are estimated."),
p("Management target: the model considered two taregets: Smsy,and Smx. Escapement goal range is set as
x% below and y% above target."),
p("Fishery opening triger: the model has 3 trigers: Lower (open fishey when preseson run exceeds
lower escapement goal), Middle (open above mid-escapemnt goal), and Upper (open above upper
escapement goal"),
p("Fishery target: Harvestable surplus is preseason run minus escapement goal. The model considers two criteria:
maximum harvest and maximum harvest rate. Maximum harvest is the maximum number of fish harvested. Maximum harvest rate
is the maximum surplus harvest rate. 100% harvest rate means that all fish above escapement target will be harvested."),
p("Frequency of evaulating escapement goal. Model can change escapement goal every x years. Typical bord cycle is
every 6 years")
),
#------------------ Parameters Description ---------------------------------
tabPanel("Base model Parameters",
h3("Simulation length"),
p("- Burnin: Years to make modle stabilize "),
p("- Training: Years SR data are collected befre setting active management"),
p("- Managmenet: Years active mangement is conducted"),
h3("Management Errors"),
p("Fishery management takes following steps: 1) predict preseason run size,
2) determin harves target, 3) execute harvests, and
4) observe harvest and escapetnt to set and escapement goal. The model incorporates errors
associated with each step. Errors were modeled as independent log-normal"),
p("- Preseaon Run prediction: Accuracy +/- x%"),
p("- Management Imprementation: Accuracy +/- x%"),
p("- Harvest Observation: Accuracy +/- x%"),
p("- Escapement Observation: Accuracy +/- x%"),
p("Observed postseason run size is Observed harvest + escapement"),
h3("Poplation Dynamic"),
p("- AR1 corelation: Recuruitment error was modeled as AR1 with sigma and phi (correlation),
sigma is derived from SR model fit."),
p("- Drishelet D. Brood age proportion is modeled as Drishelet distrribution
D determines level of variation. Lower D indicate higher variation")
)
)# End TabsetPanel
)#End tabPanel
)#End navMenue Panel
)#End nabVarPage
,
#------------------------------------------------------------------------
# Citation Discraimer
#------------------------------------------------------------------------
use_bs_tooltip(),
use_bs_popover(),
# withMathJax(),
hr(),
h6("Disclaimer"),
print("This App is developed by Toshihide Hamachan Hamazaki, Alaska Department of Fish and Game Division of Commercial Fisheries"),
h6("Update"),
print(paste("Updated","08/07/2019","AR1 model and bootstrap corrected")),
h6("Contact about this applicaiton"),
print(paste("Questions and improvement suggestions? Please contact")),
a(href="mailto:toshihide.hamazaki@alaska.gov", "Hamachan"),
h6("Suggested Citation"),
print(paste0("Hamazaki, T. ",format(Sys.Date(), "%Y"),". Escapement goal analyses (source: https://shiny.rstudio.com/). Available from https://hamachan.shinyapps.io/Spawner_Recruit/"))
)#End fluidPage
#=======================================================================
# Server:
#=======================================================================
server<-shinyServer(function(input, output, session){
#-----------------------------------------------------------------------
# Control MSE Analysis page.
# This is page will show up only when Data type is Run
#-----------------------------------------------------------------------
observe({
if(input$dataType=='S-R') {
hideTab(inputId = "tabs", target = "MSE Analyses")
hideTab(inputId = "tabs", target = "Brood Table")
}
else {
showTab(inputId = "tabs", target = "MSE Analyses")
showTab(inputId = "tabs", target = "Brood Table")
}
})
#-----------------------------------------------------------------------
# UI output
#-----------------------------------------------------------------------
output$ui <- renderUI({
if (is.null(input$dataType))
return()
# Depending on input$dataType, we'll generate a different
# UI component and send it to the client.
switchn(input$dataType,
"Run" = tagList(
p("Select First age of run"),
# Input: Select what to display
numericInput("fage", "First Age", value=4,min=1,max=20,step=1)
)
)
})
#++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# Panel 1: Data upload and output
#++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
#------------------ File Upload ---------------------------------
data <- reactive({
  # Read the uploaded CSV once a file is available; req() suspends
  # this reactive until input$file1 is non-NULL.
  req(input$file1)
  read.csv(input$file1$datapath,
           header = input$header,
           sep = input$sep)
})
#------------------ Brood Table Construction ---------------------------------
# Build a brood (cohort) table from run-by-age data: each calendar year's
# age composition is shifted back to its brood year, NA-padding the edge
# years where complete broods cannot be formed.
brood.table <- reactive({
  if(input$dataType== "Run"){
    x <- data()
    # p == 1 means the age columns sum to 1 (proportions) and must be
    # expanded by the run column x[,3]; otherwise they are raw counts.
    p <- round(sum(x[1,-c(1:3)]),0)
    fage <- input$fage
    # nages: number of age columns (layout assumed: Year, Esc, Run, ages...)
    nages <- dim(x)[2]-3
    lage <- fage+nages-1
    # Prepend lage brood years so the earliest cohorts get rows.
    yr <- c(min(x[,1])-seq(lage,1),x[,1])
    brood <- matrix(0,ncol=nages+2,nrow = length(yr))
    brood[,1] <- yr
    brood[,2] <- c(rep(NA,lage),x[,2])
    # Shift each age class back by its age to align on brood year.
    for(i in 1:nages){
      if(p==1){
        brood[,i+2] <- c(rep(NA,lage-fage+1-i),x[,3+i]*x[,3],rep(NA,fage+i-1))
      }
      else{
        brood[,i+2] <- c(rep(NA,lage-fage+1-i),x[,3+i],rep(NA,fage+i-1))
      }
    }
    brood.c <- data.frame(brood)
    names(brood.c) <- c('b.Year','Spawner',paste0('b.Age',seq(fage,lage)))
    # Total recruit per brood year = sum across age columns.
    brood.c$Recruit <- rowSums(brood.c[,-c(1:2)])
    return(brood.c)
  }
})
#------------------ Brood Table name ---------------------------------
#------------------ Data Download ---------------------------------
# Serve the brood table as a CSV download.
output$downloadData <- downloadHandler(
  filename = function() {"broodtable.csv"},
  content = function(file) {
    write.csv(brood.table(), file, row.names = FALSE)
  })
# Generic plot-download factory: returns a downloadHandler writing the
# given plot to png/pdf/jpeg.
# NOTE(review): the input ids are inconsistent -- the filename reads
# input$Doption but the format switch reads input$Dooption; confirm which
# id actually exists in the UI. Also `plots` is evaluated bare inside the
# device; if it is a recorded plot it likely needs replayPlot() -- verify.
plotdownload <- function(plots) {
  plotout <- downloadHandler(filename = function() {
    paste("plot",input$Doption,sep= ".")},
    content = function(file){
      # open the format of file which needs to be downloaded ex: pdf, png etc.
      if (input$Dooption == "png"){
        png(file)
      } else if (input$Dooption == "pdf"){
        pdf(file)
      } else {
        jpeg(file)
      }
      plots
      dev.off()
    }
  )
  return(plotout)
}
#------------------ Create SR Data ---------------------------------
sr.data <- reactive({
  # Normalize either input type to a (Yr, S, R) spawner-recruit table.
  if (input$dataType == "Run") {
    bt <- brood.table()
    # Keep only complete brood years: year, spawner, and the total
    # recruit column (last column of the brood table).
    srd <- bt[complete.cases(bt), c(1, 2, dim(bt)[2])]
  } else if (input$dataType == "S-R") {
    srd <- data()
  }
  names(srd) <- c('Yr', 'S', 'R')
  return(srd)
})
# Raw uploaded data display
output$table <- renderDataTable({
  data()
})
# Brood table display, rounded to whole fish
output$btable <- renderDataTable({
  round(brood.table(), 0)
})
# Summary statistics of the spawner-recruit data
output$summary <- renderPrint({
  summary(sr.data())
})
# Side-by-side histograms of spawner and recruit observations.
output$hist <- renderPlot({
  par(mfrow=c(1,2))
  x <- sr.data()
  # BUG FIX: x-axis label was misspelled 'Spawnter'.
  hist(x$S,main='',xlab='Spawner')
  hist(x$R,main='',xlab='Recruit')
})
#-----------------------------------------------------------------------
# 1.2: Create Bayese data
#-----------------------------------------------------------------------
Bayesdata <- reactive({
  # Package the S-R data as the named list the JAGS models expect.
  srd <- sr.data()
  list(
    nyrs = dim(srd)[1],               # number of brood years (rows)
    S = srd$S,                        # spawners
    R = srd$R,                        # recruits
    d = floor(log10(mean(srd$S)))     # order-of-magnitude scaler for S
  )
})
#++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# Panel 2: SR Data Analyses and Output
#++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
#===============================================================================
# JAG Modelse
#===============================================================================
#---------------------------------------------------------------
# Classic Ricker
#---------------------------------------------------------------
# Classic Ricker model. The body is JAGS model code expressed as an R
# function (R2jags deparses it); do not restyle -- `~` and `=` here are
# JAGS stochastic/deterministic relations, not R operators.
# Model: ln(R[y]) ~ Normal(ln(S[y]) + lnalpha - beta*s[y], sigma^2),
# where s = S / 10^d.
jag.model.CR <- function(){
  for(y in 1:nyrs){
    s[y] <- S[y]/(10^d)
    lnRm[y] = log(S[y]) + lnalpha - beta * s[y]
  }
  # Define Priors
  lnalpha ~ dunif(0,10)
  beta ~ dunif(0,10)
  sigma ~ dunif(0,10)
  # NOTE(review): phi has a prior but is unused in this model's
  # likelihood; harmless extra node -- confirm it can be dropped.
  phi ~ dunif(-1,1)
  Tau <- 1/(sigma*sigma)
  # Likelihood
  for(y in 1:nyrs){
    R[y] ~ dlnorm(lnRm[y],Tau)
  }
}
#---------------------------------------------------------------
# AR1 Ricker
#---------------------------------------------------------------
# Ricker model with AR(1) lognormal process error. JAGS code in an R
# function shell (R2jags deparses it); do not restyle.
# The expected log-recruitment is adjusted by phi times the previous
# year's residual; lnresid0 handles the first year's unobserved lag.
jag.model.AR1 <- function(){
  for(y in 1:nyrs){
    s[y] <- S[y]/(10^d)
    lnRm1[y] = log(S[y]) + lnalpha - beta * s[y]
    lnResid[y] = log(R[y]) - lnRm1[y]
  }
  lnRm[1] = lnRm1[1] + phi * lnresid0;
  for(y in 2:nyrs){
    lnRm[y] = lnRm1[y] + phi * lnResid[y-1]
  }
  # Define Priors
  lnalpha ~ dunif(0,10)
  beta ~ dunif(0,10)
  sigma ~ dunif(0,10)
  phi ~ dunif(-1,1)
  lnresid0 ~ dnorm(0,0.001)
  Tau <- 1/(sigma*sigma)
  # Likelihood
  for(y in 1:nyrs){
    R[y] ~ dlnorm(lnRm[y],Tau)
  }
}
#---------------------------------------------------------------
# Beverton Holt
#---------------------------------------------------------------
# Beverton-Holt model. JAGS code in an R function shell (R2jags deparses
# it); do not restyle.
# Model: ln(R[y]) ~ Normal(lnalpha + ln(S[y]) - ln(1 + beta*s[y]), sigma^2).
jag.model.BH <- function(){
  for(y in 1:nyrs){
    s[y] <- S[y]/(10^d)
    lnRm[y] <- lnalpha + log(S[y]) -log(1+beta*s[y])
  }
  # Define Priors
  lnalpha ~ dunif(0,10)
  beta ~ dunif(0,10)
  sigma ~ dunif(0,10)
  Tau <- 1/(sigma*sigma)
  # Likelihood
  for(y in 1:nyrs){
    R[y] ~ dlnorm(lnRm[y],Tau)
  }
}
#---------------------------------------------------------------
# Deriso-Shunute
#---------------------------------------------------------------
# Deriso-Schnute model (spelled "Shunute" elsewhere in this file). JAGS
# code in an R function shell (R2jags deparses it); do not restyle.
# c is the shape parameter interpolating between Ricker-like and
# Beverton-Holt-like behavior.
jag.model.DS <- function(){
  for(y in 1:nyrs){
    s[y] <- S[y]/(10^d)
    # NOTE(review): lnS and lnR are computed but unused below -- lnRm uses
    # log(S[y]) via lnS[y]; confirm whether these nodes are needed.
    lnS[y] <- log(S[y])
    lnR[y] <- log(R[y])
    lnRm[y] = lnS[y] + lnalpha - log(1 + beta*c*s[y])/c
  }
  # Define Priors
  lnalpha ~ dunif(0,10)
  beta ~ dunif(0,10)
  sigma ~ dunif(0,10)
  c ~ dunif(0,1)
  Tau <- 1/(sigma*sigma)
  # Likelihood
  for(y in 1:nyrs){
    R[y] ~ dlnorm(lnRm[y],Tau)
  }
}
#-------------------------------------------------------------------
# SR functions for post pcoessing
#-------------------------------------------------------------------
# Classic Ricker prediction: R = S * exp(lnalpha - beta * S/10^d).
# S may be a vector; d is the order-of-magnitude scaler used in fitting.
SR.CR <- function(lnalpha,beta,S,d){
  scaled <- S / (10^d)
  exp(log(S) + lnalpha - beta * scaled)
}
# Beverton-Holt prediction: R = exp(lnalpha) * S / (1 + beta * S/10^d).
SR.BH <- function(lnalpha,beta,S,d){
  scaled <- S / (10^d)
  exp(lnalpha + log(S) - log(1 + beta * scaled))
}
# Deriso-Schnute prediction: R = S * exp(lnalpha - log(1 + beta*c*s)/c),
# s = S/10^d. (Parameter `c` is the Schnute shape parameter; the name is
# kept for interface compatibility even though it shadows base::c.)
SR.DS <- function(lnalpha,beta,c,S,d){
  scaled <- S / (10^d)
  exp(log(S) + lnalpha - log(1 + beta * c * scaled) / c)
}
output$test <- renderPrint({ Bayesdata() })
#===============================================================================
# End of JAG Modelse
#===============================================================================
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# Pane 2: Run JAG Model
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
run.JAGS <- eventReactive(input$RunBayes,{
  # Fit the user-selected S-R model in JAGS when Run is pressed and
  # return the R2jags fit object.
  #-----------------------------------------------------------------------
  # The number of replicates
  boot.n <- (input$n.iter*input$n.chain)
  #-----------------------------------------------------------------------
  progress <- Progress$new(min=1,max=boot.n)
  on.exit(progress$close())
  progress$set(message = paste('JAG Model in progress'),
               detail = 'This will take a while. Be patient please....')
  # NOTE(review): this loop fills the progress bar immediately; it does
  # not track the actual sampler progress.
  for (i in 1:boot.n) {
    progress$set(value = i)
  }
  #-----------------------------------------------------------------------
  # Import data and MCMC settings
  datnew <- Bayesdata()
  niter <- input$n.iter
  nburn <- input$n.burnin
  nthin <- input$n.thin
  nchain <- input$n.chain
  # JAGS model selection: pick the model function, the parameters to
  # monitor, and the matching post-processing SR prediction function.
  if(input$Model=='Ricker'){
    jagmodel <- jag.model.CR
    parameters <- c('lnalpha','beta','sigma')
    srmodel <- SR.CR
  }
  if(input$Model=='Ricker AR1'){
    jagmodel <- jag.model.AR1
    parameters <- c('lnalpha','beta','phi','sigma')
    srmodel <- SR.CR
  }
  if(input$Model=='Beverton-Holt'){
    jagmodel <- jag.model.BH
    parameters <- c('lnalpha','beta','sigma')
    srmodel <- SR.BH
  }
  if(input$Model=='Deriso-Shunute'){
    jagmodel <- jag.model.DS
    parameters <- c('lnalpha','beta','c','sigma')
    srmodel <- SR.DS
  }
  # BUG FIX: model.file previously referenced `jag.model`, a name never
  # defined anywhere (the selection above assigns `jagmodel`), so every
  # run failed with "object 'jag.model' not found".
  output <- jags(data=datnew,parameters.to.save=parameters, model.file= jagmodel,
                 n.chains=nchain, n.iter=niter,n.burnin=nburn,n.thin=nthin,DIC=TRUE)
  return(output)
})
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# Pane 3: Post Data processing
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
#-----------------------------------------------------------------------
# 1.3: Extract JAG results
#-----------------------------------------------------------------------
# Pass-through reactive for the JAGS fit object.
sim <- reactive({
  x <- run.JAGS()
  return(x)
})
# Posterior draws as a plain matrix (rows = draws, cols = parameters).
mcmc <- reactive({
  # Read mcmc data
  mcmc <- as.mcmc(sim())
  # post is mcmc data
  post <- as.matrix(mcmc)
  return(post)
})
# Posterior summary table from the R2jags print method.
post.summary <- reactive({
  sim_sum <- print(sim())
  post <- sim_sum$summary
  return(post)
})
# Display the posterior summary in the UI.
output$BayesSum <- renderPrint({
  sim_sum <- print(sim())
  print(sim_sum$summary)
})
#-----------------------------------------------------------------------
# JAG Results
#-----------------------------------------------------------------------
#-------------------------------------------------------------------------------
# 5.4 Extract Annual model parameters CI
#-------------------------------------------------------------------------------
# Posterior mean and central credible interval for each model parameter.
# NOTE(review): par.cols() is defined outside this chunk -- presumably it
# returns the parameter column names of the mcmc matrix; confirm.
model.pars <- reactive({
  # Tail probability for the two-sided CI (e.g. 95% CI -> 0.025)
  pci <- (100-input$CI)/200
  # Read mcmc data
  post <- as.matrix(mcmc())
  # import model parameter column names
  pars <- par.cols()
  # extract only mcmc model parameters
  model.pars <- post[,as.vector(as.matrix(pars))]
  # calculate mean, lci, uci per parameter column
  ym <- apply(model.pars,2,mean)
  yl <- apply(model.pars,2,function(x) quantile(x, pci))
  yu <- apply(model.pars,2,function(x) quantile(x, 1-pci))
  # extract model par names
  parname <- names(ym)
  # create data.frame and output
  out <- data.frame(pars=parname,mean=ym, LCI = yl, UCI = yu)
  return(out)
})
#-----------------------------------------------------------------------
# 1.3: Extract mcmc of missing passage by day, year
#-----------------------------------------------------------------------
# Extract MCMC columns for missing y[j,i] (day x year) estimates.
# NOTE(review): this reactive assumes data() is a days-by-years passage
# table and that the JAGS model monitored nodes named 'y[j,i]' -- neither
# matches the S-R models in this file. Looks vestigial (copied from a
# missing-passage app); confirm whether anything still calls post.samp().
post.samp <- reactive({
  # post is mcmc data
  post <- as.matrix(mcmc())
  # Get original data
  x <- data()
  # nyrs is the number of years (i.e. number of columns)
  nyrs <- dim(x)[2] -1
  # ndays is the number of days (i.e. number of rows)
  ndays <- dim(x)[1]
  # Create na.list matrix
  na.list <- matrix(NA,nyrs,ndays)
  # Find Row and column of NA and insert location y[,] name
  for (i in 1:ndays){
    for (j in 1:nyrs){
      na.list[j,i]<- ifelse(is.na(x[i,j+1]),paste0('y[',j,',',i,']'),NA)
    }
  }
  # Vectorize the matrix, and remove NA
  # navector is a vector of the y matrix with NA
  navector <- na.list[which(!is.na(na.list))]
  # out is mcmc matrix that include only missing passage estimates
  out <- post[,navector]
  return(out)
})
#-----------------------------------------------------------------------
# 2.0: SR Parameters
#-----------------------------------------------------------------------
# Point estimates of Ricker S-R parameters and management references
# derived from the gls fit SR() (fitted elsewhere on ln(R/S) ~ s).
# Smsy/Umsy use the Hilborn linear approximation (0.5 - 0.07*lnalpha).
SR.out <- reactive({
  # D rescales beta back to the original spawner units
  D <- floor(mean(log10(sr.data()$S)))
  ln.alpha <- coef(SR())[1]
  alpha <- exp(ln.alpha)
  beta <- -coef(SR())[2]/(10^D)
  sigma <- sigma(SR())
  # lognormal bias-corrected ln(alpha)
  ln.alpha.c <- ln.alpha+0.5*(sigma(SR()))^2
  # NOTE(review): Seq uses ln.alpha.c here, but boot() computes
  # Seq = ln.alpha/beta (uncorrected) -- confirm which is intended.
  Seq <- ln.alpha.c/beta
  Smsy <- Seq*(0.5-0.07*ln.alpha)
  Umsy <- ln.alpha*(0.5-0.07*ln.alpha)
  Rmsy <- Smsy*exp(ln.alpha-beta*Smsy)
  MSY <- Rmsy-Smsy
  Smax <- 1/beta
  Rmax <- exp(ln.alpha-1)/beta
  out <- data.frame(t(c(ln.alpha,alpha, beta, sigma, ln.alpha.c,Seq,Smsy,Umsy,Rmsy,MSY,Smax,Rmax)))
  names(out) <- c('ln.alpha','alpha', 'beta', 'sigma','ln.alpha.c','Seq','Smsy','Umsy','Rmsy','MSY','Smax','Rmax')
  return(out)
})
#-----------------------------------------------------------------------
# 3.0: SRp: Create Model predicted mean, CI, PI
#-----------------------------------------------------------------------
# Model-predicted recruits with CI/PI over a grid of spawner values,
# plus probability profiles of achieving the recruit (r1) and yield (y1)
# targets, using t-distributed prediction errors on ln(R/S).
SRp <- reactive({
  par <-SR.out()
  # mp: interval coverage; rg/yg: recruit and yield targets
  mp <- input$p.i/100
  rg <- input$r1
  yg <- input$y1
  # Prediction model s range: 0 to 110% of max(Seq, observed S)
  D <- floor(mean(log10(sr.data()$S)))
  s <- seq(0,1.1*max(par$Seq,max(sr.data()$S)), length.out =101)
  # NOTE(review): `sd` shadows stats::sd here; the later sd(...) call
  # still resolves to the function, but consider renaming.
  sd <- s/(10^D)
  # Prediction of ln(R/S) with standard errors (AICcmodavg::predictSE)
  pred <- predictSE(SR(), newdata=data.frame(s=sd))
  # Predicted ln(R/S)
  lRS <- pred$fit
  # Model SE
  se <- pred$se.fit
  # Model st.Residual
  res <- sd(as.vector(SR()$residuals))
  # Model df
  foo <-as.data.frame(SR()$dims)
  dft <- foo$N-foo$p
  # SE for the Prediction interval (adds residual variance)
  pse <- sqrt(se^2+res^2)
  # t critical value for the chosen coverage
  tf <- qt((1-mp)/2,dft,lower.tail=FALSE)
  # Calculate CI or PI bounds depending on user choice
  if (input$Li =='confidence') {
    lwr <- lRS -tf*se
    upr <- lRS +tf*se
  }
  else {
    lwr <- lRS -tf*pse
    upr <- lRS +tf*pse
  }
  # Back-transform ln(R/S) bounds to recruits: R = exp(ln(R/S)) * S
  ER <- exp(cbind(lRS,lwr,upr))*s
  # ln(R/S) at given Run target
  erg <- log(rg/s)
  # ln(R/S) at given Yield target
  eyg <- log(yg/s + 1)
  # Probability of achieving target Recruits: CI and PI versions
  prof.Rci <- 1-pt((erg-lRS)/se,dft)
  prof.Rpi <- 1-pt((erg-lRS)/pse,dft)
  # Probability of achieving target Yield: CI and PI versions
  prof.Yci <- 1-pt((eyg-lRS)/se,dft)
  prof.Ypi <- 1-pt((eyg-lRS)/pse,dft)
  out <- data.frame(cbind(s,ER,prof.Rci,prof.Rpi,prof.Yci,prof.Ypi ))
  names(out) <- c('s','fit','lwr','upr','prof.Rci','prof.Rpi','prof.Yci','prof.Ypi')
  return(out)
})
#-----------------------------------------------------------------------
# 4.0: SR Analyses Outputs
#-----------------------------------------------------------------------
# Model parameter table (point estimates from SR.out()).
# NOTE(review): print.data.frame takes a single `digits` value; the
# vector passed here is not applied per-column -- confirm intent.
output$RS.out <- renderPrint({
  print(SR.out(),digits=c(3,3,10,3,3,0,0,0,0,0,0))
})
# Likelihood-ratio test, model summary and parameter intervals.
# SRM() / SR() are defined outside this chunk.
output$anova <- renderPrint({
  list(likelihood_Ratio_Test=SRM()[[3]],
       Model_Summary=summary(SR()),
       Parmeter_Intervals=intervals(SR()))
})
# Durbin-Watson test for autocorrelation of ln(R/S) ~ S residuals (lmtest).
output$dwtest <- renderPrint({
  x <- sr.data()
  model.s <- lm(log(R/S)~S,data=x)
  dwtest(model.s)
})
# Report which Ricker variant was selected (user choice, or the LRT
# p-value at 0.05 when set to automatic).
# NOTE(review): anova[[9]][2] indexes the LRT p-value positionally --
# fragile against nlme output changes; confirm.
output$modelslct <- renderText({
  anova <- SRM()[[3]]
  if(input$SRM=='Standard') { model <- 'Standard Ricker' }
  else if (input$SRM=='AR1') { model <- 'AR1 Ricker'}
  else {
    if(anova[[9]][2] < 0.05) {model <- 'AR1 Ricker'}
    else
    { model <-'Standard Ricker' }
  }
  paste('Model Selected:',model)
})
#-----------------------------------------------------------------------
# 5.0 Helper Unit functions
#-----------------------------------------------------------------------
# Axis-label suffix for the chosen display unit, e.g. "(x 1000)".
mult <- reactive({
  u <- as.numeric(input$ui)
  if (u == 1000000) {
    '(x million)'
  } else if (u > 1) {
    paste0('(x', u, ')')
  } else {
    ''
  }
})
# Color Scheme helper: return `color` with `percent` % transparency.
# color   = color name understood by col2rgb()
# percent = % transparency (0 = opaque, 100 = fully transparent)
# name    = optional name attached to the returned color string
tcol <- function(color, percent = 50, name = NULL) {
  channels <- col2rgb(color)
  # Map transparency % to an alpha channel value in [0, 255].
  alpha.val <- (100 - percent) * 255 / 100
  rgb(channels[1], channels[2], channels[3],
      max = 255,
      alpha = alpha.val,
      names = name)
}
#-----------------------------------------------------------------------
# 6.0 Base SR plot
#-----------------------------------------------------------------------
# Base spawner-recruit plot (points, 1:1 replacement line, fitted curve)
# recorded so overlays can be replayed on top later via replayPlot().
base.p <- reactive({
  # dev.control("enable")
  u <- as.numeric(input$ui)
  mult <- mult()
  x <- sr.data()
  par <-SR.out()
  # Scale everything to the display unit u
  xp <- x/u
  SRp <- SRp()[,1:4]/u
  par(xaxs='i',yaxs='i',bty='l')
  plot(R~S,data=xp,pch=19,col=1,
       main= input$caption,
       xlab=paste("Escapement",mult),ylab=paste('Recruit',mult),
       xlim=c(0,max(SRp$s)),ylim=c(0,1.1*max(xp$R)))
  # 1:1 replacement line
  abline(0,1,col=2)
  # Add Predicted curve
  lines(fit~s,data=SRp,col=1,lw=2)
  out <-recordPlot()
  # dev.off()
  return(out)
})
#-----------------------------------------------------------------------
# 7.0 Base Yield plot
#-----------------------------------------------------------------------
# Base yield plot (observed yields R-S, fitted yield curve, zero line)
# recorded for later replayPlot() overlays.
base.py <- reactive({
  # dev.control("enable")
  u <- as.numeric(input$ui)
  mult <- mult()
  x <- sr.data()
  par <-SR.out()
  SRp <- SRp()[,1:4]/u
  xp <- x/u
  # Plot Basic Yield plot
  par(xaxs='i',yaxs='i',bty='l')
  plot((R-S)~S,data=xp,pch=19,col=1,
       main= input$caption,
       xlab=paste("Escapement",mult),ylab=paste('Yield',mult),
       xlim=c(0,max(SRp$s)),ylim=c(min(SRp$lwr-SRp$s),1.1*max(xp$R-xp$S)))
  # Fitted yield curve = predicted recruits minus spawners
  lines((fit-s)~s,data=SRp,col=1,lw=2)
  abline(h=0,col=2)
  out <-recordPlot()
  # dev.off()
  return(out)
})
#=======================================================================
# Bootstrap Analyses
#=======================================================================
#-----------------------------------------------------------------------
# 1.0: Create Bootstrap Data
#-----------------------------------------------------------------------
# Parametric residual bootstrap of the fitted gls S-R model (with AR(1)
# support): resample innovations, rebuild ln(R/S) series, refit, and
# collect Ricker parameters and derived management references.
boot <- reactive({
  # The number of bootstrap replicates
  boot.n <- isolate(input$bn)
  #-----------------------------------------------------------------------
  progress <- Progress$new()
  on.exit(progress$close())
  progress$set(message = paste(boot.n,'Bootstrap Calculation in progress'),
               detail = 'This will take a while. Be patient please....')
  #-----------------------------------------------------------------------
  D <- floor(mean(log10(sr.data()$S)))
  s <- sr.data()$S/(10^D)
  # Fitted values and residuals from the gls fit SR()
  SRP <-predict(SR())
  SRR <-residuals(SR())
  # AR(1) coefficient (0 if the fit has no correlation structure)
  phi <- coef(SR()$modelStruct$corStruct,unconstrained=FALSE)
  phi <- ifelse(is.null(phi),0,phi)
  n <- length(SRR)
  # Step 1: Calculate AR1 innovations v[t] = e[t] - phi*e[t-1]
  v <- SRR[-1] - phi*SRR[-n]
  # Step 2: AR1[1] is E[1]
  v <- c(SRR[1],v)
  # Vector for AR1
  # NOTE(review): eb is allocated but never used -- candidate for removal.
  eb <- numeric(n)
  # Step 3: Create random vectors of resampled innovations
  # Create bootstrap replicates matrix
  boot.R <- matrix(0,nrow=boot.n,ncol=4)
  # Add column name
  colnames(boot.R) <- c('ln.alpha','beta','phi','sigma')
  resamples <- lapply(1:boot.n, function(x) sample(v, replace = TRUE))
  output <- matrix(unlist(resamples), ncol = n, byrow = TRUE)
  # Independent-error replicate series: fitted + resampled innovations
  output1 <- t(t(output)+SRP)
  if(phi!=0){
    # Powers of phi for reconstructing AR(1) errors from innovations
    vph <- numeric(n)
    for(i in 1:n){
      vph[i] <- phi^(i-1)
    }
    # Bootstrap Calcuration: e[t] = sum_k phi^k * v[t-k]
    output2 <- matrix(0,nrow=boot.n,ncol=n)
    output2[,1] <- output[,1]
    for (j in 2:n){
      output2[,j] <- colSums(t(output[,1:j])*rev(vph[1:j]))
    }
    output2 <- t(t(output2)+SRP)
  }
  withProgress(message = 'Creating bootstrap data', value = 0, {
    # Bootstrap Calcuration: refit each replicate series
    for (i in 1:boot.n)
    {
      # Create bottstrap random ln(R/S)
      if(phi==0)
      {
        bootR <- output1[i,]
        # bx <- data.frame(bootR,s)
        SRi <- gls(bootR~s,method='ML')
      } else {
        # Create AR1 error
        bootR <- output2[i,]
        # bx <- data.frame(bootR,s)
        SRi <- gls(bootR~s,correlation=corAR1(0.5,~1),method='ML')
      }
      # Extract, Ricker lnalpha, beta, phi,sigma
      boot.R[i,1] <- SRi$coefficients[1]
      boot.R[i,2] <- -SRi$coefficients[2]/(10^D)
      foo <- coef(SRi$modelStruct$corStruct,unconstrained=FALSE)
      foo <- ifelse(is.null(foo),0,foo)
      boot.R[i,3] <- foo
      boot.R[i,4] <- sigma(SRi)
      # progress$set(value = i/boot.n)
      # Increment the progress bar, and update the detail text.
      # progress$inc(1/boot.n, detail = paste("Completed", round(100*i/boot.n,0),"%"))
      incProgress(1/boot.n, detail = paste("Progress", round(100*i/boot.n,0),"%"))
    }
  })
  # Change to data.frame and derive management references per replicate
  boot.R <- data.frame(boot.R)
  # boot.R$phi <- ifelse(is.null(boot.R$phi),0,boot.R$phi)
  boot.R$alpha <- exp(boot.R$ln.alpha)
  boot.R$ln.alpha.c <- with(boot.R,ln.alpha+0.5*sigma^2)
  # NOTE(review): Seq uses uncorrected ln.alpha here, while SR.out() uses
  # ln.alpha.c -- confirm which convention is intended.
  boot.R$Seq <- with(boot.R,ln.alpha/beta)
  boot.R$Smsy <- with(boot.R,Seq*(0.5-0.07*ln.alpha))
  boot.R$Rmsy <- with(boot.R,Smsy*exp(ln.alpha-beta*Smsy))
  boot.R$MSY <- with(boot.R,Rmsy-Smsy)
  boot.R$Smax <- with(boot.R,1/beta)
  boot.R$Rmax <- with(boot.R,exp(ln.alpha-1)/beta)
  # Remove bad data (i.e. beta is neagative) and extreme tails
  boot.R <- boot.R[boot.R[,2]>0,]
  boot.R <- boot.R[boot.R[,2]>=quantile(boot.R[,2],0.001),]
  boot.R <- boot.R[boot.R$Seq<quantile(boot.R$Seq,0.995),]
  out <- boot.R
  return(out)
})
#-----------------------------------------------------------------------
# 2.0 Bootstrap Recruit and Yields Data Out
#-----------------------------------------------------------------------
# Bootstrap recruit and yield curves: evaluate each bootstrap Ricker
# parameter set over a spawner grid (0 to 110% of max(Seq, observed S)).
Y.boot <- reactive({
  #-----------------------------------------------------------------------
  # NOTE(review): this progress bar is cosmetic -- it sleeps 15 x 0.5 s
  # (7.5 s) before any computation; consider removing the Sys.sleep.
  progress <- Progress$new(session, min=1, max=15)
  on.exit(progress$close())
  progress$set(message = 'Profile Calculation in progress',
               detail = 'This may take a while...')
  for (i in 1:15) {
    progress$set(value = i)
    Sys.sleep(0.5)}
  #-----------------------------------------------------------------------
  par <-SR.out()
  Seq <- par$Seq
  boot.s <- seq(0,1.1*max(par$Seq,max(sr.data()$S)), length.out =101)
  boot.R <- boot()
  # Rows = bootstrap replicates, cols = spawner grid points
  boot.Rec <- t(boot.s*t(exp(boot.R[,'ln.alpha']-boot.R[,'beta']%o%boot.s)))
  boot.Yield <- t(t(boot.Rec)-boot.s)
  out <- list(Y.boot = boot.Yield, R.boot = boot.Rec, boot.s = boot.s)
  return(out)
})
#-----------------------------------------------------------------------
# 3.0 Bootstrap Recruit & Yields Curve Out
#-----------------------------------------------------------------------
# Pointwise mean and percentile bounds of the bootstrap yield and
# recruit curves at each spawner grid value.
b.YA <- reactive({
  # Tail probability for the two-sided interval
  mp <- (1-input$p.i/100)/2
  boot.s <- Y.boot()$boot.s
  boot.Y <- as.matrix(Y.boot()$Y.boot)
  boot.Ym <- colMeans(boot.Y)
  boot.Yu <- apply(boot.Y,2,function(x) quantile(x,1-mp))
  boot.Yl <- apply(boot.Y,2,function(x) quantile(x,mp))
  # Recruit goal
  boot.R <- as.matrix(Y.boot()$R.boot)
  boot.Rm <- colMeans(boot.R)
  boot.Ru <- apply(boot.R,2,function(x) quantile(x,1-mp))
  boot.Rl <- apply(boot.R,2,function(x) quantile(x,mp))
  out <- data.frame(cbind(boot.s,boot.Ym,boot.Yu,boot.Yl,boot.Rm,boot.Ru,boot.Rl))
  return(out)
})
#-----------------------------------------------------------------------
# 4.0 Target Yield and Recruit based Escapement Goal
#-----------------------------------------------------------------------
# Escapement ranges whose mean/lower/upper bootstrap YIELD curves exceed
# the yield target y1, plus the per-S probability of exceeding it.
b.YAg <- reactive({
  yg <- input$y1
  boot.s <- Y.boot()$boot.s
  boot.Y <- as.matrix(Y.boot()$Y.boot)
  # Fraction of replicates with yield above target at each S
  boot.Yp <- apply(boot.Y,2,function(x) ifelse(x >yg,1,0))
  boot.Yp <- colMeans(boot.Yp)
  boot.Ym <- b.YA()$boot.Ym
  boot.Yu <- b.YA()$boot.Yu
  boot.Yl <- b.YA()$boot.Yl
  # Find Intersections: S values where each curve exceeds the target
  b.l <- boot.s[sign(boot.Yl-yg)==1]
  b.m <- boot.s[sign(boot.Ym-yg)==1]
  b.u <- boot.s[sign(boot.Yu-yg)==1]
  BEG.l <- c(min(b.l),max(b.l))
  BEG.m <- c(min(b.m),max(b.m))
  BEG.u <- c(min(b.u),max(b.u))
  out <- list(boot.Yp=boot.Yp, BEG.l = BEG.l, BEG.m = BEG.m, BEG.u=BEG.u)
  return(out)
})
# Escapement range where P(yield > target) exceeds the chosen probability.
b.YApg <- reactive({
  ypg <- input$y1p/100
  boot.s <- Y.boot()$boot.s
  boot.Yp <- b.YAg()$boot.Yp
  # Find Intersections
  b.p <- boot.s[sign(boot.Yp-ypg)==1]
  BEG.p <- c(min(b.p),max(b.p))
  out <- BEG.p
  return(out)
})
# Same as b.YAg, but for the RECRUIT target r1.
b.RAg <- reactive({
  rg <- input$r1
  boot.s <- Y.boot()$boot.s
  boot.R <- as.matrix(Y.boot()$R.boot)
  boot.Rp <- apply(boot.R,2,function(x) ifelse(x >rg,1,0))
  boot.Rp <- colMeans(boot.Rp)
  boot.Rm <- b.YA()$boot.Rm
  boot.Ru <- b.YA()$boot.Ru
  boot.Rl <- b.YA()$boot.Rl
  # Find Intersections
  b.l <- boot.s[sign(boot.Rl-rg)==1]
  b.m <- boot.s[sign(boot.Rm-rg)==1]
  b.u <- boot.s[sign(boot.Ru-rg)==1]
  BEG.l <- c(min(b.l),max(b.l))
  BEG.m <- c(min(b.m),max(b.m))
  BEG.u <- c(min(b.u),max(b.u))
  out <- list(boot.Rp=boot.Rp, BEG.l = BEG.l, BEG.m = BEG.m, BEG.u=BEG.u)
  return(out)
})
# Escapement range where P(recruit > target) exceeds the chosen probability.
b.RApg <- reactive({
  rpg <- input$r1p/100
  boot.s <- Y.boot()$boot.s
  boot.Rp <- b.RAg()$boot.Rp
  # Find Intersections
  b.p <- boot.s[sign(boot.Rp-rpg)==1]
  BEG.p <- c(min(b.p),max(b.p))
  out <- BEG.p
  return(out)
})
#-----------------------------------------------------------------------
# Panel 3: SR Smsy based Escapement Goal
#-----------------------------------------------------------------------
#-----------------------------------------------------------------------
# 5.0 Bootstrap Smsy Anaylses Results Out
#-----------------------------------------------------------------------
# Calculate Smsy Optimum profile and escapement goal
# Smsy optimum-yield profile: per-S probability that a replicate's yield
# exceeds p1% of that replicate's own maximum yield, and the escapement
# range where this probability exceeds p3.
b.SA <- reactive({
  mp.1 <- input$p1/100
  ap <- input$p3/100
  boot.s <- Y.boot()$boot.s
  boot.Y <- as.matrix(Y.boot()$Y.boot)
  nrows <- dim(boot.Y)[1]
  ncols <- dim(boot.Y)[2]
  foo <- matrix(0,nrow = nrows,ncol=ncols)
  # Per replicate: indicator of yield above mp.1 x its own max yield
  for(j in 1:nrows){
    foo[j,] <- ifelse(boot.Y[j,]>mp.1*max(boot.Y[j,]),1,0)
  }
  Y.prob.1 <- colMeans(foo)
  # Find Smsy Profile Intersections
  b.1 <- boot.s[sign(Y.prob.1-ap)==1]
  BEG.1 <- c(min(b.1),max(b.1))
  out <- list(boot.s = boot.s, Y.prob.1 = Y.prob.1, BEG.1 = BEG.1)
  return(out)
})
# Standard Smsy profiles at fixed 90/80/70% of MSY, each with the goal
# range where the profile exceeds a hard-coded 0.9 probability.
b.SA.st <- reactive({
  boot.s <- Y.boot()$boot.s
  boot.Y <- as.matrix(Y.boot()$Y.boot)
  nrows <- dim(boot.Y)[1]
  ncols <- dim(boot.Y)[2]
  st <- c(0.9,0.8,0.7)
  foo <- matrix(0,nrow = nrows,ncol=ncols)
  Y.prob.st <- matrix(0,nrow = 3,ncol=ncols)
  BEG.st <- matrix(0,nrow=3,ncol=2)
  for(i in 1:3){
    for(j in 1:nrows){
      foo[j,] <- ifelse(boot.Y[j,]> st[i]*max(boot.Y[j,]),1,0)
    }
    Y.prob.st[i,] <- colMeans(foo)
    # 0.9 probability cutoff is hard-coded for the standard profiles
    b <- boot.s[sign(Y.prob.st[i,]-0.9)==1]
    BEG.st[i,] <- c(min(b),max(b))
  }
  # Find Smsy Profile Intersections
  out <- list(Y.prob.st = Y.prob.st, BEG.st = BEG.st)
  return(out)
})
#=============================================================================
# Panel 3.2: SR Smax based Escapement Goal
#=============================================================================
# Calculate Smax Profile and escapement goal
# Smax (maximum-recruitment) profile: per-S probability that a
# replicate's recruit exceeds sp% of its own maximum, and the escapement
# range where this probability exceeds sp1.
b.SMX <- reactive({
  mp <- input$sp/100
  ap <- input$sp1/100
  bn <- isolate(input$bn)
  boot.s <- Y.boot()$boot.s
  boot.R <- as.matrix(Y.boot()$R.boot)
  nrows <- dim(boot.R)[1]
  ncols <- dim(boot.R)[2]
  foo <- matrix(0,nrow = nrows,ncol=ncols)
  for(j in 1:nrows){
    foo[j,] <- ifelse(boot.R[j,]>mp*max(boot.R[j,]),1,0)
  }
  R.prob <- colMeans(foo)
  # Find Smax Profile Intersections
  b <- boot.s[sign(R.prob-ap)==1]
  BEG <- c(min(b),max(b))
  out <- list(boot.s = boot.s, R.prob = R.prob, BEG = BEG)
  return(out)
})
# Standard Smax profiles at 90/80/70% of maximum recruitment with a
# hard-coded 0.9 probability cutoff (mirrors b.SA.st).
b.SMX.st <- reactive({
  boot.s <- Y.boot()$boot.s
  boot.R <- as.matrix(Y.boot()$R.boot)
  nrows <- dim(boot.R)[1]
  ncols <- dim(boot.R)[2]
  st <- c(0.9,0.8,0.7)
  foo <- matrix(0,nrow = nrows,ncol=ncols)
  R.prob.st <- matrix(0,nrow = 3,ncol=ncols)
  BEG.st <- matrix(0,nrow=3,ncol=2)
  for(i in 1:3){
    for(j in 1:nrows){
      foo[j,] <- ifelse(boot.R[j,]> st[i]*max(boot.R[j,]),1,0)
    }
    R.prob.st[i,] <- colMeans(foo)
    b <- boot.s[sign(R.prob.st[i,]-0.9)==1]
    BEG.st[i,] <- c(min(b),max(b))
  }
  # Find Smsy Profile Intersections
  out <- list(R.prob.st = R.prob.st, BEG.st = BEG.st)
  return(out)
})
#-----------------------------------------------------------------------
# 7.0 Bootstrap Escapement Goal based Analneyses
#-----------------------------------------------------------------------
# Evaluate the bootstrap Ricker replicates over the user's candidate
# escapement-goal range [lg, ug] when the Run button is pressed.
b.GA <- eventReactive(input$Run,{
  #-----------------------------------------------------------------------
  # NOTE(review): cosmetic progress bar -- sleeps 7.5 s before computing.
  progress <- Progress$new(session, min=1, max=15)
  on.exit(progress$close())
  progress$set(message = 'Bootstrap EG (7.0) in progress',
               detail = 'This may take a while...')
  for (i in 1:15) {
    progress$set(value = i)
    Sys.sleep(0.5)}
  #-----------------------------------------------------------------------
  lg <- input$lg
  ug <- input$ug
  boot.n <- dim(boot())[1]
  boot.s <- seq(lg,ug,length.out=101)
  boot.R <- boot()
  boot.Recruit <- t(boot.s*t(exp(boot.R[,'ln.alpha']-boot.R[,'beta']%o%boot.s)))
  boot.Yield <- t(t(boot.Recruit)-boot.s)
  out <- list(S = boot.s, R = boot.Recruit, Y = boot.Yield)
  return(out)
})
# Flatten the goal-range recruit/yield matrices into a two-column frame.
# (Implicit return: the assignment's value is the reactive's result.)
b.GA.t <- reactive({
  Return <- c(b.GA()$R)
  Yields <- c(b.GA()$Y)
  out <- as.data.frame(cbind(Return,Yields))
})
#---------------------------------------------------------------------------
# bootstrap Base Yield Plot
#---------------------------------------------------------------------------
# Recorded base plot of the bootstrap mean yield curve with percentile
# interval band (scaled to the display unit).
boot.Yldp <- reactive({
  mp <- input$p.i
  u <- as.numeric(input$ui)
  mult <- mult()
  boot.s <- b.YA()$boot.s/u
  boot.Yu <- b.YA()$boot.Yu/u
  boot.Yl <- b.YA()$boot.Yl/u
  boot.Ym <- b.YA()$boot.Ym/u
  par(xaxs='i',yaxs='i',bty='l')
  plot(boot.s,boot.Ym,type='l',ylim=c(0,max(boot.Yu)),
       ylab=paste('Expected Mean Yield',mult),xlab=paste("Escapement",mult))
  # Shaded interval band between upper and lower percentile curves
  polygon(c(boot.s,rev(boot.s)),c(boot.Yu,rev(boot.Yl)),col=tcol('grey',50),border=NA)
  legend('topright',paste(mp,'% CI'),lty=2,box.lty=0)
  out <-recordPlot()
  return(out)
})
#---------------------------------------------------------------------------
# bootstrap Base Run Plot
#---------------------------------------------------------------------------
# Recorded base plot of the bootstrap mean recruit curve with percentile
# band and the 1:1 replacement line.
boot.Recp <- reactive({
  u <- as.numeric(input$ui)
  mp <- input$p.i
  mult <- mult()
  boot.s <- b.YA()$boot.s/u
  boot.Ru <- b.YA()$boot.Ru/u
  boot.Rl <- b.YA()$boot.Rl/u
  boot.Rm <- b.YA()$boot.Rm/u
  par(xaxs='i',yaxs='i',bty='l')
  plot(boot.s,boot.Rm,type='l',ylim=c(0,max(boot.Ru)),
       xlab=paste("Escapement",mult),ylab=paste('Expected Mean Recruit',mult))
  polygon(c(boot.s,rev(boot.s)),c(boot.Ru,rev(boot.Rl)),col=tcol('grey',50),border=NA)
  legend('topright',paste(mp,'% CI'),lty=2,box.lty=0)
  abline(0,1,col=2)
  out <-recordPlot()
  return(out)
})
#=======================================================================
# Plots and Tables Outputs
#=======================================================================
#=======================================================================
# Panel 1: SR Analyses SEction
#=======================================================================
#-----------------------------------------------------------------------
# SR plot
#-----------------------------------------------------------------------
# Draw the full S-R plot: replay the recorded base plot, then overlay
# interval band, year labels, escapement-goal lines, Smsy and Smax
# according to the user's checkbox selections.
srplot <- function(){
  u <- as.numeric(input$ui)
  x <- sr.data()
  par <-SR.out()
  xp <- x/u
  SRp <- SRp()[,1:4]/u
  # Draw Base SR Plot
  replayPlot(base.p())
  # Add CI band
  if(input$show.int==TRUE){
    with(SRp,polygon(c(s,rev(s)),c(upr,rev(lwr)),col=tcol('grey',50),border=NA))
  }
  # Add Years (maptools::pointLabel places non-overlapping labels)
  if(input$show.points==TRUE) {
    pointLabel(xp$S,xp$R, labels=as.character(x$Yr), cex= 1,col=4)}
  # Add Escapment Goal Range
  if(input$show.eg==TRUE) {
    abline(v=input$egl/u,col=2,lty=1,lwd=2)
    abline(v=input$egu/u,col=2,lty=1,lwd=2)
  }
  # Add Smsy (legend entries default to empty when unchecked)
  t1 <- ''
  l1 <- 0
  if(input$show.smsy==TRUE) {
    abline(v=par$Smsy/u,col=1,lty=2)
    t1 <- 'Smsy'
    l1 <- 2
  }
  # Add Smax
  t2 <- ''
  l2 <- 0
  if(input$show.smax==TRUE) {
    abline(v=par$Smax/u,col=1,lty=3)
    t2 <- 'Smax'
    l2 <- 3
  }
  legend('topright',c(t1,t2),lty=c(l1,l2),bty='n')
}
# Render the S-R plot in the UI.
output$p <- renderPlot({ srplot()})
# Download the S-R plot as a PNG.
# NOTE(review): commented-out branches suggest a format selector
# (input$report) was planned; currently always PNG.
output$down <- downloadHandler(
  filename = function() {
    paste("myreport","png", sep = ".")
    # paste("myreport", input$report, sep = ".")
  },
  content = function(file){
    # if(input$report == "png")
    png(file)
    # else
    #  pdf(file)
    srplot()
    dev.off()
  }
)
#-----------------------------------------------------------------------
# SR Yield plot
#-----------------------------------------------------------------------
# Yield plot with the same optional overlays as srplot(): interval band,
# year labels, goal range, Smsy and Smax reference lines.
output$py <- renderPlot({
  u <- as.numeric(input$ui)
  x <- sr.data()
  par <-SR.out()
  SRp <- SRp()[,1:4]/u
  xp <- x/u
  # Plot base Yiled plot
  replayPlot(base.py())
  # Add CI band (shifted to yield scale by subtracting s)
  if(input$show.int==TRUE){
    with(SRp,polygon(c(s,rev(s)),c(upr-s,rev(lwr-s)),col=tcol('grey',50),border=NA))
  }
  # Add Years
  if(input$show.points==TRUE) {
    pointLabel(xp$S,(xp$R-xp$S), labels=as.character(x$Yr), cex= 1,col=4)
    # text((R-S)~S,data=xp, labels=x$Yr, cex= 1, pos=3,col=4)
  }
  # Add Escapement Goadl Range
  if(input$show.eg==TRUE) {
    abline(v=input$egl/u,col=2,lty=1,lwd=2)
    abline(v=input$egu/u,col=2,lty=1,lwd=2)
  }
  # Add Smsy
  t1 <- ''
  l1 <- 0
  if(input$show.smsy==TRUE) {
    abline(v=par$Smsy/u,col=1,lty=2)
    t1 <- 'Smsy'
    l1 <- 2
  }
  # Add Smax
  t2 <- ''
  l2 <- 0
  if(input$show.smax==TRUE) {
    abline(v=par$Smax/u,col=1,lty=3)
    t2 <- 'Smax'
    l2 <- 3
  }
  # Add legend
  legend('topright',c(t1,t2),lty=c(l1,l2),box.lty=0)
})
#-----------------------------------------------------------------------
# Plot time serise
#-----------------------------------------------------------------------
# Time series of spawners and recruits by brood year, with an optional
# shaded band marking the escapement-goal range.
output$srt <- renderPlot({
  x <- sr.data()
  u <- as.numeric(input$ui)
  mult <- mult()
  par(yaxs='i',bty='l')
  plot(R/u~Yr,data=x,type='l',ylim=c(0,with(x,max(R,S)/u)),
       main=input$caption,
       xlab='Brood Year',
       ylab=paste('Spawner / Recruit',mult))
  lines(S/u~Yr,data=x,lty=2)
  # Add Escapement Goal range as a horizontal band across all years
  if(input$show.eg==TRUE) {
    with(x,polygon(c(Yr,rev(Yr)),c(rep(input$egl/u,length(Yr)),rev(rep(input$egu/u,length(Yr)))),col=tcol('grey',50),border=NA))
  }
  legend('topright',c('Spawner','Recruit'),lty=c(2,1),box.lty=0)
})
# Time series of run and escapement by calendar year (Run-type data only:
# uses the first three columns of the uploaded table).
output$runesc <- renderPlot({
  x <- data()[,c(1:3)]
  names(x) <-c('Yr','S','R')
  u <- as.numeric(input$ui)
  mult <- mult()
  par(yaxs='i',bty='l')
  plot(R/u~Yr,data=x,type='l',ylim=c(0,with(x,max(R,S)/u)),
       main=input$caption,
       xlab='Year',
       ylab=paste('Run / Escapement',mult))
  # Add Escapement Goal range
  if(input$show.eg==TRUE) {
    with(x,polygon(c(Yr,rev(Yr)),c(rep(input$egl/u,length(Yr)),rev(rep(input$egu/u,length(Yr)))),col=tcol('grey',50),border=NA))
  }
  lines(S/u~Yr,data=x,lty=2)
  legend('topright',c('Run','Escapement'),lty=c(1,2),box.lty=0)
})
#-----------------------------------------------------------------------
# Plot Residual Plot
#-----------------------------------------------------------------------
# Residuals of the S-R fit over time with a GAM smooth (mgcv) and an
# approximate +/- 2 SE band to reveal temporal trends.
output$Resid <- renderPlot({
  year <- sr.data()$Yr
  resid <-residuals(SR())
  par(bty='l')
  plot(resid~year,xlab='Year',ylab='Residuals'
  )
  abline(h=0)
  model <- gam(resid~s(year),family=gaussian, fit =TRUE)
  pred.year <- data.frame(year, predict.gam(model,se = TRUE))
  lines(pred.year$year, pred.year$fit,lwd=2,col=4)
  pred.year$ciu <- pred.year$fit + 2*pred.year$se.fit
  pred.year$cil <- pred.year$fit - 2*pred.year$se.fit
  with(pred.year,polygon(c(year,rev(year)),c(ciu,rev(cil)),col=tcol('grey',50),border=NA))
})
#-----------------------------------------------------------------------
# SR Bootstrap Parameters Distribution
#-----------------------------------------------------------------------
# Bootstrap summary statistics of parameters and derived references.
# NOTE(review): the `digits` vectors are not applied per-column by
# print(); only a single digits value is honored -- confirm intent.
output$bsummary <- renderPrint({
  print(summary(boot()[,c(5,2,1,3,6:10)]),digits=c(3,10,3,0,0,0,0,0,0))
})
# 95% bootstrap percentile intervals per parameter.
output$bquantile <- renderPrint({
  print(apply(boot()[,c(5,2,1,3,6:10)],2,function(x) quantile(x,probs=c(0.025,0.975),na.rm=TRUE)),digits=c(3,10,3,3,0,0,0,0,0,0))
})
# Kernel density plots of the bootstrap parameter distributions.
output$bhist <- renderPlot({
  par(mfrow=c(3,3),mar = c(1.75,1.5,1.5,1.75),xaxs='i',yaxs='i',bty='l')
  plot(density(boot()$alpha),main='Ricker alpha',xlab='',ylab='')
  plot(density(boot()$beta),main='Ricker beta',xlab='',ylab='')
  plot(density(boot()$phi),main='AR1 phi',xlab='',ylab='')
  plot(density(boot()$Seq), main='SEQ',xlab='',ylab='')
  plot(density(boot()$Smsy),main='Smsy',xlab='',ylab='')
  plot(density(boot()$Smax), main='Smax',xlab='',ylab='')
  # plot(density(boot()$MSY,na.rm=T), main='MSY',xlab='',ylab='',xlim=c(0,quantile(boot()$MSY,0.99)))
  # plot(density(boot()$Rmax,na.rm=T), main='Rmax',xlab='',ylab='',xlim=c(0,quantile(boot()$Rmax,0.99)))
})
#=======================================================================
# Panel 2: Smsy Analyses Section
#=======================================================================
#-----------------------------------------------------------------------
# Smsy Optimum Profile Plot
#-----------------------------------------------------------------------
  # MSY yield probability profiles: probability of achieving a given fraction
  # of MSY at each candidate escapement, from bootstrap profiles b.SA()/b.SA.st()
  output$bsmsy <- renderPlot({
    mp.1 <- input$p1/100
    ap <- input$p3/100
    u <- as.numeric(input$ui)
    mult <- mult()
    boot.s <- b.SA()$boot.s/u
    Y.prob.st <- b.SA.st()$Y.prob.st
    Y.prob.1 <- b.SA()$Y.prob.1
    #---------------------------------------------------------------------------
    par(xaxs='i',yaxs='i',bty='l')
    # Rows of Y.prob.st are the standard 90/80/70% MSY profiles (per legend)
    plot(boot.s,Y.prob.st[1,],type='l',col=1, ylim=c(0,1),ylab = 'Probability',
         xlab=paste('Escapement',mult),main=paste0('MSY Yield probability curve'))
    lines(boot.s,Y.prob.st[2,],lty = 2,col=1)
    lines(boot.s,Y.prob.st[3,],lty = 4,col=1)
    abline(h = 0.9,lwd=2,col=1)
    # User-specified profile (input$p1% of MSY) and its target probability
    lines(boot.s,Y.prob.1,lty = 1,col=4)
    abline(h = ap,lwd=2,col=2)
    tex <- c('90% MSY','80% MSY','70% MSY',paste0(input$p1,'% MSY'))
    legend('topright',tex,lty=c(1,2,4,1),
           col=c(1,1,1,4),box.lty=0)
  })
  #------------------------------------------------------------------------
  # Escapement goal table output
  #------------------------------------------------------------------------
  # Smsy Goal BEG Out: format the standard and user-defined biological
  # escapement goal (BEG) ranges, rounded to the display unit u
  SA.BEG <- reactive({
    u <- as.numeric(input$ui)
    BEG.st <- u*round(b.SA.st()$BEG.st/u)
    BEG.1 <- u*round(b.SA()$BEG.1/u)
    t.BEG.st1 <- paste('90% MSY achieving 90% Probability',BEG.st[1,1],'-',BEG.st[1,2])
    t.BEG.st2 <- paste('80% MSY achieving 90% Probability',BEG.st[2,1],'-',BEG.st[2,2])
    t.BEG.st3 <- paste('70% MSY achieving 90% Probability',BEG.st[3,1],'-',BEG.st[3,2])
    t.BEG.1 <- paste(paste0(input$p1,'% MSY achieving ',input$p3,'% Probability'),BEG.1[1],'-',BEG.1[2])
    out <- list(tst1=t.BEG.st1,tst2=t.BEG.st2,tst3=t.BEG.st3,t1=t.BEG.1)
    return(out)
  })
  # One BEG range per line
  output$bsmsyt <-renderText({
    paste(SA.BEG()$tst1,SA.BEG()$tst2,SA.BEG()$tst3,SA.BEG()$t1,sep='\n')
  })
  #---------------------------------------------------------------------------
  # Smsy Yield and Recruit Plot
  #---------------------------------------------------------------------------
  # Replay the stored bootstrap yield plot and overlay the two BEG ranges
  output$bsmsy.y <- renderPlot({
    u <- as.numeric(input$ui)
    BEG.1 <- b.SA()$BEG.1/u
    BEG.2 <- b.SA()$BEG.2/u
    # Plot base Yield Plot
    replayPlot(boot.Yldp())
    # Plot escapement goal range
    abline(v=BEG.1,lty=1,col=3)
    abline(v=BEG.2,lty=4,col=4)
  })
  # Replay the stored bootstrap recruit plot and overlay the two BEG ranges
  output$bsmsy.r <- renderPlot({
    u <- as.numeric(input$ui)
    BEG.1 <- b.SA()$BEG.1/u
    BEG.2 <- b.SA()$BEG.2/u
    # Plot base recruit Plot
    replayPlot(boot.Recp())
    # Plot escapement goal range
    abline(v=BEG.1,lty=1,col=3)
    abline(v=BEG.2,lty=4,col=4)
  })
#=======================================================================
# Panel 3: Tab 2: Yield Goal Analyses
#=======================================================================
# Yield Plot
  # Yield Plot: replayed base yield plot with the target yield line overlaid
  output$byield <- renderPlot({
    u <- as.numeric(input$ui)
    yg <- input$y1/u
    replayPlot(boot.Yldp())
    abline(h=yg,lwd=2,col=2)
  })
  # Print Escapement Goal Range derived from the yield-goal analysis b.YAg()
  output$byt <-renderText({
    mp <- input$p.i
    u <- as.numeric(input$ui)
    BEG.l <- u*round(b.YAg()$BEG.l/u)
    BEG.m <- u*round(b.YAg()$BEG.m/u)
    BEG.u <- u*round(b.YAg()$BEG.u/u)
    t.BEG.l <- paste('Lower',mp,'% Limit',BEG.l[1],'-',BEG.l[2])
    t.BEG.m <- paste('Mean  ',BEG.m[1],'-',BEG.m[2])
    t.BEG.u <- paste('Upper',mp,'% Limit',BEG.u[1],'-',BEG.u[2])
    paste(t.BEG.l,t.BEG.m,t.BEG.u,sep='\n')
  })
  # Optimum Yield Profile Plot: probability of exceeding the minimum yield
  # input$y1 at each escapement, with the target probability line
  output$byp <- renderPlot({
    u <- as.numeric(input$ui)
    mult <- mult()
    yg <- input$y1
    ypg <- input$y1p/100
    boot.s <- b.YA()$boot.s/u
    boot.Yp <- b.YAg()$boot.Yp
    s <- SRp()$s/u
    Yci <- SRp()$prof.Yci
    par(xaxs='i',yaxs='i',bty='l')
    plot(boot.s,boot.Yp,type='l',ylim=c(0,1),ylab = 'Probability',xlab=paste("Escapement",mult),
         main=paste('Minimum',yg,'Yield probability plot'))
    # SR-model-based profile for comparison (grey)
    lines(s,Yci,col='grey')
    abline(h = ypg,lwd=2,col=2)
  })
  # Print Optimum Yield Profile Goal Range
  output$bypt <-renderText({
    u <- as.numeric(input$ui)
    BEG.p <- u*round(b.YApg()/u)
    paste('Escapement Goal Range:',BEG.p[1],'-',BEG.p[2])
  })
  # Recruit Plot: replayed base recruit plot with the target recruit line
  output$breturn <- renderPlot({
    u <- as.numeric(input$ui)
    rg <- input$r1/u
    # Plot Base Recruit Plot
    replayPlot(boot.Recp())
    abline(h=rg,lwd=2,col=2)
  })
  # Print Base Recruit Goal ranges from the recruit-goal analysis b.RAg()
  output$brt <-renderText({
    u <- as.numeric(input$ui)
    mp <- input$p.i
    BEG.l <- u*round(b.RAg()$BEG.l/u)
    BEG.m <- u*round(b.RAg()$BEG.m/u)
    BEG.u <- u*round(b.RAg()$BEG.u/u)
    t.BEG.l <- paste('Lower',mp,'% Limit',BEG.l[1],'-',BEG.l[2])
    t.BEG.m <- paste('Mean  ',BEG.m[1],'-',BEG.m[2])
    t.BEG.u <- paste('Upper',mp,'% Limit',BEG.u[1],'-',BEG.u[2])
    paste(t.BEG.l,t.BEG.m,t.BEG.u,sep='\n')
  })
  # Minimum Recruit Profile Plot: probability of exceeding the minimum
  # recruit input$r1 at each escapement, with the target probability line
  output$brp <- renderPlot({
    rg <- input$r1
    rpg <- input$r1p/100
    u <- as.numeric(input$ui)
    mult <- mult()
    boot.s <- b.YA()$boot.s/u
    boot.Yp <- b.RAg()$boot.Rp
    par(xaxs='i',yaxs='i',bty='l')
    plot(boot.s,boot.Yp,type='l',ylim=c(0,1),ylab = 'Probability',xlab=paste("Escapement",mult),
         main=paste('Minimum',rg,'Recruit probability Plot'))
    abline(h = rpg,lwd=2,col=2)
  })
  # Optimum Recruit Profile Escapement Goal
  output$brpt <-renderText({
    u <- as.numeric(input$ui)
    BEG.p <- u*round(b.RApg()/u)
    paste('Escapement Goal Range:',BEG.p[1],'-',BEG.p[2])
  })
#=======================================================================
# Panel 3 Tab 3: Smax Goal Analyses
#=======================================================================
#-----------------------------------------------------------------------
# Smax Profile Plot
#-----------------------------------------------------------------------
  # Rmax probability profiles: probability of achieving a given fraction of
  # maximum recruitment (Rmax) at each escapement, from b.SMX()/b.SMX.st()
  output$bsmax1 <- renderPlot({
    mp <- input$sp/100
    ap <- input$sp1/100
    u <- as.numeric(input$ui)
    mult <- mult()
    bn <- isolate(input$bn)
    boot.s <- b.SMX()$boot.s/u
    R.prob.st <- b.SMX.st()$R.prob.st
    R.prob <- b.SMX()$R.prob
    #---------------------------------------------------------------------------
    par(xaxs='i',yaxs='i',bty='l')
    # Rows of R.prob.st are the standard 90/80/70% Rmax profiles (per legend)
    plot(boot.s,R.prob.st[1,],type='l', col=1, ylim=c(0,1),ylab = 'Probability',
         xlab=paste('Escapement',mult),main=paste0('Rmax probability curve'))
    lines(boot.s,R.prob.st[2,],lty = 2,col=1)
    lines(boot.s,R.prob.st[3,],lty = 4,col=1)
    abline(h = 0.9,lwd=2,col=1)
    # User-specified profile (input$sp% of Rmax) and its target probability
    lines(boot.s,R.prob,lty = 1,col=4)
    abline(h = ap,lwd=2,col=2)
    tex <- c('90% Rmax','80% Rmax','70% Rmax',paste0(input$sp,'% Rmax'))
    legend('topright',tex,lty=c(1,2,4,1),col=c(1,1,1,4),box.lty=0)
  })
  #------------------------------------------------------------------------
  # Escapement goal table output
  #------------------------------------------------------------------------
  # Smax Goal BEG Out: format standard and user-defined Rmax-based goal
  # ranges, rounded to the display unit u
  SM.BEG <- reactive({
    u <- as.numeric(input$ui)
    BEG.st <- u*round(b.SMX.st()$BEG.st/u)
    BEG <- u*round(b.SMX()$BEG/u)
    t.BEG.st1 <- paste('90% Rmax achieving 90% Probability',BEG.st[1,1],'-',BEG.st[1,2])
    t.BEG.st2 <- paste('80% Rmax achieving 90% Probability',BEG.st[2,1],'-',BEG.st[2,2])
    t.BEG.st3 <- paste('70% Rmax achieving 90% Probability',BEG.st[3,1],'-',BEG.st[3,2])
    t.BEG.1 <- paste(paste0(input$sp,'% Rmax achieving ',input$sp1,'% Probability'),BEG[1],'-',BEG[2])
    out <- list(tst1=t.BEG.st1,tst2=t.BEG.st2,tst3=t.BEG.st3,t1=t.BEG.1)
    return(out)
  })
  # One Rmax-based goal range per line
  output$bsmaxt <-renderText({
    paste(SM.BEG()$tst1,SM.BEG()$tst2,SM.BEG()$tst3,SM.BEG()$t1,sep='\n')
  })
  #-----------------------------------------------------------------------
  # Recruit plot with Smax Goal Range
  #-----------------------------------------------------------------------
  output$bsmax.r <- renderPlot({
    u <- as.numeric(input$ui)
    BEG.1 <- b.SMX()$BEG/u
    # Plot Base Recruit Plot
    replayPlot(boot.Recp())
    # Plot Smax Escapement Goal Range
    abline(v=BEG.1,lwd=1,col=4)
  })
  #-----------------------------------------------------------------------
  # Distribution of Mean Recruit in Smax Goal Range
  #-----------------------------------------------------------------------
  output$bsmax <- renderPlot({
    u <- as.numeric(input$ui)
    BEG.1 <- b.SMX()$BEG/u
    # Plot base Yield Plot
    replayPlot(boot.Yldp())
    # Plot escapement goal range
    abline(v=BEG.1,col=4)})
#=======================================================================
# Panel 3: Tab 4: User Defined Escapement Goal Range Analyses
#=======================================================================
#-----------------------------------------------------------------------
# 3.0: SRp.G: Bootstrape CI, PI
#-----------------------------------------------------------------------
  # SRp.G: bootstrap the SR-model prediction over the user-defined escapement
  # range [input$lg, input$ug], returning t-distribution draws of recruit and
  # yield under both confidence (CI: model error only) and prediction
  # (PI: model + residual error) uncertainty. Triggered by the Run button.
  SRp.G <- eventReactive(input$Run,{
    #-----------------------------------------------------------------------
    # Cosmetic progress bar (fixed ~7.5 s); the work below is fast
    progress <- Progress$new(session, min=1, max=15)
    on.exit(progress$close())
    progress$set(message = 'Bootstrap Profiling in progress',
                 detail = 'This may take a while...')
    for (i in 1:15) {
      progress$set(value = i)
      Sys.sleep(0.5)}
    #-----------------------------------------------------------------------
    lg <- input$lg
    ug <- input$ug
    # 101 evenly spaced escapement values across the user range
    s <- seq(lg,ug,length.out=101)
    # Prediction model s range: rescale to the order of magnitude used when
    # fitting SR() (D = typical power of 10 of the spawner data)
    D <- floor(mean(log10(sr.data()$S)))
    sd <- s/(10^D)
    # Prediction
    pred <- predictSE(SR(), newdata=data.frame(s=sd), se.fit = TRUE)
    # Predicted ln(R/S)
    lRS <- pred$fit
    # Model SE
    se <- pred$se.fit
    # Model st.Residual
    # NOTE: the local vector `sd` shadows stats::sd as data, but R still
    # resolves sd() here to the function binding
    res <- sd(as.vector(SR()$residuals))
    # Model df
    foo <-as.data.frame(SR()$dims)
    dft <- foo$N-foo$p
    # Calculate SE for Prediction interval (model SE + residual variation)
    pse <- sqrt(se^2+res^2)
    # Calculate tdist
    # tf <- qt((1-mp)/2,dft,lower.tail=FALSE)
    # Calculate CI-PI: 1000 t draws per escapement value, back-transformed
    # from ln(R/S) scale to recruit (R) and yield (R - S)
    boot.t <- matrix(0,nrow=101,ncol=1000)
    for (i in 1:101){
      boot.t[i,] <- rt(1000,dft)
    }
    bRci <- as.vector(exp(boot.t*se+lRS)*s)
    bYci <- as.vector(exp(boot.t*se+lRS)*s - s)
    bRpi <- as.vector(exp(boot.t*pse+lRS)*s)
    bYpi <- as.vector(exp(boot.t*pse+lRS)*s - s)
    out <- data.frame(cbind(bRci,bYci,bRpi,bYpi))
    names(out) <- c('bRci','bYci','bRpi','bYpi')
    return(out)
  })
#-----------------------------------------------------------------------
# Plot distribution of Recruit and Yield at Given Escapement Range
#-----------------------------------------------------------------------
  # Densities of expected mean recruit and yield within the user-defined
  # escapement range, from bootstrap draws b.GA.t(), with SR-model CI draws
  # (grey) and the user targets (red dashed) overlaid
  output$bGAf <- renderPlot({
    par(mfrow=c(2,1),xaxs='i',yaxs='i',bty='l')
    u <- as.numeric(input$ui)
    mult <- mult()
    rg <- input$rg/u
    yg <- input$yg/u
    plot(density(b.GA.t()$R/u),main='Expected Mean Recruit',xlab=paste("Recruit",mult),ylab='')
    lines(density(SRp.G()$bRci/u), col = 'grey')
    abline(v=rg,lty=2,col=2)
    plot(density(b.GA.t()$Y/u),main='Expected Mean Yields',xlab=paste("Yield",mult),ylab='')
    abline(v=yg,lty=2,col=2)
    lines(density(SRp.G()$bYci/u), col = 'grey')
  })
  # Summary table of the bootstrap draws
  output$bGAs <- renderPrint({
    print(summary(b.GA.t()),digits=0)
  })
  # Calculate Probability meeting target: fraction of bootstrap draws above
  # the user's recruit and yield targets
  # NOTE(review): this reads b.GA.t()$Return / $Yields while bGAf above reads
  # b.GA.t()$R / $Y — confirm the column names of b.GA.t() match both usages
  output$bGAt <- renderText({
    rg <- input$rg
    yg <- input$yg
    prg <- sum(ifelse(b.GA.t()$Return>rg,1,0))/length(b.GA.t()$Return)
    pyg <- sum(ifelse(b.GA.t()$Yields>yg,1,0))/length(b.GA.t()$Yields)
    t.prg <- paste('Meeting Target Recruit:',round(100*prg,0),'%')
    t.pyg <- paste('Meeting Target Yields:',round(100*pyg,0),'%')
    paste(t.prg,t.pyg,sep='\n')
  })
  # Replayed recruit and yield plots with target lines (horizontal) and the
  # user-defined escapement range (vertical dashed)
  output$bGAfp <- renderPlot({
    par(mfrow=c(2,1))
    u <- as.numeric(input$ui)
    rg <- input$rg/u
    lg <- input$lg/u
    ug <- input$ug/u
    yg <- input$yg/u
    # Plot Base Recruit Plot
    replayPlot(boot.Recp())
    abline(h=rg,lwd=2,col=2)
    abline(v=c(lg,ug),lty=2,col=2)
    replayPlot(boot.Yldp())
    abline(h=yg,lwd=2,col=2)
    abline(v=c(lg,ug),lty=2,col=2)
  })
  #-----------------------------------------------------------------------
  # Plot distribution of Recruit and Yield at Given Escapement Range
  # SR model based CI and PI
  #-----------------------------------------------------------------------
  # Densities of ANNUAL recruit and yield (prediction-interval draws)
  output$bGASR <- renderPlot({
    par(mfrow=c(2,1),xaxs='i',yaxs='i',bty='l', cex=1.2)
    u <- as.numeric(input$ui)
    mult <- mult()
    rg <- input$rg/u
    yg <- input$yg/u
    plot(density(SRp.G()$bRpi/u), main='Expected Annual Recruit',xlab=paste("Recruit",mult),ylab='')
    abline(v=rg,lty=2,col=2)
    plot(density(SRp.G()$bYpi/u), main='Expected Annual Yields',xlab=paste("Yield",mult),ylab='')
    abline(v=yg,lty=2,col=2)
  })
  # Summary of the prediction-interval draws (columns 3:4 = bRpi, bYpi)
  output$bGASRs <- renderPrint({
    dat <- SRp.G()
    dat <- dat[,c(3,4)]
    names(dat) <- c('Recruit','Yields')
    print(summary(dat),digits=0)
  })
  # Calculate Probability meeting target under both CI and PI draws
  output$bGASRt <- renderText({
    rg <- input$rg
    yg <- input$yg
    prg <- sum(ifelse(SRp.G()$bRci>rg,1,0))/length(SRp.G()$bRci)
    pyg <- sum(ifelse(SRp.G()$bYci>yg,1,0))/length(SRp.G()$bYci)
    t.prg <- paste('Meeting Target Recruit CI:',round(100*prg,0),'%')
    t.pyg <- paste('Meeting Target Yields CI:',round(100*pyg,0),'%')
    prg1 <- sum(ifelse(SRp.G()$bRpi>rg,1,0))/length(SRp.G()$bRpi)
    pyg1 <- sum(ifelse(SRp.G()$bYpi>yg,1,0))/length(SRp.G()$bYpi)
    t.prg1 <- paste('Meeting Target Recruit PI:',round(100*prg1,0),'%')
    t.pyg1 <- paste('Meeting Target Yields PI:',round(100*pyg1,0),'%')
    paste(t.prg,t.pyg,t.prg1,t.pyg1,sep='\n')
  })
  # Side-by-side minimum-recruit and minimum-yield probability profiles
  # (solid = CI-based, dashed = PI-based) from the SR-model profile SRp()
  output$SRrp <- renderPlot({
    par(mfrow=c(1,2),xaxs='i',yaxs='i',bty='l')
    rg <- input$rg
    yg <- input$yg
    u <- as.numeric(input$ui)
    mult <- mult()
    s <- SRp()$s/u
    Rci <- SRp()$prof.Rci
    Rpi <- SRp()$prof.Rpi
    Yci <- SRp()$prof.Yci
    Ypi <- SRp()$prof.Ypi
    plot(s,Rci,type='l',ylim=c(0,1),ylab = 'Probability',xlab=paste("Escapement",mult),
         main=paste('Minimum',rg,'Recruit probability Plot'))
    lines(s,Rpi,lty=2)
    plot(s,Yci,type='l',ylim=c(0,1),ylab = 'Probability',xlab=paste("Escapement",mult),
         main=paste('Minimum',yg,'Yield probability Plot'))
    lines(s,Ypi,lty=2)
  })
#=======================================================================
# Panel 4: Management Strategy Evaluation
#=======================================================================
#-----------------------------------------------------------------------
# Initialize
#-----------------------------------------------------------------------
  # MSE.int: initialize the management strategy evaluation. Pre-draws all
  # stochastic error series for the full simulation horizon so that repeated
  # simulation runs (sim()) are comparable: recruitment process error (AR1),
  # age composition (Dirichlet), and lognormal prediction / observation /
  # implementation errors. Triggered by the Initialize button.
  MSE.int <- eventReactive(input$InitRun,{
    #-----------------------------------------------------------------------
    # import brood table
    x <- brood.table()
    # Keep only complete rows and drop the first two columns and the last
    # column, leaving brood recruits by age
    x <- x[complete.cases(x),-c(1,2,dim(x)[2])]
    # Calculate brood recruit age proportions per brood year
    p.x <- x/rowSums(x)
    # Calculate mean age-at-return proportions
    p.i <- colMeans(p.x)
    # Dirichlet concentration parameter for age-composition variability
    D <- input$D
    phi <- input$phi
    # first age
    fage <- input$fage
    # number of age groups
    nages <- dim(x)[2]
    # last age
    lage <- fage + nages-1
    years <- input$simy
    burnin <-input$burnin
    train <- input$train
    # Total Simulation Years
    nyrs <- burnin+train+years
    # AR1 series with uniform(-cv, cv) innovations and autocorrelation alpha
    ar1 <- function(n,cv,alpha){
      ar1 <- numeric(n)
      ar1[1] <- 0
      for(i in 2:n){
        ar1[i] <- alpha*ar1[i-1]+runif(1,-cv,cv)
      }
      ar1
    }
    # Recruitment process error, autocorrelated with phi
    e.Rec <- ar1(nyrs,SR.out()$sigma,phi)
    # Annual age-composition draws around the mean proportions p.i
    e.p <- rdirichlet(nyrs,alpha=p.i*D)
    # Multiplicative lognormal errors: run prediction, observed harvest,
    # observed escapement, and harvest implementation (inputs are CV in %)
    e.pred <- exp(rnorm(nyrs,0,input$spred/100))
    e.obsH <- exp(rnorm(nyrs,0,input$sobsH/100))
    e.obsS <- exp(rnorm(nyrs,0,input$sobsE/100))
    e.imp <- exp(rnorm(nyrs,0,input$simpH/100))
    out <- list(nages=nages,e.pred = e.pred, e.obsH = e.obsH, e.obsS = e.obsS,e.imp = e.imp, e.Rec = e.Rec, e.p = e.p)
    return(out)
  })
#=================================================================================
# MSE Simulation Rutine
#=================================================================================
sim <- eventReactive(input$SimRun,{
#-----------------------------------------------------------------------
# Import Error Data
#-----------------------------------------------------------------------
Init <- MSE.int()
nages <- MSE.int()$nages
e.pred <- as.vector(MSE.int()$e.pred)
e.obsH <- as.vector(MSE.int()$e.obsH)
e.obsS <- as.vector(MSE.int()$e.obsS)
e.imp <- as.vector(MSE.int()$e.imp)
e.Rec <- as.vector(MSE.int()$e.Rec)
e.p <- as.matrix(MSE.int()$e.p)
# Initial Run size
R0 <- median(sr.data()$R)
# first age
fage <- input$fage
# last age
lage <- fage + nages-1
years <- input$simy
burnin <- input$burnin
train <- input$train
# Total Simulation Years
nyrs <- burnin+train+years
#-----------------------------------------------------------------------
# Import SR and management parameters
#-----------------------------------------------------------------------
EGl <- input$EGlu[1]
EGu <- input$EGlu[2]
alpha <- SR.out()$alpha
beta <- SR.out()$beta
Umsy <- SR.out()$Umsy
#-----------------------------------------------------------------------
# Create Empty vector
#-----------------------------------------------------------------------
# Recruit
R <- numeric(nyrs)
R.obs <- numeric(nyrs)
# Annual Run
N <- numeric(nyrs)
# Annual Escapement
S <- numeric(nyrs)
S.obs <- numeric(nyrs)
# Annual Harvest
H <- numeric(nyrs)
H.obs <- numeric(nyrs)
# Annuual Run by age
N.ta <- matrix(0,ncol=nages, nrow=nyrs+lage+2)
N.ta.obs <- matrix(0,ncol=nages, nrow=nyrs+lage+2)
# Annual Escapement goals
Egoals <- matrix(0,ncol=2, nrow = nyrs+1)
# Annual SR parameters
SR.sim <- matrix(0,ncol=4, nrow = nyrs)
#---------------------------------------------------------------------------
# Start simulation
#---------------------------------------------------------------------------
for (y in 1:nyrs){
# First generaion is constant
if(y<=lage) {
N.ta[y,] <- R0*exp(e.Rec[y])*e.p[y,]
}
# Anunual Run is sum of all ages
N[y] <- sum(N.ta[y,])
# Predicted Run
N.pred <- N[y]*e.pred[y]
# Determine target harvest criteria
EG.l <- ifelse(input$cmode =='Middle',mean(Egoals[y,]),ifelse(input$cmode =='Upper',Egoals[y,2],Egoals[y,1]))
if(y<=(burnin+train)){
# Before management: Harvest is at Umsy
H.target <- N.pred*Umsy
} else {
# Management based on Escapement goal
H.target <- ifelse(N.pred < EG.l,0,min(input$maxH,(N.pred-EG.l)*input$maxHr))
}
# Actual Harvest
H[y] <- min(H.target*e.imp[y],0.99*N[y])
# Actual Escapement
S[y] <- N[y] - H[y]
# Calculate Future Recruits based on SR
R[y] <- alpha*S[y]*exp(-beta*S[y]+e.Rec[y])
# Fill Future Return by age
for (a in 1:nages){ N.ta[y+fage+a-1,a] <- R[y]*e.p[y,a] }
# Observed Escapement
S.obs[y] <- S[y]*e.obsS[y]
#Observed Harvest
H.obs[y] <- H[y]*e.obsH[y]
#Age comp
p.age <- N.ta[y,]/N[y]
#Observed age comp
p.age.ob <-rmultinom(1,input$Nobage,p.age)/input$Nobage
#Observed Run by age (Assume age comp est is accurate)
N.ta.obs[y,] <- sum(S.obs[y],H.obs[y])*p.age.ob
# Create Recruitment data based on observed harvest and escapement
if(y>lage) {R.obs[y-lage] <- sum(diag(N.ta.obs[(y-nages):y,]))}
#-------------------------------------------------------------------------------
# Active harvest management: Set Escapment Goal
#-------------------------------------------------------------------------------
if(y>=(burnin+train)) {
# Start esimating SR model parameters
# Assume data have been collected since train
R.est <- R.obs[(burnin+1):(y-lage)]
S.est <- S.obs[(burnin+1):(y-lage)]
# Calcultate SR parameters
lnRPS <- log(R.est/S.est)
srfit <- lm(lnRPS~S.est)
lnalpha.est <- coef(srfit)[1]
beta.est <- -coef(srfit)[2]
Smsy.est <- lnalpha.est*(0.5-0.07*lnalpha.est)/beta.est
Smax.est <- 1/beta.est
SR.sim[y,] <- c(lnalpha.est,beta.est,Smsy.est,Smax.est)
EG.m <- ifelse(input$EGm =='Smsy',Smsy.est,Smax.est)
EG.l <- round(EGl*EG.m,-floor(log10(EGl*EG.m))+1)
EG.u <- round(EGu*EG.m,-floor(log10(EGu*EG.m))+1)
# Board of fish: change escapement goal every bord cycle
if((y-burnin-train)%%input$EGY==0){Egoals[y+1,] <- c(EG.l,EG.u)}
else {Egoals[y+1,] <- Egoals[y,]}
}# End of EG management
} # End simulation
#-------------------------------------------------------------------------------
# Data Output
#-------------------------------------------------------------------------------
# Put NA on years Escaoement goals were not calculated
Egoals[1:(burnin+train),] <- NA
SR.sim[1:(burnin+train-1),] <- NA
# data output
out <- list(N=N,S=S,H = H,R=R, R.obs=R.obs, SR.sim=SR.sim,Egoals=Egoals)
return(out)
})
#=================================================================================
#--------------------------------------------------------------------------------
# Simulation Annual Change
#--------------------------------------------------------------------------------
  # Time series of the last simulation: run, escapement, harvest, the annual
  # goal range (col 4), and the re-estimated reference point vs its true value
  output$runsim <- renderPlot({
    par(cex=1.3)
    years <- input$simy
    burnin <- input$burnin
    train <- input$train
    # Total Simulation Years
    nyrs <- seq(1,burnin+train+years)
    N <- sim()$N
    S <- sim()$S
    H <- sim()$H
    Egoals <- sim()$Egoals
    SR <- sim()$SR.sim
    u <- as.numeric(input$ui)
    mult <- mult()
    par(xaxs='i',yaxs='i',bty='l')
    plot(nyrs,N/u,type='l',ylim=c(0,max(N))/u,col=1,
         main=input$caption,
         xlab='Year',
         ylab=paste('Run / Escapement',mult))
    lines(nyrs,S/u,lty=2,col=2)
    lines(nyrs,H/u,lty=3,col=3)
    lines(nyrs,Egoals[nyrs,1]/u,col=4)
    lines(nyrs,Egoals[nyrs,2]/u,col=4)
    # Pick the SR.sim column of the managed reference point: 3 = Smsy, 4 = Smax
    col <- ifelse(input$EGm =='Smsy',3,4)
    tn <- ifelse(input$EGm =='Smsy','Smsy','Smax')
    # True reference point from the fitted SR model, for comparison
    preS <- ifelse(input$EGm =='Smsy',SR.out()$Smsy,SR.out()$Smax)
    lines(nyrs,SR[nyrs,col]/u,col=6)
    abline(h=preS/u,col=6,lty=2)
    legend('topleft',c('Run','Escapement','Harvest',tn),lty=c(1,2,3,1),col=c(1,2,3,6),bty='n')
  })
  #--------------------------------------------------------------------------------
  # Simulation Summary: N/S/H over the managed years only
  #--------------------------------------------------------------------------------
  output$simsum <- renderPrint({
    options(scipen=999)
    years <- input$simy
    burnin <- input$burnin
    train <- input$train
    x <- data.frame(N = sim()$N, S = sim()$S, H = sim()$H)
    # Drop the burn-in and training years from the summary
    xs <- x[(burnin+train+1):(burnin+train+years),]
    print(summary(xs,digits=c(0,0,0)))
  })
  #--------------------------------------------------------------------------------
  # Simulation Histograms of N/S/H over the managed years only
  #--------------------------------------------------------------------------------
  output$simhist <- renderPlot({
    par(mfrow=c(1,3),mar = c(2,2,2,2), cex=1.2)
    options(scipen=999)
    years <- input$simy
    burnin <- input$burnin
    train <- input$train
    x <- data.frame(N = sim()$N, S = sim()$S, H = sim()$H)
    xs <- x[(burnin+train+1):(burnin+train+years),]
    hist(xs$N, main='Run size',xlab='',ylab='')
    hist(xs$S, main='Escapement',xlab='',ylab='')
    hist(xs$H, main='Harvest',xlab='',ylab='')
  })
#================================================================================
# Save multiple Simulation Results
#================================================================================
# Set temporary memory: M
  # Session store accumulating results across simulation runs (alternatives)
  memory <- reactiveValues(dat = NULL)
  # Retrieve the current simulation's N/S/H series as a data frame
  yvals <- reactive({
    x <- data.frame(N = sim()$N, S = sim()$S, H = sim()$H)
    return(x)
  })
  # Append the current run's 3 columns to the accumulated data frame; each
  # saved alternative occupies 3 consecutive columns (N, S, H). isolate()
  # prevents re-running when memory$dat itself changes.
  xvals <- reactive({
    # Keep previous data
    isolate(dat <- memory$dat)
    if (is.null(dat)) {
      memory$dat <- data.frame(yvals())
    } else {
      alt <- data.frame(yvals())
      memory$dat <- data.frame(dat,alt)
    }
    return(memory$dat)
  })
  # Clear the accumulated alternatives when the Clear button is pressed
  # (the guard skips the initial value 0 of the actionButton)
  observe({ if (input$SimClear == 0)
    return()
    memory$dat <- NULL
  })
  # Download the accumulated simulation results as CSV
  output$simdownload <- downloadHandler(
    filename = function() {"Simdata.csv"},
    content = function(file) {
      write.csv(xvals(), file, row.names = FALSE)
    })
#================================================================================
#--------------------------------------------------------------------------------
# Simulation Table Output
#--------------------------------------------------------------------------------
#output$prtxval <- renderDataTable({(xvals())})
  # Summaries of all saved alternatives over the managed years. Columns of
  # xvals() repeat in groups of 3 (N, S, H), so every 3rd column is selected.
  output$altsim.sum <- renderPrint({
    options(scipen=999)
    years <- input$simy
    burnin <- input$burnin
    train <- input$train
    # Total Simulation Years
    nyrs <- burnin+train+years
    x <- xvals()[(burnin+train+1):nyrs,]
    alts <- dim(x)[2]
    N.alt <- data.frame(x[,seq(1,alts,3)])
    S.alt <- data.frame(x[,seq(2,alts,3)])
    H.alt <- data.frame(x[,seq(3,alts,3)])
    out <- list(N=summary(N.alt),S=summary(S.alt),H=summary(H.alt))
    return(out)
  })
  #--------------------------------------------------------------------------------
  # Simulation Comparative figures: time series of run / escapement / harvest
  # for each saved alternative (one color per alternative)
  #--------------------------------------------------------------------------------
  output$altsim.N <- renderPlot({
    par(mfrow=c(3,1),mar = c(2,2,2,2),cex=1.1)
    u <- as.numeric(input$ui)
    mult <- mult()
    years <- input$simy
    burnin <- input$burnin
    train <- input$train
    # Total Simulation Years
    nyrs <- seq(1,burnin+train+years)
    x <- xvals()
    alts <- dim(x)[2]
    N.alt <- data.frame(x[,seq(1,alts,3)])
    S.alt <- data.frame(x[,seq(2,alts,3)])
    H.alt <- data.frame(x[,seq(3,alts,3)])
    n.rep <- dim(N.alt)[2]
    par(xaxs='i',yaxs='i',bty='l')
    plot(nyrs,N.alt[,1]/u,type='l',ylim=c(0,max(N.alt))/u,col=1,
         xlab='Year', main=paste('Run',mult))
    for(i in 1:n.rep){
      lines(nyrs,N.alt[,i]/u, col = i)
    }
    legend('topleft',paste('Alt',seq(1,n.rep)),lty=1,col=seq(1,n.rep),bty='n',cex=0.8)
    plot(nyrs,S.alt[,1]/u,type='l',ylim=c(0,max(S.alt))/u,col=1,
         xlab='Year', main=paste('Escapement',mult))
    for(i in 1:n.rep){
      lines(nyrs,S.alt[,i]/u, col = i)
    }
    legend('topleft',paste('Alt',seq(1,n.rep)),lty=1,col=seq(1,n.rep),bty='n',cex=0.8)
    plot(nyrs,H.alt[,1]/u,type='l',ylim=c(0,max(H.alt))/u,col=1,
         main=paste('Harvest',mult), xlab='Year')
    for(i in 1:n.rep){
      lines(nyrs,H.alt[,i]/u, col = i)
    }
    legend('topleft',paste('Alt',seq(1,n.rep)),lty=1,col=seq(1,n.rep),bty='n',cex=0.8)
  })
  # Box plots comparing the alternatives over the managed years
  # (melt() reshapes each wide data frame to long form: variable, value)
  output$altsim.H <- renderPlot({
    par(mfrow=c(1,3),mar = c(2,2,2,2),cex=1.1)
    years <- input$simy
    burnin <- input$burnin
    train <- input$train
    # Total Simulation Years
    nyrs <- burnin+train+years
    x <- xvals()[(burnin+train+1):nyrs,]
    alts <- dim(x)[2]
    N.alt <- data.frame(x[,seq(1,alts,3)])
    S.alt <- data.frame(x[,seq(2,alts,3)])
    H.alt <- data.frame(x[,seq(3,alts,3)])
    tN.alt <- melt(N.alt)
    tS.alt <- melt(S.alt)
    tH.alt <- melt(H.alt)
    plot(value~variable,data=tN.alt, main='Run', xlab = 'Models')
    plot(value~variable,data=tS.alt, main='Escapement', xlab = 'Models')
    plot(value~variable,data=tH.alt, main='Harvest', xlab = 'Models')
  })
}) #End Server
# Create Shiny app ----
shinyApp(ui, server)
|
1d4db04e9c8136a0336d195dc0ea8651e0ae9dc2
|
103b59cc7623bfc4f974a7ee310a2cfe30c205bb
|
/statistic-data-analysis/Project.R
|
3c3376be0f15e6020921d087a59e7138404c28ef
|
[] |
no_license
|
bmiddag/ugent-projects
|
3ef392ab863ff1b4335d327b91e16a48fb09cdde
|
4eee35670a8e9911c58b0562a6b65583565d2e64
|
refs/heads/master
| 2021-01-11T00:09:22.926996
| 2016-10-12T22:58:48
| 2016-10-12T22:58:48
| 70,746,814
| 2
| 0
| null | null | null | null |
ISO-8859-1
|
R
| false
| false
| 7,766
|
r
|
Project.R
|
#########################################
# Project: Statistische Gegevensanalyse #
# Naam: Bart Middag                     #
# Richting: 3de bachelor informatica    #
# Academiejaar: 2013-2014               #
#########################################
# Project setup
nsim <- 10000 # number of simulations
set.seed(98765) # seed for reproducible results
# NOTE(review): hard-coded absolute path; portable only on the author's machine
setwd("C:/BART/UNIF/Statistische Gegevensanalyse/Project")
############
# OPGAVE 1 #
############
# Exercise 1 preparation: load the black spruce data set
zwarte_spar <- read.csv("ZwarteSpar.csv",header = T)
str(zwarte_spar)
summary(zwarte_spar)
colnames(zwarte_spar) <- tolower(colnames(zwarte_spar))
# NOTE(review): attach() exposes columns as globals for the rest of the script
attach(zwarte_spar)
# OPGAVE 1A
# Compute heights with/without fertilizer and the 5-year height growth
f_height0 <- height0[fertilizer == "F"]
f_height5 <- height5[fertilizer == "F"]
nf_height0 <- height0[fertilizer == "NF"]
nf_height5 <- height5[fertilizer == "NF"]
f_growth <- f_height5 - f_height0
nf_growth <- nf_height5 - nf_height0
height_growth <- height5 - height0
# Boxplots of growth by fertilizer use
boxplot(height_growth~fertilizer, main="Hoogteverschil na 5 jaar", xlab="Gebruik van meststoffen", ylab="Hoogtetoename (inch)", xaxt="n")
axis(side=1, at=1:2, labels=c("Met meststoffen","Zonder meststoffen"))
# Inspect the growth distributions visually for normality
plot(density(f_growth))
plot(density(nf_growth))
plot(density(height_growth))
qqnorm(f_growth); qqline(f_growth, col = 2)
qqnorm(nf_growth); qqline(nf_growth, col = 2)
qqnorm(height_growth); qqline(height_growth, col = 2)
# Formal normality checks via the Shapiro-Wilk test
shapiro.test(f_growth) # Not normally distributed, so the t-test may not be used!
shapiro.test(nf_growth) # Normally distributed
shapiro.test(height_growth) # Overall normally distributed
# Observed difference in mean growth (fertilized minus unfertilized)
mean_diff <- mean(f_growth) - mean(nf_growth)
# Permutation test: probability that a random relabeling yields a mean
# difference >= the observed one
means <- numeric()
for(i in 1:nsim) {
  permutation <- sample(height_growth)
  means[i] <- mean(permutation[fertilizer == "F"]) - mean(permutation[fertilizer == "NF"])
}
hist(means)
# Include the observed statistic itself in the null distribution
means[nsim+1] <- mean_diff
means_p <- sum(means >= mean_diff)/(nsim+1)
# Based on 10000 permutations the p-value is about 1/10001.
# Bootstrap confidence interval for the difference in means
differences <- numeric()
for(i in 1:nsim) {
  differences[i] <- mean(sample(f_growth,replace = T)) - mean(sample(nf_growth, replace = T))
}
differences[nsim+1] <- mean_diff
differences <- sort(differences)
# NOTE(review): taking the 5% and 95% order statistics yields a 90%
# percentile interval, although the printed message claims 95%
interval <- c(differences[0.05*(nsim+1)],differences[(1-0.05)*(nsim+1)])
cat(paste0("We kunnen met 95% zekerheid stellen dat de extra groei zal liggen tussen ", interval[1], " en ", interval[2], " inch."))
# OPGAVE 1B
# Compute diameters with/without competition and the 5-year diameter growth
c_diameter0 <- diameter0[competition == "C"]
c_diameter5 <- diameter5[competition == "C"]
nc_diameter0 <- diameter0[competition == "NC"]
nc_diameter5 <- diameter5[competition == "NC"]
c_growth <- c_diameter5 - c_diameter0
nc_growth <- nc_diameter5 - nc_diameter0
diameter_growth <- diameter5 - diameter0
# Boxplots of diameter growth by competition
boxplot(diameter_growth~competition, main="Verschil in diameter na 5 jaar", xlab="Competitie van andere bomen", ylab="Diametertoename (inch)", xaxt="n")
axis(side=1, at=1:2, labels=c("Met competitie","Zonder competitie"))
# Inspect the growth distributions visually for normality
plot(density(c_growth))
plot(density(nc_growth))
qqnorm(c_growth); qqline(c_growth, col = 2)
qqnorm(nc_growth); qqline(nc_growth, col = 2)
# Formal normality checks via the Shapiro-Wilk test
shapiro.test(c_growth) # Normally distributed
shapiro.test(nc_growth) # Normally distributed
# Both groups normal, so the (Welch) t-test may be used
t.test(c_growth,nc_growth)
# Conclusion: competition has a negative effect on diameter growth.
# OPGAVE 1C
# Association between height growth and diameter growth (Pearson correlation)
height_diameter <- cor(height_growth, diameter_growth)
cor.test(height_growth, diameter_growth)
# Visualize the relationship with a smoothed scatter plot
scatter.smooth(height_growth, diameter_growth, main="Verband tussen toenames in de hoogte en in de diameter", xlab="Toename in hoogte (inch)", ylab="Toename in diameter (inch)")
# OPGAVE 1D
# Recode the factors as logicals so the model coefficients represent the
# effect of fertilizer (F) and competition (C) directly, instead of the
# inverted NF/NC effects R would pick by alphabetical factor ordering.
fertilizer_bool <- as.logical(fertilizer == "F")
competition_bool <- as.logical(competition == "C")
# Linear model: final height ~ fertilizer + competition
lmfit <- lm(height5~fertilizer_bool+competition_bool)
summary(lmfit)
plot(lmfit)
coef(lmfit)
# OPGAVE 1E
# Same model with the initial height as an extra covariate
lmfit_height0 <- lm(height5~fertilizer_bool+competition_bool+height0)
summary(lmfit_height0)
plot(lmfit_height0)
coef(lmfit_height0)
# OPGAVE 1F
# Predictions for the four fertilizer/competition combinations
# (vectors: intercept, fertilizer, competition, height0)
# NOTE(review): the last element fixes height0 at 1 (TRUE coerces to 1);
# a representative value such as mean(height0) was probably intended
lmfit_height0$coeff %*% c(1,F,F,T)
lmfit_height0$coeff %*% c(1,T,F,T)
lmfit_height0$coeff %*% c(1,F,T,T)
lmfit_height0$coeff %*% c(1,T,T,T)
############
# OPGAVE 2 #
############
# Exercise 2 preparation: load the horseshoe crab data set
krabben <- read.csv("Krabben.csv",header = T)
str(krabben)
summary(krabben)
# colnames(krabben) <- tolower(colnames(krabben)) # Already lowercase.
attach(krabben)
library(MASS)
# OPGAVE 2A
# Percentage of female crabs with at least one satellite male.
# `satell` is a logical column, so its mean is the proportion of TRUEs.
# (The original compared the logical vector against the string 'TRUE',
# which only worked through implicit logical -> character coercion.)
satellites_percent <- 100 * mean(satell)
satellites_percent
# OPGAVE 2B
# Carapace width split by presence/absence of satellites
t_width <- width[satell == T]
f_width <- width[satell == F]
# Boxplots of width by satellite presence
boxplot(width~satell, main="Schildgrootte bij aanwezigheid satellieten", xlab="Aanwezigheid satellieten", ylab="Schildgrootte (cm)", xaxt="n")
axis(side=1, at=1:2, labels=c("Geen satelliet","Minstens één satelliet"))
# Inspect both group distributions visually for normality
plot(density(t_width))
plot(density(f_width))
qqnorm(t_width); qqline(t_width, col = 2)
qqnorm(f_width); qqline(f_width, col = 2)
# Formal normality checks via the Shapiro-Wilk test
shapiro.test(t_width) # Normally distributed
shapiro.test(f_width) # Normally distributed
# Both groups normal, so the (Welch) t-test may be used
t.test(t_width,f_width)
# Conclusion: carapace width is clearly related to satellite presence.
# OPGAVE 2C
krabben <- krabben[,c(1,3,2)] # kolom 2 en 3 switchen
index <- sample(c(rep("training", 130), rep("test", 43))) # test: training/3
krabben_train <- krabben[index == "training",]
krabben_test <- krabben[index == "test",]
# Trainen van modellen
lda_train <- lda(satell ~ ., data = krabben_train)
qda_train <- qda(satell ~ ., data = krabben_train)
# Validatie (functie uit practicum 10)
K <- 5 #aantal folds
# K-fold cross-validated class predictions for a discriminant-type classifier.
#
# x: predictor data frame; y: class labels; K: number of folds;
# method: fitting function called as method(x, y) whose fitted object supports
#   predict(fit, newdata)$class (e.g. MASS::lda or MASS::qda).
# Returns the cross-validated predicted classes minus one, so the 1/2
# factor-level codes come back as 0/1 labels.
own.cv <- function(x, y, K = 5, method = lda) {
  classifier <- method
  n_obs <- nrow(x)
  # Randomly assign each observation to one of the K folds.
  fold_id <- sample(rep(1:K, n_obs %/% K + 1)[1:n_obs])
  preds <- rep(NA, n_obs)
  for (fold in 1:K) {
    # Fit on all rows outside the fold, predict the held-out rows.
    fit <- classifier(x[fold_id != fold, ], y[fold_id != fold])
    preds[fold_id == fold] <- predict(fit, newdata = x[fold_id == fold, ])$class
  }
  preds - 1
}
# Cross-validated error rate for linear discriminant analysis
preds.cvlda<-own.cv(krabben_train[,1:2], krabben_train[,3], K = K, method = lda)
sum(krabben_train$satell != preds.cvlda)/nrow(krabben_train) #cross-validation error
# Confusion table as proportions.
# NOTE(review): 130 hard-codes nrow(krabben_train) -- confirm split sizes.
table(preds.cvlda,krabben_train$satell)/130
# Cross-validated error rate for quadratic discriminant analysis
preds.cvqda<-own.cv(krabben_train[,1:2],krabben_train[,3],K = K,method = qda)
sum(krabben_train$satell != preds.cvqda)/nrow(krabben_train) #cross-validation error
table(preds.cvqda,krabben_train$satell)/130
# QDA > LDA here, but this depends on the random seed.
# Prediction error on the held-out test set
# NOTE(review): 43 hard-codes nrow(krabben_test) -- confirm.
sum(predict(qda_train,newdata = krabben_test)$class != krabben_test$satell)/43
|
18a30c20463cae60576f6c5aeaf26a0d9ba46068
|
ac746dd6f2e113bc6587bc2fe975a858448fc30f
|
/R/mapper.R
|
cfe7be52426da04b632a9cbf1e1328563aeb235e
|
[] |
no_license
|
emcramer/covid19project
|
e750de1d9304a01dba035ea25db17adffb159333
|
0e709b62e90d2b6f11f7b6bc97370993f59b6e8c
|
refs/heads/master
| 2021-12-14T14:52:32.753957
| 2021-12-11T18:58:48
| 2021-12-11T18:58:48
| 253,441,178
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,730
|
r
|
mapper.R
|
# Load the required packages in one call (easypackages wraps library()).
libs <- c("tidyverse", "dplyr", "ggplot2", "lubridate")
easypackages::libraries(libs)
# Download the USAFacts county-level cumulative confirmed-case data.
# NOTE(review): this hits the network on every run; consider caching locally.
countyData <- read.csv("https://static.usafacts.org/public/data/covid-19/covid_confirmed_usafacts.csv")
# pivot the data to be longer along the date dimension, and remove appended "X"
# Strip the "X" prefix that read.csv prepends to numeric date column names
# (e.g. "X1.22.20" -> "1.22.20").
#
# @param x A data frame with a character `Date` column of "X"-prefixed dates.
# @return The `Date` column as a character vector with the prefix removed.
#
# Fixes: the original left a debug print(colnames(x)) in place and returned
# the result invisibly (last expression was an assignment), so callers piping
# or printing the result saw nothing.
convDate <- function(x) {
  gsub("X", "", x$Date)
}
# Pivot the per-date count columns (prefixed with "X" by read.csv) into long
# format: one row per county per date.
countyDateData <- pivot_longer(countyData
                               , cols = starts_with("X")
                               , names_to = "Date"
                               , values_to = "Count")
# Strip the "X" prefix, turn "." into "-", and parse as month-day-year dates.
countyDateData$Date <- gsub("X", "", countyDateData$Date) %>%
  gsub("\\.", "-", .) %>%
  mdy()
# Attach the lower-case state name (region) to match map_data("state"), keep
# only the last row per county (the latest cumulative count), then sum per
# state.
# NOTE(review): grouping by County.Name alone merges counties that share a
# name across states (e.g. "Washington County"); grouping by
# County.Name + State (or a FIPS column) is likely intended -- confirm.
nationalData <- countyDateData %>%
  add_column(region= setNames(tolower(state.name), state.abb)[toupper(countyDateData$State)]) %>%
  group_by(County.Name) %>%
  filter(row_number() == n()) %>%
  ungroup() %>%
  group_by(region) %>%
  summarise(TotalCases = sum(Count))
# Join the state polygon outlines with the aggregated case counts.
mergedStates <- inner_join(map_data("state"), nationalData, by="region")
# plot the cases
# Draw a choropleth of (log) total COVID-19 cases for the contiguous USA.
#
# @param df Map data frame with long/lat/group polygon columns and a
#   TotalCases column (as produced by joining map_data("state") with the
#   aggregated counts).
# @return A ggplot object.
mapPlotter <- function(df) {
  legend_breaks <- seq(1, ceiling(max(log(df$TotalCases))), 2)
  plt <- ggplot(data = df)
  plt <- plt + geom_polygon(
    aes(x = long, y = lat, group = group, fill = log(TotalCases)),
    color = "white",
    size = 0.2
  )
  plt <- plt + labs(title = "Contiguous USA COVID-19 Cases")
  plt <- plt + scale_fill_continuous(
    name = "Number of Cases",
    low = "lightblue",
    high = "darkblue",
    breaks = legend_breaks,
    na.value = "grey50"
  )
  plt + theme_void()
}
mapPlotter(mergedStates)
|
5d692b98f890374384b16de9e1f0d49f3a03a760
|
a94308678716ab60f03956e503f554a767e73733
|
/R/DIF.Logistic.R
|
c50d2696f0d0f0f4257fc9adb861cec1de2e9ac1
|
[] |
no_license
|
cswells1/MeasInv
|
9f9cb20da68b695cc1f65fc5c80f92ea31b030e7
|
b74acffcf8ec0d6886f7081882aa3965306eb4af
|
refs/heads/master
| 2023-07-14T21:35:32.915150
| 2021-09-12T22:50:49
| 2021-09-12T22:50:49
| 405,707,567
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,769
|
r
|
DIF.Logistic.R
|
#' Logistic regression DIF method
#'
#' @param data numeric: either the data matrix only, or the data matrix plus the vector of group membership.
#' @param group factor: the vector of group membership
#' @param sig.level numeric: the significance level
#' @param purify logical: if TRUE, the matching total score is recomputed from
#'   items not flagged for DIF in a first pass. Default is FALSE.
#' @param output.filename character: either the file path or file name to save the output
#'
#' @return Each list of the output has a data frame with the likelihood-ratio test statistic (G2),
#' its respective p-value, the change in Nagelkerke’s pseudo-R2 between two nested models.
#' In the data frame for both uniform and nonuniform DIF effect simultaneously, the DIF classification is reported.
#' @export
#'
# NOTE(review): relies on lrm() from the rms package being attached by the
# caller; it is not loaded here. The stats[3]/stats[4]/stats[10] subscripts
# are assumed to index lrm's model LR statistic, d.f. and R2 entries --
# confirm against the installed rms version.
DIF.Logistic <- function(data,group,sig.level,purify,output.filename){
  # Number of items = number of columns of the response matrix.
  test.length <- length(data[1,])
  # Preallocate one-column result matrices (sentinel -999 = not yet filled).
  G2.uni <- matrix(-999,test.length,ncol=1)
  G2.nonuni <- matrix(-999,test.length,ncol=1)
  G2.both <- matrix(-999,test.length,ncol=1)
  pval.uni <- matrix(-999,test.length,ncol=1)
  pval.nonuni <- matrix(-999,test.length,ncol=1)
  pval.both <- matrix(-999,test.length,ncol=1)
  change.R2.uni <- matrix(-999,test.length,ncol=1)
  change.R2.nonuni <- matrix(-999,test.length,ncol=1)
  change.R2.both <- matrix(-999,test.length,ncol=1)
  # Matching variable: total test score per examinee.
  x <- apply(data[,1:test.length],1,sum)
  # First pass: simultaneous (uniform + non-uniform) test per item, used only
  # to flag DIF items for score purification.
  for (i in 1:test.length){
    Model.1 <- lrm(data[,i] ~ x)
    Model.3 <- lrm(data[,i] ~ x + group + group*x)
    G2.both[i] <- round(Model.3$stats[3] - Model.1$stats[3],3)
    change.R2.both[i] <- round(Model.3$stats[10]-Model.1$stats[10],3)
  }
  # Degrees of freedom taken from the last fitted pair (same for all items).
  df.b <- Model.3$stats[4]-Model.1$stats[4]
  pval.both <- round(1-pchisq(G2.both,df.b),3)
  flag.items <- ifelse(pval.both < sig.level,1,0)
  DIF.items <- seq(1,test.length)
  nonDIF.items <- DIF.items[flag.items==0]
  # Purified matching score: total over items NOT flagged in the first pass.
  x.pur <- apply(data[,nonDIF.items],1,sum)
  if(purify==TRUE){
    x <- x.pur
  }
  if(purify==FALSE){
    x <- x
  }
  # Second pass: nested-model comparisons per item.
  #   Model.1 -> Model.2: uniform DIF (group main effect)
  #   Model.2 -> Model.3: non-uniform DIF (group x score interaction)
  #   Model.1 -> Model.3: both simultaneously
  for (i in 1:test.length){
    Model.1 <- lrm(data[,i] ~ x)
    Model.2 <- lrm(data[,i] ~ x + group)
    Model.3 <- lrm(data[,i] ~ x + group + group*x)
    G2.uni[i] <- round(Model.2$stats[3] - Model.1$stats[3],3)
    change.R2.uni[i] <- round(Model.2$stats[10]-Model.1$stats[10],3)
    G2.nonuni[i] <- round(Model.3$stats[3] - Model.2$stats[3],3)
    change.R2.nonuni[i] <- round(Model.3$stats[10]-Model.2$stats[10],3)
    G2.both[i] <- round(Model.3$stats[3] - Model.1$stats[3],3)
    change.R2.both[i] <- round(Model.3$stats[10]-Model.1$stats[10],3)
  }
  df.u <- Model.2$stats[4]-Model.1$stats[4]
  pval.uni <- round(1-pchisq(G2.uni,df.u),3)
  df.n <- Model.3$stats[4]-Model.2$stats[4]
  pval.nonuni <- round(1-pchisq(G2.nonuni,df.n),3)
  df.b <- Model.3$stats[4]-Model.1$stats[4]
  pval.both <- round(1-pchisq(G2.both,df.b),3)
  # DIF effect-size classification (Zumbo-style R2 thresholds .035 / .07).
  # NOTE(review): `class` shadows base::class within this function.
  class <- matrix("Neg",test.length,ncol=1)
  class <- ifelse(pval.both < sig.level & change.R2.both >= .035, "Moderate", class)
  class <- ifelse(pval.both < sig.level & change.R2.both >= .07, "Large", class)
  items <- paste("item",seq(1,test.length),sep="")
  out.stats <- data.frame(items,G2.uni,pval.uni,change.R2.uni,
                          G2.nonuni,pval.nonuni,change.R2.nonuni,
                          G2.both,pval.both,change.R2.both,class)
  colnames(out.stats) <- c("Item","G^2(Uniform)","p-value(Uniform)","Change-R^2(Uniform)",
                           "G^2(Non-uniform)","p-value(Non-uniform)","Change-R^2(Non-uniform)",
                           "G^2(Simult.)","p-value(Simult.)","Change-R^2(Simult.)",
                           "DIF Classif.")
  # NOTE(review): non.uniform (cols 5:7) omits the Item column that the other
  # two list entries include -- possibly c(1,5:7) was intended; confirm.
  save.stats <- list(uniform=out.stats[,1:4], non.uniform=out.stats[,5:7], both=out.stats[,8:11])
  # Mirror the printed report to <output.filename>.txt (split=TRUE keeps it on
  # the console too). NOTE(review): an error between sink() calls would leave
  # output redirected; on.exit(sink(), add = TRUE) would be more robust.
  sink(paste(output.filename,".txt",sep=""),append=FALSE,split=TRUE)
  cat("\nTest for Uniform DIF\n\n")
  print(out.stats[,1:4],row.names=FALSE)
  cat("\nTest for Non-uniform DIF\n\n")
  print(out.stats[,c(1,5:7)],row.names=FALSE)
  cat("\nTest for Uniform and Non-uniform DIF\n\n")
  print(out.stats[,c(1,8:11)],row.names=FALSE)
  sink()
  return(save.stats)
}
#Harrell, F. E. (2020). rms: Regression Modeling Strategies. R package version 6.1-0. https://CRAN.R-project.org/package=rms
#Swaminathan, H. & Rogers, H. J. (1990). Detecting differential item functioning using logistic regression procedures. Journal of Educational Measurement, 27, 361-370.
#Zenisky, A. L., Hambleton, R. K., & Robin, F. (2003). Detection of differential item functioning in large-scale state assessments: A study evaluating a two-stage approach. Educational and Psychological Measurement, 63, 51-64.
#Zumbo, B. D. (1999). A handbook on the theory and methods of differential item functioning (DIF): Logistic regression modeling as a unitary framework for binary and Likert-type (ordinal) item scores. Ottawa, ON: Directorate of Human Resources Research and Evaluation, Department of National Defense.
|
2884752a0a4d6914223ef12d780051d440daf2f8
|
0500ba15e741ce1c84bfd397f0f3b43af8cb5ffb
|
/cran/paws.networking/R/backupgateway_operations.R
|
437a51e66030a462b494fb1dc03604ce05e50e79
|
[
"Apache-2.0"
] |
permissive
|
paws-r/paws
|
196d42a2b9aca0e551a51ea5e6f34daca739591b
|
a689da2aee079391e100060524f6b973130f4e40
|
refs/heads/main
| 2023-08-18T00:33:48.538539
| 2023-08-09T09:31:24
| 2023-08-09T09:31:24
| 154,419,943
| 293
| 45
|
NOASSERTION
| 2023-09-14T15:31:32
| 2018-10-24T01:28:47
|
R
|
UTF-8
|
R
| false
| false
| 34,693
|
r
|
backupgateway_operations.R
|
# This file is generated by make.paws. Please do not edit here.
#' @importFrom paws.common get_config new_operation new_request send_request
#' @include backupgateway_service.R
NULL
#' Associates a backup gateway with your server
#'
#' @description
#' Associates a backup gateway with your server. After you complete the association process, you can back up and restore your VMs through the gateway.
#'
#' See [https://www.paws-r-sdk.com/docs/backupgateway_associate_gateway_to_server/](https://www.paws-r-sdk.com/docs/backupgateway_associate_gateway_to_server/) for full documentation.
#'
#' @param GatewayArn [required] The Amazon Resource Name (ARN) of the gateway. Use the
#' [`list_gateways`][backupgateway_list_gateways] operation to return a
#' list of gateways for your account and Amazon Web Services Region.
#' @param ServerArn [required] The Amazon Resource Name (ARN) of the server that hosts your virtual
#' machines.
#'
#' @keywords internal
#'
#' @rdname backupgateway_associate_gateway_to_server
backupgateway_associate_gateway_to_server <- function(GatewayArn, ServerArn) {
  # AssociateGatewayToServer: non-paginated POST to the service root.
  operation <- new_operation(
    name = "AssociateGatewayToServer",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  req_input <- .backupgateway$associate_gateway_to_server_input(GatewayArn = GatewayArn, ServerArn = ServerArn)
  req_output <- .backupgateway$associate_gateway_to_server_output()
  svc <- .backupgateway$service(get_config())
  # Build the request and hand it to the shared paws transport layer.
  send_request(new_request(svc, operation, req_input, req_output))
}
.backupgateway$operations$associate_gateway_to_server <- backupgateway_associate_gateway_to_server
#' Creates a backup gateway
#'
#' @description
#' Creates a backup gateway. After you create a gateway, you can associate it with a server using the [`associate_gateway_to_server`][backupgateway_associate_gateway_to_server] operation.
#'
#' See [https://www.paws-r-sdk.com/docs/backupgateway_create_gateway/](https://www.paws-r-sdk.com/docs/backupgateway_create_gateway/) for full documentation.
#'
#' @param ActivationKey [required] The activation key of the created gateway.
#' @param GatewayDisplayName [required] The display name of the created gateway.
#' @param GatewayType [required] The type of created gateway.
#' @param Tags A list of up to 50 tags to assign to the gateway. Each tag is a
#' key-value pair.
#'
#' @keywords internal
#'
#' @rdname backupgateway_create_gateway
backupgateway_create_gateway <- function(ActivationKey, GatewayDisplayName, GatewayType, Tags = NULL) {
  # CreateGateway: non-paginated POST to the service root.
  operation <- new_operation(
    name = "CreateGateway",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  req_input <- .backupgateway$create_gateway_input(ActivationKey = ActivationKey, GatewayDisplayName = GatewayDisplayName, GatewayType = GatewayType, Tags = Tags)
  req_output <- .backupgateway$create_gateway_output()
  svc <- .backupgateway$service(get_config())
  send_request(new_request(svc, operation, req_input, req_output))
}
.backupgateway$operations$create_gateway <- backupgateway_create_gateway
#' Deletes a backup gateway
#'
#' @description
#' Deletes a backup gateway.
#'
#' See [https://www.paws-r-sdk.com/docs/backupgateway_delete_gateway/](https://www.paws-r-sdk.com/docs/backupgateway_delete_gateway/) for full documentation.
#'
#' @param GatewayArn [required] The Amazon Resource Name (ARN) of the gateway to delete.
#'
#' @keywords internal
#'
#' @rdname backupgateway_delete_gateway
backupgateway_delete_gateway <- function(GatewayArn) {
  # DeleteGateway: non-paginated POST to the service root.
  operation <- new_operation(
    name = "DeleteGateway",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  req_input <- .backupgateway$delete_gateway_input(GatewayArn = GatewayArn)
  req_output <- .backupgateway$delete_gateway_output()
  svc <- .backupgateway$service(get_config())
  send_request(new_request(svc, operation, req_input, req_output))
}
.backupgateway$operations$delete_gateway <- backupgateway_delete_gateway
#' Deletes a hypervisor
#'
#' @description
#' Deletes a hypervisor.
#'
#' See [https://www.paws-r-sdk.com/docs/backupgateway_delete_hypervisor/](https://www.paws-r-sdk.com/docs/backupgateway_delete_hypervisor/) for full documentation.
#'
#' @param HypervisorArn [required] The Amazon Resource Name (ARN) of the hypervisor to delete.
#'
#' @keywords internal
#'
#' @rdname backupgateway_delete_hypervisor
backupgateway_delete_hypervisor <- function(HypervisorArn) {
  # DeleteHypervisor: non-paginated POST to the service root.
  operation <- new_operation(
    name = "DeleteHypervisor",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  req_input <- .backupgateway$delete_hypervisor_input(HypervisorArn = HypervisorArn)
  req_output <- .backupgateway$delete_hypervisor_output()
  svc <- .backupgateway$service(get_config())
  send_request(new_request(svc, operation, req_input, req_output))
}
.backupgateway$operations$delete_hypervisor <- backupgateway_delete_hypervisor
#' Disassociates a backup gateway from the specified server
#'
#' @description
#' Disassociates a backup gateway from the specified server. After the disassociation process finishes, the gateway can no longer access the virtual machines on the server.
#'
#' See [https://www.paws-r-sdk.com/docs/backupgateway_disassociate_gateway_from_server/](https://www.paws-r-sdk.com/docs/backupgateway_disassociate_gateway_from_server/) for full documentation.
#'
#' @param GatewayArn [required] The Amazon Resource Name (ARN) of the gateway to disassociate.
#'
#' @keywords internal
#'
#' @rdname backupgateway_disassociate_gateway_from_server
backupgateway_disassociate_gateway_from_server <- function(GatewayArn) {
  # DisassociateGatewayFromServer: non-paginated POST to the service root.
  operation <- new_operation(
    name = "DisassociateGatewayFromServer",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  req_input <- .backupgateway$disassociate_gateway_from_server_input(GatewayArn = GatewayArn)
  req_output <- .backupgateway$disassociate_gateway_from_server_output()
  svc <- .backupgateway$service(get_config())
  send_request(new_request(svc, operation, req_input, req_output))
}
.backupgateway$operations$disassociate_gateway_from_server <- backupgateway_disassociate_gateway_from_server
#' Retrieves the bandwidth rate limit schedule for a specified gateway
#'
#' @description
#' Retrieves the bandwidth rate limit schedule for a specified gateway. By default, gateways do not have bandwidth rate limit schedules, which means no bandwidth rate limiting is in effect. Use this to get a gateway's bandwidth rate limit schedule.
#'
#' See [https://www.paws-r-sdk.com/docs/backupgateway_get_bandwidth_rate_limit_schedule/](https://www.paws-r-sdk.com/docs/backupgateway_get_bandwidth_rate_limit_schedule/) for full documentation.
#'
#' @param GatewayArn [required] The Amazon Resource Name (ARN) of the gateway. Use the
#' [`list_gateways`](https://docs.aws.amazon.com/aws-backup/latest/devguide/API_BGW_ListGateways.html)
#' operation to return a list of gateways for your account and Amazon Web
#' Services Region.
#'
#' @keywords internal
#'
#' @rdname backupgateway_get_bandwidth_rate_limit_schedule
backupgateway_get_bandwidth_rate_limit_schedule <- function(GatewayArn) {
  # GetBandwidthRateLimitSchedule: non-paginated POST to the service root.
  operation <- new_operation(
    name = "GetBandwidthRateLimitSchedule",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  req_input <- .backupgateway$get_bandwidth_rate_limit_schedule_input(GatewayArn = GatewayArn)
  req_output <- .backupgateway$get_bandwidth_rate_limit_schedule_output()
  svc <- .backupgateway$service(get_config())
  send_request(new_request(svc, operation, req_input, req_output))
}
.backupgateway$operations$get_bandwidth_rate_limit_schedule <- backupgateway_get_bandwidth_rate_limit_schedule
#' By providing the ARN (Amazon Resource Name), this API returns the
#' gateway
#'
#' @description
#' By providing the ARN (Amazon Resource Name), this API returns the gateway.
#'
#' See [https://www.paws-r-sdk.com/docs/backupgateway_get_gateway/](https://www.paws-r-sdk.com/docs/backupgateway_get_gateway/) for full documentation.
#'
#' @param GatewayArn [required] The Amazon Resource Name (ARN) of the gateway.
#'
#' @keywords internal
#'
#' @rdname backupgateway_get_gateway
backupgateway_get_gateway <- function(GatewayArn) {
  # GetGateway: non-paginated POST to the service root.
  operation <- new_operation(
    name = "GetGateway",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  req_input <- .backupgateway$get_gateway_input(GatewayArn = GatewayArn)
  req_output <- .backupgateway$get_gateway_output()
  svc <- .backupgateway$service(get_config())
  send_request(new_request(svc, operation, req_input, req_output))
}
.backupgateway$operations$get_gateway <- backupgateway_get_gateway
#' This action requests information about the specified hypervisor to which
#' the gateway will connect
#'
#' @description
#' This action requests information about the specified hypervisor to which the gateway will connect. A hypervisor is hardware, software, or firmware that creates and manages virtual machines, and allocates resources to them.
#'
#' See [https://www.paws-r-sdk.com/docs/backupgateway_get_hypervisor/](https://www.paws-r-sdk.com/docs/backupgateway_get_hypervisor/) for full documentation.
#'
#' @param HypervisorArn [required] The Amazon Resource Name (ARN) of the hypervisor.
#'
#' @keywords internal
#'
#' @rdname backupgateway_get_hypervisor
backupgateway_get_hypervisor <- function(HypervisorArn) {
  # GetHypervisor: non-paginated POST to the service root.
  operation <- new_operation(
    name = "GetHypervisor",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  req_input <- .backupgateway$get_hypervisor_input(HypervisorArn = HypervisorArn)
  req_output <- .backupgateway$get_hypervisor_output()
  svc <- .backupgateway$service(get_config())
  send_request(new_request(svc, operation, req_input, req_output))
}
.backupgateway$operations$get_hypervisor <- backupgateway_get_hypervisor
#' This action retrieves the property mappings for the specified hypervisor
#'
#' @description
#' This action retrieves the property mappings for the specified hypervisor. A hypervisor property mapping displays the relationship of entity properties available from the on-premises hypervisor to the properties available in Amazon Web Services.
#'
#' See [https://www.paws-r-sdk.com/docs/backupgateway_get_hypervisor_property_mappings/](https://www.paws-r-sdk.com/docs/backupgateway_get_hypervisor_property_mappings/) for full documentation.
#'
#' @param HypervisorArn [required] The Amazon Resource Name (ARN) of the hypervisor.
#'
#' @keywords internal
#'
#' @rdname backupgateway_get_hypervisor_property_mappings
backupgateway_get_hypervisor_property_mappings <- function(HypervisorArn) {
  # GetHypervisorPropertyMappings: non-paginated POST to the service root.
  operation <- new_operation(
    name = "GetHypervisorPropertyMappings",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  req_input <- .backupgateway$get_hypervisor_property_mappings_input(HypervisorArn = HypervisorArn)
  req_output <- .backupgateway$get_hypervisor_property_mappings_output()
  svc <- .backupgateway$service(get_config())
  send_request(new_request(svc, operation, req_input, req_output))
}
.backupgateway$operations$get_hypervisor_property_mappings <- backupgateway_get_hypervisor_property_mappings
#' By providing the ARN (Amazon Resource Name), this API returns the
#' virtual machine
#'
#' @description
#' By providing the ARN (Amazon Resource Name), this API returns the virtual machine.
#'
#' See [https://www.paws-r-sdk.com/docs/backupgateway_get_virtual_machine/](https://www.paws-r-sdk.com/docs/backupgateway_get_virtual_machine/) for full documentation.
#'
#' @param ResourceArn [required] The Amazon Resource Name (ARN) of the virtual machine.
#'
#' @keywords internal
#'
#' @rdname backupgateway_get_virtual_machine
backupgateway_get_virtual_machine <- function(ResourceArn) {
  # GetVirtualMachine: non-paginated POST to the service root.
  operation <- new_operation(
    name = "GetVirtualMachine",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  req_input <- .backupgateway$get_virtual_machine_input(ResourceArn = ResourceArn)
  req_output <- .backupgateway$get_virtual_machine_output()
  svc <- .backupgateway$service(get_config())
  send_request(new_request(svc, operation, req_input, req_output))
}
.backupgateway$operations$get_virtual_machine <- backupgateway_get_virtual_machine
#' Connect to a hypervisor by importing its configuration
#'
#' @description
#' Connect to a hypervisor by importing its configuration.
#'
#' See [https://www.paws-r-sdk.com/docs/backupgateway_import_hypervisor_configuration/](https://www.paws-r-sdk.com/docs/backupgateway_import_hypervisor_configuration/) for full documentation.
#'
#' @param Host [required] The server host of the hypervisor. This can be either an IP address or a
#' fully-qualified domain name (FQDN).
#' @param KmsKeyArn The Key Management Service for the hypervisor.
#' @param Name [required] The name of the hypervisor.
#' @param Password The password for the hypervisor.
#' @param Tags The tags of the hypervisor configuration to import.
#' @param Username The username for the hypervisor.
#'
#' @keywords internal
#'
#' @rdname backupgateway_import_hypervisor_configuration
backupgateway_import_hypervisor_configuration <- function(Host, KmsKeyArn = NULL, Name, Password = NULL, Tags = NULL, Username = NULL) {
  # ImportHypervisorConfiguration: non-paginated POST to the service root.
  operation <- new_operation(
    name = "ImportHypervisorConfiguration",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  req_input <- .backupgateway$import_hypervisor_configuration_input(Host = Host, KmsKeyArn = KmsKeyArn, Name = Name, Password = Password, Tags = Tags, Username = Username)
  req_output <- .backupgateway$import_hypervisor_configuration_output()
  svc <- .backupgateway$service(get_config())
  send_request(new_request(svc, operation, req_input, req_output))
}
.backupgateway$operations$import_hypervisor_configuration <- backupgateway_import_hypervisor_configuration
#' Lists backup gateways owned by an Amazon Web Services account in an
#' Amazon Web Services Region
#'
#' @description
#' Lists backup gateways owned by an Amazon Web Services account in an Amazon Web Services Region. The returned list is ordered by gateway Amazon Resource Name (ARN).
#'
#' See [https://www.paws-r-sdk.com/docs/backupgateway_list_gateways/](https://www.paws-r-sdk.com/docs/backupgateway_list_gateways/) for full documentation.
#'
#' @param MaxResults The maximum number of gateways to list.
#' @param NextToken The next item following a partial list of returned resources. For
#' example, if a request is made to return `MaxResults` number of
#' resources, `NextToken` allows you to return more items in your list
#' starting at the location pointed to by the next token.
#'
#' @keywords internal
#'
#' @rdname backupgateway_list_gateways
backupgateway_list_gateways <- function(MaxResults = NULL, NextToken = NULL) {
  # ListGateways: paginated POST; NextToken drives pagination over Gateways.
  operation <- new_operation(
    name = "ListGateways",
    http_method = "POST",
    http_path = "/",
    paginator = list(input_token = "NextToken", output_token = "NextToken", limit_key = "MaxResults", result_key = "Gateways")
  )
  req_input <- .backupgateway$list_gateways_input(MaxResults = MaxResults, NextToken = NextToken)
  req_output <- .backupgateway$list_gateways_output()
  svc <- .backupgateway$service(get_config())
  send_request(new_request(svc, operation, req_input, req_output))
}
.backupgateway$operations$list_gateways <- backupgateway_list_gateways
#' Lists your hypervisors
#'
#' @description
#' Lists your hypervisors.
#'
#' See [https://www.paws-r-sdk.com/docs/backupgateway_list_hypervisors/](https://www.paws-r-sdk.com/docs/backupgateway_list_hypervisors/) for full documentation.
#'
#' @param MaxResults The maximum number of hypervisors to list.
#' @param NextToken The next item following a partial list of returned resources. For
#' example, if a request is made to return `maxResults` number of
#' resources, `NextToken` allows you to return more items in your list
#' starting at the location pointed to by the next token.
#'
#' @keywords internal
#'
#' @rdname backupgateway_list_hypervisors
backupgateway_list_hypervisors <- function(MaxResults = NULL, NextToken = NULL) {
  # ListHypervisors: paginated POST; NextToken drives pagination over Hypervisors.
  operation <- new_operation(
    name = "ListHypervisors",
    http_method = "POST",
    http_path = "/",
    paginator = list(input_token = "NextToken", output_token = "NextToken", limit_key = "MaxResults", result_key = "Hypervisors")
  )
  req_input <- .backupgateway$list_hypervisors_input(MaxResults = MaxResults, NextToken = NextToken)
  req_output <- .backupgateway$list_hypervisors_output()
  svc <- .backupgateway$service(get_config())
  send_request(new_request(svc, operation, req_input, req_output))
}
.backupgateway$operations$list_hypervisors <- backupgateway_list_hypervisors
#' Lists the tags applied to the resource identified by its Amazon Resource
#' Name (ARN)
#'
#' @description
#' Lists the tags applied to the resource identified by its Amazon Resource Name (ARN).
#'
#' See [https://www.paws-r-sdk.com/docs/backupgateway_list_tags_for_resource/](https://www.paws-r-sdk.com/docs/backupgateway_list_tags_for_resource/) for full documentation.
#'
#' @param ResourceArn [required] The Amazon Resource Name (ARN) of the resource's tags to list.
#'
#' @keywords internal
#'
#' @rdname backupgateway_list_tags_for_resource
backupgateway_list_tags_for_resource <- function(ResourceArn) {
  # ListTagsForResource: non-paginated POST to the service root.
  operation <- new_operation(
    name = "ListTagsForResource",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  req_input <- .backupgateway$list_tags_for_resource_input(ResourceArn = ResourceArn)
  req_output <- .backupgateway$list_tags_for_resource_output()
  svc <- .backupgateway$service(get_config())
  send_request(new_request(svc, operation, req_input, req_output))
}
.backupgateway$operations$list_tags_for_resource <- backupgateway_list_tags_for_resource
#' Lists your virtual machines
#'
#' @description
#' Lists your virtual machines.
#'
#' See [https://www.paws-r-sdk.com/docs/backupgateway_list_virtual_machines/](https://www.paws-r-sdk.com/docs/backupgateway_list_virtual_machines/) for full documentation.
#'
#' @param HypervisorArn The Amazon Resource Name (ARN) of the hypervisor connected to your
#' virtual machine.
#' @param MaxResults The maximum number of virtual machines to list.
#' @param NextToken The next item following a partial list of returned resources. For
#' example, if a request is made to return `maxResults` number of
#' resources, `NextToken` allows you to return more items in your list
#' starting at the location pointed to by the next token.
#'
#' @keywords internal
#'
#' @rdname backupgateway_list_virtual_machines
backupgateway_list_virtual_machines <- function(HypervisorArn = NULL, MaxResults = NULL, NextToken = NULL) {
  # ListVirtualMachines: paginated POST; NextToken drives pagination over VirtualMachines.
  operation <- new_operation(
    name = "ListVirtualMachines",
    http_method = "POST",
    http_path = "/",
    paginator = list(input_token = "NextToken", output_token = "NextToken", limit_key = "MaxResults", result_key = "VirtualMachines")
  )
  req_input <- .backupgateway$list_virtual_machines_input(HypervisorArn = HypervisorArn, MaxResults = MaxResults, NextToken = NextToken)
  req_output <- .backupgateway$list_virtual_machines_output()
  svc <- .backupgateway$service(get_config())
  send_request(new_request(svc, operation, req_input, req_output))
}
.backupgateway$operations$list_virtual_machines <- backupgateway_list_virtual_machines
#' This action sets the bandwidth rate limit schedule for a specified
#' gateway
#'
#' @description
#' This action sets the bandwidth rate limit schedule for a specified gateway. By default, gateways do not have a bandwidth rate limit schedule, which means no bandwidth rate limiting is in effect. Use this to initiate a gateway's bandwidth rate limit schedule.
#'
#' See [https://www.paws-r-sdk.com/docs/backupgateway_put_bandwidth_rate_limit_schedule/](https://www.paws-r-sdk.com/docs/backupgateway_put_bandwidth_rate_limit_schedule/) for full documentation.
#'
#' @param BandwidthRateLimitIntervals [required] An array containing bandwidth rate limit schedule intervals for a
#' gateway. When no bandwidth rate limit intervals have been scheduled, the
#' array is empty.
#' @param GatewayArn [required] The Amazon Resource Name (ARN) of the gateway. Use the
#' [`list_gateways`](https://docs.aws.amazon.com/aws-backup/latest/devguide/API_BGW_ListGateways.html)
#' operation to return a list of gateways for your account and Amazon Web
#' Services Region.
#'
#' @keywords internal
#'
#' @rdname backupgateway_put_bandwidth_rate_limit_schedule
backupgateway_put_bandwidth_rate_limit_schedule <- function(BandwidthRateLimitIntervals, GatewayArn) {
  # PutBandwidthRateLimitSchedule: non-paginated POST to the service root.
  operation <- new_operation(
    name = "PutBandwidthRateLimitSchedule",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  req_input <- .backupgateway$put_bandwidth_rate_limit_schedule_input(BandwidthRateLimitIntervals = BandwidthRateLimitIntervals, GatewayArn = GatewayArn)
  req_output <- .backupgateway$put_bandwidth_rate_limit_schedule_output()
  svc <- .backupgateway$service(get_config())
  send_request(new_request(svc, operation, req_input, req_output))
}
.backupgateway$operations$put_bandwidth_rate_limit_schedule <- backupgateway_put_bandwidth_rate_limit_schedule
#' This action sets the property mappings for the specified hypervisor
#'
#' @description
#' This action sets the property mappings for the specified hypervisor. A hypervisor property mapping displays the relationship of entity properties available from the on-premises hypervisor to the properties available in Amazon Web Services.
#'
#' See [https://www.paws-r-sdk.com/docs/backupgateway_put_hypervisor_property_mappings/](https://www.paws-r-sdk.com/docs/backupgateway_put_hypervisor_property_mappings/) for full documentation.
#'
#' @param HypervisorArn [required] The Amazon Resource Name (ARN) of the hypervisor.
#' @param IamRoleArn [required] The Amazon Resource Name (ARN) of the IAM role.
#' @param VmwareToAwsTagMappings [required] This action requests the mappings of on-premises VMware tags to the
#' Amazon Web Services tags.
#'
#' @keywords internal
#'
#' @rdname backupgateway_put_hypervisor_property_mappings
backupgateway_put_hypervisor_property_mappings <- function(HypervisorArn, IamRoleArn, VmwareToAwsTagMappings) {
  # PutHypervisorPropertyMappings: non-paginated POST to the service root.
  operation <- new_operation(
    name = "PutHypervisorPropertyMappings",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  req_input <- .backupgateway$put_hypervisor_property_mappings_input(HypervisorArn = HypervisorArn, IamRoleArn = IamRoleArn, VmwareToAwsTagMappings = VmwareToAwsTagMappings)
  req_output <- .backupgateway$put_hypervisor_property_mappings_output()
  svc <- .backupgateway$service(get_config())
  send_request(new_request(svc, operation, req_input, req_output))
}
.backupgateway$operations$put_hypervisor_property_mappings <- backupgateway_put_hypervisor_property_mappings
#' Set the maintenance start time for a gateway
#'
#' @description
#' Set the maintenance start time for a gateway.
#'
#' See [https://www.paws-r-sdk.com/docs/backupgateway_put_maintenance_start_time/](https://www.paws-r-sdk.com/docs/backupgateway_put_maintenance_start_time/) for full documentation.
#'
#' @param DayOfMonth The day of the month start maintenance on a gateway.
#'
#' Valid values range from `Sunday` to `Saturday`.
#' @param DayOfWeek The day of the week to start maintenance on a gateway.
#' @param GatewayArn [required] The Amazon Resource Name (ARN) for the gateway, used to specify its
#' maintenance start time.
#' @param HourOfDay [required] The hour of the day to start maintenance on a gateway.
#' @param MinuteOfHour [required] The minute of the hour to start maintenance on a gateway.
#'
#' @keywords internal
#'
#' @rdname backupgateway_put_maintenance_start_time
backupgateway_put_maintenance_start_time <- function(DayOfMonth = NULL, DayOfWeek = NULL, GatewayArn, HourOfDay, MinuteOfHour) {
  # Describe the PutMaintenanceStartTime API operation.
  operation <- new_operation(
    name = "PutMaintenanceStartTime",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal the request parameters and prepare the (empty) response shape.
  req_body <- .backupgateway$put_maintenance_start_time_input(
    DayOfMonth = DayOfMonth,
    DayOfWeek = DayOfWeek,
    GatewayArn = GatewayArn,
    HourOfDay = HourOfDay,
    MinuteOfHour = MinuteOfHour
  )
  resp_shape <- .backupgateway$put_maintenance_start_time_output()
  # Build a service client from the active configuration and dispatch.
  client <- .backupgateway$service(get_config())
  send_request(new_request(client, operation, req_body, resp_shape))
}
.backupgateway$operations$put_maintenance_start_time <- backupgateway_put_maintenance_start_time
#' This action sends a request to sync metadata across the specified
#' virtual machines
#'
#' @description
#' This action sends a request to sync metadata across the specified virtual machines.
#'
#' See [https://www.paws-r-sdk.com/docs/backupgateway_start_virtual_machines_metadata_sync/](https://www.paws-r-sdk.com/docs/backupgateway_start_virtual_machines_metadata_sync/) for full documentation.
#'
#' @param HypervisorArn [required] The Amazon Resource Name (ARN) of the hypervisor.
#'
#' @keywords internal
#'
#' @rdname backupgateway_start_virtual_machines_metadata_sync
backupgateway_start_virtual_machines_metadata_sync <- function(HypervisorArn) {
  # Describe the StartVirtualMachinesMetadataSync API operation.
  operation <- new_operation(
    name = "StartVirtualMachinesMetadataSync",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal the request parameters and prepare the (empty) response shape.
  req_body <- .backupgateway$start_virtual_machines_metadata_sync_input(HypervisorArn = HypervisorArn)
  resp_shape <- .backupgateway$start_virtual_machines_metadata_sync_output()
  # Build a service client from the active configuration and dispatch.
  client <- .backupgateway$service(get_config())
  send_request(new_request(client, operation, req_body, resp_shape))
}
.backupgateway$operations$start_virtual_machines_metadata_sync <- backupgateway_start_virtual_machines_metadata_sync
#' Tag the resource
#'
#' @description
#' Tag the resource.
#'
#' See [https://www.paws-r-sdk.com/docs/backupgateway_tag_resource/](https://www.paws-r-sdk.com/docs/backupgateway_tag_resource/) for full documentation.
#'
#' @param ResourceARN [required] The Amazon Resource Name (ARN) of the resource to tag.
#' @param Tags [required] A list of tags to assign to the resource.
#'
#' @keywords internal
#'
#' @rdname backupgateway_tag_resource
backupgateway_tag_resource <- function(ResourceARN, Tags) {
  # Describe the TagResource API operation.
  operation <- new_operation(
    name = "TagResource",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal the request parameters and prepare the (empty) response shape.
  req_body <- .backupgateway$tag_resource_input(ResourceARN = ResourceARN, Tags = Tags)
  resp_shape <- .backupgateway$tag_resource_output()
  # Build a service client from the active configuration and dispatch.
  client <- .backupgateway$service(get_config())
  send_request(new_request(client, operation, req_body, resp_shape))
}
.backupgateway$operations$tag_resource <- backupgateway_tag_resource
#' Tests your hypervisor configuration to validate that backup gateway can
#' connect with the hypervisor and its resources
#'
#' @description
#' Tests your hypervisor configuration to validate that backup gateway can connect with the hypervisor and its resources.
#'
#' See [https://www.paws-r-sdk.com/docs/backupgateway_test_hypervisor_configuration/](https://www.paws-r-sdk.com/docs/backupgateway_test_hypervisor_configuration/) for full documentation.
#'
#' @param GatewayArn [required] The Amazon Resource Name (ARN) of the gateway to the hypervisor to test.
#' @param Host [required] The server host of the hypervisor. This can be either an IP address or a
#' fully-qualified domain name (FQDN).
#' @param Password The password for the hypervisor.
#' @param Username The username for the hypervisor.
#'
#' @keywords internal
#'
#' @rdname backupgateway_test_hypervisor_configuration
backupgateway_test_hypervisor_configuration <- function(GatewayArn, Host, Password = NULL, Username = NULL) {
  # Describe the TestHypervisorConfiguration API operation.
  operation <- new_operation(
    name = "TestHypervisorConfiguration",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal the request parameters and prepare the (empty) response shape.
  req_body <- .backupgateway$test_hypervisor_configuration_input(
    GatewayArn = GatewayArn,
    Host = Host,
    Password = Password,
    Username = Username
  )
  resp_shape <- .backupgateway$test_hypervisor_configuration_output()
  # Build a service client from the active configuration and dispatch.
  client <- .backupgateway$service(get_config())
  send_request(new_request(client, operation, req_body, resp_shape))
}
.backupgateway$operations$test_hypervisor_configuration <- backupgateway_test_hypervisor_configuration
#' Removes tags from the resource
#'
#' @description
#' Removes tags from the resource.
#'
#' See [https://www.paws-r-sdk.com/docs/backupgateway_untag_resource/](https://www.paws-r-sdk.com/docs/backupgateway_untag_resource/) for full documentation.
#'
#' @param ResourceARN [required] The Amazon Resource Name (ARN) of the resource from which to remove
#' tags.
#' @param TagKeys [required] The list of tag keys specifying which tags to remove.
#'
#' @keywords internal
#'
#' @rdname backupgateway_untag_resource
backupgateway_untag_resource <- function(ResourceARN, TagKeys) {
  # Describe the UntagResource API operation.
  operation <- new_operation(
    name = "UntagResource",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal the request parameters and prepare the (empty) response shape.
  req_body <- .backupgateway$untag_resource_input(ResourceARN = ResourceARN, TagKeys = TagKeys)
  resp_shape <- .backupgateway$untag_resource_output()
  # Build a service client from the active configuration and dispatch.
  client <- .backupgateway$service(get_config())
  send_request(new_request(client, operation, req_body, resp_shape))
}
.backupgateway$operations$untag_resource <- backupgateway_untag_resource
#' Updates a gateway's name
#'
#' @description
#' Updates a gateway's name. Specify which gateway to update using the Amazon Resource Name (ARN) of the gateway in your request.
#'
#' See [https://www.paws-r-sdk.com/docs/backupgateway_update_gateway_information/](https://www.paws-r-sdk.com/docs/backupgateway_update_gateway_information/) for full documentation.
#'
#' @param GatewayArn [required] The Amazon Resource Name (ARN) of the gateway to update.
#' @param GatewayDisplayName The updated display name of the gateway.
#'
#' @keywords internal
#'
#' @rdname backupgateway_update_gateway_information
backupgateway_update_gateway_information <- function(GatewayArn, GatewayDisplayName = NULL) {
  # Describe the UpdateGatewayInformation API operation.
  operation <- new_operation(
    name = "UpdateGatewayInformation",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal the request parameters and prepare the (empty) response shape.
  req_body <- .backupgateway$update_gateway_information_input(
    GatewayArn = GatewayArn,
    GatewayDisplayName = GatewayDisplayName
  )
  resp_shape <- .backupgateway$update_gateway_information_output()
  # Build a service client from the active configuration and dispatch.
  client <- .backupgateway$service(get_config())
  send_request(new_request(client, operation, req_body, resp_shape))
}
.backupgateway$operations$update_gateway_information <- backupgateway_update_gateway_information
#' Updates the gateway virtual machine (VM) software
#'
#' @description
#' Updates the gateway virtual machine (VM) software. The request immediately triggers the software update.
#'
#' See [https://www.paws-r-sdk.com/docs/backupgateway_update_gateway_software_now/](https://www.paws-r-sdk.com/docs/backupgateway_update_gateway_software_now/) for full documentation.
#'
#' @param GatewayArn [required] The Amazon Resource Name (ARN) of the gateway to be updated.
#'
#' @keywords internal
#'
#' @rdname backupgateway_update_gateway_software_now
backupgateway_update_gateway_software_now <- function(GatewayArn) {
  # Describe the UpdateGatewaySoftwareNow API operation.
  operation <- new_operation(
    name = "UpdateGatewaySoftwareNow",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal the request parameters and prepare the (empty) response shape.
  req_body <- .backupgateway$update_gateway_software_now_input(GatewayArn = GatewayArn)
  resp_shape <- .backupgateway$update_gateway_software_now_output()
  # Build a service client from the active configuration and dispatch.
  client <- .backupgateway$service(get_config())
  send_request(new_request(client, operation, req_body, resp_shape))
}
.backupgateway$operations$update_gateway_software_now <- backupgateway_update_gateway_software_now
#' Updates a hypervisor metadata, including its host, username, and
#' password
#'
#' @description
#' Updates a hypervisor metadata, including its host, username, and password. Specify which hypervisor to update using the Amazon Resource Name (ARN) of the hypervisor in your request.
#'
#' See [https://www.paws-r-sdk.com/docs/backupgateway_update_hypervisor/](https://www.paws-r-sdk.com/docs/backupgateway_update_hypervisor/) for full documentation.
#'
#' @param Host The updated host of the hypervisor. This can be either an IP address or
#' a fully-qualified domain name (FQDN).
#' @param HypervisorArn [required] The Amazon Resource Name (ARN) of the hypervisor to update.
#' @param LogGroupArn The Amazon Resource Name (ARN) of the group of gateways within the
#' requested log.
#' @param Name The updated name for the hypervisor
#' @param Password The updated password for the hypervisor.
#' @param Username The updated username for the hypervisor.
#'
#' @keywords internal
#'
#' @rdname backupgateway_update_hypervisor
backupgateway_update_hypervisor <- function(Host = NULL, HypervisorArn, LogGroupArn = NULL, Name = NULL, Password = NULL, Username = NULL) {
  # Describe the UpdateHypervisor API operation.
  operation <- new_operation(
    name = "UpdateHypervisor",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal the request parameters and prepare the (empty) response shape.
  req_body <- .backupgateway$update_hypervisor_input(
    Host = Host,
    HypervisorArn = HypervisorArn,
    LogGroupArn = LogGroupArn,
    Name = Name,
    Password = Password,
    Username = Username
  )
  resp_shape <- .backupgateway$update_hypervisor_output()
  # Build a service client from the active configuration and dispatch.
  client <- .backupgateway$service(get_config())
  send_request(new_request(client, operation, req_body, resp_shape))
}
.backupgateway$operations$update_hypervisor <- backupgateway_update_hypervisor
|
764c66551bb88a2d3edd50f52781783e6428ce43
|
277b330d9b385fd1e6ff507acd83d233dfc05543
|
/Intro_CRSP_Kenneth_French.R
|
98764ce37ae6b5a9162eefef1ea3a1db87575a6d
|
[] |
no_license
|
nikaash123/Quantitative-Asset-Management
|
828cf521f92a079829c73e996856fc2752d5f20d
|
33f298f4ba40cfafa98b8fe556c812a7de832cb4
|
refs/heads/master
| 2020-03-31T19:52:58.845547
| 2018-10-11T04:10:32
| 2018-10-11T04:10:32
| 152,516,078
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,326
|
r
|
Intro_CRSP_Kenneth_French.R
|
### Problem Set 1 by Nikhil Gupta ###
#Question 1
# Dependencies: date parsing (lubridate), fast tables (data.table),
# reshaping (tidyr), time-series endpoints (xts) and higher moments (moments).
library(lubridate)
library(data.table)
library(tidyr)
library(xts)
library(moments)
# CRSP monthly stock file. The extra na.strings are CRSP's special codes for
# missing/invalid returns, so they are read in as NA rather than as text.
data_1 = read.csv("C:/Users/nikaa/Desktop/Lectures/Spring_2018/Quantitative_Asset_Management/Home_Works/HW1/Full_Data.csv", header=TRUE,
                  stringsAsFactors = FALSE,na.strings=c("",".","NA","C","A","S","T","P") )
# Fama-French monthly factor file (columns Mkt.RF and RF, in percent),
# used as the benchmark series in Questions 2 and 3.
FF = read.csv("C:/Users/nikaa/Desktop/Lectures/Spring_2018/Quantitative_Asset_Management/Home_Works/HW1/FF.csv", header=TRUE,
              stringsAsFactors = FALSE )
# Default values of Delisting Returns and Returns converted to NA above
# Builds the monthly market return series from the raw CRSP file:
# cleans the data, combines holding-period and delisting returns, restricts
# to NYSE/AMEX/NASDAQ common shares, then computes lagged total market value
# and equal-/value-weighted market returns per month.
#
# Args:
#   data_1: raw CRSP monthly file (PERMNO, date, SHRCD, EXCHCD, PRC, RET,
#           SHROUT, DLRET in columns 1..8).
# Returns: data.table with year, month, Stock_Lag_MV (lagged total market
#   value, in $MM), Stock_Ew_Return and Stock_Vw_Return.
# NOTE(review): this function also reads the global `FF` data frame for an
# in-function sanity check -- confirm FF is loaded before calling.
PS1_Q1 <- function(data_1){
  #head(data_1)
  #data_1[data_1$date<= "12/31/1985",]
  # Parse CRSP's mm/dd/yyyy date strings into Date objects.
  Date <- mdy(data_1$date)
  #RET <- as.factor(data_1$RET)
  #DLRET <- as.factor(data_1$DLRET)
  crsp_stocks <- as.data.table(cbind(data_1[,1],Date,data_1[,3:8]))
  #crsp_stocks <- as.data.table(data_2)
  colnames(crsp_stocks)[1] <- "PERMNO"
  #head(crsp_stocks)
  # Data Cleaning
  # Remove rows with missing both DLRET and RET
  final <- crsp_stocks[!(is.na(crsp_stocks$DLRET)) | !(is.na(crsp_stocks$RET)),]
  # Remove Rows with missing PRC
  final1 <- final[!(is.na(final$PRC)) ,]
  #
  # Market cap = |price| * shares outstanding (CRSP PRC is negative when it
  # is a bid/ask midpoint, hence abs()).
  final1$mktcap <- abs(final1$PRC*final1$SHROUT)
  # Combined return: use DLRET when RET is missing, and compound the two
  # when both a holding-period and a delisting return exist.
  final1$ret_final <- ifelse(is.na(final1$DLRET) , final1$RET, final1$DLRET)
  final1$ret_final <- ifelse((!(is.na(final1$DLRET)) & !(is.na(final1$RET))),((1+as.numeric(final1$RET))*(1+as.numeric(final1$DLRET)) - 1),final1$ret_final)
  #final3 <- final1[!(is.na(final1$DLRET)) & !(is.na(final1$RET)),]
  #head(final1)
  # Sort chronologically, then keep NYSE/AMEX/NASDAQ (EXCHCD 1-3) common
  # shares (SHRCD 10 or 11) only.
  a <- final1[order(as.Date(final1$Date, format="%m/%d/%Y")),]
  a1 <- a[((a$EXCHCD == 1) | (a$EXCHCD == 2) | (a$EXCHCD == 3)) ,]
  a2 <- a1[((a1$SHRCD == 10) | (a1$SHRCD == 11)) ,]
  #Date1 <- mdy(a2$Date)
  a3 <- cbind.data.frame(a2$PERMNO, a2$Date, a2$mktcap, a2$ret_final)
  colnames(a3)[1] <- "PERMNO"
  colnames(a3)[2] <- "Date"
  colnames(a3)[3] <- "Mkt_Cap"
  colnames(a3)[4] <- "Return"
  #a4 <- a3[is.na(a3$Date),]
  str(a3)
  #mdy(a3$Date)
  # xts/endpoints are used only to find the row index of each month-end.
  xtsdata1=xts(a3,order.by=as.Date((a3$Date),"%m/%d/%Y")) ## Took 3
  epm1=endpoints(xtsdata1,"months")
  #sum <-0
  mkt_cap<-0
  mkt_cap1<-0
  #for (i in (2:length(epm1)-1)){
  # Total market cap per month (sum over the rows of each monthly slice).
  n <- length(epm1)-1
  for (i in (1:n)){
    end = epm1[i+1]
    start = epm1[i]+1
    mkt_cap <- a3$Mkt_Cap[start:end]
    mkt_cap1[i] <- sum(mkt_cap)
  }
  # Equal Weighted Returns
  ewretd <-0
  for (i in (1:n)){
    end = epm1[i+1]
    start = epm1[i]+1
    ret <- (as.numeric(as.character(a3$Return[start:end])))
    ewretd[i] <- mean(ret)
  }
  # Calculating the final market returns Better Approach
  # Value-weighted return: for each month, weight each stock's return by its
  # market cap from the *previous* month (matched on PERMNO).
  uniq_dates <- unique(a3$Date)
  uniq_stocks <- unique(a3$PERMNO)
  vwretd <-0
  for (i in 2:length(uniq_dates)){
    #for (i in 2:100){
    #i = 3
    Lag_Market_Cap <-0
    months <- a3[which(a3$Date == uniq_dates[i]),]
    market_cap <- cbind(a3$PERMNO[which(a3$Date == uniq_dates[i-1])], a3$Mkt_Cap[which(a3$Date == uniq_dates[i-1])])
    colnames(market_cap) <- c("PERMNO", "MarketCap")
    merged_data <- merge(months, market_cap, by = "PERMNO")
    #merged_data1 <- merged_data[!duplicated(merged_data),]
    Lag_Market_Cap[i-1] <- sum(as.numeric(as.character(merged_data$MarketCap)))
    vwretd[i-1] <- sum(as.numeric(as.character(merged_data$Return)) * as.numeric(as.character(merged_data$MarketCap))) / Lag_Market_Cap[i-1]
    #equal_weighted_return[i-1] <- sum(return) / length(return)
  }
  # Sanity check against the global FF series; these expressions only print /
  # are discarded and do not affect the returned table.
  FF_Mkt <- FF$Mkt.RF + FF$RF
  vwretd_f <- vwretd[6:1103]*100
  err <- abs(FF_Mkt-vwretd_f)
  cbind(FF$Date,FF_Mkt,vwretd_f,err)
  cor(FF_Mkt,vwretd_f)
  index_test <- which.max(err)
  #n1 = length(uniq_dates)
  # Lag the monthly market value by one month and rescale to $MM.
  mkt_cap_lag<-0
  year = year(uniq_dates)
  month = month(uniq_dates)
  mkt_cap_lag[1] <- 0
  mkt_cap_lag[2:n] <- mkt_cap1[1:(n-1)]/1000000
  #mkt_cap1/1000000
  # NOTE(review): `vwretd` has one fewer element than `year`/`month`
  # (value-weighting starts at the second month), so cbind recycles with a
  # warning -- confirm the intended alignment of the first row.
  final_data <- as.data.table(cbind(year,month,mkt_cap_lag,ewretd,vwretd))
  colnames(final_data)[3] = "Stock_Lag_MV"
  colnames(final_data)[4] = "Stock_Ew_Return"
  colnames(final_data)[5] = "Stock_Vw_Return"
  return(final_data)
}
# Build the replicated market-return table used by Questions 2 and 3.
final_data <- PS1_Q1(data_1)
# Question 2
# Compare annualized moments of the replicated value-weighted market excess
# return against the Fama-French benchmark.
#
# Args:
#   final_data: output of PS1_Q1(); must contain `Stock_Vw_Return` (monthly
#     value-weighted market return, in decimal units).
#   FF: Fama-French monthly factor data with `Mkt.RF` and `RF` columns
#     (in percent).
# Returns: a 5 x 2 matrix (rows: annualized mean, std dev, Sharpe ratio,
#   skewness, excess kurtosis; columns: Replication, Actual).
#
# Fixes vs. the original: `n` is no longer hard-coded to 1104 (taken from the
# input instead); the unused arithmetic-annualization intermediates were
# removed; the duplicated per-series statistics are computed by one helper.
ps1_q2 <- function(final_data,FF){
  # The replicated CRSP series starts six months before the FF sample, so
  # drop the first six replicated observations before aligning.
  n <- nrow(final_data)  # was hard-coded to 1104
  est_mkt_excess_ret <- final_data$Stock_Vw_Return[7:n] - FF$RF[1:(n - 6)] / 100
  actual_mkt_excess_ret <- FF$Mkt.RF[1:(n - 6)] / 100

  # Annualized summary statistics for one monthly return series, using
  # geometric (compounded) annualization for mean and standard deviation.
  annual_stats <- function(r) {
    g <- 1 + mean(r)
    ann_mean <- g^12 - 1
    ann_sd <- sqrt((var(r) + g^2)^12 - g^24)
    c(ann_mean,
      ann_sd,
      ann_mean / ann_sd,  # annualized Sharpe ratio
      skewness(r),        # moments::skewness
      kurtosis(r) - 3)    # excess kurtosis (moments::kurtosis is raw)
  }

  final_output_2 <- cbind(
    Replication = annual_stats(est_mkt_excess_ret),
    Actual = annual_stats(actual_mkt_excess_ret)
  )
  rownames(final_output_2) <- c("Annualised Mean", "Annualised Std Dev",
                                "Annualised Sharpe Ratio", "Skewness",
                                "Excess Kurtosis")
  final_output_2
}
final_output_2 <- ps1_q2(final_data,FF)
# Question 3
# Report how closely the replicated value-weighted market return tracks the
# Fama-French market series: correlation and maximum absolute monthly gap,
# both formatted to 8 decimal places.
ps1_q3 <- function(final_data,FF){
  # Reconstruct the FF total market return (excess return + risk-free), in %.
  ff_market <- FF$Mkt.RF + FF$RF
  # Replicated value-weighted return, aligned to the FF sample and in %.
  replicated <- final_data$Stock_Vw_Return[7:1104] * 100
  correlation <- sprintf("%.8f", cor(ff_market, replicated))
  max_gap <- sprintf("%.8f", max(abs(ff_market - replicated)) / 100)
  out <- matrix(0, ncol = 1, nrow = 2)
  rownames(out) <- c("Correlation", "Maximum Absolute Difference")
  # Assigning formatted strings coerces the matrix to character (as before).
  out[1, 1] <- correlation
  out[2, 1] <- max_gap
  out
}
# Build the Question 3 summary (correlation and max deviation vs. FF).
final_output_3 <- ps1_q3(final_data,FF)
|
e6f5580844ebe500ef8c9105493cd6ea4f721a77
|
57222f96e553dd2802316928f2f2c7825ef05197
|
/032-client-data-and-query-string/server.r
|
53405a3bcabb5361d62300b865f49e71b99762f2
|
[
"MIT"
] |
permissive
|
rstudio/shiny-examples
|
6815bb4d8198e4b90765926a4865fdef1d1dc935
|
c7bf00db4a8a68e579e39ed07d516a33661a853e
|
refs/heads/main
| 2023-08-17T16:35:03.411795
| 2023-08-03T19:51:30
| 2023-08-03T19:51:30
| 13,722,949
| 2,046
| 4,576
|
NOASSERTION
| 2023-08-03T19:51:31
| 2013-10-20T17:05:23
|
JavaScript
|
UTF-8
|
R
| false
| false
| 1,106
|
r
|
server.r
|
# Render a list as one "name = value" line per element, joined by newlines.
# Named lists use `listname$name = value`; unnamed lists fall back to
# positional `listname[[i]] = value`.
list_to_string <- function(obj, listname) {
  has_names <- !is.null(names(obj))
  parts <- if (has_names) {
    paste0(listname, "$", names(obj), " = ", obj)
  } else {
    paste0(listname, "[[", seq_along(obj), "]] = ", obj)
  }
  paste(parts, collapse = "\n")
}
# Shiny server function: exposes two reactive text outputs built from the
# per-session `clientData` values and from the URL query string.
function(input, output, session) {
  # Print out clientData, which is a reactiveValues object.
  # This object is list-like, but it is not a list.
  output$summary <- renderText({
    # Find the names of all the keys in clientData
    cnames <- names(session$clientData)
    # Apply a function to all keys, to get corresponding values
    allvalues <- lapply(cnames, function(name) {
      item <- session$clientData[[name]]
      if (is.list(item)) {
        # Nested list values get one line per element via the helper above.
        list_to_string(item, name)
      } else {
        paste(name, item, sep=" = ")
      }
    })
    # One "key = value" entry per line.
    paste(allvalues, collapse = "\n")
  })

  # Parse the GET query string
  output$queryText <- renderText({
    query <- parseQueryString(session$clientData$url_search)

    # Return a string with key-value pairs
    paste(names(query), query, sep = "=", collapse=", ")
  })
}
|
3d648ab07d2b8592920de003973cefb2cf272cad
|
f21c8b03b3c4d882fd7709b9aaec5cb347bf7e0e
|
/man/som_lcp.Rd
|
2c73a69d541d4f9f61a8134b8f01fcfa050a46fc
|
[] |
no_license
|
mschmidt000/oposSOM.PT
|
a2e67e7da2cb70887d884c65020ba8a46787167a
|
e1c49dc2c849eb46dba4c7389adb21ea43508938
|
refs/heads/main
| 2022-12-27T13:30:21.171303
| 2020-10-05T11:15:01
| 2020-10-05T11:15:01
| 301,334,606
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 737
|
rd
|
som_lcp.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/som_lcp.R
\name{som_lcp}
\alias{som_lcp}
\title{SOM Least Cost Path (LCP) function}
\usage{
som_lcp(env, root = c(), tips = c(), lineages = c())
}
\arguments{
\item{env}{An environment produced by the oposSOM pipeline.}
\item{root}{A single group label defined as the developmental source.}
\item{tips}{A vector of group labels defined as the developmental sinks.}
\item{lineages}{A list of vectors of group labels sorted by pseudotime.}
}
\description{
This function calculates the difference portraits between the root and the tips
to generate gradient trajectories along topographic least cost paths.
}
\keyword{LCP}
\keyword{distance}
\keyword{topographic}
|
86cf13485981b25db1310f6d3ee7ff49a64b5c25
|
ae2434d333a9a945af6724c0d764fe26c22221af
|
/tests/testthat/test_standardCols.R
|
ab2bba6b629215cd195d71f21a8672c14f4ae716
|
[] |
permissive
|
neckl004/NecklenJacobTools
|
33949a300dc9165185db34e35b96de284c9add4f
|
f9940973c91f2a88c41988e58d1982590a43af7a
|
refs/heads/master
| 2021-01-25T10:00:16.981959
| 2018-03-06T22:01:32
| 2018-03-06T22:01:32
| 122,780,726
| 0
| 0
|
MIT
| 2018-02-24T21:25:51
| 2018-02-24T21:19:51
| null |
UTF-8
|
R
| false
| false
| 313
|
r
|
test_standardCols.R
|
context("Standardize the columns of a matrix")

test_that("standardCols standardizes columns and returns a matrix of identical size", {
  a <- matrix(rnorm(15), nrow = 3, ncol = 5)
  b <- standardCols(a)
  # The original final line was a bare logical expression, which testthat
  # silently ignores -- wrap the dimension check in an expectation so the
  # test can actually fail.
  expect_equal(dim(b), dim(a))
})
|
03e8c2a0f3fe190d9221e60b022ec7f7c37fe802
|
216b84b5193584bd70ec8ba00945939b461f375a
|
/Joel_MEAR_Issue.R
|
17788990fd0ad970fa6adfce99caefd721d2e35b
|
[] |
no_license
|
dc-larson/Multi-Level_Modeling_Water_Customers
|
501235c8dc45c7ded1f6277a817c4d0ed559647d
|
3f90ff15e406521ab81738d39aa19aa58bf6756e
|
refs/heads/master
| 2020-12-10T02:54:02.331992
| 2020-01-13T21:53:43
| 2020-01-13T21:53:43
| 233,486,239
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,846
|
r
|
Joel_MEAR_Issue.R
|
# Reproduces the multilevel-model prediction that was returning nonsense.
# `mydata` (survey data) and `mod10` (a fitted lme4 model) come from the
# saved workspace below.
load(file = "MEAR_issue.rda")

## Replicating my analysis so you can see where I get tripped up.##
## NAs must be removed explicitly to compute means; the lme4 model handled
## them during fitting, so the two samples can differ slightly.
wtp   <- mean(mydata$participation_willingness, na.rm = TRUE)
pc    <- mean(mydata$participation_cost, na.rm = TRUE)
ps    <- mean(mydata$participation_scenario, na.rm = TRUE)
ck    <- mean(mydata$climate_knowledge, na.rm = TRUE)
resp  <- mean(mydata$respondent, na.rm = TRUE)
cbel  <- mean(mydata$cclimate_bel_index, na.rm = TRUE)
ccimp <- mean(mydata$cclimate_change_impacts, na.rm = TRUE)
pnw   <- mean(mydata$cpnw_climate_worsening, na.rm = TRUE)
use   <- mean(mydata$cwatershed_use_freq, na.rm = TRUE)
pli   <- mean(mydata$cPl_Identity, na.rm = TRUE)
pld   <- mean(mydata$cPl_Dependence, na.rm = TRUE)
pla   <- mean(mydata$cPl_Attachment, na.rm = TRUE)
age   <- mean(mydata$cage, na.rm = TRUE)
edu   <- mean(mydata$ceducation, na.rm = TRUE)
polit <- mean(mydata$cconservative_attitude, na.rm = TRUE)
inc   <- mean(mydata$cincome, na.rm = TRUE)

# Fixed-effect estimates from the fitted model.
cf <- fixef(mod10)

# Linear predictor at the covariate means, with the interaction terms
# (participation_cost:participation_scenario) set to 0, i.e. the Private
# Forest Land baseline. If any of the means above is NA (an all-missing
# column), this product propagates NA -- check `print(cf)` for alignment.
p1A <- cbind(1, 1, 0, 0, ck, cbel, ccimp, pnw, use, pli, pld, pla, age, edu, polit, inc, 0, 0) %*% cf

print(cf)
## If you look at the fixed effects, I have mapped everything on correctly...I think?

# Odds and probability implied by the linear predictor (logit link).
oddsp1A <- exp(p1A)
# BUG FIX: the original `oddsp1A/1 + oddsp1A` parses as
# (oddsp1A / 1) + oddsp1A == 2 * oddsp1A because `/` binds tighter than `+`.
# The logistic transform needs the denominator parenthesized:
probp1A <- oddsp1A / (1 + oddsp1A)
|
6404ae1c3d8c1e0b07e9393270d4e97e6712a42e
|
16d1b8d98710bbdfe15113d1c869bae56f16eded
|
/Code/Twitter Descriptive/old/term_freq_alternative.R
|
e26cc2ed74e52bcee48663cbdd843382b5e1353c
|
[] |
no_license
|
lubrunn/DSP_sonstigerCode_Abgabe
|
4651cd7ac893971c035f7a6a509529880d6133b8
|
be8885287c5edbc37b22ddf5e80c590e1ba7f9bb
|
refs/heads/main
| 2023-03-22T07:39:29.852517
| 2021-03-19T14:00:32
| 2021-03-19T14:00:32
| 349,191,634
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,094
|
r
|
term_freq_alternative.R
|
string_error <- "current state affair chump"
error_row <- df %>% filter(str_detect(text, string_error))
b <- df %>% filter(date_variable == "2018-11-30")
threshold_single <- 830
####### old
b %>% group_by(date_variable) %>%
table(unlist(strsplit(tolower(text), " ")))
b %>%
separate_rows(text, sep = ' ') %>%
group_by(date_variable, language_variable, text) %>%
summarise(n = n())
b %>% filter(
likes_count >= likes_filter &
retweets_count >= retweets_filter &
#long_tweet == long
tweet_length >= length_filter)%>%
tidytext::unnest_tokens(word, text) %>%
group_by(date_variable, language_variable, word) %>%
summarise(n = n())
select<-function(){
term_frequency1 <- try(if (a <- NULL) print("aha"))
term_frequency2 <- 2
if(is(term_frequency1, "try-error")) term_frequency2 else term_frequency1
}
###### to alternatives for term_freq computation because first is very quick but throws
# random non reproduciable errors at times, other funciton is more consistent but takes
# twice as long
termfrequency <- select()
|
08aa04215e0b80531be689f619744d35d0c6570b
|
7513790d15902b75b9ff193e1c341d13362fdf70
|
/SuperFarmerDA/tests/testthat/testystrategia_DKA.R
|
6cd551d6a4ff06f3b1d197fc87b29b75d8c65e1f
|
[] |
no_license
|
ambroziakd/KursRprojekt2
|
2ae5f046a16b0c710beb3233ba66ebf41a2230a0
|
ca96ab8d9292bdd52c90d2deb16e89ed60225400
|
refs/heads/master
| 2020-06-21T18:50:26.422000
| 2017-03-17T18:02:39
| 2017-03-17T18:02:39
| 74,774,696
| 0
| 1
| null | 2017-03-17T18:02:39
| 2016-11-25T16:49:44
|
HTML
|
UTF-8
|
R
| false
| false
| 3,687
|
r
|
testystrategia_DKA.R
|
## File: strategia_DKA.R (unit tests for the strategia_DKA game strategy).
## Fixture data frames have one column per animal type (krolik = rabbit,
## owca = sheep, swinia = pig, krowa = cow, duzy_pies = big dog, kon = horse,
## maly_pies = small dog) and three rows: "w_zagrodzie" (in the pen),
## "krolikowartosc" (value expressed in rabbits) and "w_stadzie" (in the
## herd). NOTE(review): semantics inferred from the row names -- confirm
## against strategia_DKA.R.
test_that("strategia_DKA zwraca wektor",{
  expect_true(is.vector(strategia_DKA(c(7, 0, 0, 0, 0, 0, 0))))
})

test_that("strategy_DKA dokonuje wymiany",{
  # Starting from 7 rabbits, the strategy should trade into 1 sheep.
  expect_equal(unname(strategia_DKA(c(7, 0, 0, 0, 0, 0, 0)))[2], 1)
})

test_that("wymiana_na_tansze zwraca ramke danych",{
  zwierzeta <- data.frame(
    krolik=c(1, 1, 59),
    owca=c(0, 6, 24),
    swinia=c(0, 12, 20),
    krowa=c(1, 36, 11),
    duzy_pies=c(1, 36, 1),
    kon=c(2, 72, 4),
    maly_pies=c(0,6,4),
    row.names=c("w_zagrodzie", "krolikowartosc", "w_stadzie"))
  expect_true(is.data.frame(wymiana_na_tansze(zwierzeta)))
})

test_that("wymiana_na_tansze dokonuje wymiany",{
  zwierzeta <- data.frame(
    krolik=c(1, 1, 59),
    owca=c(0, 6, 24),
    swinia=c(0, 12, 20),
    krowa=c(1, 36, 11),
    duzy_pies=c(1, 36, 1),
    kon=c(2, 72, 4),
    maly_pies=c(0,6,4),
    row.names=c("w_zagrodzie", "krolikowartosc", "w_stadzie"))
  # Trading down should leave one sheep in the pen.
  expect_equal(wymiana_na_tansze(zwierzeta)[1,2],1)
})

test_that("oddaj_do_stada zwraca ramke danych",{
  zwierzeta <- data.frame(
    krolik=c(1, 1, 59),
    owca=c(0, 6, 24),
    swinia=c(0, 12, 20),
    krowa=c(1, 36, 11),
    duzy_pies=c(1, 36, 1),
    kon=c(2, 72, 4),
    maly_pies=c(0,6,4),
    row.names=c("w_zagrodzie", "krolikowartosc", "w_stadzie"))
  expect_true(is.data.frame(oddaj_do_stada(6, 2, zwierzeta)))
})

test_that("oddaj_do_stada oddaje do stada",{
  zwierzeta <- data.frame(
    krolik=c(1, 1, 59),
    owca=c(0, 6, 24),
    swinia=c(0, 12, 20),
    krowa=c(1, 36, 11),
    duzy_pies=c(1, 36, 1),
    kon=c(2, 72, 4),
    maly_pies=c(0,6,4),
    row.names=c("w_zagrodzie", "krolikowartosc", "w_stadzie"))
  # Giving 2 horses (column 6) back to the herd empties the pen slot.
  expect_equal(oddaj_do_stada(6, 2, zwierzeta)[1,6],0)
})

test_that("dodaj_do_zagrody zwraca ramke danych",{
  zwierzeta <- data.frame(
    krolik=c(1, 1, 59),
    owca=c(0, 6, 24),
    swinia=c(0, 12, 20),
    krowa=c(1, 36, 11),
    duzy_pies=c(1, 36, 1),
    kon=c(2, 72, 4),
    maly_pies=c(0,6,4),
    row.names=c("w_zagrodzie", "krolikowartosc", "w_stadzie"))
  expect_true(is.data.frame(dodaj_do_zagrody(6, 2, zwierzeta)))
})

test_that("dodaj_do_zagrody dodaje do zagrody",{
  zwierzeta <- data.frame(
    krolik=c(1, 1, 59),
    owca=c(0, 6, 24),
    swinia=c(0, 12, 20),
    krowa=c(1, 36, 11),
    duzy_pies=c(1, 36, 1),
    kon=c(2, 72, 4),
    maly_pies=c(0,6,4),
    row.names=c("w_zagrodzie", "krolikowartosc", "w_stadzie"))
  # Adding 2 horses to the existing 2 yields 4 in the pen.
  expect_equal(dodaj_do_zagrody(6, 2, zwierzeta)[1,6], 4)
})

test_that("czy_stac_nas zwraca TRUE",{
  zwierzeta <- data.frame(
    krolik=c(7, 1, 53),
    owca=c(1, 6, 23),
    swinia=c(0, 12, 20),
    krowa=c(0, 36, 12),
    duzy_pies=c(0, 36, 2),
    kon=c(0, 72, 6),
    maly_pies=c(0,6,4),
    row.names=c("w_zagrodzie", "krolikowartosc", "w_stadzie"))
  expect_true(czy_stac_nas(3, zwierzeta))
})

test_that("wymiana_na_drozsze zwraca ramke danych",{
  zwierzeta <- data.frame(
    krolik=c(7, 1, 53),
    owca=c(1, 6, 23),
    swinia=c(0, 12, 20),
    krowa=c(0, 36, 12),
    duzy_pies=c(0, 36, 2),
    kon=c(0, 72, 6),
    maly_pies=c(0,6,4),
    row.names=c("w_zagrodzie", "krolikowartosc", "w_stadzie"))
  expect_true(is.data.frame(wymiana_na_drozsze(zwierzeta)))
})

test_that("wymiana_na_drozsze dokonuje wymiany",{
  zwierzeta <- data.frame(
    krolik=c(7, 1, 53),
    owca=c(1, 6, 23),
    swinia=c(0, 12, 20),
    krowa=c(0, 36, 12),
    duzy_pies=c(0, 36, 2),
    kon=c(0, 72, 6),
    maly_pies=c(0,6,4),
    row.names=c("w_zagrodzie", "krolikowartosc", "w_stadzie"))
  # Trading up should produce one pig (column 3) in the pen.
  expect_equal(wymiana_na_drozsze(zwierzeta)[1,3],1)
})
|
8b29ed71f75b0b9bc17f925f027e5081d94975b3
|
a0e3e006caf18ff920ea1cfe91c9f13c253dfc09
|
/simulation/tests/testthat.R
|
cbe8c71dae591ea60d6a934b5dc04acefa2bb6d2
|
[
"MIT"
] |
permissive
|
linnykos/simulation
|
a9bb151de04e6f57067778e96e008be2c279da5b
|
f6598a14bc762e63558584327c306834be104538
|
refs/heads/master
| 2022-04-13T08:45:58.033611
| 2020-03-24T19:01:21
| 2020-03-24T19:01:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 64
|
r
|
testthat.R
|
# Test runner: executes the testthat suite bundled with the `simulation`
# package (standard tests/testthat.R entry point).
library(testthat)
library(simulation)

test_check("simulation")
|
d6f176b08c71b4febe7bef46f14965381f566a95
|
cd298aa036d3c974c18c22623ad1a58d120463df
|
/man/dhillon_emb.Rd
|
f7f618402b45a964f43961069a82712c4a47ac59
|
[
"MIT"
] |
permissive
|
YutongWangUMich/corgi
|
eaecf19836c27985f7e4129e6b2cb40f6b8bd08b
|
d7d6f2fab63065268ddc5f463d45e74d9f121a64
|
refs/heads/master
| 2020-04-18T21:20:00.199195
| 2019-10-24T20:29:02
| 2019-10-24T20:29:02
| 167,762,558
| 0
| 0
|
MIT
| 2019-04-08T06:30:03
| 2019-01-27T02:47:21
|
R
|
UTF-8
|
R
| false
| true
| 849
|
rd
|
dhillon_emb.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/run_corgi.R
\name{dhillon_emb}
\alias{dhillon_emb}
\title{The embedding as defined in Dhillon, Inderjit S. "Co-clustering documents and words using bipartite spectral graph partitioning." Proceedings of the seventh ACM SIGKDD international conference on Knowledge discovery and data mining. ACM, 2001.}
\usage{
dhillon_emb(X, Y, k)
}
\arguments{
\item{X}{gene-by-cell expression matrix for batch 1}
\item{Y}{gene-by-cell expression matrix for batch 2}
\item{k}{k-1 is the dimension of the latent embedding}
}
\description{
The embedding as defined in Dhillon, Inderjit S. "Co-clustering documents and words using bipartite spectral graph partitioning." Proceedings of the seventh ACM SIGKDD international conference on Knowledge discovery and data mining. ACM, 2001.
}
|
8ac1882ddfe3e2fcab327fa0c202c2cbff114981
|
711e35a8a54f04234725d9b97501b94279c0379f
|
/R/use_files.R
|
b0aebfa800a41833ebe0a2a468cb3616e5c8e2ad
|
[
"MIT"
] |
permissive
|
AartGoossens/golem
|
70f461df103bba8955e35455a4e779aa95ac1be7
|
cdc25bbfd7d1fd9ad55043d689b8afcb3410dd26
|
refs/heads/dev
| 2020-09-20T22:36:05.438064
| 2019-11-27T20:45:21
| 2019-11-27T20:45:21
| 224,607,705
| 0
| 0
|
NOASSERTION
| 2019-11-28T08:41:00
| 2019-11-28T08:40:59
| null |
UTF-8
|
R
| false
| false
| 2,768
|
r
|
use_files.R
|
#' Use Files
#'
#' These functions download files from external sources and install them inside the appropriate directory.
#'
#' @inheritParams add_module
#' @param url String representation of URL for the file to be downloaded
#' @param dir Path to the dir where the file will be created.
#' @export
#' @rdname use_files
#' @importFrom glue glue
#' @importFrom cli cat_bullet
use_external_js_file <- function(
  url,
  name,
  pkg = get_golem_wd(),
  dir = "inst/app/www",
  open = TRUE,
  dir_create = TRUE
){
  # Work from the package root; restore the caller's directory on exit.
  old <- setwd(normalizePath(pkg))
  on.exit(setwd(old))

  new_file <- glue::glue("{name}.js")

  dir_created <- create_dir_if_needed(
    dir,
    dir_create
  )

  if (!dir_created){
    cat_red_bullet(
      "File not added (needs a valid directory)"
    )
    return(invisible(FALSE))
  }

  dir <- normalizePath(dir)

  where <- file.path(
    dir, new_file
  )

  # Abort (quietly) if the target path does not pass the existence check.
  if ( !check_file_exist(where) ) {
    return(invisible(FALSE))
  }

  # Only .js sources are accepted by this helper.
  if ( tools::file_ext(url) != "js") {
    cat_red_bullet(
      "File not added (URL must end with .js extension)"
    )
    return(invisible(FALSE))
  }

  utils::download.file(url, where)

  cat_green_tick(glue::glue("File created at {where}"))
  cat_red_bullet(
    glue::glue(
      'To link to this file, go to the `golem_add_external_resources()` function in `app_ui.R` and add `tags$script(src="www/{name}.js")`'
    )
  )
  # FIX: use scalar `&&` (short-circuit) instead of elementwise `&` --
  # this is a length-one `if` condition.
  if (rstudioapi::isAvailable() && open){
    rstudioapi::navigateToFile(where)
  } else {
    cat_red_bullet(glue::glue("Go to {where}"))
  }
}
#' @export
#' @rdname use_files
use_external_css_file <- function(
url,
name,
pkg = get_golem_wd(),
dir = "inst/app/www",
open = TRUE,
dir_create = TRUE
){
old <- setwd(normalizePath(pkg))
on.exit(setwd(old))
new_file <- glue::glue("{name}.css")
dir_created <- create_dir_if_needed(
dir,
dir_create
)
if (!dir_created){
cat_red_bullet(
"File not added (needs a valid directory)"
)
return(invisible(FALSE))
}
dir <- normalizePath(dir)
where <- file.path(
dir, new_file
)
if ( !check_file_exist(where) ) {
return(invisible(FALSE))
}
if ( tools::file_ext(url) != "css") {
cat_red_bullet(
"File not added (URL must end with .css extension)"
)
return(invisible(FALSE))
}
utils::download.file(url, where)
cat_green_tick(glue::glue("File created at {where}"))
cat_red_bullet(
glue::glue(
'To link to this file, go to the `golem_add_external_resources()` function in `app_ui.R` and add `tags$script(src="www/{name}.css")`'
)
)
if (rstudioapi::isAvailable() & open){
rstudioapi::navigateToFile(where)
} else {
cat_red_bullet(glue::glue("Go to {where}"))
}
}
|
8404f289e8e9d1f32234acd0e41aebe847777a80
|
bb9504bfabd84b5c2b662095b33188cf35264343
|
/man/external_crossval.Rd
|
02e21c7cdde230a82f97681a6f2e4666dc511a5c
|
[] |
no_license
|
bbuchsbaum/rMVPA
|
5f34425859cc6f0a69f223992b43e12a2e9c7f11
|
bd178d4967a70c8766a606810f5de6f3e60b9c22
|
refs/heads/master
| 2023-05-25T16:08:02.323323
| 2023-05-01T01:43:15
| 2023-05-01T01:43:15
| 18,340,070
| 14
| 11
| null | null | null | null |
UTF-8
|
R
| false
| true
| 3,835
|
rd
|
external_crossval.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mvpa_iterate.R
\name{external_crossval}
\alias{external_crossval}
\title{#' external_crossval
#' @keywords internal
#' @importFrom stats predict
#' external_crossval
#' @keywords internal
#' @importFrom stats predict
external_crossval <- function(roi, mspec, id, compute_performance=TRUE, return_fit=FALSE, permute=FALSE) {
# Prepare the training data
xtrain <- tibble::as_tibble(neuroim2::values(roi$train_roi), .name_repair=.name_repair)
# Permute the training labels if required
ytrain <- if (permute) {
sample(y_train(mspec))
} else {
y_train(mspec)
}
# Get the testing labels
ytest <- y_test(mspec)
# Get the ROI indices
ind <- neuroim2::indices(roi$train_roi)
# Train the model and handle any errors
result <- try(train_model(mspec, xtrain, ytrain, indices=ind,
param=mspec$tune_grid,
tune_reps=mspec$tune_reps))
if (inherits(result, "try-error")) {
# Log a warning if there's an error during model training
flog.warn("error fitting model %s : %s", id, attr(result, "condition")$message)
# Store error messages and return a tibble with the error information
emessage <- if (is.null(attr(result, "condition")$message)) "" else attr(result, "condition")$message
tibble::tibble(class=list(NULL), probs=list(NULL), y_true=list(ytest),
fit=list(NULL), error=TRUE, error_message=emessage)
} else {
# Make predictions using the trained model
pred <- predict(result, tibble::as_tibble(neuroim2::values(roi$test_roi), .name_repair=.name_repair), NULL)
# Convert predictions to a list
plist <- lapply(pred, list)
plist$y_true <- list(ytest)
plist$test_ind <- list(as.integer(seq_along(ytest)))
# Create a tibble with the predictions
ret <- tibble::as_tibble(plist, .name_repair = .name_repair)
# Wrap the results and return the fitted model if required
cres <- if (return_fit) {
wrap_result(ret, mspec$design, result$fit)
} else {
wrap_result(ret, mspec$design)
}
# Compute performance and return a tibble with the results and any warnings
if (compute_performance) {
tibble::tibble(result=list(cres), indices=list(ind),
performance=list(compute_performance(mspec, cres)), id=id,
error=FALSE, error_message="~",
warning=!is.null(result$warning),
warning_message=if (is.null(result$warning)) "~" else result$warning)
} else {
tibble::tibble(result=list(cres), indices=list(ind), performance=list(NULL), id=id,
error=FALSE, error_message="~",
warning=!is.null(result$warning),
warning_message=if (is.null(result$warning)) "~" else result$warning)
}
}
}}
\usage{
external_crossval(
roi,
mspec,
id,
compute_performance = TRUE,
return_fit = FALSE,
permute = FALSE
)
}
\arguments{
\item{roi}{A list containing train_roi and test_roi elements.}
\item{mspec}{A model specification object.}
\item{id}{A unique identifier for the model.}
\item{compute_performance}{Logical, whether to compute performance metrics (default: TRUE).}
\item{return_fit}{Logical, whether to return the fitted model (default: FALSE).}
\item{permute}{Logical, whether to permute the training labels (default: FALSE).}
}
\value{
A tibble with performance metrics, fitted model (optional), and any warnings or errors.
}
\description{
External Cross-Validation
}
\details{
This function performs external cross-validation on the provided ROI and model specification.
It returns a tibble with performance metrics, fitted model (optional), and any warnings or errors.
}
|
17b57266437f567576769b5549a3983b4a354d00
|
4201e9b754760dc35fc0aeef9df5a8b9d801c47f
|
/bin/R-3.5.1/tests/Pkgs/exNSS4/R/nss4.R
|
b63081b7385918bff1e864944c8e577d340d8adc
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"GPL-2.0-only"
] |
permissive
|
lifebit-ai/exomedepth
|
cbe59cb7fcf2f9183d187f8d466c6620fb1a0c2e
|
5a775ae5e2a247aeadc5208a34e8717c7855d080
|
refs/heads/master
| 2020-03-27T12:55:56.400581
| 2018-10-11T10:00:07
| 2018-10-11T10:00:07
| 146,578,924
| 0
| 0
|
MIT
| 2018-08-29T09:43:52
| 2018-08-29T09:43:51
| null |
UTF-8
|
R
| false
| false
| 1,824
|
r
|
nss4.R
|
setClass("pubClass", representation("numeric", id = "integer"))
setClass("privCl", representation(x = "numeric", id = "integer"))
.showMe <- function(object)
cat("show()ing object of class ", class(object),
" and slots named\n\t",
paste(slotNames(object), collapse=", "), "\n")
setMethod("show", "pubClass", .showMe)
setMethod("show", "privCl", .showMe)
setMethod("plot", "pubClass", function(x, ...) plot(as(x, "numeric"), ...))
setMethod("plot", "privCl", function(x, ...) plot(x@x, ...))
## this is exported:
assertError <- function(expr)
stopifnot(inherits(try(expr, silent = TRUE), "try-error"))
## this one is not:
assertWarning <- function(expr)
stopifnot(inherits(tryCatch(expr, warning = function(w)w), "warning"))
if(isGeneric("colSums")) {
stop("'colSums' is already generic -- need new example in test ...")
} else {
setGeneric("colSums")
stopifnot(isGeneric("colSums"))
}
assertError(setGeneric("pubGenf"))# error: no skeleton
setGeneric("pubGenf", function(x,y) standardGeneric("pubGenf"))
## a private generic {not often making sense}:
setGeneric("myGenf", function(x,y){ standardGeneric("myGenf") })
setMethod("myGenf", "pubClass", function(x, y) 2*x)
## "(x, ...)" not ok, as generic has no '...':
assertError(setMethod("pubGenf", "pubClass", function(x, ...) { 10*x } ))
## and this is ok
setMethod("pubGenf", c(x="pubClass"), function(x, y) { 10*x } )
### "Same" class as in Matrix (but different 'Extends'!) {as in Rmpfr}
## "atomic vectors" (-> ?is.atomic ) -- exactly as in "Matrix":
## ---------------
setClassUnion("atomicVector", ## "double" is not needed, and not liked by some
members = c("logical", "integer", "numeric",
"complex", "raw", "character"))
setClassUnion("array_or_vector",
members = c("array", "matrix", "atomicVector"))
|
9ccc486be164166902a45493e2b9b80dcd0eac0e
|
e3cad415435dd7f7f7f1be653272c4655a24f219
|
/tests/testthat.R
|
268852200634f44d64423fde230d7839800a98e4
|
[
"MIT"
] |
permissive
|
katerine-dev/queridodiario
|
f55ea5a7db817f402f8d79c468818c7517bf2e4f
|
34b40bd2b71030e7ccc19b3fbe338ec5602aead4
|
refs/heads/master
| 2023-07-20T11:10:11.585563
| 2021-08-24T13:03:43
| 2021-08-24T13:03:43
| 346,433,278
| 15
| 1
|
NOASSERTION
| 2021-08-24T13:03:44
| 2021-03-10T17:12:20
|
R
|
UTF-8
|
R
| false
| false
| 80
|
r
|
testthat.R
|
library(testthat)
library(queridodiario)
testthat::test_check("queridodiario")
|
49e24528ba7a29a921a05c3468d6ef6a66655668
|
2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0
|
/fuzzedpackages/textreg/man/tm_gregexpr.Rd
|
eb8bd8c33efb85fbd17481ea93ae93046c93425b
|
[] |
no_license
|
akhikolla/testpackages
|
62ccaeed866e2194652b65e7360987b3b20df7e7
|
01259c3543febc89955ea5b79f3a08d3afe57e95
|
refs/heads/master
| 2023-02-18T03:50:28.288006
| 2021-01-18T13:23:32
| 2021-01-18T13:23:32
| 329,981,898
| 7
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 923
|
rd
|
tm_gregexpr.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/text_searching.R
\name{tm_gregexpr}
\alias{tm_gregexpr}
\title{Call gregexpr on the content of a tm Corpus.}
\usage{
tm_gregexpr(pattern, corpus, ignore.case = FALSE, perl = FALSE,
fixed = FALSE, useBytes = FALSE)
}
\arguments{
\item{pattern}{See gregexpr}
\item{corpus}{Either a character vector or tm Corpus object.}
\item{ignore.case}{See gregexpr}
\item{perl}{See gregexpr}
\item{fixed}{See gregexpr}
\item{useBytes}{See gregexpr}
}
\value{
This method gives results exactly as if \code{\link{gregexpr}} were called on the Corpus
represented as a list of strings.
See gregexpr.
}
\description{
Pull out content of a tm corpus and call gregexpr on that content represented
as a list of character strings.
}
\details{
If 'corpus' is already a character vector, it just calls
gregexpr with no fuss (or warning).
}
\seealso{
gregexpr
}
|
6aad7b3f3d992844295cf7ab213f6cdc7d56e863
|
986e46d230dab8ed82d560dc533193fee32e993e
|
/scripts/circuit.r
|
392b22b4d66d72cfeb55562dbd1659a961626c55
|
[
"MIT"
] |
permissive
|
XieConnect/SecureMA
|
adc58d66627f1cc801243946fdf62811fba4b616
|
3ebfe07bae68446707cb19839532a8a6069eef08
|
refs/heads/master
| 2020-05-26T18:19:42.517279
| 2014-05-29T16:00:25
| 2014-05-29T16:15:56
| 16,626,910
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 977
|
r
|
circuit.r
|
#!/usr/bin/Rscript
# simulate the binary circuit for estimating n and epsilon
args <- commandArgs(TRUE)
# Input two shares for X
x1 <- as.integer(args[1])
x2 <- as.integer(args[2])
# to customize inputs
maxN <- 80
bitLength <- 80
# input randomization parameters
randa <- 0
randb <- 0
## init
est <- 1
n <- 0
x <- x1 + x2
print(paste("Input x: ", x))
# Estimate n and est
for (i in 1:maxN) {
if (est < x) {
est = 2*est
n = n+1
}
}
print(paste("[Rough estimate] est: ", est))
print(paste("[Rough estimate] n: ", n))
## scale epsilon
jEnd <- maxN - n
est <- x - est
for (i in 1:maxN) {
if (i <= jEnd) {
est <- 2 * est
}
}
print(paste("Scaled epsilon: ", est))
print("-- Server: ---")
print(paste("est - randa: ", est - randa))
print(paste(" or (est + fieldSize): ", est - randa + 2^bitLength))
print(paste("n - randb: ", n - randb))
print("")
print("-- Client: ---")
print(paste("- randa: ", randa))
print(paste("- randb: ", randb))
|
99c1a355fbcdd623beeaf8b9c467cea8683984cf
|
f044402735a52fa040c5cbc76737c7950406f8b2
|
/BrCa_Age_Associated_TMA/Packages/biostatUtil/R/utils.R
|
52e7f3c0b29e7b2e329388d3e16e694bd94bfcd3
|
[] |
no_license
|
BCCRCMO/BrCa_AgeAssociations
|
5cf34f3b2370c0d5381c34f8e0d2463354c4af5d
|
48a11c828a38a871f751c996b76b77bc33d5a3c3
|
refs/heads/master
| 2023-03-17T14:49:56.817589
| 2020-03-19T02:18:21
| 2020-03-19T02:18:21
| 247,175,174
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,657
|
r
|
utils.R
|
# magrittr placeholder
globalVariables(".")
#' Collapse unique levels of variable into one string
#' @noRd
collapse_var <- function(x, collapse = " || ") {
return(paste(unique(x), collapse = collapse))
}
#' Print confidence interval wrapper
#' @noRd
printCI <- function(z) {
paste0(z[1], " (", z[2], " - ", z[3], ")")
}
#' Count number of missing elements
#' @noRd
n_missing <- function(x, na.rm = FALSE) {
return(sum(is.na(x), na.rm = na.rm))
}
#' Missing Value Formatting
#'
#' Takes a numeric vector and replaces all missing codes with NA and returns a
#' factor if the variable is categorical or a numeric variable if it's numeric.
#'
#' @param y a vector.
#' @param type whether the variable is `"cat"` (categorical) or
#' `"cont"` (continuous). Defaults to `"cat"`.
#' @param codes vector of missing codes to replace with `NA`
#' @return A categorical or numerical vector with all missing formatted as
#' `NA`.
#' @author Aline Talhouk, Derek Chiu
#' @export
#'
#' @examples
#' y <- c(1:10, "Unk", 12)
#' formatNA(y)
formatNA <- function(y, type = c("cat", "cont"), codes = c("", "Unk", "N/A")) {
y[y %in% c(codes, NA)] <- NA
res <- switch(match.arg(type), cat = factor(y), cont = as.numeric(y))
return(res)
}
#' Generate a legend
#'
#' Given a ggplot object, generates a legend
#'
#' @param a.gplot ggplot object
#' @return ggplot object with legend
#' @author Aline Talhouk
#' @export
g_legend <- function(a.gplot) {
tmp <- ggplot_gtable(ggplot_build(a.gplot))
leg <- which(purrr::map_chr(tmp$grobs, ~ .x$name) == "guide-box")
legend <- tmp$grobs[[leg]]
return(legend)
}
#' Get the p-value
#' @param x an object from [survival::survdiff()]
#' @return the Chi-squared p-value
#' @references Christos Hatzis
#' (https://stat.ethz.ch/pipermail/r-help/2007-April/130676.html)
#' @export
getPval <- function(x) {
return(stats::pchisq(x$chisq, length(x$n) - 1, lower.tail = FALSE))
}
#' Standard error of the mean
#'
#' @param x input vector
#' @param missing.value values that are missing
#' @param return.missing.value the value to return where there are missing values
#' @return The standard error of the mean of `x`
#' @author Samuel Leung
#' @references http://en.wikipedia.org/wiki/Standard_error
#' @export
sem <- function(x, missing.value = NA, return.missing.value = NA) {
x <- x[!is.na(x)]
if (!is.na(missing.value))
x <- x[!x %in% missing.value]
return(ifelse(length(x) == 0, return.missing.value,
sqrt(stats::var(x) / length(x))))
}
## CONSTANTS ##
# Dates
MM.DD.YYYY <- "%m/%d/%Y"
DD.MM.YYYY <- "%d/%m/%Y"
DD_MMM_YY <- "%d-%b-%y"
YYYY_MM_DD <- "%Y-%m-%d"
YYYYMMDD <- "%Y%m%d"
DDMMYYYY <- "%d%m%Y"
MMDDYYYY <- "%m%d%Y"
DATE.ORIGIN <- as.Date("1970-01-01")
NUM.DAYS.IN.YEAR <- 365.241 #365.25
NUM.DAYS.IN.MONTH <- 30.5
# Styles
COL.TH.STYLE <- "border-bottom: 1px solid grey; border-top: 4px double grey; text-align: center; padding-right:10px; padding-right:10px;"
ROW.TH.STYLE <- "text-align: center; padding-right:10px; padding-right:10px;"
TABLE.CAPTION.STYLE <- "display: table-caption; text-align: left;"
ROW.TD.STYLE.FOR.MULTI.COX <- "border-bottom: 1px solid grey; text-align: center; padding-right:10px; padding-right:10px;"
ROW.TD.STYLE.FOR.MULTI.COX.ALIGN.TOP <- "border-bottom: 1px solid grey; text-align: center; vertical-align: text-top; padding-right:10px; padding-right:10px;"
# Values
VALUE.CODING.INIT.TREATMENT.NO <- "No treatment"
VALUE.CODING.INIT.TREATMENT.CHEMO.ONLY <- "Chemo only"
VALUE.CODING.INIT.TREATMENT.RT.ONLY <- "Radiation only"
VALUE.CODING.INIT.TREATMENT.VAG.BRACHY.ONLY <- "Vag Brachy only"
VALUE.CODING.INIT.TREATMENT.BOTH <- "Both"
# Events
OS.EVENT <- "os.event"
OS.CENSOR <- "os.censor"
DSS.EVENT <- "dss.event"
DSS.CENSOR <- "dss.censor"
RFS.EVENT <- "rfs.event"
RFS.CENSOR <- "rfs.censor"
# Missing codes
# missing value code for values that are explicitily indicated as missing from
# data source e.g. "X" in grade
MISSING.EXPLICIT <- "N/A"
# missing because values was not found (e.g. in data files) but the value must
# exist somewhere.
MISSING.UNK <- "Unk"
# data point not mentioned in data file.
MISSING...NOT.FOUND.IN.DATA.FILE <- ""
# missing value code for values that are explicitily indicated as missing from
# data source e.g. "X" in grade
MISSING.BIOMARKER.EXPLICIT <- MISSING.UNK
# data point not mentioned in data file.
MISSING.BIOMARKER...NOT.FOUND.IN.DATA.FILE <- ""
# combined missing codes
ALL.MISSING.CODES <- unique(c(
MISSING.EXPLICIT,
MISSING...NOT.FOUND.IN.DATA.FILE,
MISSING.UNK,
MISSING.BIOMARKER.EXPLICIT,
MISSING.BIOMARKER...NOT.FOUND.IN.DATA.FILE
))
# Labels
FIRTH.THRESHOLD <- 0.8 # percent of censor cases to use Firth in Cox model
FIRTH.CAPTION <- "<sup>F</sup>" # text to indicate values are Firth corrected
BCSS.TITLE <- "Breast cancer specific survival"
BCSS.XLAB <- "Total follow-up (years)"
BCSS.YLAB <- "Cumulative breast cancer specific survival (BCSS)"
DSS.TITLE <- "Disease specific survival (DSS)"
DSS.XLAB <- BCSS.XLAB
DSS.YLAB <- DSS.TITLE
OS.TITLE <- "Overall survival"
OS.XLAB <- DSS.XLAB
OS.YLAB <- OS.TITLE
RFS.TITLE <- "Any relapse-free survival"
RFS.XLAB <- paste(RFS.TITLE, "time")
RFS.YLAB <- RFS.TITLE
DRFS.TITLE <- "Distant relapse-free survival"
DRFS.XLAB <- paste(DRFS.TITLE, "time")
DRFS.YLAB <- DRFS.TITLE
LRFS.TITLE <- "Rocal relapse-free survival"
LRFS.XLAB <- paste(LRFS.TITLE, "time")
LRFS.YLAB <- LRFS.TITLE
RRFS.TITLE <- "regional relapse-free survival"
RRFS.XLAB <- paste(RRFS.TITLE, "time")
RRFS.YLAB <- RRFS.TITLE
LRRFS.TITLE <- "Locoregional relapse-free survival"
LRRFS.XLAB <- paste(LRRFS.TITLE, "time")
LRRFS.YLAB <- LRRFS.TITLE
|
843698a79ff535cd732d9a0d14fef301b6d04216
|
20964628865adf294fa4dace057d8f8657bada4a
|
/R/present.R
|
3120d2d86d248c58c6193604cf72d6466279f74f
|
[] |
no_license
|
asrenninger/rgs
|
24dfadfb5880b59f5de0778255ecebca156446b5
|
8df2dc1a52190d3c0da000db1fc6d4bd87b50288
|
refs/heads/main
| 2023-07-31T17:01:30.289116
| 2021-09-07T23:38:20
| 2021-09-07T23:38:20
| 398,044,635
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,087
|
r
|
present.R
|
##################################
### HOUSE HUNTERS
# 2 processing
##################################
## packages
library(tidyverse)
library(gt)
library(tidytext)
library(tm)
library(widyr)
library(sf)
library(glue)
## palette
pal <- scico::scico(n = 9, palette = 'hawaii')
## table 1
tibble(activity = c("sex", "socializing", "relaxing", "praying", "eating", "exercising", "housework", "working", "commuting"),
happiness = c(4.7, 4.0, 3.9, 3.8, 3.8, 3.8, 3.0, 2.7, 2.6),
hours = c(0.2, 2.3, 2.2, 0.4, 0.2, 0.2, 1.1, 6.9, 1.6)) %>%
gt() %>%
data_color(
columns = vars(happiness),
colors = scales::col_numeric(
palette = scico::scico(n = 5, palette = 'hawaii'),
domain = c(2, 5))
) %>%
tab_header(
title = "Reported Happiness by Activity",
subtitle = "Relating Time and Satisfaction"
) %>%
tab_source_note(
source_note = md("Reference: Layard, R. (2011) 'Happiness'")
) %>%
gtsave("happiness.png", expand = 10)
## load in corpus
new_stops <- filter(stop_words, !str_detect(word, "want|need|like|love"))
read_csv("data/words_new.csv") %>%
filter(!str_detect(text, "\\[.*?\\]")) %>%
mutate(end = case_when(str_detect(lag(text), "\\.") ~ "start",
str_detect(text, "\\.") ~ "end",
line == "3" ~ "start")) %>%
group_by(uniqueID, end) %>%
mutate(start = 1:n()) %>%
mutate(start = case_when(!is.na(end) ~ start)) %>%
ungroup() %>%
fill(start) %>%
group_by(start, uniqueID) %>%
view()
read_csv("data/words_new.csv") %>%
filter(!str_detect(text, "\\[.*?\\]")) %>%
mutate(end = case_when(str_detect(lag(text), "\\.|\\?|\\!|\\]") ~ "start",
str_detect(text, "\\.|\\?|\\!|\\]") ~ "end",
line == "3" ~ "start")) %>%
mutate(end = case_when(str_detect(text, "\\[") ~ "start",
str_detect(text, "\\]") ~ "end",
str_detect(lead(text), "\\[") ~ "end",
TRUE ~ end)) %>%
mutate(x = paste(end, lag(end), sep = "_")) %>%
filter(x != "end_end" & x != "start_start") %>%
select(-x) %>%
group_by(uniqueID, end) %>%
mutate(start = 1:n()) %>%
mutate(start = case_when(!is.na(end) ~ start)) %>%
ungroup() %>%
fill(start) %>%
group_by(start, uniqueID) %>%
summarise(text = str_c(text, collapse = " ")) %>%
arrange(uniqueID, start) %>%
filter(!str_detect(text, "\\[.*?\\]")) %>%
rename(line = start) %>%
write_csv("data/captions_new.csv")
videos <-
bind_rows(read_csv("data/videos_old.csv"),
read_csv("data/videos_new.csv")) %>%
select(-date, -captions)
corpus_line <-
bind_rows(read_csv("data/captions_old.csv"),
read_csv("data/captions_new.csv")) %>%
mutate(text = str_to_lower(text))
corpus_tidy <-
bind_rows(read_csv("data/words_old.csv"),
read_csv("data/words_new.csv") %>%
filter(!str_detect(text, "\\[.*?\\]")) %>%
unnest_tokens(word, text) %>%
anti_join(stop_words)) %>%
left_join(videos)
counts <- videos %>% filter(type == "national") %>% group_by(location) %>% summarise(n = n())
rnaturalearth::ne_download(scale = 'large', type = 'populated_places', category = 'cultural', returnclass = 'sf')
tigris::states(cb = TRUE, class = 'sf') %>%
filter(STUSPS %in% unique(tigris::fips_codes$state)[1:51][-c(2, 12)]) %>%
transmute(location = STUSPS) %>%
left_join(counts) %>%
st_as_sf() %>%
st_transform(2163) %>%
select(n) %>%
plot()
## names data
names <-
read_csv("https://raw.githubusercontent.com/hadley/data-baby-names/master/baby-names.csv") %>%
mutate(name = tolower(name)) %>%
rename(word = name)
places <-
read_delim("https://raw.githubusercontent.com/grammakov/USA-cities-and-states/master/us_cities_states_counties.csv", delim = '|') %>%
janitor::clean_names() %>%
mutate(city = tolower(city)) %>%
filter(city != "beach")
library(ggraph)
library(igraph)
cities <-
places %>%
filter(state_short %in% videos$location) %>%
distinct(city, .keep_all = TRUE) %>%
drop_na(state_short) %>%
group_by(state_short) %>%
group_split() %>%
map(function(x){ x %>% pull(city) %>% glue_collapse("|") })
library(furrr)
plan(multisession, workers = 6)
tictoc::tic()
corpus_line %>%
mutate(text = future_map_chr(text,
function(x){
reduce(map_chr(cities, function(y){ str_remove_all(x, y) }), str_c)
})) %>%
filter(str_detect(text, "rapids"))
tictoc::tic()
corpus_line %>%
mutate(text = str_to_lower(text)) %>%
group_by(word) %>%
filter(n() > 9) %>%
pairwise_cor(word, line, sort = TRUE) %>%
filter(correlation > .15) %>%
graph_from_data_frame() %>%
ggraph(layout = "fr") +
geom_edge_link(aes(edge_alpha = correlation), show.legend = FALSE) +
geom_node_point(color = "lightblue", size = 5) +
geom_node_text(aes(label = name), repel = TRUE) +
theme_void()
library(igraph)
library(ggraph)
corpus_line %>%
mutate(text = str_to_lower(text)) %>%
mutate(word = str_replace_all(text, ",", "")) %>%
unnest_tokens(word, text) %>%
filter(!word %in% new_stops$word,
!word %in% c("yeah", "hunter", "house", "hmm", "feel", "like", "music", "", "suzanne", "scripps", "network", "gonna", "week", "ago")) %>%
mutate(word = SnowballC::wordStem(word)) %>%
mutate(word = removeNumbers(word)) %>%
filter(word != "") %>%
group_by(word) %>%
filter(n() > 100) %>%
pairwise_cor(word, line, sort = TRUE) %>%
slice(1:500) %>%
graph_from_data_frame() %>%
ggraph(layout = "kk") +
geom_edge_link(size = 1, colour = '#c7c7c7') +
geom_node_point(colour = pal[1], size = 3) +
geom_node_text(aes(label = name), vjust = 1.8, size = 3) +
theme_void() +
ggsave("viz/correlations.png", height = 6, width = 8, dpi = 300)
bigrams <-
corpus_line %>%
unnest_tokens(word, text, token = "ngrams", n = 2) %>%
mutate(word = str_replace_all(word, ",", "")) %>%
separate(word, c("word1", "word2"), sep = " ") %>%
filter(!word1 %in% new_stops$word,
!word2 %in% new_stops$word,
!word1 %in% c("yeah", "hunter", "house", "hmm", "feel", "like", "music", "", "suzanne"),
!word2 %in% c("yeah", "hunter", "house", "hmm", "feel", "like", "music", "", "suzanne")) %>%
mutate(word1 = SnowballC::wordStem(word1),
word2 = SnowballC::wordStem(word2)) %>%
mutate(word1 = removeNumbers(word1),
word2 = removeNumbers(word2)) %>%
filter(word1 != "",
word2 != "")
corpus_line %>%
unnest_tokens(word, text, token = "ngrams", n = 2) %>%
mutate(word = str_replace_all(word, ",", "")) %>%
separate(word, c("word1", "word2"), sep = " ") %>%
filter(!word1 %in% new_stops$word,
!word2 %in% new_stops$word,
!word1 %in% c("yeah", "hunter", "house", "hmm", "feel", "like", "music", "", "suzanne", "scripps", "network", "gonna", "week", "ago"),
!word2 %in% c("yeah", "hunter", "house", "hmm", "feel", "like", "music", "", "suzanne", "scripps", "network", "gonna", "week", "ago")) %>%
mutate(word1 = SnowballC::wordStem(word1),
word2 = SnowballC::wordStem(word2)) %>%
mutate(word1 = removeNumbers(word1),
word2 = removeNumbers(word2)) %>%
filter(word1 != "",
word2 != "") %>%
drop_na() %>%
count(word1, word2, sort = TRUE) %>%
filter(n > 50) %>%
ggplot(aes(x = reorder(word1,-n), y = reorder(word2, -n), fill = n)) +
geom_tile(alpha = 0.8, colour = "white") +
viridis::scale_fill_viridis(option = 'plasma') +
# scico::scale_fill_scico(palette = 'hawaii') +
coord_flip() +
theme_minimal() +
theme(legend.position = "right") +
theme(axis.text.x = element_text(angle = 45, vjust = 1, hjust = 1)) +
labs(x = "first word in pair",
y = "second word in pair") +
ggsave("viz/correlations.png", height = 6, width = 8, dpi = 300)
viridis::scale_fill_viridis(option = 'magma')
|
ed8eed52bf76241820cd4c63d296f52f0ddada13
|
31b12b7aeb512e246008b24cb25c3cae01284822
|
/scripts/rcourse_lesson1.R
|
447068e551375044ddb83bbf4902abf2950ca4b3
|
[] |
no_license
|
Efsilvaa/ModeloProjetoML
|
fc3fe5aae7715f06c42b24cccba0e62b8100df59
|
a51e9d35c22dbc8fe05f69b5a90d9ac1d3e29095
|
refs/heads/master
| 2020-03-07T06:33:32.822407
| 2018-04-02T22:58:39
| 2018-04-02T22:58:39
| 127,325,969
| 0
| 5
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,203
|
r
|
rcourse_lesson1.R
|
## LOAD PACKAGES ####
library(dplyr)
library(ggplot2)
## READ IN DATA AND ORGANIZE ####
### Read in data
data = read.table("data/rcourse_lesson1_data.txt", header=T, sep="\t")
# Look at dimension of data
dim(data)
# Look at first few rows of data
head(data)
# Look at final few rows of data
tail(data)
# Look at number of data points in each group
xtabs(~group, data)
# Subset out bilinguals
data_bl = data %>%
# Filter to only include bilinguals
filter(group == "bilingual")
# Look at make-up of data (dimension, first and final few rows)
dim(data_bl)
head(data_bl)
tail(data_bl)
# Look at number of data ponits by 1) group and 2) type for just bilinguals
xtabs(~group, data_bl)
xtabs(~type, data_bl)
## MAKE FIGURES ####
# By group
data.plot = ggplot(data, aes(x = group, y = rt)) +
# Make the figure a boxplot, fill says to what the color should correspond to,
# here it is the same as the x variable
geom_boxplot(aes(fill = group)) +
# Add a title
ggtitle("Reaction Times by Group") +
# Customize the x-axis label
xlab("Group") +
# Customize the y-axis label
ylab("Reaction times in ms") +
# Remove dark background
theme_classic() +
# These are extras to make the figure (in my opinion) prettier,
# look up each command to learn more
theme(text=element_text(size=18), title=element_text(size=18),
legend.position="none")
# Write figure to a pdf in the 'figures' folder
pdf("figures/data.pdf")
# Call plot
data.plot
# Close pdf call
dev.off()
# Within bilinguals by proficiency
data_bl.plot = ggplot(data_bl, aes(x = type, y = rt)) +
# Make the figure a boxplot, fill says to what the color should correspond to,
# here it is NOT the same as the x variable, this is how you get grouped boxplots
geom_boxplot(aes(fill = type)) +
# Add a title
ggtitle("Reaction Times by L2 Proficiency Level") +
# Customize the x-axis label
xlab("Proficiency in L2") +
# Customize the y-axis label
ylab("Reaction times in ms") +
# Remove dark background
theme_classic() +
# These are extras to make the figure (in my opinion) prettier,
# look up each command to learn more
theme(text=element_text(size=18), title=element_text(size=18),
legend.position="none")
# Write figure to a pdf in the 'figures' folder
pdf("figures/data_bl.pdf")
# Call plot
data_bl.plot
# Close pdf call
dev.off()
# Within bilinguals by proficiency with monolinguals in plot
data_blwml.plot = ggplot(data, aes(x = group, y = rt)) +
# Make the figure a boxplot, fill says to what the color should correspond to,
# here it is NOT the same as the x variable, this is how you get grouped boxplots
geom_boxplot(aes(fill = type)) +
# Add a title
ggtitle("Reaction Times by L2 Proficiency Level") +
# Customize the x-axis label
xlab("Proficiency in L2") +
# Customize the y-axis label
ylab("Reaction times in ms") +
# Remove dark background
theme_classic() +
# These are extras to make the figure (in my opinion) prettier,
# look up each command to learn more
theme(text=element_text(size=18), title=element_text(size=18),
legend.position="none")
# Write figure to a pdf in the 'figures' folder
pdf("figures/data_blwml.pdf")
# Call plot
data_blwml.plot
# Close pdf call
dev.off()
## RUN DESCRIPTIVE STATISTICS ####
# Summarise data
data_sum = data %>%
# Say what you want to summarise by, here it's 'group'
group_by(group) %>%
# Get mean, standard deviation, maximum, and minimum reaction times for each group
summarise(rt_mean = mean(rt),
rt_sd = sd(rt),
rt_max = max(rt),
rt_min = min(rt)) %>%
# Ungroup the data so future analyses can be done on the data frame as a whole,
# not by group
ungroup()
data_sum
# Summarise data for bilinguals
data_bl_sum = data_bl %>%
# Say what you want to summarise by, here it's type
group_by(type) %>%
# Get mean, standard deviation, maximum, and minimum reaction times for each type
summarise(rt_mean = mean(rt),
rt_sd = sd(rt),
rt_max = max(rt),
rt_min = min(rt)) %>%
# Ungroup the data so future analyses can be done on the data frame as a whole,
# not by type
ungroup()
data_bl_sum
|
c2e9ded66862f6c3fba0380d7c20281131f7c411
|
89c6b11ff16630bb9eaae1dc83034739f3cba846
|
/notes.R
|
094e297ca60db142ef8947f82be16af2452c0a8b
|
[] |
no_license
|
Data8021/LOLGameDataProcessing
|
34918ba7cd9ac5e587f537d43b20c3a116426681
|
ecc9fe87ad7867244b7d04d13acd65b8b486c531
|
refs/heads/master
| 2021-01-10T18:01:46.982509
| 2016-01-30T03:33:18
| 2016-01-30T03:33:18
| 50,451,089
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,853
|
r
|
notes.R
|
library(dplyr)
library(lubridate)
## Load functions
fromSeconds <- function(x){
if (!is.numeric(x)) stop("x must be numeric")
if (length(x)<=0)return(x)
unlist(
lapply(x,
function(i){
if (i >= 3600) {
y <- seconds_to_period(i)
sprintf('%02d:%02d:%02d', y@hour, minute(y), second(y))
} else {
y <- seconds_to_period(i)
sprintf('%02d:%02d', minute(y), second(y))
}
}
)
)
}
## Load game details
load("fullGameList.Rda")
##
fullGameList[[i]][["gameId"]]
fullGameList[[i]][["platformId"]]
fullGameList[[i]][["gameCreation"]]
as.POSIXct((fullGameList[[i]][["gameCreation"]]/1000), origin = "1970-01-01", tz = "UTC")
fullGameList[[i]][["gameDuration"]]
fromSeconds(fullGameList[[i]][["gameDuration"]])
fullGameList[[i]][["queueId"]] ## Unnecesary
fullGameList[[i]][["mapId"]] ## Not sure how to handle 1 vs 11
fullGameList[[i]][["seasonId"]] ## likely not Unnecesary
fullGameList[[i]][["gameVersion"]] ## Not sure how to handle yet
fullGameList[[i]][["gameMode"]] ## Unnecesary
fullGameList[[i]][["gameType"]] ## Unnecesary
names(fullGameList[[i]])
names(fullGameList[[i]][["teams"]])
names(fullGameList[[i]][["participants"]])
names(fullGameList[[i]][["participants"]][["stats"]])
names(fullGameList[[1]][["participants"]][["timeline"]])
timelineD <- fullGameList[[i]][["participants"]][["timeline"]]
fullGameList[[i]][["participants"]][["stats"]][["participantId"]]
allGameData[[i, "masteries"]] <- fullGameList[[i]][["participants"]][["masteries"]][[1]]
fullGameList[[i]][["participants"]][["runes"]][[1]]
yTest <- fullGameList[[i]][["participants"]][["timeline"]][["creepsPerMinDeltas"]][1, "10-20"]
masteriesRunesTest <- data.frame(numMasteries = as.numeric(),
numRunes = as.numeric())
for (i in 1:length(fullGameList)) {
masteriesRunesTemp <- data.frame(numMasteries = as.numeric(),
numRunes = as.numeric())
if ("masteries" %in% names(fullGameList[[i]][["participants"]])) {
for (j in 1:10) {
if (!is.null(fullGameList[[i]][["participants"]][["masteries"]][[j]])) {
masteriesRunesTemp[j, 1] <- nrow(fullGameList[[i]][["participants"]][["masteries"]][[j]])
}
if (!is.null(fullGameList[[i]][["participants"]][["runes"]][[j]])) {
masteriesRunesTemp[j, 2] <- nrow(fullGameList[[i]][["participants"]][["runes"]][[j]])
}
}
}
masteriesRunesTest <- rbind(masteriesRunesTest, masteriesRunesTemp)
}
creepMinTest <- vector()
for (i in 1:length(fullGameList)) {
print(names(fullGameList[[i]][["participants"]][["timeline"]][["creepsPerMinDeltas"]]))
}
timelineTest <- vector()
for (i in 1:length(fullGameList)) {
timelineTest[i] <- length(names(fullGameList[[i]][["participants"]][["timeline"]]))
}
lanesTest <- data.frame(player1Lane = as.character(),
player2Lane = as.character(),
player3Lane = as.character(),
player4Lane = as.character(),
player5Lane = as.character(),
stringsAsFactors = FALSE)
for (i in 1:length(fullGameList)) {
lanesTemp <- data.frame(player1Lane = as.character(),
player2Lane = as.character(),
player3Lane = as.character(),
player4Lane = as.character(),
player5Lane = as.character(),
stringsAsFactors = FALSE)
lanesTemp[1, "player1Lane"] <- fullGameList[[i]][["participants"]][["timeline"]][["lane"]][[1]]
lanesTemp[1, "player2Lane"] <- fullGameList[[i]][["participants"]][["timeline"]][["lane"]][[2]]
lanesTemp[1, "player3Lane"] <- fullGameList[[i]][["participants"]][["timeline"]][["lane"]][[3]]
lanesTemp[1, "player4Lane"] <- fullGameList[[i]][["participants"]][["timeline"]][["lane"]][[4]]
lanesTemp[1, "player5Lane"] <- fullGameList[[i]][["participants"]][["timeline"]][["lane"]][[5]]
lanesTemp[2, "player1Lane"] <- fullGameList[[i]][["participants"]][["timeline"]][["lane"]][[6]]
lanesTemp[2, "player2Lane"] <- fullGameList[[i]][["participants"]][["timeline"]][["lane"]][[7]]
lanesTemp[2, "player3Lane"] <- fullGameList[[i]][["participants"]][["timeline"]][["lane"]][[8]]
lanesTemp[2, "player4Lane"] <- fullGameList[[i]][["participants"]][["timeline"]][["lane"]][[9]]
lanesTemp[2, "player5Lane"] <- fullGameList[[i]][["participants"]][["timeline"]][["lane"]][[10]]
lanesTest <- rbind(lanesTest, lanesTemp)
}
save(lanesTest, file="data/lanesTest.Rda")
save(rolesLanesTest, file="data/rolesLanesTest.Rda")
# What do we do with lanes/roles?
# What are the best transformations.
|
3bc7cd058c5058f09055c723fa53d7c041ab9007
|
f9055f2e316129ec21665a10200fda9ad856f975
|
/R/data_selection.R
|
e9ffdb30ea35f385239ccc733fdc912fadc77bfd
|
[
"MIT"
] |
permissive
|
bnicenboim/eegUtils
|
879b4a8173acc29de17b1dc0ca7ec8453dbc2522
|
09eb5fcc983d24d058bfa4575d5e8a2537fcfe21
|
refs/heads/master
| 2020-03-11T03:16:30.714963
| 2018-05-03T18:53:24
| 2018-05-03T18:53:24
| 129,742,381
| 0
| 0
| null | 2018-04-16T12:47:20
| 2018-04-16T12:47:19
| null |
UTF-8
|
R
| false
| false
| 8,749
|
r
|
data_selection.R
|
#' Select timerange
#'
#' Generic function for selecting specific time ranges from a given dataset.
#' Input can be a dataframe, or an object of class \code{eeg_data} or
#' \code{eeg_epochs}. Note this finds the closest times to those specified, so
#' times out may not correspond exactly to requested times.
#'
#' @author Matt Craddock, \email{matt@mattcraddock.com}
#' @param data Data from which to select
#' @param ... Further arguments passed to or from other methods.
#'
#' @export
select_times <- function(data, ...) {
UseMethod("select_times", data)
}
#' @param time_lim A numeric vector of two values indicating the time range
#'   to be selected e.g. c(min, max). NULL (the default) selects all times.
#' @importFrom dplyr filter
#' @return Data frame with only data from within the specified range.
#' @export
#' @describeIn select_times Default select times function
select_times.default <- function(data, time_lim = NULL, ...) {
  # Guard clause: without a time column there is nothing to select on.
  if (!"time" %in% colnames(data)) {
    warning("No time column found.")
    return(data)
  }
  if (!is.null(time_lim)) {
    if (length(time_lim) == 2) {
      # Snap the requested limits to the closest sampled timepoints, so the
      # returned range always corresponds to actual data (see generic docs).
      time_lim[1] <- data$time[which.min(abs(data$time - time_lim[1]))]
      time_lim[2] <- data$time[which.min(abs(data$time - time_lim[2]))]
      data <- dplyr::filter(data, time >= time_lim[1] & time <= time_lim[2])
    } else {
      # Previously only a length-1 time_lim triggered this warning; inputs of
      # length > 2 were silently ignored. Warn for any malformed input.
      warning("Must enter two timepoints when selecting a time range;
            using whole range.")
    }
  }
  data
}
#' @param df_out Returns a data frame rather than an object of the same type that was passed in
#' @importFrom dplyr filter select
#' @export
#'
#' @describeIn select_times Select times from an eeg_data object
select_times.eeg_data <- function(data, time_lim = NULL, df_out = FALSE, ...) {
  # Flatten to a data frame and delegate the actual time selection to the
  # default method (which snaps time_lim to the nearest sampled timepoints).
  proc_data <- as.data.frame(data)
  proc_data <- select_times(proc_data, time_lim = time_lim)
  if (df_out) {
    return(proc_data)
  } else {
    # NOTE(review): events are filtered with the raw, user-supplied time_lim,
    # not the snapped limits used for the signals — confirm this is intended.
    # A NULL time_lim would also error here (time_lim[1] is NULL); the
    # df_out = TRUE path is the only safe one for NULL — verify upstream.
    data$events <- dplyr::filter(data$events, event_time >= time_lim[1],
                                 event_time <= time_lim[2])
    # Rebuild signals/timings from the filtered flat representation.
    data$signals <- dplyr::select(proc_data, -sample, -time)
    data$timings <- tibble::tibble(time = proc_data$time, sample = proc_data$sample)
    if (!is.null(data$reference$ref_data)) {
      # Keep the reference channel aligned with the retained samples.
      data$reference$ref_data <- data$reference$ref_data[data$timings$sample,]
    }
    return(data)
  }
}
#' @importFrom dplyr filter select
#' @export
#' @describeIn select_times Select times in \code{eeg_epoch} objects
select_times.eeg_epochs <- function(data, time_lim = NULL,
                                    df_out = FALSE, ...) {
  # Flatten to a data frame and delegate the time selection to the default
  # method (which snaps time_lim to the nearest sampled timepoints).
  proc_data <- as.data.frame(data)
  proc_data <- select_times(proc_data, time_lim = time_lim)
  if (df_out) {
    return(proc_data)
  } else {
    # NOTE(review): a NULL time_lim errors on time_lim[1] here; only the
    # df_out = TRUE path handles NULL — confirm callers never pass NULL.
    data$events <- dplyr::filter(data$events, time >= time_lim[1],
                                 time <= time_lim[2])
    # Rebuild signals/timings (epoch column retained in timings only).
    data$signals <- dplyr::select(proc_data, -sample, -time, -epoch)
    data$timings <- tibble::tibble(time = proc_data$time,
                                   sample = proc_data$sample,
                                   epoch = proc_data$epoch)
    if (!is.null(data$reference$ref_data)) {
      # NOTE(review): ref_data is indexed without a trailing comma here but
      # with one ([sample, ]) in the eeg_data method — verify whether
      # ref_data is a vector here or this is an inconsistency.
      data$reference$ref_data <- data$reference$ref_data[data$timings$sample]
    }
    return(data)
  }
}
#' @export
#' @describeIn select_times Select times in \code{eeg_evoked} objects
select_times.eeg_evoked <- function(data, time_lim = NULL,
                                    df_out = FALSE, ...) {
  # Flatten, select the requested window, and store the result back.
  flat <- as.data.frame(data)
  data$signals <- select_times(flat, time_lim = time_lim)
  if (df_out) {
    data$signals
  } else {
    data
  }
}
#' Select electrodes from a given dataset.
#'
#' This is a generic function for selection of electrodes from an EEG dataset.
#'
#' @author Matt Craddock, \email{matt@mattcraddock.com}
#'
#' @param data An EEG dataset.
#' @param ... Arguments used with related methods
#'
#' @export
#'
select_elecs <- function(data, ...) {
  # S3 generic: concrete behavior lives in the select_elecs.* methods.
  UseMethod("select_elecs", data)
}
#' @param electrode A character vector of electrode labels for selection or
#'   removal.
#' @param keep Defaults to TRUE. Set to false to *remove* the selected
#'   electrodes.
#'
#' @return Data frame with only data from the chosen electrodes
#'
#' @describeIn select_elecs Select electrodes from a generic data frame.
#' @export
select_elecs.default <- function(data, electrode = NULL, keep = TRUE, ...) {
  if ("electrode" %in% colnames(data)) {
    # Long format: one "electrode" column identifies each row's channel.
    if (all(electrode %in% data$electrode)) {
      if (keep) {
        data <- data[data$electrode %in% electrode, ]
      } else {
        data <- data[!data$electrode %in% electrode, ]
      }
    } else {
      # Collapse missing labels into a single message (paste over a vector
      # previously produced a multi-element warning message).
      warning(paste("Electrode(s) not found:",
                    paste(electrode[!electrode %in% data$electrode],
                          collapse = " "),
                    ". Returning all data."))
    }
  } else {
    # Wide format: electrodes are columns. drop = FALSE keeps a data frame
    # even when a single column is selected.
    if (all(electrode %in% colnames(data))) {
      if (keep) {
        data <- data[, colnames(data) %in% electrode, drop = FALSE]
      } else {
        data <- data[, !colnames(data) %in% electrode, drop = FALSE]
      }
    } else {
      # Previously missing electrodes in wide format were a silent no-op;
      # warn, consistent with the long-format branch.
      warning(paste("Electrode(s) not found:",
                    paste(electrode[!electrode %in% colnames(data)],
                          collapse = " "),
                    ". Returning all data."))
    }
  }
  return(data)
}
#' @param df_out Defaults to FALSE. Set to TRUE to return a dataframe rather
#'   than an \code{eeg_data} object.
#'
#' @return \code{eeg_data} object with selected electrodes removed/kept.
#'
#' @export
#' @describeIn select_elecs Select electrodes from a \code{eeg_data} object.
select_elecs.eeg_data <- function(data, electrode, keep = TRUE,
                                  df_out = FALSE, ...) {
  if (all(electrode %in% colnames(data$signals))) {
    # Electrodes are the columns of data$signals.
    if (keep) {
      data$signals <- data$signals[colnames(data$signals) %in% electrode]
    } else {
      data$signals <- data$signals[!colnames(data$signals) %in% electrode]
    }
  } else {
    # BUG FIX: the original printed the message with cat() and then called
    # warning() with no arguments, producing an empty warning and sending
    # the message to stdout instead of the condition system.
    warning(paste("Electrode(s) not found:",
                  paste(electrode[!electrode %in% colnames(data$signals)],
                        collapse = " "),
                  ". Returning all data."))
  }
  if (df_out) {
    return(as.data.frame(data))
  } else {
    return(data)
  }
}
#' Select epochs from eeg_data
#'
#' This is a generic function for selecting epochs from an epoched data set.
#'
#' @author Matt Craddock, \email{matt@mattcraddock.com}
#'
#' @param data \code{eeg_epochs} object from which to select epochs.
#' @param ... Parameters passed to specific methods
#' @export
select_epochs <- function(data, ...) {
  # S3 generic: only the eeg_epochs method does real work; the default and
  # eeg_data methods signal misuse.
  UseMethod("select_epochs", data)
}
#' @describeIn select_epochs Select from generic object
#' @export
select_epochs.default <- function(data, ...) {
  # Fallback for unsupported classes: warn rather than error.
  msg <- paste("select_epochs does not know how to handle object of class",
               class(data),
               "and can only be used on eeg_epochs objects.")
  warning(msg)
}
#' @describeIn select_epochs Select epochs from \code{eeg_data} objects.
#' @export
select_epochs.eeg_data <- function(data, ...) {
  # Continuous data cannot be selected by epoch; epoched eeg_data should
  # have dispatched to the eeg_epochs method instead.
  if (!data$continuous) {
    warning("oops, shouldn't end up here.")
  } else {
    stop("Data is not epoched.")
  }
}
#' @param epoch_events Select epochs containing any of the specified events. Can
#'   be numeric or character vector. Will override any epoch_no input
#' @param epoch_no Select epochs by epoch number.
#' @param keep Defaults to TRUE, meaning select the specified epochs. Set to
#'   FALSE to remove specified epochs.
#' @param df_out Output a data.frame instead of an eeg_data object.
#' @describeIn select_epochs Selection of epochs from \code{eeg_epochs} objects.
#' @export
select_epochs.eeg_epochs <- function(data, epoch_events = NULL, epoch_no = NULL,
                                     keep = TRUE, df_out = FALSE, ...) {

  # Resolve epoch_events (numeric event codes or character labels) to the
  # epoch numbers containing at least one matching event. BUG FIX: `keep` is
  # now applied exactly once, in the selection step below. Previously it was
  # applied both when deriving epoch_no from epoch_events and again when
  # selecting rows, so keep = FALSE selected the epochs it should have
  # removed; the events table also ignored `keep` entirely.
  if (is.numeric(epoch_events)) {
    matched <- data$events$event_type %in% epoch_events
    epoch_no <- as.numeric(data$events$epoch[matched])
  } else if (is.character(epoch_events)) {
    check_ev <- epoch_events %in% list_events(data)$event_label
    if (!all(check_ev)) {
      stop("Event label not found, check with list_events.")
    }
    matched <- data$events$event_label %in% epoch_events
    epoch_no <- as.numeric(data$events$epoch[matched])
  }

  if (is.numeric(epoch_no)) {
    sel_rows <- data$timings$epoch %in% epoch_no
    sel_events <- data$events$epoch %in% epoch_no
    if (keep == FALSE) {
      # Removing epochs: complement both the sample rows and the event rows,
      # keeping events consistent with the retained signals.
      sel_rows <- !sel_rows
      sel_events <- !sel_events
    }
    data$signals <- data$signals[sel_rows, ]
    if (!is.null(data$reference)) {
      data$reference$ref_data <- data$reference$ref_data[sel_rows]
    }
    data$timings <- data$timings[sel_rows, ]
    data$events <- data$events[sel_events, ]
  }

  if (df_out) {
    as.data.frame(data)
  } else {
    data
  }
}
|
07a2d78aa3eacba4e4adcdc589c3a68d820033a7
|
372ad64ae50d9a2b4b45f16ee0a549169e3c66fb
|
/00-misc-tests/tmp06-nostalgia.R
|
0a5bf85f449a4be6ee97dce3fd1cf73cb7081871
|
[] |
no_license
|
btmonier/shiny-tests
|
10b2a805e8600533d33cd7193356d373e2ad9bd0
|
4afe16e258bb54e4b0933853565ffd7b173904cd
|
refs/heads/master
| 2021-09-14T20:09:18.369859
| 2018-05-18T17:22:16
| 2018-05-18T17:22:16
| 105,826,889
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 786
|
r
|
tmp06-nostalgia.R
|
# Text plots?
# Experiments rendering DE-analysis plots as ASCII with the txtplot package.
# Load packages
require(txtplot)
# Load test data
# assumes deseq-test.csv is a DESeq-style results table in the working
# directory; column order (1 = mean, 2 = LFC, 4 = p-value?) — TODO confirm
datobj <- read.csv("deseq-test.csv", header = TRUE, row.names = 1)
# MA
# NOTE(review): the axis labels look swapped relative to the data — x is
# log10 of column 1 but labelled "log 2 fold change", while y is column 2
# labelled "normalized mean". Verify against the source data.
txtplot(
  x = log10(datobj[, 1]),
  y = datobj[, 2],
  xlab = "log 2 fold change",
  ylab = "normalized mean",
  ylim = c(-4, 4)
)
# Volcano
txtplot(
  x = datobj[, 2],
  y = -log10(datobj[, 4]),
  xlab = "log2 fold change",
  ylab = "-log10(p-value)",
  xlim = c(-10, 10),
  ylim = c(0, 250)
)
# Output text
# Capture the ASCII rendering of the volcano plot and write it to a file.
writeLines(
  capture.output(
    txtplot(
      x = datobj[, 2],
      y = -log10(datobj[, 4]),
      xlab = "log2 fold change",
      ylab = "-log10(p-value)",
      xlim = c(-10, 10),
      ylim = c(0, 250)
    )
  ),
  con = "output.txt",
  sep = "\n"
)
# Quick look at a second data set (presumably coordinates; verify columns).
tmp <- read.csv("ia.csv", header = TRUE)
plot(tmp$lng, tmp$lat)
library(d3scatter)
library(crosstalk)
|
6b6d3ad1f28356fa6cdcdfce77444263e4bf392b
|
da1dc5317dd8723f6fc549492cae6f6f1a6e57fd
|
/loaddata/mergeData.R
|
7184f8fad39c3988803e5a43568858feadc9a848
|
[] |
no_license
|
quace/BusinessIntelligence
|
cfe402ad31ebf094ea0f524db0b2a5e052290e9a
|
f170becfdc5ff6479872da661d08287dda806322
|
refs/heads/master
| 2021-08-29T19:26:46.327734
| 2017-12-14T18:53:14
| 2017-12-14T18:53:14
| 108,522,007
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,243
|
r
|
mergeData.R
|
# Merge player metadata across data frames. Requires `playerIDdata`,
# `fullData` and `fullData2` to exist in the workspace (loaded elsewhere).
# The commented-out lines below are earlier name-based merge attempts kept
# for reference.
#transform(fullData2,Name = sub("^\\S+\\s","",Name))
#fullData2 <- rename(fullData2, c("Name"="Name2"))
#testfullData <- transform(fullData,Name2 = (paste(substring(Name,1,1),". ",sub("^\\S+\\s","",Name),sep="")))
#mergedData <- merge(x = testfullData, y = fullData2[,c("Name2","Photo","Flag","Club.Logo","Value","Wage")], by = "Name2")
#write.csv(mergedData,file="MergedData5.csv")
#mergedData <- merge(x = fullData3[,!colnames(fullData3) %in% c("Photo","Flag","Club.Logo","Value","Wage")], y = fullData2[,c("Name","Photo","Flag","Club.Logo","Value","Wage")], by = "Name")
#write.csv(unique(mergedData),file="MergedData13.csv")
#add ID to player names
# Extract the player ID from URLs of the form /player/<id>/...
IDextendedData <- transform(playerIDdata, ID = sub("/player/(.+?)/.*", "\\1", playerIDdata$url))
#write.csv(IDextendedData,file="IDextendedData.csv")
#add this ID to the fullData frame matching on player name
#IDextendedFullData <- merge(x = fullData, y = IDextendedData[,c("Name","ID")],by="Name")
# NOTE(review): cbind assumes fullData and IDextendedData have identical row
# order and length — verify, since the merge() alternative was abandoned.
IDextendedFullData <- cbind(fullData, IDextendedData[,c("ID","url")])
#write.csv(IDextendedFullData,file="IDextendedFullData.csv")
#merge this with the completeDataset
MergedCompleteData <- merge(x = IDextendedFullData, y = fullData2[,c("ID","Photo","Flag","Club.Logo","Value","Wage")],by="ID")
#write.csv(MergedCompleteData,file="MergedCompleteData3.csv")
MergeCompleteData <- merge(fullData, fullData2[,c("ID","Potential")], by="ID")
write.csv(MergeCompleteData,file="MergedCompleteDataNEW.csv")
# Convert abbreviated price strings ("600K", "1.5M") to plain numbers.
# Values without a K/M suffix pass through unchanged; NA results become 0.
# Note: assigning a numeric into a character vector coerces it back to a
# string (e.g. 1e+05), preserving the original function's output format.
handlePrices <- function(data){
  # BUG FIX: seq_along() replaces 1:length(data), which iterated over
  # c(1, 0) for zero-length input and grew the vector with a spurious entry.
  for(i in seq_along(data)){
    price = data[i]
    if(grepl("K",price)){
      price = substr(price, 1, nchar(price)-1)
      price = as.numeric(price) * 1000
    } else if(grepl("M",price)){
      price = substr(price, 1, nchar(price)-1)
      price = as.numeric(price)*1000000
    }
    if(is.na(price)){price=0}
    data[i] = price
  }
  return (data)
}
# Normalise Value/Wage strings to plain numbers.
# NOTE(review): "???" is not a valid regular expression (bare quantifiers)
# and gsub() will error on it — this is almost certainly a mangled currency
# symbol (likely the euro sign) lost in an encoding round-trip. Restore the
# original symbol or use fixed = TRUE with the correct character.
MergedCompleteData <- transform(MergedCompleteData, ValueUnified = handlePrices(gsub("???","",Value)))
MergedCompleteData <- transform(MergedCompleteData, WageUnified = handlePrices(gsub("???","",Wage)))
write.csv(MergedCompleteData,file="MergedCompleteData3.csv")
# Drop the auto-generated row-number columns picked up from repeated
# read.csv/write.csv round-trips.
drops <- c("X","X.1","X.2","X.3")
fullData <- fullData[ , !(names(fullData) %in% drops)]
write.csv(fullData,file="Data2.csv")
|
b0dae1b5bef02fc8cc1795a6b61000f07f129e11
|
e6a9d96db33e6cd819e4f1dbd60c80b0eb53a615
|
/R/agTrendTMB-package.R
|
2cc3293c286e37ddd83cc61f400a8886ecb1fcda
|
[] |
no_license
|
dsjohnson/agTrendTMB
|
42384a16831a7f21b4bfe61ad61075b6555cac8f
|
403c55da56e891c8126b912ab8958346d7f1d3c8
|
refs/heads/master
| 2023-01-10T19:30:07.323254
| 2020-11-10T01:19:36
| 2020-11-10T01:19:36
| 307,476,720
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 629
|
r
|
agTrendTMB-package.R
|
#' @rawNamespace useDynLib(agTrendTMB, .registration=TRUE); useDynLib(agTrendTMB_TMBExports)
#' @importFrom stats coef cov.wt dnorm lm model.matrix optim pnorm qnorm sd
#' @keywords internal
"_PACKAGE"
# The following block is used by usethis to automatically manage
# roxygen namespace tags. Modify with care!
## usethis namespace: start
## usethis namespace: end
NULL
# Startup hook: announce package name, version, and build date on attach.
.onAttach <- function(library, pkgname) {
  desc <- utils::packageDescription(pkgname)
  banner <- paste(desc$Package,
                  desc$Version,
                  paste("(", desc$Date, ")", sep = ""),
                  "\n")
  packageStartupMessage(banner)
}
|
8db5cf667db9fcf774b8567d1ca759025ebe1153
|
dbd36e59bc4495c18212fa4f69d7de90a80417e2
|
/man/alpha_ellipsoid.Rd
|
ff59f814b839290a792d2096db23e5e036d0ea68
|
[] |
no_license
|
cran/cda
|
5b595698cd7d0f402341e7f950370fbcb984e73e
|
7b3515d0d109ed30d5fd7bae7ed0f78aa85da59b
|
refs/heads/master
| 2020-12-25T17:37:35.997436
| 2016-08-16T23:58:00
| 2016-08-16T23:58:00
| 17,695,009
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,105
|
rd
|
alpha_ellipsoid.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/polarizability.R
\name{alpha_ellipsoid}
\alias{alpha_ellipsoid}
\title{alpha_ellipsoid}
\usage{
alpha_ellipsoid(sizes, material, medium)
}
\arguments{
\item{sizes}{matrix of cluster sizes in nm}
\item{material}{data.frame with wavelength and epsilon}
\item{medium}{refractive index of surrounding medium}
}
\value{
matrix of polarisability
}
\description{
Principal polarisability components for an ellipsoidal particle
}
\details{
This long-wavelength polarisability approximation uses the Kuwata prescription
The Kuwata prescription includes semi-empirical terms of radiative correction and dynamic depolarisation to better match the fully retarded dipolar response in a reasonable range of (subwavelength) sizes and aspect ratios.
}
\author{
Baptiste Auguie
}
\references{
Kuwata et al. Resonant light scattering from metal nanoparticles: Practical analysis beyond Rayleigh approximation Appl. Phys. Lett. 83, 22 (2003)
}
\seealso{
Other user_level polarisability: \code{\link{alpha_bare}},
\code{\link{alpha_dye}}
}
|
9ceea8b4056a8aa10a1d7e7dad0f21b91bf6c72b
|
bc8bbd84cc7e88c7ce0f5849d3fb6660e6cab82a
|
/R/fts.plot.covariance.R
|
f5840c79340ca1ebf65391d35493340deb3820c6
|
[] |
no_license
|
kidzik/freqdom.fda
|
82e6c1c75e121c6ad9b3573f7f148edc1dd16cc4
|
722f2d86113ec7d1d3ddc08822a7ebb8e2695593
|
refs/heads/master
| 2022-05-20T11:48:58.626572
| 2022-04-18T16:27:32
| 2022-04-18T16:27:32
| 93,265,117
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,312
|
r
|
fts.plot.covariance.R
|
#' Contour plot for the kernels of cross-covariance operators.
#'
#' @title Contour plot for the kernels of cross-covariance operators.
#'
#' @param X an object of class \code{\link[fda]{fd}} representing a functional data sample.
#' @param Y an object of class \code{\link[fda]{fd}} representing a functional data sample.
#' @param cor if \code{FALSE} covariance kernels are plotted, if \code{TRUE} correlation kernels will be plotted.
#' @param res number of discretization points to evaluate functional data.
#' @param lags lags to plot, defaults to \code{0:3}
#' @param nlevels number of color levels for the contour plot.
#' @export
#' @keywords plotting
#' @examples
#' fts = fts.rar(100)
#'
#' # Plot covariance operators of the time series curves
#' # We chose resolution equal 150 for better precision
#' fts.plot.covariance(fts, lags=0:2, res = 150)
#'
#' # Plot correlation operators of the time series curves
#' fts.plot.covariance(fts, lags=0:2, cor = TRUE, res = 50)
#'
#' # Make the grid of levels more dense
#' fts.plot.covariance(fts, lags=0:1, nlevels = 100)
fts.plot.covariance = function(X, Y = X, cor = FALSE, res=200, lags = 0:3, nlevels=25){
  # Standardise both samples first so the structure computed below is a
  # correlation rather than covariance structure. isTRUE() replaces
  # `cor == TRUE`, which errored on NA and misbehaved on vector input.
  if(isTRUE(cor)){
    X=sd1(X)
    Y=sd1(Y)
  }
  # Compute the lagged cross-covariance structure and hand off to the
  # shared operator-plotting routine.
  A=fts.cov.structure(X,Y,lags=lags)
  fts.plot.operators(A,lags=lags,res=res,nlevels=nlevels)
}
|
d356996471061b10513f39c5a4cfbb21be438b83
|
461559b761bd4b1a8cc61131a2e57704db929f53
|
/Hand in/TestingBCa.R
|
b6872fde968b2e10a5bb3e4cbdea0480d7714de7
|
[] |
no_license
|
penguin-coding/asmt3
|
012148df0ca7de93e34d80b81d4066254cce2594
|
0f358cba59ce1bdec37e6b48abf613b7e13f12d7
|
refs/heads/master
| 2021-08-19T07:55:17.248936
| 2017-11-25T09:39:54
| 2017-11-25T09:39:54
| 108,715,297
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,718
|
r
|
TestingBCa.R
|
library(boot)
source('simulation.r')
# given the strange BCa results, I got very worried that my BCa code was
# wrong. Not finding an issue with the formulas being used to retrieve the
# alpha values, I decided a comparison with the boot package's own BCa would be
# the next reasonable point of call as a means of testing. The following is a
# small piece of code which checks that my BCa and the boot package BCa perform
# similarly in a small handful of situations (for poisson data).
# Statistic function in the shape boot::boot() expects: the data plus a
# vector of resampled indices.
our.mean <- function(d, w) mean(d[w])
# Depends on boot::boot/boot.ci plus bootstrap() and get.coverage() sourced
# from simulation.r. Output vectors are preallocated at 2*sims (interval
# endpoints stored pairwise: odd index = lower, even = upper).
compare.BCAs <- function(sims, n, R, statfuncs){
  # purpose : Compare performance of the boot package BCa bootstrap and
  #           The one I wrote for this project
  #
  # inputs  : sims      - the number of simulations to run for the test case
  #           n         - sample size for observations
  #           R         - number of bootstrap resamples
  #           statfuncs - list of functions which calculate relevant
  #                       statistics. The first entry should be a function
  #                       of the type required for my BCa, the second should
  #                       be of the type required by the boot package's
  #                       boot function
  #
  # output  : The observed coverage of each method, as a percentage.
  MyBCa <- vector(length=sims*2)   # Create blank vectors to store
  BootBCa <- vector(length=sims*2) # the bootstrap intervals
  for (i in seq(1,2*sims,2)){
    # Poisson(100) data, so the true mean being covered is 100.
    data = rpois(n, lambda=100)                      # generate the data
    A <- bootstrap(data, n=R, func=statfuncs[[1]],   # generate my own
                   method='BCa',check.inputs=F)      # BCa bootstrap
    resamps <- boot(data, statfuncs[[2]], R)                # generate the
    interval <- boot.ci(resamps, type='bca')$bca[4:5]       # boot interval
    MyBCa[i] <- A[1]            # add my interval
    MyBCa[i+1] <- A[2]          # to the output
    BootBCa[i] <- interval[1]   # add the boot interval
    BootBCa[i+1] <- interval[2] # to the output
  }
  # get.coverage() (simulation.r) presumably checks how often each paired
  # interval contains the true mean 100 — TODO confirm its contract.
  coverages <- c(get.coverage(MyBCa, 100), get.coverage(BootBCa,100))
  names(coverages) <- c('mine', 'boot.package')
  return(coverages)
}
# Driver: compare coverages over a grid of resample counts (R) and sample
# sizes (N); timed with system.time() since 10000 simulations is slow.
set.seed(666)
R <- c(999)  # set to any combination
N <- c(20)   # of desired values
sims <- 10000
system.time(
  for (r in R){
    for (n in N){
      cat('\n Coverages for sample size',n,'and',r,'resamples:\n')
      print(compare.BCAs(sims, n, r, list(mean, our.mean)))
    }
  }
)[3]
# Playing around with different settings in the above lines of code reveals
# that my method does in fact have a consistent small drop in percentage
# cover, but I still have no idea why.
|
b4a7be0bce0719cfcf7f0861661d74e8d4c5e65c
|
60d9971e3f2c2456c874d017eff9527e86368c99
|
/R_codefile.R
|
9a82f954c41caaca4f544425c3b82fac7e329bc9
|
[] |
no_license
|
Anok-swarupnarain/Santander-Customer-Prediction
|
7c2555512b7113da244832f31e196a9aec666f8c
|
0f3f809898b1011eb3dba8e878fe3fe618337f9c
|
refs/heads/master
| 2022-04-23T05:24:41.083474
| 2020-04-19T01:06:40
| 2020-04-19T01:06:40
| 256,885,765
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,214
|
r
|
R_codefile.R
|
#set working directory (machine-specific; consider relative paths for portability)
library(readr)
setwd("C:/Users/Anok/Downloads/Edwisor Santander Project")
#get working directory
# BUG FIX: bare `getwd` only printed the function definition; getwd()
# actually returns the current working directory.
getwd()
# Load all modelling/plotting packages in one pass.
x <- c("ggplot2", "corrgram", "DMwR", "caret", "randomForest", "unbalanced", "C50", "dummies", "e1071", "Information",
       "MASS", "rpart", "gbm", "ROSE", 'sampling',"scales" ,'DataCombine', 'inTrees',"ROCR","Matrix")
lapply(x, require, character.only = TRUE)
rm(x)
#load Train.csv Data File as Train
train<-read.csv("train.csv",header=TRUE)
#load Test.Csv data file as Test
test<-read.csv("test.csv",header = TRUE)
#dimensions of train data
dim(train)
#dimensions of test data
dim(test)
#structure of train data
str(train)
#structure of test data
str(test)
#summary of train data
summary(train)
#summary of test data
summary(test)
#View Data frames of test and train
View(train)
View(test)
#to know type of variables in train data
sapply(train,typeof)
#to know type of variables in test data
sapply(test,typeof)
#variable names in train data
colnames(train)
#variable names in test data
colnames(test)
#convert type of target variable in train data
train$target<-as.factor(train$target)
class(train$target)
#remove ID_code from Train data
train<-train[,-1]
#Missing Value analysis
# Count NAs per column and express as a percentage of rows.
missing_val<-data.frame(apply(train,2,
                              function(x){sum(is.na(x))}))
missing_val$columns<-row.names(missing_val)
names(missing_val)[1]<-"Missing_Percentage"
missing_val$Missing_Percentage<-(missing_val$Missing_Percentage/nrow(train)) * 100
missing_val <- missing_val[order(-missing_val$Missing_Percentage),]
row.names(missing_val) = NULL
missing_val = missing_val[,c(2,1)]
sum(is.na(train))
sum(is.na(test))
#missing values are in both data sets are zero .Hence did not iterate same function to test.csv data
#Let us check for class imbalance in Train data
# NOTE(review): this assignment shadows base::table for the rest of the
# session; prefer a different variable name.
table<-table(train$target)/length(train$target)*100
table
#90 percent of the customers in Train data has target values 0 and 10 percent has value 1
#Visualization of Class imbalance
ggplot(train, aes_string(x = train$target)) +
  geom_bar(stat="count",fill = "SkyBlue") + theme_bw() +
  xlab("target") + ylab('Count') + scale_y_continuous(breaks=pretty_breaks(n=10)) +
  ggtitle("santander transaction") + theme(text=element_text(size=15))
#Important features
library(randomForest)
train$target<-as.factor(train$target)
# Variable importance from a small forest (ntree=10 keeps runtime low but
# importance estimates are noisy — increase for stable rankings).
rf<-randomForest(target~.,train,ntree=10,importance=TRUE)
important_variables<-importance(rf,type=2)
important_variables
#We can visually see that few of the variable like Var_81,var_26,var_53,var_12,var_139 carry important information
corr<-cor(train[,2:201])
View(corr)
#We can derive from the data that Explanatory variables are very lowly correlated
#let us split train data into train(80%) and test(20%) data sets
set.seed(1234)
train.index<-createDataPartition(train$target,p=.80,list=FALSE)
trainset<-train[train.index,]
testset<-train[-train.index,]
#logistic regression on this data
logit_model<-glm(target~.,data=trainset,family = "binomial")
summary(logit_model)
logit_Predictions <- predict(logit_model, newdata = testset[,-1], type = "response")
logit_Predictions <-ifelse(logit_Predictions > 0.5, 1,0)
ConfMatrix_LR = table(testset$target, logit_Predictions)
ConfMatrix_LR
# Hard-coded confusion-matrix counts; recompute from ConfMatrix_LR if the
# seed or split changes.
Accuracy<-((35476+1090)*100)/(35476+504+2929+1090)
Accuracy
#Accuracy is 91.41729
library(ROCR)
pred<-prediction(logit_Predictions,testset$target)
perf <- performance(pred,"tpr","fpr")
plot(perf,colorize=TRUE)
# NOTE(review): argument order here is (truth, predictions); later calls use
# (predictions, truth) — confirm which order ROSE::accuracy.meas expects.
accuracy.meas(testset$target,logit_Predictions)
#precision is 0.684
#recall is 0.271
#F is 0.191
#Accuracy has been 91.41729 but on the basis of ROC curve and f1 score our model is not performing well on this imbalanced data
#let us use SMOTE to balance data
# NOTE(review): ROSE is a different balancing technique than SMOTE, despite
# the comments below calling it SMOTE.
train_rose<-ROSE(target~.,data=trainset,seed = 1)$data
test_rose<-ROSE(target~.,data = testset,seed = 1)$data
table(train_rose$target)
table(test_rose$target)
#with the help SMOTE(Synthetic Minority Oversampling Technique) We are able to balance data
#In train_rose set We have 0 as target for 80036 records and 1 as target for 79965 records
#In test_rose set We have 0 as target for 20085 records and 1 as target for 19914 records
#lets apply logistic regression on SMOTE sample synthetics data
logit_model <- glm(target ~ ., data=train_rose, family = "binomial")
logit_Predictions <- predict(logit_model, newdata = test_rose[,-1], type = "response")
logit_Predictions <- ifelse(logit_Predictions > 0.5, 1, 0)
ConfMatrix_LR <- table(test_rose$target, logit_Predictions)
ConfMatrix_LR
Accuracy<-((14349+14264)/(14349+5736+5650+14264))*100
Accuracy
#Accuracy of the model is 71.53429
library(ROCR)
pred<-prediction(logit_Predictions,test_rose$target)
perf <- performance(pred,"tpr","fpr")
plot(perf,colorize=TRUE)
accuracy.meas(logit_Predictions,test_rose$target)
#from ROC curve and F1 curve We can conclude that logistic regression is performing
#better on Synthetic data than Imbalanced Data
#Lets Build Random Forest Model (on the original, imbalanced split)
RF_model<-randomForest(target ~ ., trainset, importance = TRUE, ntree = 5)
RF_Predictions <- predict(RF_model, testset[,-1])
ConfMatrix_RF<-table(testset$target,RF_Predictions)
ConfMatrix_RF
# Express accuracy as a percentage, consistent with the other models in
# this script (the original omitted the *100).
Accuracy<-((35423+303)/(35423+303+3716+557))*100
Accuracy
#accuracy of the model is 89.3%
RF_Predictions<-as.double(RF_Predictions)
# BUG FIX: the ROC curve must use the held-out labels testset$target;
# `test` is the unlabeled Kaggle test file and has no target column.
pred<-prediction(RF_Predictions,testset$target)
perf <- performance(pred,"tpr","fpr")
plot(perf,colorize=TRUE)
accuracy.meas(RF_Predictions,testset$target)
#looking at roc curve and F1 score we can assume that Our model has underperformed on Imbalanced Data
# Let's use Random Forest on synthetic data
RF_model <- randomForest(target ~ ., train_rose, importance = TRUE, ntree = 100,seed=2)
RF_Predictions <- predict(RF_model, test_rose[,-1])
ConfMatrix_RF<-table(test_rose$target,RF_Predictions)
ConfMatrix_RF
# NOTE(review): unlike the other models, this accuracy is left as a
# fraction (no *100) even though the comment below quotes a percentage.
Accuracy<-(13748+14064)/(5850+13748+14064+6337)
Accuracy
#accuracy of model with synthetic data is 69.53%
RF_Predictions<-as.double(RF_Predictions)
pred<-prediction(RF_Predictions,test_rose$target)
perf <- ROCR::performance(pred,"tpr","fpr")
plot(perf,colorize=TRUE)
accuracy.meas(RF_Predictions,test_rose$target)
# on synthetic SMOTE data Random Forest classifier model works a little better
#Lets try Naive Bayes on Imbalance data
NB_model <- naiveBayes(target ~ ., data = trainset)
NB_predictions<-predict(NB_model,testset[,2:201],type = "class")
confMatrix_NB<-table(observed=testset[,1],predicted=NB_predictions)
confMatrix_NB
# Hard-coded confusion-matrix counts; recompute if the seed/split changes.
Accuracy<-((35388+1493)/(35388+592+2526+1493))*100
Accuracy
#Accuracy is 92.2 % on imbalanced data
NB_predictions<-as.double(NB_predictions)
pred<-prediction(NB_predictions,testset$target)
perf<-ROCR::performance(pred,"tpr","fpr")
plot(perf,colorize=TRUE)
accuracy.meas(NB_predictions,testset$target)
#performance of Naive Bayes on Imbalanced data is very poor
#Naive Bayes on SMOTE synthetic Data
NB_model <- naiveBayes(target ~ ., data = train_rose)
NB_predictions<-predict(NB_model,test_rose[,2:201],type = "class")
confMatrix_NB<-table(observed=test_rose[,1],predicted=NB_predictions)
confMatrix_NB
Accuracy<-((15289+15236)/(15289+4796+4678+15236))*100
Accuracy
#Accuracy on SMOTE synthetic sample is 76.3141
NB_predictions<-as.double(NB_predictions)
pred<-prediction(NB_predictions,test_rose$target)
perf<-ROCR::performance(pred,"tpr","fpr")
plot(perf,colorize=TRUE)
# NOTE(review): predictions come from test_rose but the labels here are
# testset$target — lengths/rows may not correspond. Probably should be
# test_rose$target, as two lines above; verify.
accuracy.meas(NB_predictions,testset$target)
#accuracy on SMOTE sample sample is a lot better than that of NB on imbalanced data
#Based On Accuracy Naive Bayes is working Best .Hence choosing Naive Bayes for predicting Target class of Test.CSV
test<-read.csv("test.csv",header=T)
ID_code<-subset(test,select = ID_code)
test<-subset(test,select=-c(ID_code))
str(test)
dim(test)
View(test)
#Prediction using Naive Bayes Model
NB_predictions_test<-predict(NB_model,test[,1:200],type="class")
NB_predictions_test<-as.data.frame(NB_predictions_test)
#column bind target results with ID_code
ID_code<-cbind(ID_code,NB_predictions_test)
names(ID_code)[2] = "Target_value"
ID_code
write.csv(ID_code,"Prediction_R.csv",row.names=F)
|
87c35fdf26d0dc7c8e144b42a084bab878c088f1
|
f24ec4dcd2ce1e35b35a17b1b22994a3c7677ff2
|
/man/addcds.Rd
|
25a685f14cb9c062367768590f0f8d21f6aa27c5
|
[] |
no_license
|
fiksdala/fiksdal
|
2fbdd4256091b9cca4d57107c214aa8c30c762f4
|
873fab35441560664a26869113ff420165f570d5
|
refs/heads/master
| 2022-01-10T17:49:07.608249
| 2022-01-07T00:00:16
| 2022-01-07T00:00:16
| 118,020,552
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 539
|
rd
|
addcds.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/addcds.R
\name{addcds}
\alias{addcds}
\title{addcds}
\usage{
addcds(df, influence.object, ID)
}
\arguments{
\item{df}{dataframe}
\item{influence.object}{An object returned from influence.ME's influence()}
\item{ID}{ID name of df}
}
\description{
This function returns a dataframe with cook's distance stats obtained through
influence.ME's influence() function.
}
\examples{
addcds()
}
\keyword{cook's}
\keyword{d,}
\keyword{influence,}
\keyword{influence.ME}
|
efcf37fad6b06ebb0f2d08e70308caa15209e186
|
0d701209e55f554ad3bf2250e83ef68297a98aa4
|
/VeraCode.R
|
f2c751caea5403c4c65dfc1c46ce44f09464189c
|
[] |
no_license
|
ClaudiaVeraArias/IBC_Exercise_08
|
be06d0cd319490d748886e433f3077d6368ec607
|
7ed4ad8ddd9793c5fec81ae2aa438ac010480a19
|
refs/heads/master
| 2020-09-11T09:06:51.479617
| 2019-11-22T00:23:12
| 2019-11-22T00:23:12
| 222,015,615
| 0
| 0
| null | 2019-11-15T22:48:54
| 2019-11-15T22:48:54
| null |
UTF-8
|
R
| false
| false
| 2,961
|
r
|
VeraCode.R
|
# EXERCISE 8
# select the work directory
# instal the packages ggplot2 and select it
# NOTE(review): setwd()/install.packages() inside a script hurt portability
# and re-runs; consider relative paths and a one-time install.
setwd('/Users/claudiaveraarias/Documents/ND_Classes/Fall_Semester_2019/Biocomputing/R/W11_BC/Tutorial/')
install.packages("ggplot2")
library("ggplot2")
#1) Using the score-by-score information from this game summarized in
#“UWvMSU_1-22-13.txt” generate a graph
# read the file
UWvsMSU <- read.delim("/Users/claudiaveraarias/Documents/ND_Classes/Fall_Semester_2019/Biocomputing/R/W11_BC/Tutorial/IBC_Exercise_08-master/UWvMSU_1-22-13.txt", header = TRUE, sep = "\t", dec = ".")
# Running-total matrix: col 2 = UW cumulative score, col 3 = MSU cumulative
# score; row 1 is the 0-0 start.
# NOTE(review): nrow = 51 is hard-coded — presumably nrow(UWvsMSU) + 1;
# verify against the input file.
UW_MSU <- matrix(data = NA, nrow = 51, ncol = 3)
UW_MSU[1,]=0
# Loop to create a new table with the score sum
for(i in 1:nrow(UWvsMSU)){
  if(UWvsMSU$team[i] == "UW"){
    UW_MSU[i+1,2] <- UW_MSU[i, 2] + UWvsMSU$score[i]
    UW_MSU[i+1,3] <- UW_MSU[i, 3]
  } else {
    UW_MSU[i+1,3] <- UW_MSU[i, 3] + UWvsMSU$score[i]
    UW_MSU[i+1,2] <- UW_MSU[i, 2]
  }
}
# Add a extra row in the original file "UWvsMSU" to have the same
# length of the new table "UW_MSU
UWvsMSU2 <- rbind(c(NA,NA,NA), UWvsMSU)
UWvsMSU2[is.na(UWvsMSU2)] <- 0
UWscore <- UW_MSU[,2]
MSUscore <- UW_MSU[,3]
Time <- UWvsMSU2[,1]
# Plot the graph (cumulative score for each team over game time)
ggplot() + geom_line(aes(x=Time, y=UWscore), color="green") +
  geom_line(aes(x=Time, y=MSUscore), color="blue") +
  labs( y = "Score", x = "Time", title = "UW vs MSU")
#2) Write a game called “guess my number”. The computer will generate a
# random number between 1 and 100. The user types in a number and the computer
# replies “lower” if the random number is lower than the guess, “higher” if the
# random number is higher, and “correct!” if the guess is correct. The player can
# continue guessing up to 10 times.
## Guess my numbers
# Initial parameters
x = 0
gotRight = 0
failed = 0
#Initial lambda for our random var
# Use FALSE rather than F (F can be reassigned).
correct = sample(1:100, 1, replace = FALSE)
initial = correct
# how many guesses should we allow per number
maxGuesses = 10
# BUG FIX throughout this loop: the trailing "n" in the cat() strings was a
# "\n" that lost its backslash; without it every prompt runs onto one line.
while(x != Inf) {
  # Each round draws a fresh target from a Poisson centred on the last one.
  correct = rpois(1,correct) +1
  cat("I am thinking of a number between 1 and 100. What is it? (type Inf to quit)\n")
  # solicit input from the user
  x = scan(n=1) # just one item is this vector
  if(x == Inf) {
    cat("The correct answer was", correct, "\n")
    cat("You got", gotRight, "right and failed", failed, "times. Maximum allowed guesses was", maxGuesses, "and initial lambda was", initial, ". Goodbye.\n")
    break
  }
  for (i in 1:maxGuesses) {
    if(x == correct) {
      print("Correct")
      gotRight = gotRight + 1
      break
    } else {
      if(i == maxGuesses) {
        cat("You ran out of guesses. I will pick a new random number based on the last one.\n")
        failed = failed + 1
      } else {
        if(x < correct) {
          cat("Higher.\n")
        } else {
          cat("Lower.\n")
        }
        x = scan(n=1)
      }
    }
  }
}
|
bc7d4f40de81792ad27a2a0cb83516f38e0db609
|
c8decce1d7dce60b3dcd9982c9b48bbaa632bcca
|
/RQA1.R
|
878ebb8ced6f1c0692bb961c26a71de80aacde1f
|
[] |
no_license
|
skylight0306/R-Design-Project
|
188a2e13cdeed3af0211639dac5965a1090abdc0
|
ed9e32796ceac72f84ccaeb245424a1bd952c9a0
|
refs/heads/master
| 2023-02-10T20:32:07.121558
| 2021-01-11T16:39:14
| 2021-01-11T16:39:14
| 265,524,105
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,418
|
r
|
RQA1.R
|
################################
rm(list=ls())
#
# ============================================================================== #
# Q&A 1.
# Modify the following function to produce the same out as shown below.
# Solve the Tower of Hanoi for n disks, moving them from peg A to peg C
# with peg B as the intermediate, and report the total number of moves.
# Relies on the global counter `timee`, which hanoi() increments via `<<-`.
HanoiTower <- function(n,A,B,C) {
  # Reset the global move counter before the recursion starts.
  timee <<-0
  hanoi(n,A,B,C)
  cat("It takes ", timee, " steps in total.\n")
}
# Recursive worker for HanoiTower(): move n disks from peg A to peg C using
# B as the spare. Prints one numbered line per move and increments the
# global counter `timee` (initialised by HanoiTower()) via `<<-`.
hanoi <- function(n,A,B,C){
  if(n == 1) {
    # Base case: moving a single disk is one move; count it and report it.
    timee <<- timee + 1
    cat( timee, "Move a sheet from", A, "to", C, "\n")
  }
  else {
    # Move the top n-1 disks out of the way, move the largest disk,
    # then bring the n-1 disks back on top of it.
    hanoi(n-1, A, C, B);
    hanoi(1, A, B, C);
    hanoi(n-1, B, A, C);
  }
}
HanoiTower(3,'A','B','C')
HanoiTower(4,'A','B','C')
HanoiTower(5,'A','B','C')
# Goal: Write a hanoi function to (1) number each line as 1, 2, 3, ...
# (2) output the total number of steps at the end.
# (3) can be used for different n's again and again
# a sample output:
# > (2,'A', 'B', 'C')
# 1 Move a sheet from A to B
# 2 Move a sheet from A to C
# 3 Move a sheet from B to C
# It takes 3 steps in total.
#
# > hanoi(3, "A", "B", "C")
# 1 Move sheet A to C
# 2 Move sheet A to B
# 3 Move sheet C to B
# 4 Move sheet A to C
# 5 Move sheet B to A
# 6 Move sheet B to C
# 7 Move sheet A to C
# It takes 7 steps in total.
#
# ============================================================================== #
################################
|
ff94cf6ecfc6f7cd6e400b6b6cd8b607fdde4c99
|
c6f1c615abb4f9c8a9a3ebe17f14a7c67d01d0bd
|
/04-data-tables-and-the-tidyverse/reshaping-to-wide-with-tidyr.R
|
be6e949923b08970c8f348d11364e95c1a8016ce
|
[] |
no_license
|
melff/dataman-r
|
05b903ace706d41ef91e7a113adcedc92c0be879
|
4e919fd9be46acc243f702889fdb88ed8c4429d7
|
refs/heads/main
| 2023-06-23T03:24:28.475559
| 2021-07-15T19:53:32
| 2021-07-15T19:53:32
| 324,203,840
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,246
|
r
|
reshaping-to-wide-with-tidyr.R
|
#' # Reshaping data to wide format with *tidyr*
#' The following makes use of the packages *tidyr* and *readr*. You may need to
#' install them from [CRAN](https://cran.r-project.org) using the code
#' `install.packages(c("tidyr","readr"))` if you want to run this on your computer. (The packages are already installed
#' on the notebook container, however.)
substr(readLines("inequality-oecd-downloaded.csv",n=5),
start=1,stop=40)
library(readr)
inequality.oecd.dld <- read_csv("inequality-oecd-downloaded.csv")
inequality.oecd.dld
library(tidyr)
inequality.oecd.dld %>% spread(key="SUBJECT",value="Value") ->
inequality.oecd
inequality.oecd[-c(2,4,6)]
library(dplyr)
inequality.oecd.sub <- select(inequality.oecd.dld,
LOCATION,SUBJECT,TIME,Value)
inequality.oecd.sub
inequality.oecd.sub %>% spread(key=SUBJECT,
value=Value) -> inequality.oecd
inequality.oecd
inequality.oecd.dld %>% pivot_wider(names_from=SUBJECT,
values_from=Value,
id_cols=c(LOCATION,TIME)) ->
inequality.oecd
inequality.oecd
|
cd5d522f4733c55ea7b1b8f723850890dba96ff7
|
f97c0ca971a7696cc7524d86019478adec9b84f0
|
/exploratoryDataAnalysis/exploringNumericalData.R
|
62c99fd79db568483689661b8a3b9539e5405a36
|
[] |
no_license
|
muhammadali229/r_data_analyst
|
e20d7a6f1dac6459ea0bd3b223d987b1dcdd1396
|
464bd36e5746036cb3f60b76cba88adb543fe5c4
|
refs/heads/main
| 2023-08-07T00:22:51.369649
| 2021-09-16T08:14:04
| 2021-09-16T08:14:04
| 377,105,124
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,702
|
r
|
exploringNumericalData.R
|
library(readr)
library(dplyr)
library(ggplot2)
# Load comics dataset
cars <- read_csv("D:/R_data_analyst/exploratoryDataAnalysis/cars04.csv")
# Learn data structure
str(cars)
# Create faceted histogram
ggplot(cars, aes(x = city_mpg)) +
geom_histogram() +
facet_wrap(~ suv)
##################################################
uniqe_no_cylinders <- unique(cars$ncyl)
print(uniqe_no_cylinders)
# Filter cars with 4, 6, 8 cylinders
common_cyl <- filter(cars, ncyl %in% c(4, 6, 8))
# Create box plots of city mpg by ncyl
ggplot(common_cyl, aes(x = as.factor(ncyl), y = city_mpg)) +
geom_boxplot()
# Create overlaid density plots for same data
ggplot(common_cyl, aes(x = city_mpg, fill = as.factor(ncyl))) +
geom_density(alpha = .3)
##################################################
# Create hist of horsepwr
cars %>%
ggplot(aes(horsepwr)) +
geom_histogram() +
ggtitle("Across all cars")
# Create hist of horsepwr for affordable cars
cars %>%
filter(msrp < 25000) %>%
ggplot(aes(horsepwr)) +
geom_histogram() +
xlim(c(90, 550)) +
ggtitle("Only cars less than 25000")
##################################################
# Create hist of horsepwr with binwidth of 3
cars %>%
ggplot(aes(horsepwr)) +
geom_histogram(binwidth = 3) +
ggtitle("Distribution of horsepower (Binwidth = 3)")
# Create hist of horsepwr with binwidth of 30
cars %>%
ggplot(aes(horsepwr)) +
geom_histogram(binwidth = 30) +
ggtitle("Distribution of horsepower (Binwidth = 30)")
# Create hist of horsepwr with binwidth of 60
cars %>%
ggplot(aes(horsepwr)) +
geom_histogram(binwidth = 60) +
ggtitle("Distribution of horsepower (Binwidth = 60)")
##################################################
# Construct box plot of msrp
cars %>%
ggplot(aes(x = 1, y = msrp)) +
geom_boxplot()
# Exclude outliers from data
cars_no_out <- cars %>%
filter(msrp < 100000)
# Construct box plot of msrp using the reduced dataset
cars_no_out %>%
ggplot(aes(x = 1, y = msrp)) +
geom_boxplot()
##################################################
# Create plot of city_mpg -> density
cars %>%
ggplot(aes(city_mpg)) +
geom_density()
# Create plot of city_mpg -> boxplot
cars %>%
ggplot(aes(x = 1, y = city_mpg)) +
geom_boxplot()
# Create plot of city_mpg -> density
cars %>%
ggplot(aes(width)) +
geom_density()
# Create plot of city_mpg -> boxplot
cars %>%
ggplot(aes(x = 1, y = width)) +
geom_boxplot()
##################################################
# Facet hists using hwy mileage and ncyl
common_cyl %>%
ggplot(aes(x = hwy_mpg)) +
geom_histogram() +
facet_grid(ncyl ~ suv, labeller = label_both) +
ggtitle("Rows: ncyl, Columns: suv")
##################################################
|
90e5e8030e050dc2cb06d129027e97e892f1a1b9
|
118cf005c146919c38e22e88f24b903353df30f0
|
/Datahantering, r.R
|
14e35f16524931e516b0c9e0c195a98dfa66798c
|
[] |
no_license
|
Tondar7/Uppsats
|
964188c2332eaa17e23d7ee8e9382c7f4140efc7
|
9b4912475a93715a178af87bd3c3179a2cd7e821
|
refs/heads/master
| 2022-12-28T23:10:10.713017
| 2020-09-22T01:00:56
| 2020-09-22T01:00:56
| 297,378,542
| 0
| 0
| null | null | null | null |
ISO-8859-15
|
R
| false
| false
| 3,399
|
r
|
Datahantering, r.R
|
### Datahantering NEK uppsats
#install.packages("readxl")
library("readxl")
#install.packages("tidyverse")
library("tidyverse")
#library("selectr")
#library("xml2")
#library("rvest")
library("stringr")
#library("jsonlite")
data_kommun <- as.data.frame(read_excel("raw_data.xlsx"))
treatment_data <- as.data.frame(read_excel("Treatmentdata - for matching.xlsx"))
brf_t <- c()
brf_c <- c()
empty_df_treatment <- data_kommun[FALSE,]
empty_df_control <- data_kommun[FALSE,]
#-------------------------------------------------------
# Sort rows into treatment and control: a sale belongs to the treatment
# group when its address appears in the treatment file.
# Vectorized with %in% instead of the original rbind()-inside-a-loop,
# which copies the whole data frame on every append (O(n^2)).
in_treatment <- data_kommun$adress %in% treatment_data$Adress
empty_df_treatment <- data_kommun[in_treatment, , drop = FALSE]
empty_df_control <- data_kommun[!in_treatment, , drop = FALSE]
View(empty_df_treatment)
View(empty_df_control)
## Adding BRF-vector to DF:
df_treatment <- inner_join(empty_df_treatment, treatment_data, by = c("adress" = "Adress"))
df_control <- empty_df_control
View(df_treatment)
View(df_control)
#---------------------------------------------------
#Separate into columns for grouping
split_treatment <- df_treatment %>%
separate(col = kvmpris, into = c("kvmpris", " "), sep = "k", extra = "merge") %>%
separate(col = adress, into = c("gatunamn", "gatunummer", "1", "2", "3"), sep = " ") %>%
separate(col = datum, into= c("dag","månad","år"))
View(split_treatment)
split_control <- df_control %>%
separate(col = kvmpris, into = c("kvmpris", " "), sep = "k", extra = "merge") %>%
separate(col = adress, into = c("gatunamn", "gatunummer", "1", "2", "3"), sep = " ") %>%
separate(col = datum, into= c("dag","månad","år"))
#View(split_control)
#rio::export(split_treatment, "pregrouped_treatment_data.xlsx")
#rio::export(split_control, "pregrouped_control_data.xlsx")
#-----------------------------------------------------
# Group by street and year and find average kvm price per year per street
# Average price per square metre by BRF/street and year.
# Bug fix: mean()'s argument is `na.rm`, not `rm.na` -- the misspelled name
# was silently swallowed by mean()'s `...`, so NAs were never removed.
pregrouped_treatment_data <- read_excel("pregrouped_treatment_data.xlsx")
pregrouped_control_data <- read_excel("pregrouped_control_data.xlsx")
grouped_by_streetnyear_treatment <- pregrouped_treatment_data %>%
  group_by(BRF, år) %>%
  summarize(snittkvmpris = mean(kvmpris, na.rm = TRUE))
warnings()
grouped_by_streetnyear_control <- pregrouped_control_data %>%
  group_by(gatunamn, år) %>%
  summarize(snittkvmpris = mean(kvmpris, na.rm = TRUE))
warnings()
#View(grouped_by_streetnyear_treatment)
#View(grouped_by_streetnyear_control)
#rio::export(grouped_by_streetnyear_treatment, "grouped_treatment_data.xlsx")
#rio::export(grouped_by_streetnyear_control, "grouped_control_data.xlsx")
# Yearly average square-metre price for the treatment group.
forgraphing1 <- read_excel("grouped_treatment_data_aktuell.xlsx")
forgraphing_t <- na.omit(forgraphing1)
# Bug fixes: `forgraphing_` was an undefined object (the cleaned data is in
# `forgraphing_t`), and mean()'s argument is `na.rm`, not `rm.na` (the
# misspelling was silently ignored via mean()'s `...`).
average_peryear_treatment <- forgraphing_t %>%
  group_by(år) %>%
  summarize(snittperyear = mean(snittkvmpris, na.rm = TRUE))
warnings()
View(average_peryear_treatment)
# Yearly average square-metre price for the control group.
forgraphing2 <- read_excel("grouped_control_data_aktuell.xlsx")
forgraphing_c <- na.omit(forgraphing2)
average_peryear_control <- forgraphing_c %>%
  group_by(år) %>%
  summarize(snittperyear = mean(snittkvmpris, na.rm = TRUE))
warnings()
View(average_peryear_control)
|
3e8be864994b764c5a4b7ae475809501d325d906
|
bac12b8b3542bc01b0616de3e70802e2e7e9cc46
|
/ExData_Plotting1.R
|
91c0e42c168fa13b1cdf94b29d5d83ebf2f98ddf
|
[] |
no_license
|
Viditya/ExData_Plotting1
|
c005a3554666191c74d7478b8f7ce88e7f6a275d
|
4f56e2a2d52fd820f55794d50fd9f3c7fef469ab
|
refs/heads/master
| 2021-05-01T11:03:27.959566
| 2018-02-11T18:29:15
| 2018-02-11T18:29:15
| 121,111,975
| 0
| 0
| null | 2018-02-11T10:27:33
| 2018-02-11T10:27:32
| null |
UTF-8
|
R
| false
| false
| 8,917
|
r
|
ExData_Plotting1.R
|
#If the device is on at the starting, switch it off(not required now,as written the same in exit part)
#dev.off()
#1.Download the file using the URL and store it in a zip file in the directory
#download.file("https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip", dest= "Household_Power_Consumption.zip")
#2. Unzip the downloaded file
#unzip("Household_Power_Consumption.zip")
#3.Parse the file and read it as csv, limit the number of rows as per the requirement
file<-read.csv("Household_Power_Consumption.txt",header=T, sep=';', na.strings="?",
nrows=2075259, check.names=F, stringsAsFactors=F, comment.char="", quote='\"')
# Can be used to check the info of the file read above, and commented for now
#head(file)
#4. A collection of the names of the files that will have the code for all the plots
RFileNames<- c("Plot1.R","Plot2.R","Plot3.R","Plot4.R")
#4. A collection of the names of the png files that will have all the plots
PNGFileNames<- c("Plot1.png","Plot2.png","Plot3.png","Plot4.png")
#5. Take the range of the data within the given dates
rangeofData<- subset(file, Date %in% c("1/2/2007","2/2/2007"))
#6. Format the Date column usinf as.Date in the desired format
rangeofData$Date <- as.Date(rangeofData$Date, format="%d/%m/%Y")
#7. create a variable with Data and Time columns together
datetime <- paste(as.Date(rangeofData$Date), rangeofData$Time)
#8. Convert the dates according to POSXct notation
rangeofData$Datetime <- as.POSIXct(datetime)
#9. A function that will take the file name as input and will call the corresponding function for that file
# Dispatch to the plot routine that corresponds to the given script file
# name (one of RFileNames). Stops with an error for any other name.
DrawAllPlots <- function(FileName)
{
  # The i-th script file in RFileNames is produced by the i-th routine here.
  plot_routines <- list(DrawPlot1, DrawPlot2, DrawPlot3, DrawPlot4)
  slot <- match(FileName, RFileNames)
  if (is.na(slot))
  {
    # Unknown file name: report it to the user.
    stop("Please provide a valid file name")
  }
  plot_routines[[slot]](FileName)
}
#10. Funtion to draw plot for File plot1.R
DrawPlot1<- function(fileName)
{
# Check if the file exists
if(!file.exists(fileName))
{
# Create the file if it does not exist
file.create(fileName)
}
#11. Connection object for the file
fileConn<-file(fileName)
# Variable to plot the graph according to the question
plot1<- deparse(substitute(hist(rangeofData$Global_active_power, main="Global Active Power",
xlab="Global Active Power (kilowatts)", ylab="Frequency", col="Red")))
# Write the above variable in the file plot1.R
writeLines(plot1, fileConn)
# Close the connection
close(fileConn)
# Read the file from the memory using the source function
source(fileName)
# Plot the graph using the plot function written in the file to the PNG file
dev.copy(png, PNGFileNames[1],width=480, height=480)
# Turn off the device
dev.off()
}
#12. Funtion to draw plot for File plot2.R
DrawPlot2<- function(fileName)
{
# Check if the file exists
if(!file.exists(fileName))
{
# Create the file if it does not exist
file.create(fileName)
}
#13. Connection object for the file
fileConn<-file(fileName)
# Variable to plot the graph according to the question
plot2<- deparse(substitute(with(rangeofData, {
plot(Global_active_power~Datetime, type="l",
ylab="Global Active Power (kilowatts)", xlab="")
})))
# Write the above variable in the file plot2.R
writeLines(plot2, fileConn)
# Close the connection
close(fileConn)
# Read the file from the memory using the source function
source(fileName)
# Plot the graph using the plot function written in the file to the PNG file
dev.copy(png, PNGFileNames[2],width=480, height=480)
# Turn off the device
dev.off()
}
#14. Funtion to draw plot for File plot3.R
DrawPlot3<- function(fileName)
{
# Check if the file exists
if(!file.exists(fileName))
{
# Create the file if it does not exist
file.create(fileName)
}
#15. Connection object for the file
fileConn<-file(fileName)
# Variable to plot the graph according to the question
plot3<- deparse(substitute(with(rangeofData, {
plot(Sub_metering_1~Datetime, type="l",
ylab="Global Active Power (kilowatts)", xlab="")
lines(Sub_metering_2~Datetime,col='Red')
lines(Sub_metering_3~Datetime,col='Blue')
})))
# Legends for the plot
plot3legend <- deparse(substitute(legend("topright", col=c("black", "red", "blue"), lty=1, lwd=2,
legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))))
#unused code, so commented
# plot3<- paste(plot3,plot3legend, sep=" ")
# Write the above variable in the file plot3.R
writeLines(plot3,fileConn)
#Concatenate the legend variable with the file, starting on the new line
cat(plot3legend, file= fileName, append=TRUE,sep="\n")
#Close the connection
close(fileConn)
# Read the file from the memory using the source function
source(fileName)
# Plot the graph using the plot function written in the file to the PNG file
dev.copy(png, PNGFileNames[3],width=480, height=480)
# Turn off the device
dev.off()
}
#16. Funtion to draw plot for File plot4.R
DrawPlot4<- function(fileName)
{
# Check if the file exists
if(!file.exists(fileName))
{
# Create the file if it does not exist
file.create(fileName)
}
#17. Connection object for the file
fileConn<-file(fileName)
# Variable to set the location parameters for the plot
plot4par<- deparse(substitute(par(mfrow=c(2,2), mar=c(4,4,2,1), oma=c(0,0,2,0))))
# Variable to plot the graph according to the question
plot4<- deparse(substitute(with(rangeofData, {
plot(Global_active_power~Datetime, type="l",
ylab="Global Active Power (kilowatts)", xlab="")
plot(Voltage~Datetime, type="l",
ylab="Voltage (volt)", xlab="")
plot(Sub_metering_1~Datetime, type="l",
ylab="Global Active Power (kilowatts)", xlab="")
lines(Sub_metering_2~Datetime,col='Red')
lines(Sub_metering_3~Datetime,col='Blue')
legend("topright", col=c("black", "red", "blue"), lty=1, lwd=2, bty="n",
legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
plot(Global_reactive_power~Datetime, type="l",
ylab="Global Rective Power (kilowatts)",xlab="")
})))
# Write the par in the file plot4.R
writeLines(plot4par, fileConn)
#Concatenate the legend variable with the file, starting on the new line
cat(plot4, file= fileName, append=TRUE,sep="\n")
#Close the connection
close(fileConn)
# Read the file from the memory using the source function
source(fileName)
# Plot the graph using the plot function written in the file to the PNG file
dev.copy(png, PNGFileNames[4],width=480, height=480)
# Turn off the device
dev.off()
}
# Loop through the collection of file names
for(i in RFileNames)
{
# Call the DrawAllPlots function for each file name in the collection
DrawAllPlots(i)
}
# Make the device off on exit
on.exit(dev.off())
|
dc1b9ad98d476904a16fe4cf258538654ff6f325
|
388d7a62bbbd144f243438f9e6a5a456eb2cce3c
|
/man/leastCostMap.Rd
|
66bba8ee7a869f42f1f4f3c698d783455211533c
|
[] |
no_license
|
aspillaga/fishtrack3d
|
64c7dcb2a97a833ef830d845e8bfbc3aaf387827
|
2be695e0f88d97e095f074acd17240cb8878dbbc
|
refs/heads/master
| 2022-01-18T10:50:53.776454
| 2019-05-23T15:09:18
| 2019-05-23T15:09:18
| 118,634,135
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,054
|
rd
|
leastCostMap.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/syntPath.R
\name{leastCostMap}
\alias{leastCostMap}
\title{Generate a least cost transition matrix from a topographical raster}
\usage{
leastCostMap(topo, min.depth = 0, max.depth = NULL)
}
\arguments{
\item{topo}{raster dataset (\code{RasterLayer} object) with the topographic
information of the study area (bathymetry or elevation).}
\item{min.depth, max.depth}{Minimum and maximum depths that the path is
allowed to cross. If not provided, only areas that are marked with
\code{NA} in the topographic raster will be avoided.}
}
\value{
A \code{`TransitionLayer`} object (\code{gdistance} package).
}
\description{
This function generates a \code{`TransitionLayer`} (\code{gdistance}
package) that excludes the cells that are outside a depth range, which is
used later to calculate the shortest path between two points that avoids
the excluded cells.
}
\examples{
\dontrun{
library(raster)
dist.cost <- leastCostMap(bathymetry, min.depth = 30)
plot(raster(dist.cost))
}
}
|
94350aed6f866c4fdb9aaf1aae7b12556ad21381
|
97bb2f60ac0b5da9f2f0d4e6a4c2b35b857227cc
|
/packrat/src/packrat/packrat/R/github.R
|
49da66098e8be48487daec6c46c57078d9a3ea80
|
[
"MIT"
] |
permissive
|
rachjone/iapsr
|
153158167a6c2c12bee5a27fd6de5de85fa19f24
|
b7f2c2e8b83a67c2a39d380da830337f9b11ef2e
|
refs/heads/master
| 2022-12-02T02:39:46.956892
| 2020-07-27T21:13:12
| 2020-07-27T21:13:12
| 282,985,045
| 0
| 0
|
NOASSERTION
| 2020-07-27T18:39:50
| 2020-07-27T18:39:49
| null |
UTF-8
|
R
| false
| false
| 762
|
r
|
github.R
|
# Is `url` a single string pointing at www.github.com or api.github.com
# over http or https?
# NOTE(review): the dots in the pattern are unescaped and match any
# character -- confirm whether that is intentional before tightening.
isGitHubURL <- function(url) {
  github_pattern <- "^http(?:s)?://(?:www|api).github.com"
  if (!is.string(url)) {
    return(FALSE)
  }
  grepl(github_pattern, url, perl = TRUE)
}
# TRUE when both devtools (>= 1.9.1) and httr (>= 1.0.0) are installed,
# i.e. when the authenticated GitHub downloader can be used.
canUseGitHubDownloader <- function() {
  required_ok <- packageVersionInstalled(devtools = "1.9.1", httr = "1.0.0")
  all(required_ok)
}
# Download `url` to `destfile` via httr, authenticating with the user's
# GitHub personal access token when one is configured.
# Returns 0 on success and 1 on failure (download.file() convention);
# onError() (defined elsewhere in this package) appears to map any error in
# the block to the failure code 1 -- confirm against its definition.
githubDownload <- function(url, destfile, ...) {
  onError(1, {
    # yoink() presumably pulls (possibly unexported) functions out of the
    # named packages at run time -- TODO confirm; this avoids a hard
    # dependency on devtools/httr.
    github_pat <- yoink("devtools", "github_pat")
    authenticate <- yoink("httr", "authenticate")
    GET <- yoink("httr", "GET")
    content <- yoink("httr", "content")
    # Use basic auth with the token when available, anonymous otherwise.
    token <- github_pat(quiet = TRUE)
    auth <- if (!is.null(token))
      authenticate(token, "x-oauth-basic", "basic")
    else
      list()
    request <- GET(url, auth)
    # Write the raw response body; success iff the file now exists.
    writeBin(content(request, "raw"), destfile)
    if (file.exists(destfile)) 0 else 1
  })
}
|
a7e0694c7bfc9c9838705bf63494c56a93fc508c
|
a21350122ae4dbbb6d852ee14a9f4407338393dd
|
/pgm/ch3.r
|
5205e29ece99bb98cd03f2e5f446fe1f0bc4a76b
|
[] |
no_license
|
DataEconomyLab/forecast
|
d40516f5837732c2286163d21993ac64dc44c591
|
197aef8804ce94b964d57f40e32e922a11fa31dc
|
refs/heads/main
| 2023-05-02T08:22:56.168067
| 2021-05-13T23:51:09
| 2021-05-13T23:51:09
| 350,149,589
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,137
|
r
|
ch3.r
|
# Figure 3-1
setwd("c:/work/data")
gdp <- read.csv("gdpq.csv", header=TRUE)
gdp_o <- ts(gdp[,1]/1000, start=1970, frequency=4)
gdp_sa <- ts(gdp[,2]/1000, start=1970, frequency=4)
gdp_gr <- ts((gdp_sa - lag(gdp_sa, -1)) / lag(gdp_sa, -1)*100, start=c(1970, 2), frequency=4)
plot(gdp_gr, ylab="경제성장률(전기대비)", xlab="연도", col="steelblue", main="")
abline(h=0, lty=2, col="grey")
# Figure 3-2
hist(gdp_gr, breaks=12, col="lightblue", border="black", freq=FALSE, main="", xlab="", xlim=c(-10,10))
lines(density(gdp_gr))
shapiro.test(gdp_gr)
# Figure 3-3
library(quantmod)
# Yahoo! Finance로부터 종합주가지수 종가 데이터 가져오기
kospi <- getSymbols("^KS11", auto.assign = FALSE)
kospi_r <- dailyReturn(na.omit(kospi))
# 그래프
hist(kospi_r, breaks = 30, col="lightblue", border="black", freq=FALSE, main="", xlab="", xlim=c(-0.12, 0.12), ylim=c(0,50))
lines(density(kospi_r))
shapiro.test(kospi_r)
# Figure 3-4
set.seed(1)
nn = length(gdp_o)
wn = ts(rnorm(nn), start=1970, frequency=4)
par(mfrow=c(2,1))
plot(wn, main="", xlab="", ylab="", col="steelblue")
abline(h=0, lty=2, col="gray")
acf(wn, main="", col="steelblue")
# Figure 3-5: deterministic sinusoid series and its ACF.
# NOTE(review): this shadows base::sin for the rest of the script -- kept
# because later sections reuse the `sin` series object.
nn <- length(gdp_sa)
sin <- ts(sin(1:nn/nn*12*pi), start = 1970, frequency = 4)
par(mfrow = c(2, 1))
plot(sin, main = "", ylab = "", col = "steelblue")
abline(h = 0, lty = 2, col = "gray")
acf(sin, main = "", col = "steelblue")  # bug fix: "steelbook" is not a valid color name
# Figure 3-6 - 8
plot(gdp_o, main="", xlab="", ylab="", col="steelblue")
acf(gdp_o, main="", col="steelblue")
plot(diff(log(gdp_o)), main="", xlab="", ylab="", col="steelblue")
acf(diff(log(gdp_o)), main="", col="steelblue")
plot(diff(log(gdp_o),4), main="", xlab="", ylab="", col="steelblue")
acf(diff(log(gdp_o),4), main="", col="steelblue")
# 표
Box.test(wn, lag=8, type="Ljung")
Box.test(sin, lag=8, type="Ljung")
Box.test(gdp_o, lag=8, type="Ljung")
Box.test(diff(log(gdp_o)), lag=8, type="Ljung")
Box.test(diff(log(gdp_o),4), lag=8, type="Ljung")
par(mfrow=c(2,1))
plot(wn, main="", xlab="", ylab="", col="steelblue")
abline(h=0, lty=2, col="gray")
pacf(wn, main="", col="steelblue")
plot(sin, main="", xlab="", ylab="", col="steelblue")
abline(h=0, lty=2, col="gray")
pacf(sin, main="", col="steelblue")
plot(gdp_o, main="", xlab="", ylab="", col="steelblue")
pacf(gdp_o, main="", col="steelblue")
plot(diff(log(gdp_o)), main="", xlab="", ylab="", col="steelblue")
pacf(diff(log(gdp_o)), main="", col="steelblue")
plot(diff(log(gdp_o),4), main="", xlab="", ylab="", col="steelblue")
pacf(diff(log(gdp_o),4), main="", col="steelblue")
# Periodogram of white noise, then PACF views of the GDP transformations.
par(mfrow = c(2, 1))
plot(wn, main = "", xlab = "", ylab = "", col = "steelblue")
abline(h = 0, lty = 2, col = "gray")
aa <- spectrum(wn, spans = c(3, 3), main = "", col = "steelblue")
# Bug fix: type="1" (digit one) is invalid; type="l" draws a line.
# NOTE(review): 1:80/40 assumes aa$spec holds 80 frequencies -- confirm.
plot(1:80/40, aa$spec, type = "l", ylim = c(0, 10))
plot(sin, main = "", xlab = "", ylab = "", col = "steelblue")
abline(h = 0, lty = 2, col = "gray")
pacf(sin, main = "", col = "steelblue")
plot(gdp_o, main = "", xlab = "", ylab = "", col = "steelblue")
abline(h = 0, lty = 2, col = "gray")
plot(diff(log(gdp_o)), main = "", xlab = "", ylab = "", col = "steelblue")
pacf(diff(log(gdp_o)), main = "", col = "steelblue")
plot(diff(log(gdp_o), 4), main = "", xlab = "", ylab = "", col = "steelblue")
pacf(diff(log(gdp_o), 4), main = "", col = "steelblue")
sin1 = ts(sin(1:nn/nn*12*pi), start=1970, frequency=4)
sin2 = ts(sin(1:nn/nn*36*pi), start=1970, frequency=4)
plot(cbind(sin1, sin2), main="", xlab="", ylab="", col="steelblue")
spectrum(cbind(sin1, sin2), spans=c(3,3), main="", col="steelblue")
spectrum(gdp_o, spans=c(3,3), main="", col="steelblue")
plot(sin1+sin2, main="", xlab="", ylab="", col="steelblue")
spectrum(sin1+sin2, spans=c(3,3), main="", col="steelblue")
plot(wn, main="", xlab="", ylab="", col="steelblue")
abline(h=0, lty=2, col="gray")
spectrum(wn, spans=c(3,3), main="", col=1:2)
plot(gdp_o, main="", xlab="", ylab="")
lines(gdp_sa, col=2)
spectrum(cbind(gdp_o, gdp_sa), spans=c(3,3), main="", col=1:2)
# Growth-rate comparison: quarter-on-quarter vs. year-on-year log differences.
dlgdp1 <- diff(log(gdp_o))
dlgdp2 <- diff(log(gdp_o), 4)
dlgdp <- cbind(dlgdp1, dlgdp2)
plot(dlgdp1, main = "", xlab = "", ylab = "", col = "steelblue")
lines(dlgdp2, col = 2)
# Bug fix: "red" was a stray positional argument outside col=; both series
# colors belong inside the col= vector.
spectrum(na.omit(cbind(dlgdp1, dlgdp2)), spans = c(3, 3), main = "", col = c("steelblue", "red"))
|
160d75d331f28b19f7cf667da4d84bb93974dccb
|
cb7c00e1dcc28b2b04e13e6248a2fce1fb6d61bc
|
/R/fit-lm.R
|
36674bd503e4dd4204569719b8e0a13d92d442d4
|
[
"MIT"
] |
permissive
|
DavisVaughan/tidyversity
|
bd5fe9f3991999d39ff891077b078f36a09f4f44
|
589379d0126a5d1b40c40fceaca932db9cd04633
|
refs/heads/master
| 2020-03-15T12:44:43.800276
| 2018-05-04T16:03:16
| 2018-05-04T16:03:16
| 132,151,121
| 0
| 0
| null | 2018-05-04T14:32:31
| 2018-05-04T14:32:31
| null |
UTF-8
|
R
| false
| false
| 1,064
|
r
|
fit-lm.R
|
# Compute model-fit statistics for a fitted OLS model.
#
# @param m A fitted `lm` object.
# @return A tibble with one row per fit statistic (F, R^2, Adj R^2, RMSE,
#   AIC, BIC) and columns: fit_stat, n (observations), df (only the F
#   statistic's numerator df), estimate, p.value (only for F), and
#   significance stars (via make_stars(), defined elsewhere in the package).
ols_fit <- function(m) {
  s <- summary.lm(m)
  ## F statistic and its p value; s$fstatistic is c(value, numdf, dendf),
  ## which matches pf()'s (q, df1, df2) argument order.
  f <- s$fstatistic[1]
  fp <- do.call("pf", as.list(c(unname(s$fstatistic), lower.tail = FALSE)))
  ## root mean square error
  rmse <- rmse(m)
  ## information criteria
  aic <- AIC(m)
  bic <- BIC(m)
  ## stat names and their estimates, in display order
  fit_statistic <- c("F", "R^2", "Adj R^2", "RMSE", "AIC", "BIC")
  estimate <- c(f, s$r.squared, s$adj.r.squared, rmse, aic, bic)
  ## degrees of freedom: only the F statistic carries one (numerator df)
  df <- rep(NA_integer_, length(fit_statistic))
  df[match(fit_statistic[c(1)], fit_statistic)] <- c(as.integer(s$fstatistic[2]))
  n <- nobs(m)
  ## p values: only defined for the F statistic
  p.value <- rep(NA_real_, length(fit_statistic))
  p.value[match(c("F"), fit_statistic)] <- fp
  ## significance stars
  stars <- make_stars(p.value)
  ## assemble the result; tibble::tibble() replaces the deprecated
  ## tibble::data_frame()
  tibble::tibble(fit_stat = fit_statistic, n, df,
                 estimate, p.value, stars)
}
# Root mean square error of a fitted lm: the square root of the residual
# sum of squares divided by the residual degrees of freedom (n - p), where
# p is the number of estimated coefficients.
rmse <- function(m) {
  resids <- unname(m$residuals)
  n_obs <- nobs(m)
  n_coef <- length(variable.names(m))
  rss <- sum(resids^2)
  sqrt(rss / (n_obs - n_coef))
}
|
a75f3130203e8927dc473394a7eafd804d5326eb
|
9eb69d839ca649884d060a677b2a621a5fbf931e
|
/man/OneR.Rd
|
b2cee434d96e7471b4b0274c7962049ed648ac7a
|
[] |
no_license
|
cran/OneR
|
739bda58b0f3f15e4096de79b5348cca09a027f0
|
e4a1e0414d5ca36fff51554a0241cb1bfc46b7fd
|
refs/heads/master
| 2021-01-20T21:00:57.853733
| 2017-05-05T17:20:25
| 2017-05-05T17:20:25
| 61,529,527
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,951
|
rd
|
OneR.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/OneR_main.R
\name{OneR}
\alias{OneR}
\alias{OneR.formula}
\alias{OneR.data.frame}
\title{One Rule function}
\usage{
OneR(x, ...)
\method{OneR}{formula}(formula, data, ties.method = c("first", "chisq"),
verbose = FALSE, ...)
\method{OneR}{data.frame}(x, ties.method = c("first", "chisq"),
verbose = FALSE, ...)
}
\arguments{
\item{x}{data frame with the last column containing the target variable.}
\item{...}{arguments passed to or from other methods.}
\item{formula}{formula, additionally the argument \code{data} is needed.}
\item{data}{data frame which contains the data, only needed when using the formula interface.}
\item{ties.method}{character string specifying how ties are treated, see 'Details'; can be abbreviated.}
\item{verbose}{if \code{TRUE} prints rank, names and predictive accuracy of the attributes in decreasing order (with \code{ties.method = "first"}).}
}
\value{
Returns an object of class "OneR". Internally this is a list consisting of the function call with the specified arguments, the names of the target and feature variables,
a list of the rules, the number of correctly classified and total instances and the contingency table of the best predictor vs. the target variable.
}
\description{
Builds a model according to the One Rule (OneR) machine learning classification algorithm.
}
\details{
All numerical data is automatically converted into five categorical bins of equal length. Instances with missing values are removed.
This is done by internally calling the default version of \code{\link{bin}} before starting the OneR algorithm.
To finetune this behaviour data preprocessing with the \code{\link{bin}} or \code{\link{optbin}} functions should be performed.
If data contains unused factor levels (e.g. due to subsetting) these are ignored and a warning is given.
When there is more than one attribute with best performance either the first (from left to right) is being chosen (method \code{"first"}) or
the one with the lowest p-value of a chi-squared test (method \code{"chisq"}).
}
\section{Methods (by class)}{
\itemize{
\item \code{formula}: method for formulas.
\item \code{data.frame}: method for data frames.
}}
\examples{
data <- optbin(iris)
model <- OneR(data, verbose = TRUE)
summary(model)
plot(model)
prediction <- predict(model, data)
eval_model(prediction, data)
## The same with the formula interface:
data <- optbin(iris)
model <- OneR(Species ~., data = data, verbose = TRUE)
summary(model)
plot(model)
prediction <- predict(model, data)
eval_model(prediction, data)
}
\references{
\url{https://github.com/vonjd/OneR}
}
\seealso{
\code{\link{bin}}, \code{\link{optbin}}, \code{\link{eval_model}}, \code{\link{maxlevels}}
}
\author{
Holger von Jouanne-Diedrich
}
\keyword{1R}
\keyword{One}
\keyword{OneR}
\keyword{Rule}
|
af6cc83376081e9e3edb51c4bc752239db3a1c1b
|
9ff1c5bb2148e0a9782bf3084817878f95f191d1
|
/scripts/ejercicio_011.r
|
37e8e454d54a2ea6111a8dcc7018b5fea49b4c29
|
[] |
no_license
|
mar71n/cursoR
|
95a1045f89e77cc6406bd834f3e1a5ff5e38a00c
|
fad97fccc114bb5b8cf367952eb64bc397ba6431
|
refs/heads/master
| 2021-01-16T18:30:01.702666
| 2013-07-18T17:19:35
| 2013-07-18T17:19:35
| 31,655,439
| 0
| 0
| null | null | null | null |
ISO-8859-1
|
R
| false
| false
| 5,169
|
r
|
ejercicio_011.r
|
# Un trigrama es una sucesión de tres palabras. Los lingüistas computacionales construyen bases de datos de ellos a partir de textos y los usan para,
# por ejemplo, crear motores de traducción automáticos.
#
# En este enlace se ve la evolución temporal del uso de de dos trigramas en los libros que Google tiene escaneados.
# El ejercicio de hoy consiste en la aplicación de las técnicas de procesamiento de texto que hemos aprendido para recolectar trigramas a partir de texto.
# Para eso, a partir de la (muy famosa) frase
frase <- "Y que todo lo escrito en ellos era irrepetible desde siempre y para siempre, porque las estirpes condenadas a cien años de soledad no tenían una segunda oportunidad sobre la tierra."
oraciones <- strsplit(frase,"[[:punct:]]")
oraciones.palabras <- strsplit(oraciones[[1]],"[[:space:]]")
# Build the sliding-window index vector 1,2,3, 2,3,4, ..., appending one
# shifted window at a time until a window ends at (or past) position k.
# Used to pull word trigrams out of a sentence by indexing.
vector.pos <- function(k){
  positions <- c(1:3)
  while (max(positions) < k) {
    n_pos <- length(positions)
    # Append the last window shifted forward by one position.
    positions <- c(positions, positions[(n_pos - 2):n_pos] + c(1, 1, 1))
  }
  return(positions)
}
# Same trigram index pattern as above (1,2,3, 2,3,4, ...), built with a
# counted loop: a word vector of length k contains k-2 trigrams, so k-3
# triples must be appended after the initial c(1,2,3).
# BUG FIX: the original iterated over 1:(k-3), which for k == 3 is c(1, 0)
# and runs the loop twice instead of zero times (and is also wrong for
# k < 3). seq_len(k-3) yields an empty sequence in those cases.
vector.pos <- function(k){
  retval <- c(1:3)
  largo <- 3
  for (i in seq_len(k - 3)) {
    # Append the last triple shifted up by one position.
    retval <- c(retval, retval[(largo-2):largo] + c(1,1,1))
    largo <- largo + 3
  }
  return(retval)
}
# Append the next trigram index triple to x: take the second-to-last
# element a and add (a, a + 1, a + 2) at the end.
f <- function(x){
  a <- x[length(x) - 1]
  c(x, a, a + 1, a + 2)
}
x <- c(1,2,3)
for(i in 1:97) x <- f(x)
# cuales <- vector.pos(100)
cuales <- x
tri_1 <- matrix(oraciones.palabras[[1]][c(cuales)[1:((length(oraciones.palabras[[1]])-2)*3)]],ncol=3,nrow=length(oraciones.palabras[[1]])-2,byrow=T)
tri_2 <- matrix(oraciones.palabras[[2]][c(cuales)[1:((length(oraciones.palabras[[2]])-2)*3)]],ncol=3,nrow=length(oraciones.palabras[[2]])-2,byrow=T)
# Trigram matrix (one trigram per row) for sentence 'i' of the word list
# 'palabras'. Relies on the global index pattern 'cuales' defined earlier
# in this script (1,2,3, 2,3,4, 3,4,5, ...).
tris <- function(palabras, i){
  words <- palabras[[i]]
  n <- length(words)
  matrix(words[cuales[1:((n - 2) * 3)]], ncol = 3, nrow = n - 2, byrow = TRUE)
}
# Trigram matrix (one trigram per row) for a single sentence given directly
# as a word vector. Relies on the global index pattern 'cuales' defined
# earlier in this script.
tris_ <- function(palabras){
  n <- length(palabras)
  matrix(palabras[cuales[1:((n - 2) * 3)]], ncol = 3, nrow = n - 2, byrow = TRUE)
}
tri_1 <- tris(oraciones.palabras,1)
tri_1 <- tris_(oraciones.palabras[[1]])
tri.b <- tris(oraciones.palabras,1)
for (i in c(2:length(oraciones.palabras))){
tri.b <- rbind(tri.b, tris(oraciones.palabras,i ))
}
tris(oraciones.palabras,c(1,2))
oraciones.palabras[[1]]
lapply(oraciones.palabras, tris_)
tri.a <- matrix(oraciones.palabras[[1]][c(cuales)[1:((length(oraciones.palabras[[1]])-2)*3)]] , ncol=3 , nrow=length(oraciones.palabras[[1]])-2 , byrow=T)
for (i in c(2:length(oraciones.palabras))){
tri.a <- rbind(tri.a, matrix(oraciones.palabras[[i]][c(cuales)[1:((length(oraciones.palabras[[i]])-2)*3)]],ncol=3,nrow=length(oraciones.palabras[[i]])-2,byrow=T))
}
# vamos a ver si somos capaces de construir los trigramas (todos) que contiene. De tener éxito, intentaremos hacer lo mismo con un texto algo más complejo,
frase2 <- "Llegó a la conclusión que aquel hijo por quien ella habría dado la vida era, simplemente, un hombre incapacitado para el amor. Una noche, cuando lo tenía en el vientre, lo oyó llorar. Fue un lamento tan definido, que Jose Arcadio Buendía despertó a su lado y se alegró con la idea de que el niño iba a ser ventrílocuo. Otras personas pronosticaron que sería adivino. Ella, en cambio, se estremeció con la certidumbre de que aquel bramido profundo era un primer indicio de la temible cola de chancho. Pero la lucidez de la decrepitud le permitió ver, y así lo repitió muchas veces, que el llanto de los niños en el vientre de la madre no es augurio de ventriloquía ni facultad adivinatoria, sino una señal inequívoca de incapacidad para el amor."
strsplit(frase2,"[[:punct:]]")
frase2 <- gsub(",", "", frase2)
frase2 <- gsub("[.][ ]", ".", frase2)
oraciones <- strsplit(frase2,"[[:punct:]]")
str(oraciones)
oraciones.palabras <- strsplit(oraciones[[1]],"[[:space:]]")
str(oraciones.palabras)
tri.b <- matrix(oraciones.palabras[[1]][c(cuales)[1:((length(oraciones.palabras[[1]])-2)*3)]],ncol=3,nrow=length(oraciones.palabras[[1]])-2,byrow=T)
for (i in c(2:length(oraciones.palabras))){
tri.b <- rbind(tri.b, matrix(oraciones.palabras[[i]][c(cuales)[1:((length(oraciones.palabras[[i]])-2)*3)]],ncol=3,nrow=length(oraciones.palabras[[i]])-2,byrow=T))
}
# The added difficulty: trigrams must stay inside a single sentence --
# they never cross a full stop.
#
# Finally, the special prize goes to whoever collects all the trigrams of
# the Quijote (sentence by sentence, never across full stops) and reports
# which ones occur most frequently.
quijote <- readLines("../data/pg2000.txt", encoding = "UTF-8")
frase2 <- paste(quijote[37:37490], collapse = " ")
# Drop punctuation other than full stops.
# BUG FIX: a literal hyphen inside a character class must come first or
# last; the original "[,;:-_¿?!¡]" made ":-_" a range (ASCII 58..95) that
# also deleted every uppercase ASCII letter.
frase2 <- gsub("[,;:_¿?!¡-]", "", frase2)
frase2 <- gsub("[.][ ]", ".", frase2)
oraciones <- strsplit(frase2, "[[:punct:]]")
oraciones.palabras <- strsplit(oraciones[[1]], "[[:space:]]")
# One trigram matrix (one trigram per row) per sentence, then stack them,
# using the global sliding-window index pattern 'cuales' (1,2,3, 2,3,4, ...)
# built earlier in this script.
# NOTE(review): 'cuales' only covers sentences up to ~100 words; longer
# sentences would index past its end and yield NA trigrams -- confirm.
trigramas <- lapply(oraciones.palabras, function(palabras) {
  largo <- length(palabras)
  if (largo < 3)
    return(NULL)  # too short to contain a trigram (the original errored here)
  matrix(palabras[cuales[1:((largo - 2) * 3)]],
         ncol = 3, nrow = largo - 2, byrow = TRUE)
})
# do.call(rbind, ...) stacks all matrices at once (NULLs are dropped),
# avoiding the O(n^2) grow-by-rbind loop of the original.
a <- do.call(rbind, trigramas)
|
66daa1ea0871f325b8b5c7ec2f6dba7e3b09edc8
|
7c3001f7a2cc3d0f3597f8facd1dbd441f9c7bc9
|
/PortBath.R
|
9ea3bc1fcdad2cbae09e5bfcdc3ca706803775fa
|
[] |
no_license
|
gilaum/Running_HS_Track_Guide
|
27fd706683fe369b752afd3d03a67a08b3137c02
|
dc80f5aab6012d36cf1335b052ba0aa26dec8eb2
|
refs/heads/master
| 2023-02-28T03:28:01.772960
| 2021-02-10T01:32:41
| 2021-02-10T01:32:41
| 334,561,194
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,354
|
r
|
PortBath.R
|
# Tom Alig
# Oct 11, 2019
# Are Bath times or Portage times faster?
#library(rvest)
#library(plyr)
library(tidyverse)
library(lubridate)
library(scales)
library(hms)
library(reshape2)
library(data.table)
library(Hmisc)
library(tools)
source("RunningProjectR/scripts/my_track_scripts.R")
# Took the girls xc data from 2017, 2018, 2019 to do the analysis
# Read in the data
port.bath <- read.csv("Portage Bath Compare.csv")
head(port.bath)
str(port.bath)
port.bath2 <- port.bath
port.bath3 <- port.bath2 %>%
mutate(time = lubridate::ms(Time)) %>%
mutate(minute = time$minute *60) %>%
mutate(second = time$second) %>%
mutate(total.secs = round((minute + second), digits = 2)) %>%
mutate(mins.dec = total.secs/60) %>%
select(-minute, -second) %>%
select(-c(Time, time, total.secs)) %>%
dplyr::rename(Time = mins.dec)
head(port.bath3)
str(port.bath3)
(port.bath3[2,6] - port.bath3[6,6] )*60
# What are median and mean values for all years combined, by Meet
port.bath3 %>%
group_by(Meet) %>%
summarise(Median = median(Time, na.rm = TRUE),
Mean.Race = mean(Time, na.rm = TRUE))
# We want to compare the athletes who ran both Portage and Bath in the same year
# So, we need to remove all those who ran only one of the races in a given year
# Spread the data
data.spread <- port.bath3 %>%
spread(key = Meet, value = Time) %>%
group_by(Athlete, Year) %>%
filter(Bath != "NA" & Portage != "NA") %>%
mutate(Diff = Bath - Portage)
data.spread %>%
View()
# By year, what are the median and mean values?
p1 <- data.spread %>%
group_by(Year) %>%
summarise(BathMed = round(median(Bath, na.rm = TRUE), digits = 2),
PortMed = round(median(Portage, na.rm = TRUE), digits = 2),
BathMean = round(mean(Bath, na.rm = TRUE), digits = 2),
PortMean = round(mean(Portage, na.rm = TRUE), digits = 2)) %>%
mutate(DecDiffMed = BathMed - PortMed) %>%
mutate(SecDiffMedian = DecDiffMed * 60) %>%
mutate(DecDiffMean = BathMean - PortMean) %>%
mutate(SecsDiffMean = DecDiffMean * 60)
p1
sprintf("%02d:%02d",x%/%60,x%%60)
p1 %>%
mutate(newtimeblah = BathMed * 60) %>%
mutate(newtime2 = sprintf("%02g:%02g", newtimeblah%/%60,newtimeblah%%60)) %>%
select(newtime2, everything()) %>%
View()
ggplot(data = p1, aes(x = Year, y = c(PortMed, BathMed))) +
geom_bar(stat = "identity")
p1 %>%
mutate(time2 = ms(PortMed)) %>%
select(time2, everything())
p1 %>%
gather(BathMed, BathMean, PortMed, PortMean, key = "Meet", value = "time") %>%
select(-c(DecDiffMed, SecDiffMedian, DecDiffMean, SecsDiffMean)) %>%
mutate(newtimeblah = time * 60) %>%
mutate(time2 = sprintf("%02g:%02g", newtimeblah%/%60,newtimeblah%%60)) %>%
ggplot(aes(x = Year, y = time, fill = Meet)) +#, color = Year)) +
geom_bar(stat = "identity", position = "dodge") +
geom_text(aes(label = time2),
position = position_dodge(0.9),
vjust = 1.5) +
ylab("Time in Minutes") +
ggtitle("Bath and Portage Comparison, MI HS XC, 2017-19")
#scale_y_discrete(limits = c("0", "21"))
#scale_y_time(labels = date_format("%M:%S"))
# Are the differences statistically significant?
# Need to use the data in the original (aka "long") form
# But only use the Athletes who peformed in both races
# So, we need to "gather" the data.spread df
data.gather <- data.spread %>%
gather(Bath, Portage, key = "Meet", value = "Time")
# Now, filter for year, so that we can run stat analyses on each year
# For 2017
data.gather17 <- data.gather %>%
filter(Year == "2017")# %>%
View()
m17 <- lm(Time ~ Meet , data = data.gather17)
summary(m17)
# For 2018
data.gather18 <- data.gather %>%
filter(Year == "2018")# %>%
View()
m18 <- lm(Time ~ Meet , data = data.gather18)
summary(m18)
# For 2019
data.gather19 <- data.gather %>%
filter(Year == "2019")
m19 <- lm(Time ~ Meet , data = data.gather19)
summary(m19)
# Overall, regardless of year
m <- lm(Time ~ Meet , data = data.gather)
summary(m)
####
## FINDINGS
####
# Statistically significant different times in 2017
# NOT Statistically significant different times in 2018, 2019
# NOT Statistically significant different times when NOT filtering by Year
# By school, what are the median values?
data.spread %>%
group_by(School) %>%
summarise(BathMedian = round(median(Bath, na.rm = TRUE), digits = 2),
PortageMedian = median(Portage, na.rm = TRUE)) %>%
mutate(DecDiff = BathMedian - PortageMedian) %>%
mutate(SecondsDiff = DecDiff * 60)
########################################
# Discarded Code
# |||
# VVV
port.bath3 <- time.to.format(port.bath3,
port.bath2$time,
"time")
port.bath2$Time[1:33]
port.bath$Time[1:33]
port.bath %>%
mutate(blah = as.numeric(as.character(Time))) %>%
mutate(blah2 = ms(blah))
str(port.bath)
port.bath2 <- port.bath2 %>%
mutate(Time = y) %>%
select(-y)
str(port.bath2)
port.bath4 <- port.bath3 %>%
mutate(newtime = ms(Time))
port.bath4 %>%
group_by(Meet) %>%
summarise(Median = median(newtime, na.rm = TRUE))
head(port.bath4)
port.bath3 %>%
dplyr::count(Athlete, Year) %>%
filter(n > 1) %>%
select(everything())
port.bath3 %>%
group_by(Meet, Year) %>%
summarise(Median = median(Time, na.rm = TRUE))
|
b15ddd37fb67370b32b13caa035b923e6380f98e
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/wql/examples/wqData.Rd.R
|
6b3c8d75c64caf8562de519b2d9016ddbee8bc42
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 793
|
r
|
wqData.Rd.R
|
library(wql)
### Name: wqData
### Title: Construct an object of class "WqData"
### Aliases: wqData
### Keywords: classes data
### ** Examples
## Not run:
##D
##D
##D # Create new WqData object from sfbay data. First combine date and time
##D # into a single string after making sure that all times have 4 digits.
##D sfb <- within(sfbay, time <- substring(10000 + time, 2, 5))
##D sfb <- within(sfb, time <- paste(date, time, sep = ' '))
##D sfb <- wqData(sfb, 2:4, 5:12, site.order = TRUE, type = "wide",
##D time.format = "%m/%d/%Y %H%M")
##D
##D head(sfb)
##D tail(sfb)
##D
##D # If time of day were not required, then the following would suffice:
##D sfb <- wqData(sfbay, c(1,3,4), 5:12, site.order = TRUE, type = "wide",
##D time.format = "%m/%d/%Y")
## End(Not run)
|
6a82fe9ec74de17b87718cd2ee653260f21794d7
|
0362e9207d5e9848bda3b76a47b4529c3fcfaec3
|
/r_files/ENG_CORR.r
|
8028c90ecc6fe2d249f408082dbfe55c2abd7e24
|
[] |
no_license
|
chelseaz117/SelfPacedReading_PsychoPy
|
7dccf87d6d2ed3100fc99d08e1b69b1baa169e6d
|
5072a6be34a2f3ff1ae75719f5010fdd6788ebdf
|
refs/heads/master
| 2022-05-12T10:41:41.613665
| 2017-03-07T03:19:31
| 2017-03-07T03:19:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,436
|
r
|
ENG_CORR.r
|
# Mixed-effects analysis of log self-paced reading times for English
# sentences with two relative clauses. RC1/RC2 code the extraction type
# ("S" vs "O") of the first and second relative clause; log_R34, log_R78
# and log_R3478 appear to be log reading times for the RC1 region, the RC2
# region, and both combined (inferred from the "#RC1+RC2, i.e. R3478"
# note below -- confirm against the data dictionary).
library(lme4)
# NOTE(review): hard-coded absolute path to the original author's machine;
# update before re-running elsewhere.
wholeENG_CORR = read.csv("/Users/chongzhang/Desktop/ENG_CORR.csv")
summary(wholeENG_CORR)
# Region R34: full factorial fixed effects with random intercepts for
# participants and items.
lR34 = lmer (log_R34 ~ RC1 * RC2 + (1|Participant)+(1|Item), wholeENG_CORR)
summary(lR34)
# Region R78 reading times split into the four RC1 x RC2 conditions.
SS_R78_rt = subset(wholeENG_CORR, RC1 == "S" & RC2 == "S")$log_R78
SO_R78_rt = subset(wholeENG_CORR, RC1 == "S" & RC2 == "O")$log_R78
OS_R78_rt = subset(wholeENG_CORR, RC1 == "O" & RC2 == "S")$log_R78
OO_R78_rt = subset(wholeENG_CORR, RC1 == "O" & RC2 == "O")$log_R78
# Pairwise comparisons of RC2 type, holding RC1 fixed.
t.test(SS_R78_rt, SO_R78_rt) #not sig.
t.test(OS_R78_rt, OO_R78_rt) #not sig.
#compare SS+OS and OO+SO in RC2:
lR78 = lmer (log_R78 ~ RC1 * RC2 + (1|Participant)+(1|Item), wholeENG_CORR)
summary(lR78)
# Same model with the R34 times as a covariate, including random slopes
# for log_R34 by participant and by item.
lR78_onRC1 = lmer (log_R78 ~ log_R34 * RC1 * RC2 + (1+log_R34|Participant)+(1+log_R34|Item), wholeENG_CORR)
summary(lR78_onRC1)
# Collapse over RC1: compare R78 times by RC2 extraction type only.
RC2_S = subset(wholeENG_CORR, RC2 == "S")$log_R78
RC2_O = subset(wholeENG_CORR, RC2 == "O")$log_R78
t.test(RC2_S, RC2_O) #not sig.
#RC1+RC2, i.e. R3478
lR3478 = lmer (log_R3478 ~ RC1 * RC2 + (1|Participant)+(1|Item), wholeENG_CORR)
summary(lR3478)
# Combined-region times by condition, with pairwise t-tests.
RCs_SS = subset(wholeENG_CORR, RC1 == "S" & RC2 == "S")$log_R3478
RCs_SO = subset(wholeENG_CORR, RC1 == "S" & RC2 == "O")$log_R3478
RCs_OS = subset(wholeENG_CORR, RC1 == "O" & RC2 == "S")$log_R3478
RCs_OO = subset(wholeENG_CORR, RC1 == "O" & RC2 == "O")$log_R3478
t.test(RCs_SS, RCs_SO) #not sig.
t.test(RCs_OS, RCs_OO) #not sig.
t.test(RCs_SS, RCs_OO) #sig t = -2.5894, p-value = 0.009794
|
a38df708cedc20f9fd4602a9124a139cc2ddca5e
|
6fb04083c9d4ee38349fc04f499a4bf83f6b32c9
|
/tests/benchmarking_functions.R
|
704ff3c2284987fc4d46babf607c9068a35ec0e6
|
[] |
no_license
|
phani-srikar/AdapteR
|
39c6995853198f01d17a85ac60f319de47637f89
|
81c481df487f3cbb3d5d8b3787441ba1f8a96580
|
refs/heads/master
| 2020-08-09T10:33:28.096123
| 2017-09-07T09:39:25
| 2017-09-07T09:39:25
| 214,069,176
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 16,316
|
r
|
benchmarking_functions.R
|
#benchmarking for solve.
FL_benchmarking_generic(specs = list(list(n =5,isSquare =TRUE)),classes = c("FLMatrix"),operator = "solve")
#benchmarking for ginv
FL_benchmarking_generic(specs = list(list(n=5,isSquare = FALSE)),classes = c("FLMatrix"),operator = "ginv")
#benchmarking for dim
# Benchmark dim() over a grid of sizes (n, m) and AdapteR classes.
# Fixes two defects in the original call:
#   * plyr::alply()'s second argument is .margins; the anonymous function
#     was passed where the margin belongs. Margin 1 applies it per row.
#   * a leftover browser() debugging call halted execution on every row.
alply(expand.grid(list(n = c(5, 50), m = c(6, 60), la = c("FLMatrix", "FLTable"))), 1, function(des) {
  # Build the constructor spec appropriate for the class under test:
  # FLTable takes rows/cols, FLMatrix takes a square size n.
  prepargs <- function(D) {
    if (D %in% "FLTable")
      list(rows = des$n, cols = des$m)
    else
      list(n = des$n, isSquare = TRUE)
  }
  specslist <- prepargs(des$la)
  FL_benchmarking_generic(specs = list(specslist),
                          classes = c(paste0(des$la)),
                          operator = "dim")
})
#benchmarking for cast functions
FL_benchmarking_generic(specs = list(list(n=5,isRowVec=TRUE)),classes = c("FLVector"),operator = "as.FLVector")
FL_benchmarking_generic(specs = list(list(n=5,isSquare=TRUE)),classes = c("FLMatrix"),operator = "as.FLMatrix")
#benchmarking for cholesky decomposition
FL_benchmarking_generic(specs = list(list(n=5,isSquare=TRUE)),classes = c("FLMatrix"),operator = "chol")
##benchmarking for LU decomposition
FL_benchmarking_generic(specs = list(list(n=5,isSquare=TRUE)),classes = c("FLMatrix"),operator = "lu")
#benchmarking for length
FL_benchmarking_generic(specs = list(list(n=5,isSquare=TRUE)),classes = c("FLMatrix"),operator = "length")
FL_benchmarking_generic(specs = list(list(rows=5,cols =4)),classes = c("FLTable"),operator = "length")
FL_benchmarking_generic(specs = list(list(n=5,isRowVec=TRUE)),classes = c("FLVector"),operator = "length")
#benchmarking for transpose.
FL_benchmarking_generic(specs = list(list(n=5,isSquare=TRUE)),classes = c("FLMatrix"),operator = "tr")
#benchmarking for diagnol
FL_benchmarking_generic(specs = list(list(n=5,isSquare=TRUE)),classes = c("FLMatrix"),operator = "diag")
FL_benchmarking_generic(specs = list(list(n=5,isRowVec=FALSE)),classes = c("FLVector"),operator = "diag")
#benchmarking for subtraction operator.
FL_benchmarking_generic(specs = list(list(n=5,isSquare=TRUE),list(n=5,isSquare = TRUE)),classes = c("FLMatrix","FLMatrix"),operator = "-")
FL_benchmarking_generic(specs = list(list(n=5,isSquare=TRUE),list(n=5,isSquare = TRUE)),classes = c("FLMatrix","matrix"),operator = "-")
FL_benchmarking_generic(specs = list(list(n=5,isRowVec=FALSE),list(n=5,isSquare = TRUE)),classes = c("FLVector","FLMatrix"),operator = "-")
FL_benchmarking_generic(specs = list(list(n=5,isSquare=TRUE),list(n=5,isRowVec = TRUE)),classes = c("FLMatrix","numeric"),operator = "-")
FL_benchmarking_generic(specs = list(list(n=5,isSquare=TRUE),list(n=5,isRowVec = TRUE)),classes = c("FLVector","numeric"),operator = "-")
FL_benchmarking_generic(specs = list(list(n=5,isSquare=TRUE),list(n=5,isRowVec = TRUE)),classes = c("FLVector","matrix"),operator = "-")
##benchmarking for division operator.
FL_benchmarking_generic(specs = list(list(n=5,isSquare=TRUE),list(n=5,isSquare = TRUE)),classes = c("FLMatrix","FLMatrix"),operator = "%/%")
FL_benchmarking_generic(specs = list(list(n=5,isRowVec=TRUE),list(n=5,isRowVec = FALSE)),classes = c("FLVector","numeric"),operator = "%/%")
FL_benchmarking_generic(specs = list(list(n=5,isSquare=TRUE),list(n=5,isRowVec = FALSE)),classes = c("FLMatrix","FLVector"),operator = "%/%")
FL_benchmarking_generic(specs = list(list(n=5,isSquare=TRUE),list(n=5,isRowVec = FALSE)),classes = c("FLMatrix","numeric"),operator = "%/%")
FL_benchmarking_generic(specs = list(list(n=5,isSquare=TRUE),list(n=6,isSquare = TRUE)),classes = c("FLMatrix","matrix"),operator = "%/%")
FL_benchmarking_generic(specs = list(list(n=5,isRowVec=TRUE),list(n=5,isSquare = TRUE)),classes = c("FLVector","matrix"),operator = "%/%")
FL_benchmarking_generic(specs = list(list(n=5,isRowVec=TRUE),list(n=6,isRowVec = TRUE)),classes = c("FLVector","FLVector"),operator = "%/%")
FL_benchmarking_generic(specs = list(list(n=5,isSquare=TRUE),list(n=5,isSquare = TRUE),list(n=5,isSquare = TRUE)),classes = c("FLMatrix","matrix","FLMatrix"),operator = "%/%")
#benchmarking for crossproduct.
FL_benchmarking_generic(specs = list(list(n=5,isSquare=TRUE),list(n=5,isSquare = TRUE)),classes = c("FLMatrix","FLMatrix"),operator = "%*%")
FL_benchmarking_generic(specs = list(list(n=5,isRowVec=TRUE),list(n=5,isRowVec = FALSE)),classes = c("FLVector","numeric"),operator = "%*%")
FL_benchmarking_generic(specs = list(list(n=5,isSquare=TRUE),list(n=5,isRowVec = FALSE)),classes = c("FLMatrix","FLVector"),operator = "%*%")
FL_benchmarking_generic(specs = list(list(n=5,isSquare=TRUE),list(n=5,isRowVec = FALSE)),classes = c("FLMatrix","numeric"),operator = "%*%")
FL_benchmarking_generic(specs = list(list(n=5,isSquare=TRUE),list(n=5,isSquare = TRUE)),classes = c("FLMatrix","matrix"),operator = "%*%")
FL_benchmarking_generic(specs = list(list(n=5,isRowVec=TRUE),list(n=5,isSquare = TRUE)),classes = c("FLVector","matrix"),operator = "%*%")
FL_benchmarking_generic(specs = list(list(n=5,isRowVec=TRUE),list(n=5,isRowVec = TRUE)),classes = c("FLVector","FLVector"),operator = "%*%")
#benchmarking for addition operator.
FL_benchmarking_generic(specs = list(list(n=5,isSquare=FALSE),list(n=5,isSquare = FALSE)),classes = c("FLMatrix","FLMatrix"),operator = "+")
FL_benchmarking_generic(specs = list(list(n=5,isRowVec=TRUE),list(n=5,isRowVec = FALSE)),classes = c("FLVector","numeric"),operator = "+")
FL_benchmarking_generic(specs = list(list(n=5,isSquare=TRUE),list(n=5,isRowVec = FALSE)),classes = c("FLMatrix","FLVector"),operator = "+")
FL_benchmarking_generic(specs = list(list(n=5,isSquare=TRUE),list(n=5,isRowVec = FALSE)),classes = c("FLMatrix","numeric"),operator = "+")
FL_benchmarking_generic(specs = list(list(n=5,isSquare=TRUE),list(n=5,isSquare = TRUE)),classes = c("FLMatrix","matrix"),operator = "+")
FL_benchmarking_generic(specs = list(list(n=5,isRowVec=TRUE),list(n=5,isSquare = TRUE)),classes = c("FLVector","matrix"),operator = "+")
FL_benchmarking_generic(specs = list(list(n=5,isRowVec=TRUE),list(n=6,isRowVec = TRUE)),classes = c("FLVector","FLVector"),operator = "+")
#benchmarking for / operator.
FL_benchmarking_generic(specs = list(list(n=5,isSquare=FALSE),list(n=5,isSquare = FALSE)),classes = c("FLMatrix","FLMatrix"),operator = "/")
FL_benchmarking_generic(specs = list(list(n=5,isRowVec=TRUE),list(n=5,isRowVec = FALSE)),classes = c("FLVector","numeric"),operator = "/")
FL_benchmarking_generic(specs = list(list(n=5,isSquare=TRUE),list(n=5,isRowVec = FALSE)),classes = c("FLMatrix","FLVector"),operator = "/")
FL_benchmarking_generic(specs = list(list(n=5,isSquare=TRUE),list(n=5,isRowVec = FALSE)),classes = c("FLMatrix","numeric"),operator = "/")
FL_benchmarking_generic(specs = list(list(n=5,isSquare=TRUE),list(n=5,isSquare = TRUE)),classes = c("FLMatrix","matrix"),operator = "/")
FL_benchmarking_generic(specs = list(list(n=5,isRowVec=TRUE),list(n=5,isSquare = TRUE)),classes = c("FLVector","matrix"),operator = "/")
FL_benchmarking_generic(specs = list(list(n=5,isRowVec=TRUE),list(n=6,isRowVec = TRUE)),classes = c("FLVector","FLVector"),operator = "/")
#benchmarking for multiplication operator.
FL_benchmarking_generic(specs = list(list(n=5,isSquare=FALSE),list(n=5,isSquare = FALSE)),classes = c("FLMatrix","FLMatrix"),operator = "*")
FL_benchmarking_generic(specs = list(list(n=5,isRowVec=TRUE),list(n=5,isRowVec = FALSE)),classes = c("FLVector","numeric"),operator = "*")
FL_benchmarking_generic(specs = list(list(n=5,isSquare=TRUE),list(n=5,isRowVec = FALSE)),classes = c("FLMatrix","FLVector"),operator = "*")
FL_benchmarking_generic(specs = list(list(n=5,isSquare=TRUE),list(n=5,isRowVec = FALSE)),classes = c("FLMatrix","numeric"),operator = "*")
FL_benchmarking_generic(specs = list(list(n=5,isSquare=TRUE),list(n=5,isSquare = TRUE)),classes = c("FLMatrix","matrix"),operator = "*")
FL_benchmarking_generic(specs = list(list(n=5,isRowVec=TRUE),list(n=5,isSquare = TRUE)),classes = c("FLVector","matrix"),operator = "*")
FL_benchmarking_generic(specs = list(list(n=5,isRowVec=TRUE),list(n=6,isRowVec = TRUE)),classes = c("FLVector","FLVector"),operator = "*")
FL_benchmarking_generic(specs = list(list(n=5,isSquare=FALSE),list(n=5,isSquare = FALSE)),classes = c("FLMatrix","FLMatrix"),operator = "%%")
FL_benchmarking_generic(specs = list(list(n=5,isRowVec=TRUE),list(n=5,isRowVec = FALSE)),classes = c("FLVector","numeric"),operator = "%%")
FL_benchmarking_generic(specs = list(list(n=5,isSquare=TRUE),list(n=5,isRowVec = FALSE)),classes = c("FLMatrix","FLVector"),operator = "%%")
FL_benchmarking_generic(specs = list(list(n=5,isSquare=TRUE),list(n=5,isRowVec = FALSE)),classes = c("FLMatrix","numeric"),operator = "%%")
FL_benchmarking_generic(specs = list(list(n=5,isSquare=TRUE),list(n=5,isSquare = TRUE)),classes = c("FLMatrix","matrix"),operator = "%%")
FL_benchmarking_generic(specs = list(list(n=5,isRowVec=TRUE),list(n=5,isSquare = TRUE)),classes = c("FLVector","matrix"),operator = "%%")
FL_benchmarking_generic(specs = list(list(n=5,isRowVec=TRUE),list(n=6,isRowVec = TRUE)),classes = c("FLVector","FLVector"),operator = "%%")
FL_benchmarking_generic(specs = list(list(n=5,isSquare=FALSE),list(n=5,isSquare = FALSE)),classes = c("FLMatrix","FLMatrix"),operator = "==")
FL_benchmarking_generic(specs = list(list(n=5,isRowVec=TRUE),list(n=5,isRowVec = FALSE)),classes = c("FLVector","numeric"),operator = "==")
FL_benchmarking_generic(specs = list(list(n=5,isSquare=TRUE),list(n=5,isSquare = TRUE)),classes = c("FLMatrix","matrix"),operator = "==")
FL_benchmarking_generic(specs = list(list(n=5,isRowVec=TRUE),list(n=6,isRowVec = TRUE)),classes = c("FLVector","FLVector"),operator = "==")
FL_benchmarking_generic(specs = list(list(n=5,isRowVec=FALSE),list(n=9,isRowVec = FALSE)),classes = c("FLVector","FLVector"),operator = "-")
FL_benchmarking_generic(specs = list(list(n=5,isRowVec=FALSE),list(n=9,isRowVec = FALSE)),classes = c("FLVector","numeric"),operator = "-")
FL_benchmarking_generic(specs = list(list(n=5,isSquare=FALSE)),classes = c("FLMatrix"),operator = "t")
FL_benchmarking_generic(specs = list(list(n=5,isSquare=TRUE)),classes = c("FLMatrix"),operator = "rowMeans")
FL_benchmarking_generic(specs = list(list(n=8,isSquare=TRUE)),classes = c("FLMatrix"),operator = "rowSums")
FL_benchmarking_generic(specs = list(list(n=8,isSquare=FALSE)),classes = c("FLMatrix"),operator = "colMeans")
FL_benchmarking_generic(specs = list(list(n=8,isSquare=FALSE)),classes = c("FLMatrix"),operator = "colSums")
FL_benchmarking_generic(specs = list(list(n=8,isRowVec=FALSE)),classes = c("FLVector"),operator = "[")
#Test for FL Table is showing some sql syntax error in FL wide to deep
#initFgeneric will generate same FL Table and I think correalation would be a constant
FL_benchmarking_generic(specs = list(list(n=5,isRowVec=FALSE),list(n=5,isSquare = TRUE)),classes = c("FLVector","FLMatrix"),operator = "cor")
FL_benchmarking_generic(specs = list(list(n=5,isSquare=TRUE),list(n=5,isSquare = TRUE)),classes = c("FLMatrix","FLMatrix"),operator = "cor")
FL_benchmarking_generic(specs = list(list(n=5,isRowVec=FALSE),list(rows=5,cols= 5)),classes = c("FLVector","FLTable"),operator = "cor")
FL_benchmarking_generic(specs = list(list(rows=5,cols=5),list(rows=5,cols= 5)),classes = c("FLTable","FLTable"),operator = "cor")
FL_benchmarking_generic(specs = list(list(rows=5,cols=6),list(n=5,isSquare=TRUE)),classes = c("FLTable","FLMatrix"),operator = "cor")
FL_benchmarking_generic(specs = list(list(n=6,isRowVec = TRUE),list(n=5,isRowVec=TRUE)),classes = c("FLVector","FLVector"),operator = "cor")
FL_benchmarking_generic(specs = list(list(n=5,isSquare=FALSE),list(n=5,isSquare = TRUE)),classes = c("FLMatrix","FLMatrix"),operator = "crossprod")
FL_benchmarking_generic(specs = list(list(n=5,isSquare=FALSE),list(n=6,isRowVec = TRUE)),classes = c("FLMatrix","FLVector"),operator = "crossprod")
FL_benchmarking_generic(specs = list(list(n=5,isSquare=FALSE),list(n=6,isRowVec = TRUE)),classes = c("FLMatrix","numeric"),operator = "crossprod")
FL_benchmarking_generic(specs = list(list(n=5,isSquare=FALSE),list(n=5,isSquare = TRUE)),classes = c("FLMatrix","matrix"),operator = "crossprod")
FL_benchmarking_generic(specs = list(list(n=5,isSquare=FALSE),list(n=5,isSquare = TRUE)),classes = c("FLMatrix","FLMatrix"),operator = "crossprod")
FL_benchmarking_generic(specs = list(list(n=6,isRowVec=FALSE),list(n=5,isSquare = TRUE)),classes = c("FLVector","FLMatrix"),operator = "crossprod")
FL_benchmarking_generic(specs = list(list(n=6,isRowVec=FALSE),list(n=6,isRowVec = TRUE)),classes = c("FLVector","FLVector"),operator = "crossprod")
FL_benchmarking_generic(specs = list(list(n=6,isRowVec=FALSE),list(n=5,isSquare = TRUE)),classes = c("FLVector","matrix"),operator = "crossprod")
FL_benchmarking_generic(specs = list(list(n=6,isRowVec=FALSE),list(n=6,isRowVec = TRUE)),classes = c("FLVector","numeric"),operator = "crossprod")
FL_benchmarking_generic(specs = list(list(n=5,isSquare=FALSE),list(n=6,isRowVec = FALSE)),classes = c("matrix","FLVector"),operator = "crossprod")
FL_benchmarking_generic(specs = list(list(n=5,isSquare=FALSE),list(n=6,isRowVec = FALSE)),classes = c("matrix","numeric"),operator = "crossprod")
FL_benchmarking_generic(specs = list(list(n=5,isSquare=FALSE),list(n=5,isSquare = TRUE)),classes = c("matrix","FLMatrix"),operator = "crossprod")
FL_benchmarking_generic(specs = list(list(n=5,isSquare=FALSE),list(n=5,isSquare = TRUE)),classes = c("matrix","matrix"),operator = "crossprod")
FL_benchmarking_generic(specs = list(list(n=5,isSquare=FALSE),list(n=5,isSquare = TRUE)),classes = c("FLMatrix","FLMatrix"),operator = "tcrossprod")
FL_benchmarking_generic(specs = list(list(n=5,isSquare=FALSE),list(n=6,isRowVec = TRUE)),classes = c("FLMatrix","FLVector"),operator = "tcrossprod")
FL_benchmarking_generic(specs = list(list(n=5,isSquare=FALSE),list(n=6,isRowVec = TRUE)),classes = c("FLMatrix","numeric"),operator = "tcrossprod")
FL_benchmarking_generic(specs = list(list(n=5,isSquare=FALSE),list(n=5,isSquare = TRUE)),classes = c("FLMatrix","matrix"),operator = "tcrossprod")
FL_benchmarking_generic(specs = list(list(n=5,isSquare=FALSE),list(n=5,isSquare = TRUE)),classes = c("FLMatrix","FLMatrix"),operator = "tcrossprod")
FL_benchmarking_generic(specs = list(list(n=6,isRowVec=FALSE),list(n=5,isSquare = TRUE)),classes = c("FLVector","FLMatrix"),operator = "tcrossprod")
FL_benchmarking_generic(specs = list(list(n=6,isRowVec=FALSE),list(n=6,isRowVec = TRUE)),classes = c("FLVector","FLVector"),operator = "tcrossprod")
FL_benchmarking_generic(specs = list(list(n=6,isRowVec=FALSE),list(n=5,isSquare = TRUE)),classes = c("FLVector","matrix"),operator = "tcrossprod")
FL_benchmarking_generic(specs = list(list(n=6,isRowVec=FALSE),list(n=6,isRowVec = TRUE)),classes = c("FLVector","numeric"),operator = "tcrossprod")
FL_benchmarking_generic(specs = list(list(n=5,isSquare=FALSE),list(n=6,isRowVec = FALSE)),classes = c("matrix","FLVector"),operator = "tcrossprod")
FL_benchmarking_generic(specs = list(list(n=5,isSquare=FALSE),list(n=6,isRowVec = FALSE)),classes = c("matrix","numeric"),operator = "tcrossprod")
FL_benchmarking_generic(specs = list(list(n=5,isSquare=FALSE),list(n=5,isSquare = TRUE)),classes = c("matrix","FLMatrix"),operator = "tcrossprod")
FL_benchmarking_generic(specs = list(list(n=5,isSquare=FALSE),list(n=5,isSquare = TRUE)),classes = c("matrix","matrix"),operator = "tcrossprod")
#objects are not created.Sql problem.
FL_benchmarking_generic(specs = list(list(n=6,isRowVec = FALSE,type ="character"),list(n=5,isRowVec=FALSE,type = "character")),classes = c("FLVector","FLVector"),operator = "hamming.distance")
|
b0831aaf56c046df7bc4155169b73f7c55857d96
|
84597ca9950c4205e3c172b8c05de45fb80a5676
|
/R/character-utils.R
|
d1c8e42613778f30b70469272856174839fb33f5
|
[] |
no_license
|
Bioconductor/S4Vectors
|
6590230a62f7bbcd48c024f5e4ac952ad21df8c8
|
5cb9c73f6ece6f3a2f1b29b8eb364fc1610657d0
|
refs/heads/devel
| 2023-08-08T21:26:55.079510
| 2023-05-03T04:40:11
| 2023-05-03T04:40:11
| 101,237,056
| 17
| 23
| null | 2023-07-25T13:44:44
| 2017-08-24T00:37:11
|
R
|
UTF-8
|
R
| false
| false
| 2,007
|
r
|
character-utils.R
|
### =========================================================================
### Some utility functions to operate on strings
### -------------------------------------------------------------------------
### NOT exported
### Upper-case the first character of every element of the character
### vector 'x'. Elements whose first character has no upper-case form
### (e.g. "" or digits) come back unchanged.
capitalize <- function(x)
{
    first_char <- substring(x, 1L, 1L)
    substring(x, 1L, 1L) <- toupper(first_char)
    x
}
### NOT exported
### Shorten each string of 'x' that exceeds 'width' by keeping its head
### and tail joined with "...". Every returned string is guaranteed to
### have at most 'width' characters ('width' is floored at 7 so there is
### always room for the three dots plus some context).
sketchStr <- function(x, width=23)
{
    if (!is.character(x))
        stop("'x' must be a character vector")
    if (!isSingleNumber(width))
        stop("'width' must be a single integer")
    width <- max(as.integer(width), 7L)
    widths <- nchar(x, type="width")
    too_long <- widths > width
    if (any(too_long)) {
        long_strs <- x[too_long]
        long_widths <- widths[too_long]
        head_len <- (width - 2L) %/% 2L   # chars kept from the front
        tail_len <- (width - 3L) %/% 2L   # chars kept from the back
        x[too_long] <- paste0(
            substr(long_strs, 1L, head_len),
            "...",
            substr(long_strs, long_widths - tail_len + 1L, long_widths)
        )
    }
    x
}
### Generic 'unstrsplit': presumably the inverse of strsplit() -- each
### element of 'x' becomes a single string, with 'sep' inserted between
### parts. Confirm exact semantics against the native routine below.
setGeneric("unstrsplit", signature="x",
    function(x, sep="") standardGeneric("unstrsplit")
)
### List method: delegates to the "unstrsplit_list" routine registered by
### the S4Vectors native library.
setMethod("unstrsplit", "list",
    function(x, sep="") .Call2("unstrsplit_list", x, sep, PACKAGE="S4Vectors")
)
### Character method: each element is already a single string, so 'x' is
### returned as-is ('sep' is ignored).
setMethod("unstrsplit", "character",
    function(x, sep="") x
)
### Safe alternative to 'strsplit(x, NULL, fixed=TRUE)[[1L]]'.
### Validates that 'x' is a single string, then delegates to the native
### "safe_strexplode" routine. (What makes it "safe" relative to
### strsplit() is not visible here -- see the C source.)
safeExplode <- function(x)
{
    if (!isSingleString(x))
        stop("'x' must be a single string")
    .Call2("safe_strexplode", x, PACKAGE="S4Vectors")
}
### svn.time() returns the time in Subversion format, e.g.:
### "2007-12-07 10:03:15 -0800 (Fri, 07 Dec 2007)"
### The -0800 part will be adjusted if daylight saving time is in effect.
### TODO: Find a better home for this function.
svn.time <- function() .Call2("svn_time", PACKAGE="S4Vectors")
|
89921d308679ad453f7921692193060a2648c075
|
8a476df8f065da042e23ea00a9d1cd86ae63ad4e
|
/man/merluzaChile.Rd
|
9f3b66199a8f68cb485de2d28b825d970b909eae
|
[] |
no_license
|
cran/skewtools
|
b40ab0ce0f1a3a1dd210105c0a8659ccebcd6881
|
91c35886083fd03f4af94ff3ad5d86b483a2bb88
|
refs/heads/master
| 2021-01-18T14:24:05.220898
| 2012-07-21T00:00:00
| 2012-07-21T00:00:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 803
|
rd
|
merluzaChile.Rd
|
\name{merluzaChile}
\alias{merluzaChile}
\docType{data}
\title{
Merluccius gayi gayi Data
}
\description{
Length-at-age data for common hake (Merluccius gayi gayi): males fished off the Chilean coast during 2005.
}
\usage{data(merluzaChile)}
\format{
A data frame containing 1638 observations on the following 2 variables.
\describe{
\item{\code{edad}}{a numeric vector of ages (years)}
\item{\code{long}}{a numeric vector of lengths (cm)}
}
}
\source{
Instituto de Fomento Pesquero, Valparaiso, Chile. URL \url{http://www.ifop.cl}
}
\examples{
data(merluzaChile)
str(merluzaChile)
dim(merluzaChile)
x <- merluzaChile$edad
y <- merluzaChile$long
plot(x, y, main="Age-Length", ylab="Length (cm)", xlab="Age (years)")
}
\keyword{datasets}
\keyword{skewtools}
|
3f1c865f2f97a08d811e6e0274862b8e3afa353a
|
f70af54a59c09e64d8697b53fac6213f92108e58
|
/man/emv_gamma.Rd
|
76421f902e769ff5989dcc4eaeb3d1d78a0c3405
|
[] |
no_license
|
cran/noisySBM
|
ce4c1cc38623108f6afa9621b3933364b151368a
|
2ae46a0440befeeddbd909a4251bcfa02da03cc3
|
refs/heads/master
| 2023-02-01T19:05:05.020325
| 2020-12-16T09:40:06
| 2020-12-16T09:40:06
| 323,347,670
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 661
|
rd
|
emv_gamma.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/VEMalgorithm.R
\name{emv_gamma}
\alias{emv_gamma}
\title{compute the MLE in the Gamma model using the Newton-Raphson method}
\usage{
emv_gamma(L, M, param.old, epsilon = 0.001, nb.iter.max = 10)
}
\arguments{
\item{L}{weighted mean of log(data)}
\item{M}{weighted mean of the data}
\item{param.old}{parameters of the Gamma distribution}
\item{epsilon}{threshold for the stopping criterion}
\item{nb.iter.max}{maximum number of iterations}
}
\value{
updated parameters of the Gamma distribution
}
\description{
compute the MLE in the Gamma model using the Newton-Raphson method
}
|
6b7d764d4f48ab4ca680bcd6bb1a35b8c5a0e626
|
75747bff0565bfc31b3de6bdeb226157bb364241
|
/SHEAF_climate_access.R
|
1c24757cfd6d57dca0b0755b48b3e60051cc4932
|
[] |
no_license
|
soilhealthfeedback/SHEAF_EDA
|
53b0eed2c549d5a51fdc19d10370973ff5511948
|
0b8a6299619b1e7bd66c1e27da99bc8e16b4f8ff
|
refs/heads/master
| 2020-04-09T09:34:20.613120
| 2019-11-18T21:35:05
| 2019-11-18T21:35:05
| 160,238,490
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,116
|
r
|
SHEAF_climate_access.R
|
#------------------------------------------------------------------------#
# TITLE: netcdf_access.R
#
# AUTHOR: Erich Seamon
#
# INSTITUITON: College of Natural Resources
# University of Idaho
#
# DATE: Feb 1, 2019
#
# STAGE: netcdf access
#
# COMMENTS: This script opens and displays netcdf data.
#
#--Setting the working directory an d clearing the workspace-----------#
#netcdf_access(climatevar_short, climatevar, year )
#netcdf_access
# Fetch one year of a gridded climate variable from the Northwest Knowledge
# THREDDS server (via OPeNDAP), average it over the retrieved time steps,
# aggregate the mean to CONUS county polygons, and return a leaflet
# choropleth of the result.
#
# climatevar_short: short code used in the file/directory names (e.g. "pdsi")
# climatevar:       full netCDF variable name
#                   (e.g. "palmer_drought_severity_index")
# year:             data year to request
#
# Returns the leaflet map widget produced by the final expression.
netcdf_access <- function(climatevar_short, climatevar, year) {
  #library("ncdf")
  library("zoo")
  library("raster")
  library("sp")
  library("rgeos")
  library("rgdal")
  library("proj4")
  library("RNetCDF")
  library("ncdf4")
  library("RColorBrewer")
  #library("rasterVis")
  library("latticeExtra")
  library("maptools")
  library("parallel")
  library("Evapotranspiration")
  library("plyr")
  library("data.table")
  library("sirad")
  library("stringr")
  # BUG FIX: leaflet() / addProviderTiles() / addLegend() are used below but
  # the package was never attached.
  library("leaflet")

  # NOTE(review): changing the working directory inside a function is a side
  # effect on the caller's session; kept for compatibility with existing use.
  setwd("/nethome/erichs/counties/")
  counties <- readShapePoly('UScounties.shp',
                            proj4string=CRS
                            ("+proj=longlat +datum=WGS84 +ellps=WGS84 +towgs84=0,0,0"))
  projection = CRS("+proj=longlat +datum=WGS84 +ellps=WGS84 +towgs84=0,0,0")

  # subset to the conterminous US
  counties <- subset(counties, STATE_NAME != "Alaska")
  counties <- subset(counties, STATE_NAME != "Hawaii")

  #--loop list for county by fip
  countyfiploop <- counties@data$FIPS
  #--data frame of county fip list
  countyfiplist <- data.frame(counties@data$FIPS)
  #--data frame of county names
  countynames <- data.frame(counties@data$NAME)
  statenames <- data.frame(counties@data$STATE_NAME)
  #combo of county names and fip for this list
  countylist <- cbind(statenames, countynames, countyfiplist)
  colnames(countylist) <- c("STATE_NAME", "NAME", "FIPS")
  #--number of rows in county list
  countylistrows <- nrow(countylist)

  # Open the remote dataset.
  # BUG FIX: the directory component was hard-coded to "pdsi"; build it from
  # climatevar_short so other variables resolve to their own directory.
  nc <- nc_open(paste("http://thredds.northwestknowledge.net:8080/thredds/dodsC/MET/",
                      climatevar_short, "/", climatevar_short, "_", year,
                      ".nc?lon[0:1:1385],lat[0:1:584],",
                      climatevar, "[0:1:0][0:1:0][0:1:0],day[0:1:0]", sep=""))

  # extract variable name, size and dimension
  v <- nc$var[[1]]
  size <- v$varsize
  dims <- v$ndims
  nt <- size[dims]            # length of time dimension
  lat <- nc$dim$lat$vals      # latitude positions
  lon <- nc$dim$lon$vals      # longitude positions

  # Read the variable one time step at a time and build a list of rasters.
  r <- list()
  for (i in seq_len(nt)) {
    start <- rep(1, dims)     # begin with start=(1,1,...,1)
    start[dims] <- i          # change to start=(1,1,...,i) to read timestep i
    count <- size             # begin with count=(nx,ny,...,nt)
    count[dims] <- 1          # read a single time step
    # BUG FIX: varid was hard-coded to 'palmer_drought_severity_index';
    # use the requested variable instead.
    dt <- ncvar_get(nc, varid = climatevar, start = start, count = count)
    # convert to raster and orient to CONUS
    r[[i]] <- raster(dt)
    r[[i]] <- rotate(r[[i]])
    # NOTE(review): extent is given as (lat, lat, lon, lon); the transpose/
    # flip below compensates -- confirm against a known map before reuse.
    extent(r[[i]]) <- c(25.0667, 49.4000, -124.7667, -67.0583)
    crs(r[[i]]) <- CRS("+proj=longlat +datum=WGS84 +ellps=WGS84 +towgs84=0,0,0")
  }
  r2 <- brick(r)
  r3 <- t(flip(r2, direction='x' ))
  r4 <- mean(r3)              # temporal mean across the requested steps

  # Mean of the raster over each county polygon.
  newmatrix <- matrix(NA, nrow=countylistrows, ncol=2)
  jj <- 0
  for (l in countyfiploop) {
    jj <- jj + 1
    subset_county <- counties[counties@data$FIPS == l,]
    e <- extract(r4, subset_county)
    newmatrix[jj,1] <- mean(e[[1]])
    newmatrix[jj,2] <- l
  }

  # Tidy the county means; FIPS codes are zero-padded back to 5 characters
  # to match the shapefile before merging.
  nm2 <- data.frame(pdsi = as.numeric(as.character(newmatrix[,1])),
                    FIPS = as.numeric(as.character(newmatrix[,2])))
  nm2$FIPS <- str_pad(nm2$FIPS, 5, pad = "0")
  # NOTE(review): the value column is named "pdsi" regardless of the
  # requested variable; the mapping code below relies on that name.
  pdsi <- merge(counties, nm2, by = "FIPS")

  #--map it
  pal <- colorNumeric(brewer.pal(9, "RdBu"), na.color = "#ffffff",
                      domain = pdsi$pdsi)
  exte <- as.vector(extent(counties))
  label <- paste(sep = "<br/>", pdsi$STATE_NAME, pdsi$pdsi)
  markers <- data.frame(label)
  labs <- as.list(pdsi$pdsi)
  leaflet(data = pdsi) %>% addProviderTiles("Stamen.TonerLite") %>%
    fitBounds(exte[1], exte[3], exte[2], exte[4]) %>%
    addPolygons(color = pal(pdsi$pdsi), popup = markers$label, weight = 1) %>%
    # BUG FIX: the legend title referenced an undefined `years`; the
    # parameter is `year`.
    addLegend(pal = pal, values = pdsi$pdsi, bins = 3, opacity = 0.5,
              title = paste(year, " ", climatevar_short, sep=""),
              position = "bottomright")
}
|
f1b34e77d5b91f69b8520d8b593d5a994b36fc1d
|
8eac35d80030bcc4f9154aa5636ae581aa4eb256
|
/posterPlots.R
|
0c33f96ab7c60dfbc61c786516a19a7fea895585
|
[] |
no_license
|
jsnoke/altman_HD
|
4f9d2904806a4ad577e52200e881f9edbe1828a0
|
e996ac433c633a37726258f9e2f38da57cb64bc5
|
refs/heads/master
| 2021-01-10T09:26:24.221583
| 2016-04-11T15:55:00
| 2016-04-11T15:55:00
| 50,198,373
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,493
|
r
|
posterPlots.R
|
#####
## risk plots
#####
# Load the saved simulation results.  These .RData files supply the
# resultsMu*/results2Mu*/results3Mu* vectors plus `p` and `epsilon` used
# below.  NOTE(review): absolute home-directory paths -- adjust locally.
load("~/Documents/Altman_HD/R/miNoise4.RData")
load("~/Documents/Altman_HD/R/partNoise4.RData")
load("~/Documents/Altman_HD/R/noErr4.RData")
library(ggplot2)
library(grid)
# One row per (noise method, p, epsilon) cell: 3 methods x 6 p values x
# 10 epsilon values = 180 rows, holding both factors' loading differences.
plotDF = data.frame(matrix(NA, nrow = 3*10*6, ncol = 5))
colnames(plotDF) = c("Factor1", "Factor2", "Noise", "p", "epsilon")
plotDF[, "Factor1"] = c(resultsMu1, results2Mu1, results3Mu1)
plotDF[, "Factor2"] = c(resultsMu2, results2Mu2, results3Mu2)
plotDF[, "Noise"] = rep(c("Single Impute", "Noise Only", "Multiple Imputes"), each = 60)
plotDF[, "p"] = rep(rep(p, each = 10), 3)
plotDF[, "epsilon"] = rep(rep(epsilon, 6), 3)
# First factor: loading error vs epsilon, one panel per dimensionality p;
# all panels share the same y-range for comparability.
p1 = ggplot(data = plotDF[plotDF$p == 25, ], aes(x = epsilon, y = Factor1, color = Noise)) +
geom_line() + ylim(range(plotDF$Factor1)) + ggtitle("p = 25") +
ylab("Mean Absolute Loadings Difference") + theme_bw()
p2 = ggplot(data = plotDF[plotDF$p == 101, ], aes(x = epsilon, y = Factor1, color = Noise)) +
geom_line() + ylim(range(plotDF$Factor1)) + ggtitle("p = 101") +
ylab("Mean Absolute Loadings Difference") + theme_bw()
p3 = ggplot(data = plotDF[plotDF$p == 500, ], aes(x = epsilon, y = Factor1, color = Noise)) +
geom_line() + ylim(range(plotDF$Factor1)) + ggtitle("p = 500") +
ylab("Mean Absolute Loadings Difference") + theme_bw()
p4 = ggplot(data = plotDF[plotDF$p == 1000, ], aes(x = epsilon, y = Factor1, color = Noise)) +
geom_line() + ylim(range(plotDF$Factor1)) + ggtitle("p = 1000") +
ylab("Mean Absolute Loadings Difference") + theme_bw()
# Arrange the four first-factor panels in a 2x2 grid.
grid.newpage()
pushViewport(viewport(layout = grid.layout(2, 2)))
print(p1, vp = viewport(layout.pos.row = 1, layout.pos.col = 1) )
print(p2, vp = viewport(layout.pos.row = 1, layout.pos.col = 2) )
print(p3, vp = viewport(layout.pos.row = 2, layout.pos.col = 1) )
print(p4, vp = viewport(layout.pos.row = 2, layout.pos.col = 2) )
# Second factor: same layout (note: no theme_bw() here, unlike p1-p4).
pp1 = ggplot(data = plotDF[plotDF$p == 25, ], aes(x = epsilon, y = Factor2, color = Noise)) +
geom_line() + ylim(range(plotDF$Factor2)) + ggtitle("p = 25") +
ylab("Mean Absolute Loadings Difference")
pp2 = ggplot(data = plotDF[plotDF$p == 101, ], aes(x = epsilon, y = Factor2, color = Noise)) +
geom_line() + ylim(range(plotDF$Factor2)) + ggtitle("p = 101") +
ylab("Mean Absolute Loadings Difference")
pp3 = ggplot(data = plotDF[plotDF$p == 500, ], aes(x = epsilon, y = Factor2, color = Noise)) +
geom_line() + ylim(range(plotDF$Factor2)) + ggtitle("p = 500") +
ylab("Mean Absolute Loadings Difference")
pp4 = ggplot(data = plotDF[plotDF$p == 1000, ], aes(x = epsilon, y = Factor2, color = Noise)) +
geom_line() + ylim(range(plotDF$Factor2)) + ggtitle("p = 1000") +
ylab("Mean Absolute Loadings Difference")
grid.newpage()
pushViewport(viewport(layout = grid.layout(2, 2)))
print(pp1, vp = viewport(layout.pos.row = 1, layout.pos.col = 1) )
print(pp2, vp = viewport(layout.pos.row = 1, layout.pos.col = 2) )
print(pp3, vp = viewport(layout.pos.row = 2, layout.pos.col = 1) )
print(pp4, vp = viewport(layout.pos.row = 2, layout.pos.col = 2) )
#####
## syn plots
#####
# Load `test`, a matrix-like container of per-replicate loading results
# indexed [replicate, p-index].  NOTE(review): its exact layout is inferred
# from the indexing below -- confirm against the code that wrote noHigh.RData.
load("~/Documents/Altman_HD/R/noHigh.RData")
p = c(5, 10, 25, 50, 75, 90)
# Rows 1..1000 hold method 1, rows 1001..2000 method 2 (the "Sequential"
# and "SVD" synthesis labels used further down); one column per p value.
resultsMu1 = matrix(NA, ncol = 6, nrow = 2*1000)
resultsMu2 = matrix(NA, ncol = 6, nrow = 2*1000)
#resultsSd1 = matrix(NA, ncol = 6, nrow = 2*1000)
#resultsSd2 = matrix(NA, ncol = 6, nrow = 2*1000)
# c = synthesis method (1 or 2), b = p index, a = replicate.
for(c in 1:2){
for(b in 1:6){
#resultsMu1[c, b] = 0
#resultsMu2[c, b] = 0
#resultsSd1[c, b] = 0
#resultsSd2[c, b] = 0
for(a in 1:1000){
hold = test[a, b][[1]]
# Mean absolute difference between columns 1/2 (presumably the original
# loadings -- TODO confirm) and the method-c columns (3-4 or 5-6).
resultsMu1[(a + (c - 1) * 1000), b] = mean(abs(hold[, 1] - hold[, (1 + c * 2)]))
#resultsMu1[c, b] = (mean(abs(hold[, 1] - hold[, (1 + c * 2)])) + resultsMu1[c, b] * (a - 1)) / a
resultsMu2[(a + (c - 1) * 1000), b] = mean(abs(hold[, 2] - hold[, (2 + c * 2)]))
#resultsMu2[c, b] = (mean(abs(hold[, 2] - hold[, (2 + c * 2)])) + resultsMu2[c, b] * (a - 1)) / a
#resultsSd1[c, b] = (sd(abs(hold[, 1] - hold[, (1 + c * 2)])) + resultsSd1[c, b] * (a - 1)) / a
#resultsSd2[c, b] = (sd(abs(hold[, 2] - hold[, (2 + c * 2)])) + resultsSd2[c, b] * (a - 1)) / a
}
}
}
library(ggplot2)
library(grid)
# Long data frame: 2000 replicates (1000 per method) x 6 p values.
plotDF = data.frame(matrix(NA, nrow = 2000*6, ncol = 4))
colnames(plotDF) = c("Factor1", "Factor2", "Synthesis", "p")
plotDF[, "Factor1"] = c(resultsMu1)
plotDF[, "Factor2"] = c(resultsMu2)
plotDF[, "Synthesis"] = rep(rep(c("Sequential Synthesis", "SVD Synthesis"), each = 1000), 6)
plotDF[, "p"] = rep(p, each = 2000)
# Linear trend of loading error vs p, one line per synthesis method.
p1 = ggplot(data = plotDF, aes(x = p, y = Factor1, color = Synthesis)) + ggtitle("Utility - First Factor") +
ylab("Mean Absolute Loadings Difference") + stat_smooth(method = "lm", se = F) + theme_bw()
#p1
p2 = ggplot(data = plotDF, aes(x = p, y = Factor2, color = Synthesis)) + ggtitle("Utility - Second Factor") +
ylab("Mean Absolute Loadings Difference") + stat_smooth(method = "lm", se = F) + theme_bw()
#p2
# Side-by-side panels for the two factors.
grid.newpage()
pushViewport(viewport(layout = grid.layout(1, 2)))
print(p1, vp = viewport(layout.pos.row = 1, layout.pos.col = 1) )
print(p2, vp = viewport(layout.pos.row = 1, layout.pos.col = 2) )
#####
## lowD risk plots
#####
# Load `nonHighRisk`, a list of six matrices -- elements 1-3 feed the
# "Total" (match-count) column, elements 4-6 the "Prob" column, one per
# neighborhood size.  NOTE(review): inferred from the assignments below.
load("~/Documents/Altman_HD/R/nonHighRisk.RData")
library(ggplot2)
library(grid)
p = c(5, 25, 50, 101, 500, 1000)
# 6 p values x 3 neighborhood sizes.
plotDF = data.frame(matrix(NA, nrow = 6*3, ncol = 4))
colnames(plotDF) = c("Total", "Prob", "p", "Neighborhood.Size")
plotDF[, "Total"] = c(colMeans(nonHighRisk[[1]]), colMeans(nonHighRisk[[2]]), colMeans(nonHighRisk[[3]]))
plotDF[, "Prob"] = c(colMeans(nonHighRisk[[4]]), colMeans(nonHighRisk[[5]]), colMeans(nonHighRisk[[6]]))
plotDF[, "p"] = rep(p, 3)
plotDF[, "Neighborhood.Size"] = rep(c("0.01", "0.1", "0.05"), each = 6)
p3 = ggplot(data = plotDF, aes(x = p, y = Total, color = Neighborhood.Size)) +
ggtitle("Risk - Individual Matches") + ylim(range(plotDF$Total)) + geom_line() +
#stat_smooth(method = "lm", se = F) +
ylab("Max Var. Matches for a Released Row") + theme_bw()
#p1
p4 = ggplot(data = plotDF, aes(x = p, y = Prob, color = Neighborhood.Size)) +
ggtitle("Risk - Individual Matches") + ylim(range(plotDF$Prob)) + geom_line() +
#stat_smooth(method = "lm", se = F) +
ylab("Probability of Unique Maximum Match") + theme_bw()
# 2x2 grid.  NOTE(review): p1 and p2 here are the utility panels left over
# from the "syn plots" section above -- this section only defines p3/p4, so
# it must be run after that section.
grid.newpage()
pushViewport(viewport(layout = grid.layout(2, 2)))
print(p1, vp = viewport(layout.pos.row = 1, layout.pos.col = 1) )
print(p2, vp = viewport(layout.pos.row = 1, layout.pos.col = 2) )
print(p3, vp = viewport(layout.pos.row = 2, layout.pos.col = 1) )
print(p4, vp = viewport(layout.pos.row = 2, layout.pos.col = 2) )
#####
## theoretical ru map
#####
# Illustrative (hand-drawn) risk-utility map: ten (risk, utility) points for
# each of three hypothetical SDC methods, smoothed into curves by ggplot.
method_risk <- c(0, 0, 0, 0.1, 0.1, 0.2, 0.2, 0.4, 0.6, 0.9,
                 0, 0.2, 0.4, 0.6, 0.8, 0.8, 0.9, 1, 1, 1,
                 0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9)
method_utility <- c(0, 0.1, 0.2, 0.3, 0.4, 0.6, 0.8, 0.85, 0.9, 0.95,
                    0, 0.2, 0.4, 0.6, 0.8, 0.8, 0.9, 1, 1, 1,
                    0, 0, 0, 0, 0, 0.1, 0.1, 0.1, 0.1, 0.2)
plotDF <- data.frame(Risk = method_risk,
                     Utility = method_utility,
                     Theoretical.SDC.Method = factor(rep(1:3, each = 10)))
ggplot(data = plotDF, aes(y = Utility, x = Risk, color = Theoretical.SDC.Method)) + stat_smooth(se = F) +
  theme_bw() + theme(axis.title = element_text(size=20)) + ylab("Utility") + xlab("Risk")
|
c453c84cf98d9e9c516c549810e8d377de5b3bee
|
3a122c36f2b8e5e39cc49819ee92c4902851d762
|
/plot1.R
|
376ba47dfa2e5147d6663f5ecb0add29bd784604
|
[] |
no_license
|
vijeshm/ExData_Plotting1
|
0350a233ea78895a1fc8eb5abdb7491134a7bad5
|
a88f2fd48e6bb7e3bcef931427819a4464c66afa
|
refs/heads/master
| 2020-12-27T12:02:38.101199
| 2014-09-08T10:53:56
| 2014-09-08T10:53:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,382
|
r
|
plot1.R
|
# Author: mv.vijesh@gmail.com
# Dated: 8th Sept, 2014
# Description: This code plots the histogram of global active power, averaged hourly, for households during the time period 1-2-2007 and 2-2-2007, onto a PNG.
# Ensure that the text file, household_power_consumption, is in the working directory.
#The values are separated by semi colon; NA values are encoded as blanks and question mark
# Load the power data; fields are ';'-separated and missing values are
# encoded either as blanks or as '?'.
power <- read.csv("household_power_consumption.txt", sep = ";", na.strings = c("", "?"))

# Parse the Date column; parse Time too (the time stamps pick up today's
# date, which is irrelevant for this plot).
power$Date <- as.Date(power$Date, format = "%d/%m/%Y")
power$Time <- strptime(power$Time, format = "%H:%M:%S")

# Keep only observations from 1 and 2 February 2007.
day_one <- as.Date("01-02-2007", format = "%d-%m-%Y")
day_two <- as.Date("02-02-2007", format = "%d-%m-%Y")
power <- power[power$Date == day_one | power$Date == day_two, ]

# Draw the histogram straight onto a 480x480 PNG device (the defaults,
# stated explicitly), then close the device.
png(filename = "plot1.png", width = 480, height = 480)
hist(power$Global_active_power, col = "red", main = "Global Active Power",
     xlab = "Global Active Power (kilowatts)")
dev.off()
|
5746a92334dca7fed9835c5d5f545a90311246b0
|
6cf9a94de51479dc65dad3608a4b315ba289a36f
|
/man/get_single_run-HierarchicalPartition-method.rd
|
17d58b6523e57472c8a74539dc571e07111d6970
|
[] |
no_license
|
NagaComBio/cola
|
818c3afdab7e140d549ab9ebf6995a882c967cf5
|
304b3cf771e97ced7f4b20388815b882202cdd84
|
refs/heads/master
| 2021-04-27T08:31:19.184145
| 2018-02-26T10:00:07
| 2018-02-26T10:00:07
| 122,491,685
| 0
| 0
| null | 2018-02-22T14:45:23
| 2018-02-22T14:45:23
| null |
UTF-8
|
R
| false
| false
| 635
|
rd
|
get_single_run-HierarchicalPartition-method.rd
|
\name{get_single_run-HierarchicalPartition-method}
\alias{get_single_run,HierarchicalPartition-method}
\title{
Get result for a specified level in the partition hierarchy
}
\description{
Get result for a specified level in the partition hierarchy
}
\usage{
\S4method{get_single_run}{HierarchicalPartition}(object, node = "0")
}
\arguments{
\item{object}{a \code{\link{HierarchicalPartition-class}} object}
\item{node}{node label, see \code{\link{hierarchical_partition}} for explanation.}
}
\value{
A \code{\link{ConsensusPartition-class}} object.
}
\author{
Zuguang Gu <z.gu@dkfz.de>
}
\examples{
# There is no example
NULL
}
|
000f4f4dc12c9452c26dcceeb085f10f7eb92a81
|
4ae34bc5607632a1c771a52e66a56d2cb049e050
|
/plot2.R
|
d64ae4b680757b753e41040b52ea72bfa64ee4c0
|
[] |
no_license
|
christina-zhong/R-Exercise2
|
a7ebbf37edf4c9988ac26635430e911944762304
|
23b9a9ca12065bf4a579382aac50b846a8498638
|
refs/heads/master
| 2020-12-25T12:57:39.262669
| 2014-08-10T19:52:56
| 2014-08-10T19:52:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 485
|
r
|
plot2.R
|
# Read only the columns needed: Date and Time as character and
# Global_active_power as numeric; the remaining six columns are dropped
# ("NULL" colClasses).  Missing values are encoded as '?'.
consumption <- read.table("household_power_consumption.txt", header = TRUE, sep = ";",
                          na.strings = "?",
                          colClasses = c(rep("character", 2), "numeric", rep("NULL", 6)))

# Restrict to the two target days.
# NOTE(review): the public UCI file encodes dates day-first ("1/2/2007"),
# while these strings and the format below are month-first -- confirm this
# matches the input file actually used.
target_days <- c("2/1/2007", "2/2/2007")
gap <- consumption[consumption$Date %in% target_days, ]

# Combine date and time into one POSIXlt time stamp for the x axis.
gap$datetime <- strptime(paste(gap$Date, gap$Time, sep = " "), format = '%m/%d/%Y %H:%M:%S')

# Line plot of global active power over time, then copy the screen device
# to a 480x480 PNG and close it.
par(mar = c(5.1, 5.1, 4.1, 2.1))
with(gap, plot(datetime, Global_active_power, type = "l", xlab = "",
               ylab = "Global Active Power (kilowatts)"))
dev.copy(device = png, file = "plot2.png", width = 480, height = 480)
dev.off()
|
9ce23ef11b237d604e04bf35f265312a57fc0d29
|
44fe4a9f6d9630d0a80f987ace8474ba48b858f7
|
/man/shiny_rdf_spec_curve.Rd
|
f7272a367438e93b283c5e3235b94ba8c2ba16ee
|
[
"MIT"
] |
permissive
|
iMarcello/rdfanalysis
|
ee8be635d38554f81abc21f87673fd48aef06041
|
5c43ee48ff2d7f82b921419d9dbb9b2663e57d7c
|
refs/heads/master
| 2020-09-27T22:15:35.705406
| 2019-12-07T13:12:16
| 2019-12-07T13:12:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,853
|
rd
|
shiny_rdf_spec_curve.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/shiny_rdf_spec_curve.R
\name{shiny_rdf_spec_curve}
\alias{shiny_rdf_spec_curve}
\title{An Interactive Specification Curve}
\usage{
shiny_rdf_spec_curve(ests, spec_curve_parms, design = NULL,
rel_dir = NULL, start_input = NULL, regression_cutoff = 5,
default_choices = NULL, title = "A Shiny Specification Curve",
abstract = NULL)
}
\arguments{
\item{ests}{The data frame provided by \code{\link[rdfanalysis:exhaust_design]{exhaust_design()}}.}
\item{spec_curve_parms}{A list containing additional parameters that will be
passed on to \code{\link[rdfanalysis:plot_rdf_spec_curve]{plot_rdf_spec_curve()}}.}
\item{design}{if not \code{NULL} it takes the design that was used to generate
the estimates. In this case, you also need to specify the \code{rel_dir}
and \code{start_input} parameter below. The shiny app will then display
full regression results when you select choices that generate fewer than
\code{regression_cutoff} estimates.}
\item{rel_dir}{The path to the code directory. See above.}
\item{start_input}{The parameters that you pass to the first design step.
See above.}
\item{regression_cutoff}{If your choices generate this many estimates or
fewer, the display will switch to normal regression output (requires the
parameters above to be non-\code{NULL}).}
\item{default_choices}{A list containing choices that you want
the app to start with. If \code{NULL}, it will start with all choices
included.}
\item{title}{The title of the shiny app.}
\item{abstract}{Text that will be displayed by the app. Wrapped
into \code{HTML()} so that you can use HTML code.}
}
\description{
A shiny based web app that allows you to explore your
researcher degrees of freedom's specification curve interactively.
}
\examples{
\dontrun{
print("Sorry. No examples yet.")
}
}
|
33728b39ed42a170890d6998f0d9eb4a7f6d89b0
|
6e632fa2255635d340bff97ae0dc31e860baed54
|
/R/mt_plotpairs.R
|
895cbb413e0e706313c46af3ac022a0da1a595a5
|
[] |
no_license
|
TYMichaelsen/mmtravis
|
01a2546da1af40c7ebe42276e6a27d7fb35637db
|
0bb0cb651be6737b01aa4c9b7776e177d55cb7eb
|
refs/heads/master
| 2020-03-14T23:15:04.587733
| 2019-08-03T05:35:17
| 2019-08-03T05:35:17
| 131,840,635
| 5
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,069
|
r
|
mt_plotpairs.R
|
#' @title Plot multiple combinations of sample gene expressions in a pairs plot
#'
#' @description Plots the gene expression from multiple samples in a \code{mmt} object in a grid plot with all pairs of variables.
#'
#' @param mmt (\emph{required}) A \code{mmt} list loaded with \code{\link{mt_load}}.
#' @param samples A vector of 3 or more sample names in \code{mmt} to plot on each axis. If NULL, the default, then all samples will be plotted. (\emph{Default: } \code{NULL})
#' @param label_by replace the SampleIDs plotted in the diagonal with one(!) column in the metadata.
#' @param textsize The text size of the axis titles.
#' @param pointsize The size of points in the plot(s).
#' @param linesize The size of lines in the plot(s).
#'
#' @export
#'
#' @return A ggplot2 object.
#'
#' @importFrom magrittr %>%
#' @importFrom cowplot plot_grid
#' @importFrom dplyr transmute_ mutate_all
#' @import ggplot2
#'
#' @examples
#' \dontrun{
#' data("example_mmt")
#'
#' # Plot all samples (might take some time)
#' mt_plotpairs(example_mmt)
#'
#' # Plot replicates and relabel.
#' mt_plotpairs(example_mmt,samples = c("HQ180323_13","HQ180323_14"),label_by = "Replicate")
#' }
#'
#' @author Thomas Yssing Michaelsen \email{tym@@bio.aau.dk}
mt_plotpairs <- function(mmt,
samples = NULL,
label_by = NULL,
textsize = 5,
linesize = 0.5,
pointsize = 2){
# Default to plotting every sample; the first metadata column holds the
# sample identifiers.
if(is.null(samples)){
samples <- mmt$mtmeta[[1]]
}
# Validate the requested samples.  `%w/o%` ("without") is a set-difference
# operator defined elsewhere in the package -- TODO confirm.
if(!all(samples %in% mmt$mtmeta[[1]])){
mis <- samples %w/o% mmt$mtmeta[[1]]
stop(paste("Following sample(s) is not in the data:",paste(mis,collapse = ", ")),call. = FALSE)
}
## Label by metadata instead.
# `label_by` names one metadata column whose values replace the sample IDs
# on the diagonal; otherwise the sample names themselves are used.
if(!is.null(label_by)){
if (label_by %in% colnames(mmt$mtmeta)){
label_by <- mmt$mtmeta[[label_by]]
} else {
stop("Your 'label_by' is not in metadata.",call. = FALSE)
}
} else {
label_by <- samples
}
## Order according to samples (if specified)
ord <- match(samples,mmt$mtmeta$SampleID)
label_by <- label_by[ord]
## Prepare the data.
# Sample i's expression values appear to sit in column ord[i] + 1 of mtdata
# (column 1 presumably holds the feature ID -- TODO confirm).  The values
# are log2(x + 1)-transformed in place via data.table::set().
dat <- mmt$mtdata[,.SD,.SDcols = (ord+1)]
for (j in colnames(dat)) set(dat, j = j, value = log2(dat[[j]] + 1))
# Common axis range shared by all scatter panels.
rng <- range(dat)
## Make a blank plot
# Used as the canvas for the diagonal labels and the lower-triangle
# correlation panels.
emp <- data.frame(x = 0, y = 0)
pblank <- ggplot(emp, aes(x,y)) +
geom_blank() +
theme_bw() +
theme(plot.margin = margin(0,0,0,0),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
axis.text.x = element_blank(),
axis.text.y = element_blank(),
axis.title.x = element_blank(),
axis.title.y = element_blank(),
axis.ticks = element_blank(),
panel.border = element_blank())
## Iterate through the different combinations of plots.
# Upper triangle (i < j): scatter + GAM smooth; diagonal (i == j): label;
# lower triangle (i > j): correlation coefficient, text size scaled by r^2.
temp <- list()
for (i in 1:length(samples)){
for (j in 1:length(samples)){
# Subset data and round to 2 decimals.
dat_sub <- data.table(x = dat[[samples[j]]],
y = dat[[samples[i]]])
for (k in colnames(dat_sub)) set(dat_sub, j = k, value = round(dat_sub[[k]],2))
if (i < j){
# Remove duplicated datapoints before plotting.
dat_sub_uni <- unique(dat_sub)
# make plot
p <- ggplot(dat_sub_uni,aes(x = x,
y = y)) +
geom_point(alpha = 0.1,size = pointsize,shape = 19) +
coord_cartesian(xlim = rng,ylim = rng) +
# The smooth is fit on the full (non-deduplicated) data.
geom_smooth(
data = dat_sub,
method = "gam",
se = F,
formula = y ~ s(x, bs = "cs"),
colour = "red",
size = linesize) +
# y = x reference line.
geom_abline(slope = 1,intercept = 0,colour = "red",linetype = "dotted",size = linesize) +
theme(plot.margin = margin(3,3,0,0, unit = "pt"),
legend.position = "none",
axis.title.x = element_blank(),
axis.title.y = element_blank(),
axis.text = element_blank(),
axis.ticks = element_blank(),
axis.line = element_blank(),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
panel.background = element_blank(),
panel.border = element_rect(fill = NA, size = 0.5, linetype = 3, color = "gray75"))
}
if (i == j){
p <- pblank + geom_text(label = label_by[i], size = textsize,fontface = "bold")
}
if(i > j){
# Calculate correlation (omit cases with zeros).
r <- dat_sub %>%
{.[apply(.,1,function(x){!any(x == 0)}),]} %>%
{cor(.)[1,2]}
# format() against a 9-decimal dummy keeps a stable significant-digit
# layout for the printed coefficient.
txt <- format(c(r, 0.123456789), digits = 2)[1]
p <- pblank + geom_text(label = txt,size = 5*r^2)
}
plotnr <- paste0("x_",samples[j],"y_",samples[i])
temp[plotnr] <- list(p)
}
}
# Grid width: floor(sqrt(number of panels)) columns.
ncol <- temp %>%
length(.) %>%
sqrt(.) %>%
floor(.) %>%
as.integer(.)
cowplot::plot_grid(plotlist = temp,ncol = ncol,align = "hv")
}
|
df1eeb5e418aa9e79a9c0a12881534090b2a078d
|
d11e3c96cbec4c4ce02d8c663a51835d4b213278
|
/documentation/esm237examplesS18/R/clim.R
|
022ae6e384089da054a02bd8604efca4ef515364
|
[] |
no_license
|
molly-williams/esm262-assignments-MW
|
b48d8c1d2356e299c5ce426bbf2e19351ba0e6c7
|
15bff91481c6c19b3a879b874ad7eaf3b938cfb1
|
refs/heads/master
| 2021-06-17T15:38:58.697157
| 2021-03-16T20:22:47
| 2021-03-16T20:22:47
| 184,777,904
| 0
| 1
| null | 2019-06-01T02:54:44
| 2019-05-03T15:19:40
|
HTML
|
UTF-8
|
R
| false
| false
| 439
|
r
|
clim.R
|
#' Climate Data from Santa Barbara
#'
#' Data from SB-LTER meterology station
#'
#' @format A data frame with 27274 rows and 8 columns
#' \itemize{
#' \item date day/month/year
#' \item tmin minimum daily temperature (C)
#' \item tmax maximum daily temperature (C)
#' \item rain daily rainfall (mm)
#' \item year
#' \item month
#' \item day
#' \item wy water year
#' }
#' @source \url{http://sbs.lternet.edu/data/}
#' @author Naomi
"clim"
|
b678101b5eb34525942526668fee26b50e94ad06
|
15107b515d45e60c7ea59cfcb63b758984c52272
|
/man/getTicks.Rd
|
93c5b6edba2856b59e16a24d7b78bec569c3aac4
|
[] |
no_license
|
gforge/forestplot
|
e00e700b727758c30a530d077168d26b86c63f4b
|
b26b33561d2664933fc7a9b8258e26dffa8fe2e5
|
refs/heads/master
| 2023-09-01T12:16:44.558022
| 2023-08-27T19:52:18
| 2023-08-27T19:52:18
| 28,350,997
| 38
| 16
| null | 2022-11-23T20:55:20
| 2014-12-22T17:53:03
|
R
|
UTF-8
|
R
| false
| true
| 1,557
|
rd
|
getTicks.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/getTicks.R
\name{getTicks}
\alias{getTicks}
\title{Ticks for plot axis}
\usage{
getTicks(low, high = low, clip = c(-Inf, Inf), exp = FALSE, digits = 0)
}
\arguments{
\item{low}{lower bound, can be a single number or a vector}
\item{high}{upper bound - optional, you can just have all data in the low variable}
\item{clip}{if the ci are clipped}
\item{exp}{If the value should be in exponential form (default)}
\item{digits}{Number of digits - used in exp mode}
}
\value{
\code{vector} Returns a vector with the ticks
}
\description{
Gets the ticks in a formatted version. This is since I'm not always
that fond of just pretty(1:10/5). In exponential form the ticks are
determined from the 2-base, meaning that you get an intuitive feeling
for when the value is doubled.
}
\details{
This function is far from perfect and I recommend specifying yourself
the ticks that you want.
}
\examples{
test_data <- data.frame(
coef = c(2, 0.5),
low = c(1.5, 0.05),
high = c(3, 0.75),
boxsize = c(0.5, 0.5)
)
# Exponential form where the exponent base is 2 for easier understanding
getTicks(
low = test_data$low,
high = test_data$high,
clip = c(-Inf, Inf),
exp = TRUE
)
# Non exponential form with using pretty
getTicks(
low = test_data$low,
high = test_data$high,
clip = c(-Inf, Inf),
exp = FALSE
)
# A very simple example
getTicks(1:5 * 2.33,
exp = FALSE
)
# A slightly more advanced exponential version
getTicks(1:10 * .33,
digits = 2,
exp = TRUE
)
}
|
880e88392272c1796805aab2d60e77bbb2b7804c
|
b2041dcde785d4ac86387fc48960e53d0c9af848
|
/hw3/rankhospital.R
|
31e8412cad268b8797a3970b59907303deba91d6
|
[] |
no_license
|
YeLibrarian/cdata-analysis
|
53ea05f408b36af598fdb95a65db7630cdc30692
|
f171d961cf2b2e08a9fa71409079e420eea31c9f
|
refs/heads/master
| 2021-04-15T12:12:54.480545
| 2014-02-18T06:32:23
| 2014-02-18T06:32:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,579
|
r
|
rankhospital.R
|
# Return the name of the hospital holding a given rank for 30-day mortality
# on one outcome within one state.
#
# state:   two-letter state abbreviation (must appear in the data)
# outcome: one of "heart attack", "heart failure", "pneumonia"
# num:     desired rank -- "best", "worst", or an integer rank; ranks beyond
#          the number of ranked hospitals yield NA
#
# Hospitals with a missing rate are excluded from the ranking; ties on the
# rate are broken alphabetically by hospital name.
rankhospital <- function(state, outcome, num = "best") {
  ## Read outcome data (all columns as character; the needed rate column is
  ## converted to numeric below).
  outcomeData <- read.csv("outcome-of-care-measures.csv", colClasses = "character")

  ## Hard-coded column positions of the 30-day mortality rates.
  labels <- list("heart attack" = 11, "heart failure" = 17, "pneumonia" = 23)

  ## Validate inputs.
  if (!(state %in% outcomeData$State)) {
    stop("invalid state")
  }
  if (!(outcome %in% names(labels))) {
    stop("invalid outcome")
  }

  rateCol <- labels[[outcome]]
  ## "Not Available" entries become NA; suppress the coercion warning.
  outcomeData[[rateCol]] <- suppressWarnings(as.numeric(outcomeData[[rateCol]]))

  ## Restrict to the requested state and rank by rate, breaking ties
  ## alphabetically; hospitals with NA rates are dropped (na.last = NA).
  stateData <- outcomeData[outcomeData$State == state, ]
  ranked <- stateData$Hospital.Name[order(stateData[[rateCol]],
                                          stateData$Hospital.Name,
                                          na.last = NA)]

  ## Translate the requested rank into an index.
  ## BUG FIX: "best"/"worst" previously returned every hospital tied at the
  ## min/max rate (possibly several names, unsorted); they now resolve ties
  ## alphabetically and return a single name.  The out-of-range check is
  ## also made against the number of *ranked* hospitals (NA rates excluded)
  ## rather than all rows for the state.
  rank <- if (identical(num, "best")) {
    1L
  } else if (identical(num, "worst")) {
    length(ranked)
  } else {
    as.numeric(num)
  }

  if (is.na(rank) || rank < 1 || rank > length(ranked)) {
    return(NA)
  }
  ranked[rank]
}
|
e13c6a7d1498ad170be7b70924857eea7b58e61d
|
5c5e9ddcff88fe03d6249fc25935574608a2abed
|
/R/kge_nse.r
|
b4422043dbbed2532b320939679c50d448d44cac
|
[] |
no_license
|
freysimon/PestR
|
5864e5a596f57a4bcfa4d9fe40577c899a00f9db
|
66416dd406aa59ede65647104b6fc29dc0492705
|
refs/heads/master
| 2021-01-18T22:22:10.554621
| 2017-05-30T14:39:09
| 2017-05-30T14:39:09
| 87,049,633
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 914
|
r
|
kge_nse.r
|
# Designed to read a COSERO outputfile
# Compute the Kling-Gupta (KGE) and Nash-Sutcliffe (NSE) efficiencies for a
# COSERO output time series and optionally write them, negated, to a file
# (negated so they can serve directly as a minimisation objective).
#
# x:        an xts object; the simulation sits in column `simcol` and the
#           observation in the column directly before it (simcol - 1)
# simcol:   column index of the simulated series
# OUT:      output file name, or NULL to skip writing
# from, to: optional date strings used to clip the series (xts range syntax)
kge_nse <- function(x, simcol = 2,
                    OUT = "kge_nse.txt",
                    from = NULL, to = NULL) {
  # load libraries
  require(hydroGOF)
  require(xts)

  # Clip the series when at least one bound is given.  paste() silently
  # drops NULL arguments, so "from/", "/to" and "from/to" all come out
  # right from the single expression below (this replaces three redundant
  # branches in the original, two of which were unreachable).
  if (!is.null(from) || !is.null(to)) {
    x <- x[paste(from, "/", to, sep = "")]
  }

  nse <- NSE(sim = x[, simcol], obs = x[, simcol - 1])
  kge <- KGE(sim = x[, simcol], obs = x[, simcol - 1])
  out <- t(data.frame(KGE = kge, NSE = nse))

  if (!is.null(OUT)) {
    # negated: smaller is better for a calibration tool such as PEST
    write.table(out * (-1), file = OUT, col.names = FALSE, row.names = FALSE,
                quote = FALSE, sep = "\t")
  }

  print(out)
  if (!is.null(OUT)) {
    print("----------------------------------------")
    # BUG FIX: the original referenced an undefined variable `pfad` here,
    # which made the function error after writing the file; it also printed
    # the success message even when no file was requested.
    print(paste("file write to", OUT, "OK", sep = " "))
  }
}
|
4dbe51a0f4d8dcc04ec13d0c7821c8073814a797
|
34fab1c1c94231367e92f67e05762b73137df8d7
|
/RNA-Seq/Quantifying-RNA-Expression/Gene-Level-Analysis/Differential-Expression-Analysis/DESeq2/simpleDESeq2Analysis.R
|
f13f260547acc260f69a506c0968c27acb0c2ab0
|
[] |
no_license
|
aiminy/tutorials
|
ac35ce4bf1fca3625cb84fba9b8d3b99387c16d4
|
511af0ed6535b59bab80099096ed35ce808f6661
|
refs/heads/master
| 2021-01-21T01:38:55.693437
| 2015-10-25T13:52:10
| 2015-10-25T13:52:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,325
|
r
|
simpleDESeq2Analysis.R
|
# Differential expression analysis of HTSeq gene counts with DESeq2,
# comparing "Experimental" vs "Control" samples, with gene-ID-to-name
# mapping and result export.
library(DESeq2)

directory = 'C:/RNASeq/ReadCounts/'
setwd(directory)

# Build the sample table from the HTSeq count files in the directory.
# Sample names are the file names minus the suffix; the condition is the
# sample name minus its replicate digit (e.g. "Control1" -> "Control").
sampleFiles = grep('.geneIDCounts.txt', list.files(directory), value = TRUE)
sampleNames = sub('.geneIDCounts.txt', '', sampleFiles)
sampleCondition = sub('[1-3]', '', sampleNames)
sampleTable = data.frame(sampleName = sampleNames, fileName = sampleFiles, condition = sampleCondition)
ddsHTSeq = DESeqDataSetFromHTSeqCount(sampleTable = sampleTable, directory = directory, design = ~ condition)

# Differential expression analysis.
# BUG FIX: relevel the condition *before* fitting, so "Control" is the
# reference level used by DESeq(); releveling after the fit (as the
# original did) does not refit the model and leaves the contrast unchanged.
ddsHTSeq$condition = relevel(ddsHTSeq$condition, 'Control')
dds <- DESeq(ddsHTSeq)
res <- results(dds)

# Extract the significantly differentially expressed genes (FDR < 0.05).
resOrdered <- res[order(res$padj), ]
resSig <- subset(resOrdered, padj < 0.05)

# Convert gene IDs to gene names; by.x/by.y = 0 joins on row names.
# BUG FIX: the original path used single backslashes ('C:\RNASeq\...'),
# which is a parse error in R ("\R" is not a valid escape); use forward
# slashes instead.
conversionTable = read.table('C:/RNASeq/GeneIDtoNameConversionTable.txt', sep = '\t', header = FALSE, row.names = 1)
colnames(conversionTable) = c('GeneName')
nameRes = merge(conversionTable, as.data.frame(res), by.x = 0, by.y = 0)
nameResSig = merge(conversionTable, as.data.frame(resSig), by.x = 0, by.y = 0)

# Print results to file: full table and the significant subset.
setwd('C:/RNASeq/DESeq2Output/')
write.table(nameRes, file = 'ExperimentalvsControl_DEResults.txt', sep = '\t', quote = FALSE)
setwd('C:/RNASeq/DESeq2DEGenes/')
write.table(nameResSig, file = 'ExperimentalvsControl_DE_pAdj0.05.txt', sep = '\t', quote = FALSE)
|
bf7bc818abe24a9b4bcf039b9219566060d007e0
|
a59627d4b4d1dd1daa6bbee94f56b1caba590675
|
/FB.R
|
9ef9563ad21bf6e5c16013092eb2d7c73ba5366c
|
[] |
no_license
|
AshleshaSampathi/Exploring-Data-Science-Market-through-Glassdoor
|
10b4406e2712ff34712fff8ddbfe95f374430c84
|
53c3a310a98b389adfb395708f04ec48e01d3883
|
refs/heads/master
| 2021-04-02T17:48:26.755765
| 2020-03-18T17:53:56
| 2020-03-18T17:53:56
| 248,302,826
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,077
|
r
|
FB.R
|
# Text mining of Glassdoor Facebook reviews: tokenize the pros/cons review
# text, clean and stem the tokens, then visualise word frequencies.
library(tidyverse)
library(tidytext)
library(tm)
library(wordcloud)
library(udpipe)
library(lattice)
# Stemming packages
library(SnowballC)
#library(hunspell)
#library(proustr)

fb = read.csv("C:\\Users\\ashle\\Downloads\\fb_reviews_csv.csv", header = TRUE)
colnames(fb)

# Reshape the wide pros/cons columns into long form: one row per review
# with a review_sentiment label ("pros" or "cons") and the review text.
fb = fb %>%
  gather(pros, cons, key = "review_sentiment", value = "review", -heading)
summary(fb$review_sentiment)

fbreviews = fb %>%
  select(review, review_sentiment)
fbreviews$review = as.character(fbreviews$review)

# One row per token.
tidy_dataset = fbreviews %>%
  unnest_tokens(word, review)
tidy_dataset %>%
  count(word) %>%
  arrange(desc(n))
colnames(tidy_dataset)

# Remove English stop words.
data("stop_words")
tidy_dataset2 = tidy_dataset %>%
  anti_join(stop_words)
### Compare the count of top words from above ###
tidy_dataset2 %>%
  count(word) %>%
  arrange(desc(n))

# Strip stand-alone numbers and whitespace, then drop emptied tokens.
patterndigits = '\\b[0-9]+\\b'
tidy_dataset2$word = tidy_dataset2$word %>%
  str_replace_all(patterndigits, '')
tidy_dataset2$word = tidy_dataset2$word %>%
  str_replace_all('[:space:]', '')
tidy_dataset3 = tidy_dataset2 %>%
  filter(!(word == ''))
tidy_dataset3 %>%
  count(word) %>%
  arrange(desc(n))

# Stem the tokens (Porter stemmer, English).
# `funs()` is deprecated in dplyr; use a formula-style list instead.
tidy_dataset4 = tidy_dataset3 %>%
  mutate_at("word", list(~ wordStem((.), language = "en")))
tidy_dataset4 %>%
  count(word) %>%
  arrange(desc(n))

# Keep stems accounting for at least 0.5% of all tokens and plot them.
frequency2 = tidy_dataset4 %>%
  count(word) %>%
  arrange(desc(n)) %>%
  mutate(proportion = (n / sum(n)*100)) %>%
  filter(proportion >= 0.5)
ggplot(frequency2, aes(x = proportion, y = word)) +
  geom_abline(color = "gray40", lty = 2) +
  geom_jitter(alpha = 0.1, size = 2.5, width = 0.3, height = 0.3) +
  geom_text(aes(label = word), check_overlap = TRUE, vjust = 1.5) +
  scale_color_gradient(limits = c(0, 0.001), low = "darkslategray4", high = "gray75") +
  theme(legend.position="none") +
  labs(y = 'Word', x = 'Proportion')

# Word cloud of the "pros" reviews.
# BUG FIX: the tokenized data has a `review_sentiment` column, not
# `sentiment`; filtering on the latter raised an "object not found" error.
positive = tidy_dataset4 %>%
  filter(review_sentiment == "pros")
wordcloud(positive[,2],   # column 2 holds the stemmed words
          max.words = 100,
          random.order=FALSE,
          rot.per=0.30,
          use.r.layout=FALSE,
          colors=brewer.pal(9, "Greens"))

# Top 21 stems by frequency among the positive reviews, plus a palette
# sized to the number of distinct stems (used by plotting code below).
positive2 = positive %>%
  count(word, sort = TRUE) %>%
  rename(freq = n) %>%
  top_n(21)
colourCount = length(unique(positive2$word))
getPalette = colorRampPalette(brewer.pal(9, "Set1"))
positive2 %>%
mutate(word = reorder(word, freq)) %>%
ggplot(aes(x = word, y = freq)) +
geom_col(fill = getPalette(colourCount)) +
coord_flip()
negative = tidy_dataset4 %>%
filter(sentiment == "cons")
wordcloud(negative[,2],
max.words = 100,
random.order=FALSE,
rot.per=0.30,
use.r.layout=FALSE,
colors=brewer.pal(9, "Reds"))
negative2 = negative %>%
count(word, sort = TRUE) %>%
rename(freq = n) %>%
top_n(21)
colourCount = length(unique(negative2$word))
getPalette = colorRampPalette(brewer.pal(8, "Dark2"))
negative2 %>%
mutate(word = reorder(word, freq)) %>%
ggplot(aes(x = word, y = freq)) +
geom_col(fill = getPalette(colourCount)) +
coord_flip()
nrc_joysad = get_sentiments('nrc') %>%
filter(sentiment == 'joy' |
sentiment == 'sadness')
nrow(nrc_joysad)
(tweet_joysad = tidy_dataset4 %>%
inner_join(nrc_joysad,by='word') %>%
count(word, sentiment) %>%
spread(sentiment, n, fill = 0) %>%
mutate(contentment = joy - sadness, linenumber = row_number()) %>%
arrange(desc(contentment)))
(tweet_joysad2 = tweet_joysad %>%
slice(1:10,24:34))
ggplot(tweet_joysad2, aes(x=linenumber, y=contentment, fill=word)) +
coord_flip() +
theme_light(base_size = 15) +
labs(
x='Index Value',
y='Contentment'
) +
theme(
legend.position = 'bottom',
panel.grid = element_blank(),
axis.title = element_text(size = 10),
axis.text.x = element_text(size = 10),
axis.text.y = element_text(size = 10)
) +
geom_col()
nrc_trstfear = get_sentiments('nrc') %>%
filter(sentiment == 'trust' |
sentiment == 'fear')
nrow(nrc_trstfear)
(tweet_trstfear = tidy_dataset4 %>%
inner_join(nrc_trstfear) %>%
count(word, sentiment) %>%
spread(sentiment, n, fill = 0) %>%
mutate(trustworthy = trust - fear, linenumber = row_number()) %>%
arrange(desc(trustworthy))%>%
slice(1:10,53:63))
ggplot(tweet_trstfear, aes(x=linenumber, y=trustworthy, fill=word)) +
coord_flip() +
theme_light(base_size = 15) +
labs(
x='Index Value',
y='Trustworthiness'
) +
theme(
legend.position = 'bottom',
panel.grid = element_blank(),
axis.title = element_text(size = 10),
axis.text.x = element_text(size = 10),
axis.text.y = element_text(size = 10)
) +
geom_col()
|
86d831853dd3673cabd28851f39afc3ab701b962
|
b23e8b8fa4f70d915f0c8d3c6ed57ba2d2ae4ee6
|
/GDM_Workflow.R
|
a46687813e5705a703fc0117a13493895d9d84c5
|
[] |
no_license
|
cwarecsiro/gdmEngine
|
99195eefc1369165051ce8985f5954369358664c
|
4fd57e609f7411ebb989b0987e6b10d33af63ae4
|
refs/heads/master
| 2021-07-15T23:33:37.415368
| 2018-11-23T00:06:05
| 2018-11-23T00:06:05
| 137,005,799
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 17,100
|
r
|
GDM_Workflow.R
|
###########################################################################################
##
## Generalised Dissimilarity Modelling workflow
##
## Karel Mokany et al, (CSIRO) - Draft 30 August 2017
##
## First half of the workflow: establish inputs for ONE taxon group, then
## download, merge, filter and aggregate ALA species records, and extract
## environmental data for the selected grid cells.
## NOTE(review): every data path is a CSIRO network share — the script only
## runs with access to those mounts.
###########################################################################################
# Install libraries
# Load libraries
library(gdmEngine)
library(ALA4R)
library(raster)
#library(data.table)
#library(dplyr)
#library(magrittr)
#library(plyr)
#library(assertthat)
#library(spatstat)
## ESTABLISH KEY INPUTS ------------------------------------------------------------------##
# Read in a spatial raster specifying the domain and resolution to be modelled
Aus.domain.mask <- raster("//ces-10-cdc/OSM_CDC_GISDATA_work/AUS0025/CLIM/MASK/MASK0.flt")
# SPECIFY ALA DATA FILTERING THRESHOLDS
# (records before this year, or with more location uncertainty in metres,
# are dropped by filter_ALA_data below)
data.start.year = 1970
location.uncertainty.limit = 2000
# Specify Environmental layers (climate + terrain + topsoil .flt grids)
climate.files <- list.files(path = "//lw-osm-02-cdc/OSM_CBR_LW_R51141_GPAA_work/ENV/A/OUT/1990", full.names=TRUE, pattern = ".flt")
terrain.files <- list.files(path = "//lw-osm-02-cdc/OSM_CBR_LW_R51141_GPAA_work/ENV/A/OUT/LAND", full.names=TRUE, pattern = ".flt")
#soil.files <- list.files(path = "//lw-osm-02-cdc/OSM_CBR_LW_HCAS_work/HCAS2.0/HCAS2.0a/ENV/SOIL/TOP", full.names=TRUE, pattern = ".flt")
soil.files <- list.files(path = "//osm-23-cdc/OSM_CBR_LW_DEE_work/source/env/SOIL/TOP", full.names=TRUE, pattern = ".flt")
env.files <- c(climate.files, terrain.files, soil.files)
env.files <- env.files[(substr(env.files, nchar(env.files)-3, nchar(env.files)) == ".flt")] # to remove some arcmap filenames
# NOTE(review): positional drops — fragile if directory contents change.
env.files <- env.files[-c(3,11,12,26,29,30,31,32,33,34,37,38,39,40)] # remove grids we don't want to assess in the modelling
env.stk <- stack(env.files, quick=TRUE) #env.stk <- stack(env.files)
# Per-taxon parameter sets follow. Run exactly ONE of the four blocks below
# before continuing: each re-assigns the same variables (species list,
# folders, richness thresholds, site-pair counts) for its taxon group.
# PLANTS INPUTS
species.names.file <- "//osm-23-cdc/OSM_CBR_LW_DEE_work/source/biol/vascular_plants/APC_and_Orchid_SpeciesNames.csv"
species.names <- read.csv(species.names.file)
species.names <- as.character(species.names[,1])
species.records.folder <- "//osm-23-cdc/OSM_CBR_LW_DEE_work/source/biol/vascular_plants"
species.records.folder.raw <- "//osm-23-cdc/OSM_CBR_LW_DEE_work/source/biol/vascular_plants/raw_files"
data.processing.folder <- "//osm-23-cdc/OSM_CBR_LW_DEE_work/processing/biol/vascular_plants"
agg.cell.rad <- 2.25
min.rich.limit <- 10
max.rich.limit <- 400
min.rich.rad <- 200
min.rich.proportion <- 0.25
n.pairs.model <- 144000 # equates to each site used 10 times
train.proportion <- 0.8
n.pairs.test <- 36000 # equates to each site used 10 times
# AMPHIBIANS INPUTS
species.names.file <- "//osm-23-cdc/OSM_CBR_LW_DEE_work/source/biol/amphibians/AFD-20171211T130458.csv"
species.names <- read.csv(species.names.file)
species.names <- paste(species.names$GENUS, species.names$SPECIES)
species.names <- unique(species.names)
species.records.folder <- "//osm-23-cdc/OSM_CBR_LW_DEE_work/source/biol/amphibians"
species.records.folder.raw <- "//osm-23-cdc/OSM_CBR_LW_DEE_work/source/biol/amphibians/raw_files"
data.processing.folder <- "//osm-23-cdc/OSM_CBR_LW_DEE_work/processing/biol/amphibians"
agg.cell.rad <- 2.25
min.rich.limit <- 2
max.rich.limit <- 50
min.rich.rad <- 200
min.rich.proportion <- 0.25
n.pairs.model <- 67000 # equates to each site used 10 times
train.proportion <- 0.8
n.pairs.test <- 17000 # equates to each site used 10 times
# LAND SNAIL INPUTS
species.names.file <- "//osm-23-cdc/OSM_CBR_LW_DEE_work/source/biol/land_snails/AusLandSnails_ALASpeciesList_9Mar18.csv"
species.names <- read.csv(species.names.file)
species.names <- species.names$Species.Name
species.names <- unique(species.names)
species.records.folder <- "//osm-23-cdc/OSM_CBR_LW_DEE_work/source/biol/land_snails"
species.records.folder.raw <- "//osm-23-cdc/OSM_CBR_LW_DEE_work/source/biol/land_snails/raw_files"
data.processing.folder <- "//osm-23-cdc/OSM_CBR_LW_DEE_work/processing/biol/land_snails"
agg.cell.rad <- 2.25
min.rich.limit <- 2
max.rich.limit <- 50
min.rich.rad <- 50
min.rich.proportion <- 0.25
n.pairs.model <- 14000 # equates to each site used 10 times
train.proportion <- 0.8
n.pairs.test <- 3500 # equates to each site used 10 times
# REPTILE INPUTS
species.names.file <- "//osm-23-cdc/OSM_CBR_LW_DEE_work/source/biol/reptiles/AFD-20171211T113438.csv"
species.names <- read.csv(species.names.file)
species.names <- paste(species.names$GENUS, species.names$SPECIES)
species.names <- unique(species.names)
species.records.folder <- "//osm-23-cdc/OSM_CBR_LW_DEE_work/source/biol/reptiles"
species.records.folder.raw <- "//osm-23-cdc/OSM_CBR_LW_DEE_work/source/biol/reptiles/raw_files"
data.processing.folder <- "//osm-23-cdc/OSM_CBR_LW_DEE_work/processing/biol/reptiles"
agg.cell.rad <- 2.25
min.rich.limit <- 3
max.rich.limit <- 50
min.rich.rad <- 200
min.rich.proportion <- 0.25
n.pairs.model <- 60500 # equates to each site used 10 times
train.proportion <- 0.8
n.pairs.test <- 15000 # equates to each site used 10 times
## DOWNLOAD BIOLOGICAL DATA FROM ALA -----------------------------------------------------##
# Download the species records from ALA
download_taxalist(specieslist = species.names,
                  dst = species.records.folder)
## MERGE THE BIOLOGICAL DATA FOR EACH SPECIES --------------------------------------------##
All.records <- merge_downloads(src=species.records.folder.raw,
                               output.folder = data.processing.folder,
                               parallel = FALSE)
## FILTER THE BIOLOGICAL DATA ------------------------------------------------------------##
Filtered.records <- filter_ALA_data(ALA.download.data = All.records$data,
                                    output.folder = data.processing.folder,
                                    domain.mask = Aus.domain.mask,
                                    earliest.year = data.start.year,
                                    spatial.uncertainty.m = location.uncertainty.limit)
## AGGREGATE THE BIOLOGICAL DATA TO GRID CELLS -------------------------------------------##
Aggregated.records <- aggregate_ALA_data(ALA.filtered.data = Filtered.records,
                                         domain.mask = Aus.domain.mask,
                                         agg.radius.ncells = agg.cell.rad,
                                         output.folder = data.processing.folder)
## REFINE BIOLOGICAL DATA TO GRID CELLS CONTAINING SUITABLE NUMBERS OF SPECIES RECORDS----##
Selected.records <- select_gridcells_composition(ALA.aggregated.data = Aggregated.records ,
                                                 domain.mask = Aus.domain.mask,
                                                 min.richness.threshold = min.rich.limit,
                                                 max.richness.threshold = max.rich.limit,
                                                 reference.radius.ncells = min.rich.rad,
                                                 min.proportion.max.richness = min.rich.proportion,
                                                 output.folder = data.processing.folder)
## EXTRACT ENVIRONMENTAL DATA FOR SELECTED GRID CELLS ------------------------------------##
Site.Env.Data <- extract_env_data(ALA.composition.data = Selected.records,
                                  environment.stk = env.stk,
                                  output.folder = data.processing.folder)
##TEMP##
# Shortcut: reload previously processed outputs instead of re-running the
# download/filter/aggregate steps above. Uncomment ONE taxon block.
#AMPHIBIANS -------
Selected.records <- read.csv("//osm-23-cdc/OSM_CBR_LW_DEE_work/processing/biol/amphibians/selected_gridcell_composition_2018-03-05.csv")
Site.Env.Data <- read.csv("//osm-23-cdc/OSM_CBR_LW_DEE_work/processing/biol/amphibians/site_env_data_2018-06-04.csv")
#VASCULAR PLANTS -------
#Selected.records <- read.csv("//osm-23-cdc/OSM_CBR_LW_DEE_work/processing/biol/vascular_plants/selected_gridcell_composition_2018-03-07.csv")
#Site.Env.Data <- read.csv("//osm-23-cdc/OSM_CBR_LW_DEE_work/processing/biol/vascular_plants/site_env_data_2018-05-31.csv")
#LAND SNAILS -------
Selected.records <- read.csv("//osm-23-cdc/OSM_CBR_LW_DEE_work/processing/biol/land_snails/selected_gridcell_composition_2018-03-09.csv")
Site.Env.Data <- read.csv("//osm-23-cdc/OSM_CBR_LW_DEE_work/processing/biol/land_snails/site_env_data_2018-06-04.csv")
#Reptiles ---------
Selected.records <- read.csv("//osm-23-cdc/OSM_CBR_LW_DEE_work/processing/biol/reptiles/selected_gridcell_composition_2018-03-15.csv")
Site.Env.Data <- read.csv("//osm-23-cdc/OSM_CBR_LW_DEE_work/processing/biol/reptiles/site_env_data_2018-06-04.csv")
##ENDTEMP##
## DERIVE A GDM ------------------------------------------------------------------------------------##
# Variable/model selection: fits candidate GDMs on geographically-weighted
# site-pair samples and records the best predictor set.
ptm <- proc.time()
GDM.Selection <- gdm_builder(site.env.data = Site.Env.Data,
                             composition.data = Selected.records,
                             geo=FALSE,
                             n.pairs.train = n.pairs.model,
                             n.pairs.test = n.pairs.test,
                             correlation.threshold = 0.8,
                             selection.metric = 'D2',
                             sample.method = 'geowt',
                             Indiv.Dev.Explained.Min = 1.0,
                             n.predictors.min = 5,
                             domain.mask=Aus.domain.mask,
                             pcs.projargs="+init=epsg:3577",
                             bandwidth.geowt=150000,
                             bandwidth.skip=2,
                             bandwidth.DistFact=1,
                             geowt.RndProp=0.05,
                             output.folder = data.processing.folder,
                             output.name = "gdm_mod_builder_results_GeowtSamp_NoGeo_V2")
proc.time() - ptm
## SELECT A SET OF PREDICTORS FOR A GDM & APPLY SIGNIFICANCE TEST -----------------------------------##
# If 'Geographic' was selected it is handled via the geo argument rather
# than as an ordinary predictor, so strip it and remember it in geo.in.
final.mod.preds <- GDM.Selection$Mean.Final.GDM$predictors
geo.in = FALSE
if(final.mod.preds[1] == 'Geographic')
{
final.mod.preds <- final.mod.preds[-1]
geo.in = TRUE
}# end if final.mod.preds[1] == 'Geographic'
# or specify directly, for example (run at most ONE of these overrides):
# final.mod.preds <- c('WDA','TXM','PTX','ELVR1000','SNDT','ECET','TNI','PTOT') #PLANTS
final.mod.preds <- c('TXM','EAAS','TRI','ECET','ELVR1000','SNDT') #AMPHIBIANS
final.mod.preds <- c('WDA','EAAS','TRA','EPI','BDWT','ELVR1000','CLYT') #geo.in=TRUE #LANDSNAILS
final.mod.preds <- c('EPA','ADI','PTX','TRA','PHCT','NTOT','EPI','ELVR1000','SNDT') #REPTILES
## ASSUMING YOU'RE HAPPY WITH A SET OF PREDICTORS, FIT A FINAL MODEL, INCLUDING CROSS-VALIDATION
## ASSESSMENT AND SIGNIFICANCE TEST -----------------------------------------------------------------##
final.model.test <- gdm_build_single_model(site.env.data = Site.Env.Data,
                                           composition.data = Selected.records,
                                           predictor.names = final.mod.preds,
                                           geo=geo.in,
                                           n.pairs.train = n.pairs.model,
                                           n.pairs.test = n.pairs.test,
                                           sample.method = 'geowt',
                                           b.used.factor=2,
                                           domain.mask=Aus.domain.mask,
                                           pcs.projargs="+init=epsg:3577",
                                           bandwidth.geowt=150000,
                                           bandwidth.skip=2,
                                           bandwidth.DistFact=1,
                                           geowt.RndProp=0.05,
                                           output.folder = file.path(data.processing.folder,"Final_GDM"),
                                           output.name = "gdm_build_FinMod_amphibians")
# ## SIGNIFICANCE TEST
# ptm <- proc.time()
# gdm_ext.sigtest(dllpath="//ces-10-cdc/OSM_CDC_MMRG_work/users/bitbucket/gdm_workflow/GDM_EXT_For_Karel/GDM4Rext.dll",
#                 wdpath = data.processing.folder,
#                 datatable = "//osm-23-cdc/OSM_CBR_LW_DEE_work/processing/biol/vascular_plants/gdm_builder_FinMod_RandSamp_GDMtable_2018-05-10.csv", # GDM input table saved to .csv
#                 outname = "FinMod_RandSamp_sig_test",
#                 iterations = 100,
#                 do_geo = TRUE)
# proc.time() - ptm
# Load a model (alternative to fitting: restore a previously saved final GDM)
load(file.path(data.processing.folder,"Final_GDM","gdm_build_FinMod_land_snails_2018-06-12.Rdata"))
final.gdm <- GDM_Final_Model$Mean.Final.GDM
## NOW TRANSFORM THE GRIDS BASED ON THE SELECTED MODEL ----------------------------------------------##
# NOTE(review): this overwrites the final.gdm loaded just above — use one
# source for final.gdm, not both.
final.gdm <- final.model.test$Mean.Final.GDM
TransformGrids(gdm.model=final.gdm,
               env.grids.stk=env.stk,
               extrap.method = 'Conservative',
               output.folder = file.path(data.processing.folder,"Final_GDM"))
## And plot the transformed grids to see if there are any spatial issues with the model projections
trans.grids <- list.files(path = file.path(data.processing.folder,"Final_GDM"), full.names=TRUE, pattern = ".flt")
for(i in 1:length(trans.grids))
{
next.ras <- raster(trans.grids[i])
png(paste0(file.path(data.processing.folder,"Final_GDM"),"/plot_",names(next.ras),".png"),height=1000,width=1000)
plot(next.ras)
dev.off()#___
} # end for i
## Create a plot of the main axes of compositional variation from the GDM transformed layers
trans.grids <- list.files(path = file.path(data.processing.folder,"Final_GDM"), full.names=TRUE, pattern = ".flt")
trans.grid.stk <- stack(trans.grids)
# PCA on a random sample of cells, then project the first 3 components.
rastDat <- sampleRandom(trans.grid.stk, 200000)
pcaSamp <- prcomp(rastDat)
pcaRast <- predict(trans.grid.stk, pcaSamp, index=1:3)
# scale rasters to 0-255 for RGB plotting
pcaRast[[1]] <- (pcaRast[[1]]-pcaRast[[1]]@data@min) / (pcaRast[[1]]@data@max-pcaRast[[1]]@data@min)*255
pcaRast[[2]] <- (pcaRast[[2]]-pcaRast[[2]]@data@min) / (pcaRast[[2]]@data@max-pcaRast[[2]]@data@min)*255
pcaRast[[3]] <- (pcaRast[[3]]-pcaRast[[3]]@data@min) / (pcaRast[[3]]@data@max-pcaRast[[3]]@data@min)*255
plotRGB(pcaRast, r=1, g=2, b=3)
# KM - rescale pca axes to represent their contribution (relative to PC1)
PC1.rng <- max(pcaSamp$x[,1]) - min(pcaSamp$x[,1])
PC2.rng <- max(pcaSamp$x[,2]) - min(pcaSamp$x[,2])
PC3.rng <- max(pcaSamp$x[,3]) - min(pcaSamp$x[,3])
PC2.scl <- PC2.rng/PC1.rng
PC3.scl <- PC3.rng/PC1.rng
pcaRast[[2]] <- 255 - (pcaRast[[2]] * PC2.scl)
pcaRast[[3]] <- 255 - (pcaRast[[3]] * PC3.scl)
plotRGB(pcaRast, r=1, g=2, b=3)
## ADITIONAL STUFF ##-----------------------???----------------------------------------------##
## Assess sitepair samples --------------------------------------------------------------------------##
SitePairOut <- sitepair_sample_assessor(site.env.data = Site.Env.Data,
                                        composition.data = Selected.records ,
                                        n.pairs.train = n.pairs.model,
                                        sample.method = 'random',
                                        domain.mask=Aus.domain.mask,
                                        pcs.projargs="+init=epsg:3577",
                                        output.folder = data.processing.folder,
                                        output.name = "sitepair_assess_amph")
#Random sample
Pairs.Table.Rnd <- sitepair_sample_random(site.env.data = Site.Env.Data,
                                          n.pairs.target = n.pairs.model)
#Geographic distance based sample
Pairs.Table.Geo <- sitepair_sample_geographic(site.env.data = Site.Env.Data,
                                              n.pairs.target = n.pairs.model,
                                              a.used=0.05,
                                              b.used.factor=2,
                                              c.used=3,
                                              a.dpair=0.05,
                                              b.dpair.factor=2,
                                              c.dpair=3)
# Environmental distance based sample
Pairs.Table.Env <- sitepair_sample_environment(site.env.data = Site.Env.Data,
                                               n.pairs.target = n.pairs.model,
                                               env.colnames = c('PTA','TXX'),
                                               b.used.factor=2,
                                               c.used=3,
                                               b.epair.factor=1,
                                               c.epair=3)
# Neighbourhood site-density based sample (weighted towards less sampled areas)
Pairs.Table.Dens <- sitepair_sample_density(site.env.data = Site.Env.Data,
                                            n.pairs.target = n.pairs.model,
                                            domain.mask = Aus.domain.mask,
                                            a.used=0.05,
                                            b.used.factor=2,
                                            c.used=3,
                                            sigma.spair=0.5,
                                            a.spair=0.05,
                                            b.spair.factor=1.0,
                                            c.spair=1)
|
1933de62d50fa0b8c6efdd94543f9b25e36374c0
|
f185c837110671db3e2632110d14d4d160d0eae5
|
/ScriptLoadData.r
|
4d073a3330667f34c39fe7747c767d8624fdb70e
|
[] |
no_license
|
irycisBioinfo/ResCap
|
eca0df674a21db126d74565ddecd8298b242b403
|
4fcffccb308849d6f871f6589e03f09337ff0a28
|
refs/heads/master
| 2020-04-08T12:37:40.659752
| 2017-05-08T13:34:23
| 2017-05-08T13:34:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 550
|
r
|
ScriptLoadData.r
|
#####
#
# R script to load count data using tidyverse package.
# All count files must be in the R working directory
#
####
library("tidyverse")

# Count files: every CSV-named file in the working directory.
lista = dir()[grep("csv", dir())]

# Read one count file (tab-delimited despite the .csv name) and tag each
# row with the file it came from.
read_count_file <- function(f) {
  tabla <- read_delim(f, delim = "\t", col_names = FALSE)
  colnames(tabla) <- c("Gene", "Reads", "RPK", "Uniq", "Coverage")
  tabla$Sample <- f
  tabla
}

# FIX: bind all files at once instead of growing Full.table inside the loop
# with bind_rows() (which is O(n^2) and needed a first-iteration flag).
Full.table = bind_rows(lapply(lista, read_count_file))

# Split "Sample" ("<Sample>.<DataSet>.<extension>") into its components and
# discard the extension piece.
Full.table = Full.table %>% separate(Sample, c("Sample","DataSet","kk"), sep ="\\.") %>% select(-kk)
|
12a75a47ef2ea9debaed4fe8d50030a672e903f6
|
982e2e9fd6cc6415196b34454eec2ab3edd00785
|
/man/trackgroupout.Rd
|
0fcc1fe4ad63e6ed6ece65b2b77f5ab2bf1bdde4
|
[] |
no_license
|
allen-chen-noaa-gov/sur.kmeans
|
810f930ef3ef7be53c14590ba695e3e2a473d08d
|
04ab91c08d4110ccefd733eb74a93c4c58736f69
|
refs/heads/master
| 2021-07-15T18:30:09.058362
| 2020-09-22T06:17:37
| 2020-09-22T06:17:37
| 212,443,942
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 463
|
rd
|
trackgroupout.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/trackgroupout.r
\name{trackgroupout}
\alias{trackgroupout}
\title{Algorithm to move into closest group}
\usage{
trackgroupout(FullTableFull, pooledvcov, testvars, nontestvars, sigmai,
betai, betaWFE, TrackGroup, YYlist, XXlist)
}
\arguments{
\item{FullTableFull}{data}
}
\value{
New groupings
}
\description{
Algorithm to move into closest group
}
\examples{
}
|
4f1339919eb7cb0e243446a103d2d0d4388d97ce
|
def0ab934ebe0d3b8593253d02e591ab428d199b
|
/cachematrix.R
|
cd139fe5424f0683a158c68a627bc8d8f6f2f0ed
|
[] |
no_license
|
JakeNel/ProgrammingAssignment2
|
7271ccd1cb3db47ec2420cff28d1890826ade4b6
|
3fd00486712adb00919eac63a75c461288e55187
|
refs/heads/master
| 2021-01-21T06:10:59.871212
| 2015-10-23T17:13:57
| 2015-10-23T17:13:57
| 44,654,277
| 0
| 0
| null | 2015-10-21T05:17:29
| 2015-10-21T05:17:29
| null |
UTF-8
|
R
| false
| false
| 1,131
|
r
|
cachematrix.R
|
##makeCacheMatrix works to both store the value of a matrix
##as well as its inverse. cacheSolve calculates the inverse of a matrix and
##stores the value in makeCacheMatrix.
##This function stores four other functions in a list, all of which set or retrieve
##either the matrix or its inverse
makeCacheMatrix <- function(x = matrix()) {
  # Cached inverse; NULL until cacheSolve() stores one.
  m <- NULL
  set <- function(y) {
    x <<- y
    m <<- NULL  # invalidate the cache when the matrix changes
  }
  get <- function() x
  # FIX: cache into `m` — the original assigned to `mx` (a typo that leaked
  # a variable into the calling environment and never filled the cache).
  setinverse <- function(inverse) m <<- inverse
  # FIX: return the cached inverse `m` — the original returned the matrix x.
  getinverse <- function() m
  list(set = set, get = get, setinverse = setinverse, getinverse = getinverse)
}
##First, this function checks to see if an inverse was already calculated,
##in which case it returns the inverse that was previously
##calculated without recalculation. Otherwise, it calculates the inverse,
##stores the value in the cache object, and returns it.
##x is the list returned by makeCacheMatrix(); `...` is forwarded to solve().
cacheSolve <- function(x, ...) {
  m <- x$getinverse()
  if(!is.null(m)) {
    message("getting cached data")
    return(m)
  }
  data <- x$get()
  # FIX: compute the inverse itself. The original did solve(data) %*% data,
  # which always yields the identity matrix. Also take `x` as a parameter
  # instead of relying on a global variable, and drop the debug print(data).
  m <- solve(data, ...)
  x$setinverse(m)
  m
}
|
6471f30d9666ba49f1ec4d3a4aebfd813072af6d
|
773217982af4a02563a259ef2087c3a412c18741
|
/global.R
|
fc52db978faa34f66f603a2b6e45139167bb2bd7
|
[] |
no_license
|
cndesantana/DashboardCorreria
|
afb71fa87b27f8f1d690101b5847f36004c3b1a3
|
a5c1a9d9c29c3c2b8fe99072cfa9301ee2672179
|
refs/heads/master
| 2021-10-10T10:59:37.332773
| 2019-01-09T22:57:03
| 2019-01-09T22:57:03
| 113,132,380
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,876
|
r
|
global.R
|
# Package loads for the dashboard. The original file listed most packages
# twice (two overlapping copy-pasted blocks); library() on an already
# attached package is a no-op, so deduplicating — keeping FIRST-appearance
# order — leaves the search path, and therefore any function masking,
# unchanged.
library(shiny)
library(shinySignals)
library(dplyr)
library(bubbles)
library(stringr)
library(quanteda)
library(readtext)
library(ggplot2)
library(tidyr)
library(shinydashboard)
library(shinyFiles)
library(devtools)
library(Rfacebook)
library(lubridate)
library(stylo)
library(tidytext)
library(tm)
#library(wordcloud)
library(xlsx)
library(gdata)
library(readxl)
library(htmlwidgets)
library(httr)
library(ggraph)
library(igraph)
library(jpeg)
library(ngram)
library(plotrix)
library(reshape2)
library(RWeka)
library(scales)
library(tokenizers)

# Bloom-filter implementation used by userCount() below.
source("bloomfilter.R")
# Resolve a Facebook profile/page URL to its numeric Facebook ID by POSTing
# the URL to findmyfbid.com and taking the 5th "/"-separated piece of the
# `amp-redirect-to` response header.
# NOTE(review): depends entirely on that third-party site's response format —
# brittle; confirm the service still behaves this way before relying on it.
getFBID <- function(fburl){
  return(unlist(strsplit(httr::POST(url='https://findmyfbid.com',body=list(url = fburl), encode="json")$headers$`amp-redirect-to`,'/'))[5])
}
#workdir <- "/home/cdesantana/DataSCOUT/Objectiva/PapoCorreria/dashboard"
# Sentiment index for a vector of Facebook reaction labels:
# (#LOVE + #HAHA - #ANGRY - #SAD) / total of those four reaction types.
# Returns a value in [-1, 1]; NaN if none of the four reactions is present.
# FIX: the original counted against undefined variables `sentimentos` /
# `allsentimentos` (a leftover rename) while the computed `reacoes` /
# `allreacoes` went unused, so the function errored at runtime.
getIndiceDeSentimentoReactions <- function(reactions){
  reacoes <- toupper(reactions)
  allreacoes <- c("LOVE","HAHA","ANGRY","SAD");
  ml <- length(which(reacoes==allreacoes[1]));# Positive (LOVE)
  mh <- length(which(reacoes==allreacoes[2]));# Positive (HAHA)
  ma <- length(which(reacoes==allreacoes[3]));# Negative (ANGRY)
  ms <- length(which(reacoes==allreacoes[4]));# Negative (SAD)
  mt <- ml + mh + ma + ms;# Total of the four scored reaction types
  indicesentimento <- as.numeric((ml + mh - ma - ms)/mt)
  return(indicesentimento)
}
#workdir <- "/srv/shiny-server/cns/BadogueExcel"
#workdir <- "/home/cdesantana/DataSCOUT/Objectiva/BadogueExcel"
# Working directory is read from a per-machine config file so the same app
# runs unchanged on the server and on development boxes.
workdir <- system("cat ~/auxfiles/myworkdir",intern=TRUE)
setwd(workdir)
# Custom stopword list (Portuguese filler words, URLs, mojibake fragments
# and campaign-specific terms) removed before building DFMs/word clouds.
badwords<-c("boa","scontent.xx.fbcdn.net","https","oh","oe","pra","v","como","para","de","do","da","das","dos","isso","esse","nisso","nesse","aquele","nesses","aqueles","aquela","aquelas","que","q","mais","com","está","por","uma","tem","vai","pelo","meu","sobre","não","já","nos","sem","quando","xed","xbd","ser","xbe","xa0","x8f","xb9","xb2","xb0","xb1","xb8","x8c","xa3","xbc","xaa","www.youtube.com","scontent.xx.fbcdn.net","https","oh","oe","pra","v","como","para","de","do","da","das","dos","isso","esse","nisso","nesse","aquele","nesses","aqueles","aquela","aquelas","que","q","é","sr","senhor","comentário","perfil","r","que","nao","sim","comentário","feito","comentario","imagem","secretaria","foi","photos","http","bit.ly","sou","mais","vídeo","timeline","video","er","enem","soumais","maisbahia","pmba","concurso","tres","mil","quinhentos","convocacao","convocação","convoca","500","habilitados","chama","convocados","PM","aprovados","Concurso","chamados","foram","serão","serao","pmba","etapas","restantes","elimintatórias","eliminatorias","convocou","apenas","essa","bata","tres","dois","conta","eliminatorias","eliminatórias")
# Build a tidy (one row per sentiment-group/term) term-count table from a
# review data frame.
# `file` is expected to carry columns `Polaridade` (sentiment label) and
# `Conteúdo` (review text) — TODO confirm against the upstream Excel readers.
getTidySentimentos <- function(file){
  polaridade <- toupper(file$Polaridade)
  text <- file$Conteúdo
  myCorpus <- corpus(text)
  metadoc(myCorpus, "language") <- "portuguese"
  # NOTE(review): tokenInfo, the kwic() lookup and myStemMat are computed
  # but never used below — presumably exploratory leftovers.
  tokenInfo <- summary(myCorpus)
  kwic(myCorpus, "gestor")
  myStemMat <- dfm(myCorpus, remove = stopwords("portuguese"), stem = TRUE, remove_punct = TRUE)
  # Document-feature matrix grouped by polarity, with Portuguese stopwords
  # and the project-wide `badwords` list removed.
  byPolaridadeDfm <- dfm(myCorpus, groups = polaridade, remove = c(stopwords("portuguese"),badwords), remove_punct = TRUE)
  # Tidy to (sentimento, term, count) rows.
  ap_td <- tidy(byPolaridadeDfm)
  names(ap_td) <- c("sentimento","term","count")
  return(ap_td);
}
# Build a quanteda document-feature matrix from raw text, with Portuguese
# stopwords, the project-wide `badwords` list, punctuation and numbers removed.
getDFMatrix <- function(text){
  myCorpus <- corpus(text)
  metadoc(myCorpus, "language") <- "portuguese"
  # FIX: dropped the summary()/kwic()/stemmed-dfm computations the original
  # ran and discarded — they had no effect on the returned value and only
  # wasted time on every call.
  mydfm <- dfm(myCorpus, remove = c(stopwords("portuguese"),badwords), remove_punct = TRUE, remove_numbers= TRUE)
  return(mydfm)
}
# Surface full stack traces from Shiny for easier debugging.
options(shiny.fullstacktrace = TRUE)
# An empty prototype of the data frame we want to create:
# one row per CRAN download event; `received` (arrival time) is added by
# packageStream() when a batch comes in.
prototype <- data.frame(date = character(), time = character(),
                        size = numeric(), r_version = character(), r_arch = character(),
                        r_os = character(), package = character(), version = character(),
                        country = character(), ip_id = character(), received = numeric())
# Connects to streaming log data for cran.rstudio.com and
# returns a reactive expression that serves up the cumulative
# results as a data frame
packageStream <- function(session) {
  # Connect to data source
  sock <- socketConnection("cransim.rstudio.com", 6789, blocking = FALSE, open = "r")
  # Clean up when session is over
  session$onSessionEnded(function() {
    close(sock)
  })
  # Returns new lines; re-polls the socket roughly once per second.
  newLines <- reactive({
    invalidateLater(1000, session)
    readLines(sock)
  })
  # Parses newLines() into data frame. Column names come from `prototype`
  # (defined above); each batch is stamped with its wall-clock arrival time.
  reactive({
    if (length(newLines()) == 0)
      return()
    read.csv(textConnection(newLines()), header=FALSE, stringsAsFactors=FALSE,
             col.names = names(prototype)
    ) %>% mutate(received = as.numeric(Sys.time()))
  })
}
# Accumulates pkgStream rows over time, discarding any row older than
# timeWindow seconds (judged by its "received" timestamp).
packageData <- function(pkgStream, timeWindow) {
  # Fold each incoming batch into the running data frame, then prune.
  accumulate <- function(memo, value) {
    cutoff <- as.numeric(Sys.time()) - timeWindow
    combined <- rbind(memo, value)
    filter(combined, received > cutoff)
  }
  shinySignals::reducePast(pkgStream, accumulate, prototype)
}
# Count the total number of rows ever seen on pkgStream (a reactive
# running total; NULL batches contribute nothing).
downloadCount <- function(pkgStream) {
  add_batch <- function(memo, df) {
    if (is.null(df)) memo else memo + nrow(df)
  }
  shinySignals::reducePast(pkgStream, add_batch, 0)
}
# Use a bloom filter to probabilistically track the number of unique
# users we have seen; using bloom filter means we will not have a
# perfectly accurate count, but the memory usage will be bounded.
userCount <- function(pkgStream) {
  # These parameters estimate that with 5000 unique users added to
  # the filter, we'll have a 1% chance of false positive on the next
  # user to be queried.
  bloomFilter <- BloomFilter$new(5000, 0.01)
  total <- 0
  reactive({
    df <- pkgStream()
    if (!is.null(df) && nrow(df) > 0) {
      # ip_id is only unique on a per-day basis. To make them unique
      # across days, include the date. And call unique() to make sure
      # we don't double-count dupes in the current data frame.
      ids <- paste(df$date, df$ip_id) %>% unique()
      # Logical mask over ids: TRUE for IDs not yet in the filter.
      newIds <- !sapply(ids, bloomFilter$has)
      # FIX: add the NUMBER of new IDs (sum of the logical mask). The
      # original added length(newIds) — i.e. every unique ID in this batch,
      # including users already counted in earlier batches — so the total
      # over-counted returning users.
      total <<- total + sum(newIds)
      # Add the new IDs so we know for next time
      sapply(ids[newIds], bloomFilter$set)
    }
    total
  })
}
|
0118a31c2065272373ff979e97d246f5a4bffb49
|
dff4be894fcfabd2b7a201a0372170bd865a9e3e
|
/R/wine.R
|
45d3add11764f3913b30cb68c4552eb107b0fc43
|
[] |
no_license
|
zhaozhg81/AIXZ
|
ba106545a1aae22984dcbb4453318e63b7262f1a
|
49f835bf9d2ca9c9642ea3a40eb36ec925172f57
|
refs/heads/master
| 2023-05-04T20:19:10.771017
| 2023-04-17T14:51:10
| 2023-04-17T14:51:10
| 213,441,232
| 7
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,123
|
r
|
wine.R
|
library(fields)
library(LassoSIR)
library(Rdimtools)

# Wine data: column V1 is the cultivar (class 1-3); V2-V14 are 13 chemical
# measurements for 178 wines.
wine <- read.table("./data/wine.data",sep=",")

## Pairwise scatterplot
pairs(~., data=wine[,2:14])

## Correlation heat map of the 13 measurements
wine.cor <- cor( wine[,2:14] )
breaks <- seq(from=-0.9,to=0.9,length=65)
hmcols <- tim.colors(64)
image.plot(x=1:13, y=1:13, z = wine.cor, zlim = c(-0.9,0.9),col=tim.colors(64),breaks=breaks,
           xlab="pariwise correlation",ylab="", cex.lab=1.5)

## Scatter plot, labelled by cultivar.
plot(wine$V4, wine$V5)
text(wine$V4, wine$V5, wine$V1, cex=0.7, pos=4, col="red")

stand.concent <- as.matrix(scale(wine[2:14])) # standardise the variables
wine.pca <- princomp(stand.concent) # do a PCA
summary( wine.pca )

## Use the matrix to calculate the principal component analysis by hand:
## 178 samples, so dividing by 177 (= n - 1) gives the sample covariance.
S <- t(stand.concent)%*%stand.concent/177
eigen(S)$vectors[,1]

## postscript("wine_scree.eps", horizontal=FALSE)
screeplot(wine.pca, type="lines")
## dev.off()

# Indicator (one-hot) coding of the three cultivars for the multinomial fits.
y <- array(0, c(178, 3) )
y[1:59,1] <- 1
y[60:130,2] <- 1
y[131:178,3] <- 1
colnames(y)= c("cult1", "cult2", "cult3")
full.data <- data.frame(y, stand.concent)

# Multinomial logit on all 13 standardised measurements.
library(VGAM)
base.logit <- vglm( cbind(cult1, cult2, cult3) ~ .-cult1-cult2-cult3 , data=full.data, fam=multinomial(parallel=TRUE) )
summary( base.logit)
AIC( base.logit )
lrtest( base.logit )

# Multinomial logit on the first two PCA scores.
pca.data <- data.frame(y, wine.pca$scores[,1:5] )
base.logit.pca <- vglm( cbind(cult1, cult2, cult3) ~ Comp.1 + Comp.2 , data=pca.data, fam=multinomial(parallel=TRUE) )
summary(base.logit.pca)
AIC(base.logit.pca)

## postscript("wine_scatter_pca.eps",horizontal=FALSE)
plot(wine.pca$score[,1],wine.pca$score[,2]) # make a scatterplot
text(wine.pca$score[,1],wine.pca$score[,2], wine$V1, cex=0.7, pos=4, col="red") # add labels
## dev.off()

#### LDA
library(MASS)
wine.lda <- lda(V1 ~ V2 + V3 + V4 + V5 + V6 + V7 + V8 + V9 + V10 + V11 + V12 + V13 + V14, data=wine)
wine.lda2 <- lda(V1 ~ .-V1, data=wine ) # equivalent shorthand formula
wine.lda.values <- predict(wine.lda, wine[2:14])
wine.lda.values$x

## A stacked Histogram of the LDA values
## postscript("stack_hist_lda_1.eps", horizontal=FALSE)
ldahist(data = wine.lda.values$x[,1], g=wine$V1)
## dev.off()
## postscript("stack_hist_lda_2.eps", horizontal=FALSE)
ldahist(data = wine.lda.values$x[,2], g=wine$V1)
## dev.off()

## postscript("wine_lda_scatterplot.eps",horizontal=FALSE)
plot(wine.lda.values$x[,1],wine.lda.values$x[,2]) # make a scatterplot
text(wine.lda.values$x[,1],wine.lda.values$x[,2],wine$V1,cex=0.7,pos=4,col="red") # add labels
## dev.off()

## SIR (sliced inverse regression, Rdimtools)
sir.res = do.sir( as.matrix( wine[,2:14],nrow=178, ncol=13), as.matrix(wine[,1],nrow=178, ncol=1), ndim=2 )
# FIX: removed a stray trailing comma (an empty argument, which is a runtime
# error in R) from this plot() call.
plot(sir.res$Y[,1], sir.res$Y[,2]) # make a scatterplot
text(sir.res$Y[,1], sir.res$Y[,2], wine$V1, cex=0.7, pos=4, col="red") # add labels

## LassoSIR
wine.lasso.sir <- LassoSIR(as.matrix( wine[,2:14],nrow=178, ncol=13), as.matrix(wine[,1],nrow=178, ncol=1), solution.path = FALSE,
                           categorical = TRUE, nfolds = 10, no.dim=2)
X = as.matrix(wine[,2:14], nrow=178, ncol=13 )
x.comb = X%*%wine.lasso.sir$beta
plot(x.comb[,1], x.comb[,2])
text(x.comb[,1], x.comb[,2], wine$V1, cex=0.7, pos=4, col="red") # add labels
|
509e972f44db2ec78e1bd12db43766edabb89992
|
11394cd22cea3b4e644d20564ff4b500018d943e
|
/scripts/separateAnalysis/gen_cellphonedb_input.R
|
f25521f035a69c587dcec6018ceeedd3d570d1dd
|
[
"MIT"
] |
permissive
|
shunsunsun/single_cell_rna_seq_snakemake
|
3d153c9cb7db9988917aff38991217a35940aa64
|
f275546eb3bd63d5d535a13407ce47ee36e94cae
|
refs/heads/master
| 2023-04-17T08:44:13.954986
| 2021-04-27T08:13:50
| 2021-04-27T08:13:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,134
|
r
|
gen_cellphonedb_input.R
|
##https://www.cellphonedb.org/explore-sc-rna-seq
# Convert a Seurat object into the two inputs CellPhoneDB expects:
#   <prefix>_count.txt — normalised counts keyed by Ensembl gene ID
#   <prefix>_meta.txt  — cell barcode to cluster ("cell_type") mapping
library(Seurat)
library("biomaRt")
library(data.table)
# args: [1] input .rds Seurat object, [2] output file prefix, [3] assay name
args <- commandArgs(trailingOnly=T)
infile <- args[1]
outprefix <- args[2]
assay <- args[3] #SCT or RNA
se <- readRDS(infile)
counts <- GetAssayData(se, assay=assay, slot="data") #normalized counts
meta <- se@meta.data
rm(se)
# Map HGNC symbols to Ensembl gene IDs via biomaRt (needs network access).
ensembl = useMart("ensembl",dataset="hsapiens_gene_ensembl")
genes <- getBM(filters='hgnc_symbol',attributes = c('ensembl_gene_id','hgnc_symbol'),
               values = rownames(counts),mart = ensembl)
# Keep only mappable genes, then swap the symbol column for the Ensembl ID
# (the symbol column is dropped; the ID column is renamed to "Gene").
counts <- counts[rownames(counts) %in% genes$hgnc_symbol,]
counts <- tibble::rownames_to_column(as.data.frame(counts), var = 'hgnc_symbol')
counts <- merge(genes, counts, by="hgnc_symbol")
counts <- counts[,-1]
colnames(counts)[1]="Gene"
fwrite(counts, paste0(outprefix,'_count.txt'), sep='\t', quote=F, row.names=F, col.names=T)
# Cluster label per cell: "C" + the clustering-resolution column.
# NOTE(review): assumes exactly one "_res." column in the metadata — if the
# object carries several resolutions this would misbehave; confirm upstream.
clustRes <- paste0("C", meta[,grepl("_res\\.",colnames(meta))])
meta_data <- cbind(cell=rownames(meta), cell_type=clustRes) ##### cluster is the user’s specific cluster column
fwrite(meta_data, paste0(outprefix,'_meta.txt'), sep='\t', quote=F, row.names=F, col.names=T)
|
1b76d5cccefa70ba29bd97dc255267c3f041d5ef
|
4a0b1ae93f3f67fd1b499a4dd7dcc58659a7151a
|
/inst/examples/example_read_rra.R
|
993600bcefb961ceb81ae8dc4c41a724aa829cb4
|
[
"MIT"
] |
permissive
|
pldimitrov/Rrd
|
3481fa87353dbbe5874ac28c760f267a65e84b75
|
261f3865d1897e2162078efc99f45c3580782381
|
refs/heads/master
| 2021-05-04T09:38:30.689885
| 2018-05-13T22:31:23
| 2018-05-13T22:31:23
| 20,504,007
| 11
| 2
| null | 2018-05-13T22:29:16
| 2014-06-04T22:57:56
|
C
|
UTF-8
|
R
| false
| false
| 765
|
r
|
example_read_rra.R
|
# Example: reading a round-robin archive (RRA) from the bundled cpu-0.rrd
# sample file with read_rra().
rrd_cpu_0 <- system.file("extdata/cpu-0.rrd", package = "rrd")
# Note that the default end time is the current time (Sys.time())
# However, since the sample data is historic, specify the end time
start_time <- as.POSIXct("2018-05-01") # timestamp with data in example
end_time <- as.POSIXct("2018-05-02") # timestamp with data in example
# read archive by specifying start time
# (cf = consolidation function, step = archive resolution in seconds)
avg_60 <- read_rra(rrd_cpu_0, cf = "AVERAGE", step = 60L,
                   start = start_time,
                   end = end_time)
names(avg_60)
head(avg_60)
tail(avg_60)
# read archive by specifying number of rows to retrieve
avg_60 <- read_rra(rrd_cpu_0, cf = "AVERAGE", step = 60L,
                   n_rows = 5,
                   end = end_time)
names(avg_60)
avg_60
|
89333cdc3d1d298efa415ee4cac23762acb6185b
|
f39b96268d1379e951ed772147962c7dc109c305
|
/MalloryLaRusso_PCA10.R
|
affa78a8ffa7bc05bb05eb52c858df566c57f31b
|
[] |
no_license
|
mhlarusso/R-class-projects
|
98c8c5544feb9dbd96b764bf7334e53d6143d8d8
|
31810a656a738d69e2ca2c179d6d768c99355b3f
|
refs/heads/master
| 2020-07-30T12:33:29.680100
| 2019-12-10T20:39:27
| 2019-12-10T20:39:27
| 210,236,090
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 772
|
r
|
MalloryLaRusso_PCA10.R
|
####################################
##Pre-Class Activity 10
##Name: Mallory LaRusso
####################################
#Create an R script that will do the following:
library(readr)
library(dplyr)

# Pull the river data set straight from GitHub.
riverData <- read_csv("https://raw.githubusercontent.com/jbpost2/IntermediateR/master/datasets/river.csv")

# Bucket basin area (km^2) into five size classes. case_when() evaluates the
# thresholds in order; rows with missing Size_km2 satisfy no branch (the last
# condition is explicit rather than TRUE) and stay NA, exactly as the
# original nested ifelse() chain behaved.
riverData <- riverData %>%
  mutate(Size = case_when(
    Size_km2 < 15000 ~ "Small",
    Size_km2 < 100000 ~ "Medium",
    Size_km2 < 350000 ~ "Sizeable",
    Size_km2 < 700000 ~ "Large",
    Size_km2 >= 700000 ~ "Great"
  ))

# Treat the classes as an ordered factor, smallest to largest.
riverData$Size <- ordered(riverData$Size, levels = c("Small", "Medium", "Sizeable", "Large", "Great"))

# Mean Rc within each size class.
riverData %>% group_by(Size) %>% summarise(mean(Rc, na.rm = TRUE))
|
35af6c5baa391071f4c813275b8ae2365bc16697
|
50f66f58e47bc44ca599fc46826ed70288ac8439
|
/ecoregionMapInStudyAreaRaster.R
|
916834034db307f326a23046d54d82ea7e788948
|
[] |
no_license
|
ianmseddy/landwebNRV
|
a6c400acc0b1b4994afb68e720c594593ce4fbb0
|
88cf31e128c6543bfe8f72d63372f7a247fad8d1
|
refs/heads/master
| 2020-03-31T22:36:04.433059
| 2017-03-20T02:45:17
| 2017-03-20T02:45:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 920
|
r
|
ecoregionMapInStudyAreaRaster.R
|
# Classify ecoregions within a study area and write the result to a
# GeoTIFF raster (Int32, 0 as the missing-value flag).
# NOTE(review): rm(list = ls()) was removed — clearing the caller's
# workspace from inside a script is a known anti-pattern and is not
# needed for this analysis.
# NOTE(review): all paths below are machine-specific; parameterize them
# before reusing this script elsewhere.
source('~/GitHub/landwebNRV/landwebNRV/R/ecoregionClassification.R')

# Single definition of the output path (it was repeated four times).
out_tif <- "~/GitHub/landwebNRV/studyareaecoregion.tif"

# Inputs: the study-area polygon and the ecozones shapefile.
studyarea <- readRDS("C:/Users/yonluo/Documents/LandWeb/landwebsimplePoly.rds")
ecoregionMap <- rgdal::readOGR("M:/data/Ecozones/ecozones.shp",
                               layer = "ecozones")

# Classify the study area against the ecoregion map (cellSize = 100;
# units depend on the map projection — confirm against
# ecoregionClassification()).
dd <- ecoregionClassification(studyAreaMap = studyarea,
                              ecoregionMap = ecoregionMap,
                              cellSize = 100)

# Remove any stale output first; writeRaster's overwrite = TRUE would
# also handle this, but the explicit delete keeps the intent clear.
if (file.exists(out_tif)) {
  file.remove(out_tif)
}
raster::writeRaster(dd$studyareaecoregion, out_tif,
                    overwrite = TRUE)

# Round-trip through GDAL to rewrite the file as an Int32 GTiff with
# 0 flagged as the missing value.
w <- rgdal::readGDAL(out_tif)
rgdal::writeGDAL(w, out_tif,
                 drivername = "GTiff", type = "Int32", mvFlag = 0)
|
f67f763ee08fe09c30499b3fd631d17a15e25e7e
|
cbffed995d684f470a02d1af779f34f7dfef340c
|
/PredictingActivity_SamsungSensor.R
|
cc83c108b33f50e54b70fac907ddc6314e606d11
|
[] |
no_license
|
kjy/Data-Analysis-stuff
|
92a4637087b669c7158e8cd9246ac691769133d7
|
b95889e52e42a0d292056bf90f7c8fe8f82098d0
|
refs/heads/master
| 2021-01-01T17:37:01.448919
| 2013-11-08T18:26:32
| 2013-11-08T18:26:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,939
|
r
|
PredictingActivity_SamsungSensor.R
|
# Predict physical activity from Samsung smartphone sensor data.
# Workflow: load and clean the data, split it by subject into
# train / validation / test sets, fit random forest and SVM
# classifiers, and compare validation/test error rates.

# Load the pre-saved data frame `samsungData` into the workspace.
load("/Users/karenyang/Downloads/samsungData.rda")
str(samsungData)
# Clean column names: lower-case, then strip/replace the punctuation
# ("-", ",", "()") so every name is a valid R identifier.
names(samsungData) <- tolower(names(samsungData))
names(samsungData) <- gsub("-", ".", names(samsungData))
names(samsungData) <- gsub(",", ".", names(samsungData))
names(samsungData) <- gsub("\\()", "", names(samsungData))
names(samsungData) <- gsub("\\(", ".", names(samsungData))
names(samsungData) <- gsub("\\)", "", names(samsungData))
names(samsungData)
# Check which names remain duplicated after the cleanup.
names(samsungData)[duplicated(names(samsungData))]
samsungData <- data.frame(samsungData)
names(samsungData)
tail(samsungData)
# Exploratory checks on the outcome and subject identifiers.
table(samsungData$activity)
hist(samsungData$subject)
table(samsungData$subject)
hist(samsungData$subject)
sum(is.na(samsungData)) # 0 are NA
# NOTE(review): install.packages() inside an analysis script re-installs
# on every run; installation is better done once, interactively.
install.packages("randomForest")
library(randomForest)
install.packages("caret")
library(caret)
library(ElemStatLearn)
install.packages("e1071")
library(e1071)
install.packages("hydroGOF")
library(hydroGOF)
#Dataframes for train, validation, and test data sets
# Split by subject id (10 subjects per set) so no subject appears in
# more than one of train / validation / test.
trainset <- samsungData[samsungData$subject%in%c(1,3,5,6,10,13,12,16,20,23), ]
dim(trainset) #2053 563 are the dimensions
str(trainset)
trainset$activity <-as.factor(trainset$activity)
str(trainset$activity)
validset <- samsungData[samsungData$subject%in%c(2,7,8,9,14,17,19,21,24,26), ]
dim(validset) #2440 563
str(validset)
validset$activity <-as.factor(validset$activity)
str(validset$activity)
testset <- samsungData[samsungData$subject%in%c(27,28,29,30,4,11,15,18,22,25), ]
dim(testset) #2859 563 are the dimensions
str(testset)
testset$activity <-as.factor(testset$activity)
str(testset$activity)
levels(testset$activity) #"laying" "sitting" "standing" "walk" "walkdown" "walkup"
sum(is.na(samsungData)) # 0 are NA
#Model 1 Random Forest
# NOTE(review): set.seed() returns NULL invisibly, so `ss` is always
# NULL here (and below); the seeding side effect still takes place.
ss <- set.seed(456787, kind="default", normal.kind="default")
# Fit on all predictors except the subject id.
foresttrain <- randomForest(trainset$activity ~.-subject, data=trainset, prox=TRUE)
foresttrain
varImpPlot(foresttrain, sort=TRUE, pch=19, col = "brown", main="Variable Importance Plot for Random Forest")
legend("topleft", legend = "Sorted by Importance")
# Validation-set error rate = misclassified fraction.
rfpredicted <- predict(foresttrain, newdata = validset)
rfpredicted
errorrateRF <- (sum(rfpredicted != validset$activity))/length(validset$activity)
errorrateRF #0.1122951
#Correlations
# Correlations between individual predictors and the (numeric-coded)
# activity factor, used below to pick variables for the tuned models.
cor(trainset$tgravityacc.min.x, as.numeric(trainset$activity))#0.6365321
cor(trainset$angle.x.gravitymean, as.numeric(trainset$activity)) #-0.6049978
cor(trainset$tgravityacc.energy.x, as.numeric(trainset$activity)) #0.6318179
cor(trainset$tgravityacc.mean.x, as.numeric(trainset$activity)) #0.6432291
cor(trainset$tgravityacc.max.x, as.numeric(trainset$activity)) #0.6485595
cor(trainset$tgravityacc.max.y, as.numeric(trainset$activity)) #-0.6850469
cor(trainset$tgravityacc.min.y, as.numeric(trainset$activity)) #-0.695804
cor(trainset$angle.y.gravitymean, as.numeric(trainset$activity)) #0.6662157
cor(trainset$tgravityacc.energy.y, as.numeric(trainset$activity)) #-0.500473
cor(trainset$tgravityacc.mean.y, as.numeric(trainset$activity)) #-0.6927581
cor(trainset$tbodyacc.max.x, as.numeric(trainset$activity)) #0.8150434
#Model 2 SVM
ss <- set.seed(33833, kind="default", normal.kind="default")
svmModel <- svm(trainset$activity ~.-subject, data=trainset)
svmModel
SVMpredicted <- predict(svmModel, newdata = validset)
SVMpredicted
plot(SVMpredicted, validset$activity)
# Confusion table and agreement statistics for the SVM.
tab <- table(pred=SVMpredicted, true = validset$activity)
tab
classAgreement(tab)
errorrateSVM <-(sum(SVMpredicted != validset$activity))/length(validset$activity)
errorrateSVM #0.1467213
result <-confusionMatrix(SVMpredicted, validset$activity)
result
#Tuned model for random forest
# Reduced model: the 11 predictors with the strongest correlations.
ss <- set.seed(456787, kind="default", normal.kind="default")
foresttrain2 <- randomForest(trainset$activity ~tgravityacc.min.x + angle.x.gravitymean + tgravityacc.energy.x + tgravityacc.mean.x + tgravityacc.max.x + tgravityacc.max.y + tgravityacc.min.y + angle.y.gravitymean + tgravityacc.energy.y + tgravityacc.mean.y + tbodyacc.max.x, data=trainset, prox=TRUE)
foresttrain2
varImpPlot(foresttrain2, sort=TRUE, pch=19, col = "blue", main="Variable Importance Plot")
rfpredicted2 <- predict(foresttrain2, newdata = validset)
rfpredicted2
errorrateRF2 <- (sum(rfpredicted2 != validset$activity))/length(validset$activity)
errorrateRF2 # 0.2057377
#Tuned model for random forest
# Further-reduced model (7 predictors); note `foresttrain2` and
# `rfpredicted2` are reassigned, overwriting the 11-predictor results.
ss <- set.seed(456787, kind="default", normal.kind="default")
foresttrain2 <- randomForest(trainset$activity ~tgravityacc.min.x + angle.x.gravitymean + tgravityacc.energy.x + tgravityacc.mean.x + tgravityacc.max.x + tgravityacc.max.y + tgravityacc.min.y, data=trainset, prox=TRUE)
foresttrain2
varImpPlot(foresttrain2, sort=TRUE, pch=19, col = "blue", main="Random Forest of training dataset")
rfpredicted2 <- predict(foresttrain2, newdata = validset)
rfpredicted2
errorrateRF2 <- (sum(rfpredicted2 != validset$activity))/length(validset$activity)
errorrateRF2 # 0.3885246
#Model Test
# Evaluate the 11-predictor model on the held-out test set.
ss <- set.seed(456787, kind="default", normal.kind="default")
foresttrain3 <- randomForest(trainset$activity ~tgravityacc.min.x + angle.x.gravitymean + tgravityacc.energy.x + tgravityacc.mean.x + tgravityacc.max.x + tgravityacc.max.y + tgravityacc.min.y + angle.y.gravitymean + tgravityacc.energy.y + tgravityacc.mean.y + tbodyacc.max.x, data=trainset, prox=TRUE)
foresttrain3
varImpPlot(foresttrain3, sort=TRUE, pch=19, col = "blue", main="Variable Importance Plot of Random Forest Model for Test Set", xlab="")
legend("topleft", legend = "Sorted by Importance")
rfpredicted3 <- predict(foresttrain3, newdata = testset)
rfpredicted3
errorrateRF <- (sum(rfpredicted3 != testset$activity))/length(testset$activity)
errorrateRF # 0.1699895
#Kitchen sink model
# Full-predictor model evaluated on the test set; reuses (overwrites)
# `foresttrain`, `rfpredicted`, and `errorrateRF` from Model 1.
ss <- set.seed(456787, kind="default", normal.kind="default")
foresttrain <- randomForest(trainset$activity ~.-subject, data=trainset, prox=TRUE)
foresttrain
rfpredicted <-predict(foresttrain, newdata = testset)
rfpredicted
errorrateRF <- (sum(rfpredicted != testset$activity))/length(testset$activity)
errorrateRF
|
3b68cd61261c2a18ab1fc47ec87b5b4dc5cdb641
|
1e330c7d604dd28b1f79507ec5a5d4797ff5fa62
|
/Analisis/MosquitoReview.R
|
48bfa8a82e434ec216765ac08052ef08f375bcf6
|
[] |
no_license
|
Damian-VA/MosquitoReview
|
e87d76466849fad8723f4c4e82d7fbb9daa5daed
|
bfa0b26f49f6929172e499322520a306aa33b09d
|
refs/heads/main
| 2023-03-28T23:25:23.245235
| 2021-03-31T14:57:46
| 2021-03-31T14:57:46
| 311,420,366
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 63,210
|
r
|
MosquitoReview.R
|
# Analyses of "Mosquito bloodmeal preference
# in disturbed vs. wild landscapes (a review)"
# Guadalupe López-Nava
# Damián Villaseñor-Amador
# César A. Sandoval-Ruiz
#R map tutorial----
#By Kim Gilbert
#https://www.molecularecologist.com/2012/09/making-maps-with-r/
#Packages
library(maps) #map functions
library(mapdata) #basic world map data
library(maptools)#for shapefiles
library(scales) #for transparency
#Canada map
#Coordinates in decimals, R won't read
#degrees, minutes and seconds
map("worldHires","Canada",
xlim=c(-141,-53), #longitude
ylim=c(40,85), #latitude
col="gray90",
fill=T)
#superior left corner of USA+Latinamerica: -131.256851W, 51.229805N
#inferior right corner of USA+Latinamerica: -29.305435W, -56.884485S,
map(database="worldHires",
xlim=c(-131,-29), #longitude
ylim=c(51,-56)) #latitude
#long story short: I gave up, lets use
#the full world map for now
map()
#Mosquito database
#Note: it only has one mosquito species
#column and one landscape column
mosquito <- read.csv("./Analisis/MosquitoData.txt", header=T, sep="\t")
head(mosquito)
#Plotting mosquito study locations
#and saving it as a png image
png("MosquitoMap.png", width = 1080, height = 720, units = "px")
map()
points(mosquito$Long_dec, mosquito$Lat_dec, pch="\uD83E\uDD9F", col="navyblue", cex=2)
dev.off()
#Plotting temporal trend of mosquito
#studies (as suggested by Nakagawa
#et al., 2018; pp. 3)
plot(as.numeric(mosquito$ID)~mosquito$Year)
levels(mosquito$ID)
levels(as.factor(mosquito$Year))
length(levels(mosquito$ID))
#15 November 2020. Map 2nd try----
#superior left corner of USA+Latinamerica: -53 S, -131 W
#inferior right corner of USA+Latinamerica: 50 N, -23 W,
#Importing mosquito study sites
mosquito <- read.csv("./Analisis/MosquitoData.txt", header=T, sep="\t")
mosquitosymbol <- "\uD83E\uDD9F"
png("Map_WrongCoords.png", units="in", width=16, height=16, res=600)
map(database="worldHires",
regions = c("USA",
"Mexico",
"Guatemala",
"Belize",
"El Salvador",
"Honduras",
"Nicaragua",
"Panama",
"Costa Rica",
"Venezuela",
"Colombia",
"Guyana",
"French Guiana",
"Suriname",
"Brazil",
"Ecuador(?!:Galapagos)",
"Peru",
"Bolivia",
"Paraguay",
"Uruguay",
"Chile",
"Argentina"),
xlim=c(-124,-35), #longitude (left, right)
ylim=c(-35,49), #latitude (bottom, top)
col="gray90",
fill=T)
#plotting mosquito study sites
points(mosquito$Long_dec, mosquito$Lat_dec,
pch=21,
col="white",
bg="black",
lwd=2,
cex=5)
dev.off()
#Temporal trend 17-Nov-2020----
#Temporal publication series of the 20
#articles that we have chosen for the
#review
#Specific database to plot this graph
TempSeries = read.table("./Analisis/MosquitoYearPubs.txt", header=T)
head(TempSeries)
png("Mosquito_TempSer.png", units="in", width=8, height=4, res=300)
#Plot
plot(TempSeries$Year,TempSeries$Publications,
ylim = c(1,4),
las=1,
xlab = "",
ylab = "Publications",
type="b",
axes=F)
#Y axis
axis(2,at=c(1:4),labels=c(1:4),las=1)
#X axis
axis(1,at=levels(as.factor(TempSeries$Year)),labels=F,las=3)
#X axis labels
text(x = levels(as.factor(TempSeries$Year)),
y = par("usr")[3] - 0.45,
labels = levels(as.factor(TempSeries$Year)),
xpd = NA,
## Rotate the labels by 35 degrees.
srt = 40,
cex = 1,
adj=0.7)
dev.off()
#Lupita's map 18-Nov-20 ----
library(tidyverse)
library(ggthemes) # for a mapping theme
# if you have a more recent version of ggplot2, it seems to clash with the ggalt package
# installing this version of the ggalt package from GitHub solves it
# You might need to also restart your RStudio session
devtools::install_github("eliocamp/ggalt@new-coord-proj") # for
library(praise)
library(ggalt)
library(ggrepel) # for annotations
library(viridis) # for nice colours
library(broom) # for cleaning up models
devtools::install_github("wilkox/treemapify")
library(treemapify) # for making area graphs
library(wesanderson) # for nice colours
library(devtools)
library(ellipsis)
# Data
dir()
mosquitos <- read_csv("Mosquito_Review.csv")
view(mosquitos)
str(mosquitos)
mosquitos <- dplyr::select(mosquitos, LatN_dec, LongW_dec, author_key, MosquitoSpecies1, Year )
names(mosquitos) <- c("lat", "long", "author", "species", "year")
mosquitos<-mosquitos[!(mosquitos$author=="Komar2018"| mosquitos$author=="NA"),]
mosquitos$long <- as.numeric (mosquitos$long)
mosquitos$lat <- as.numeric (mosquitos$lat)
mosquitos$species <- as.factor (mosquitos$species)
str(mosquitos)
view(mosquitos)
# Get the shape of America
america <- map_data("world", region = c("USA", "Canada", "Argentina","Bolivia", "Brazil", "Chile", "Colombia", "Costa Rica", "Cuba", "Ecuador","El Salvador", "Guatemala", "Haití", "Honduras", "Mexico", "Nicaragua", "Panama", "Paraguay","Peru","Republica Dominicana","Uruguay","Venezuela", "Puerto Rico", "Guayana Francesa", "San Bartolomé", "Guadalupe"))
# A very basic map
(mosquitos_map1 <- ggplot() +
geom_map(map = america, data = america,
aes(long, lat, map_id = region),
color = "gray80", fill = "gray80", size = 0.1) +
# Add points for the site locations
geom_point(data = mosquitos,
aes(x = long, y = lat),
colour = "#3A5FCD") +
theme_classic())
ggsave(mosquitos_map1, filename = "mosquitos_map.png",
height = 5, width = 8)
#Richness barplots 20-Nov-20 ----
mosquitos <- read.csv("./Analisis/Mosquito_Review.csv",header=T)
names(mosquitos)
# #New column of landscape with two levels:
# #disturbed (urban, rural) and wild (wild)
# mosquitos$Landscape2 = mosquitos$Landscape1
# levels(mosquitos$Landscape2) <- c(levels(mosquitos$Landscape2), "disturbed")
# mosquitos$Landscape2[mosquitos$Landscape2 == 'rural'] <- 'disturbed'
# mosquitos$Landscape2[mosquitos$Landscape2 == 'urban'] <- 'disturbed'
# mosquitos$Landscape2 = droplevels(mosquitos$Landscape2)
# #Creating host richness column
#
# #Empty vector to fill it with values
# RichnessVector = numeric()
#
# #Loop to extrac host richness per row
# for(i in 1:length(mosquitos$BloodSource1)){
#
#
# #Numeric vector of hosts per row: each host's
# #ID is the number of letters of its name
# #(ex. "Bos_taurus" ID is "10")
# HostsPerRow = as.numeric(mosquitos[i,17:72])
#
# #Host richness per row (length of hosts' IDs)
# HostRichness = length(HostsPerRow[!is.na(HostsPerRow)])
#
# #Fill the empty vector with host richness values
# RichnessVector = c(RichnessVector,HostRichness)
#
# }
#
# #Fill the "host_richness" column of the
# #mosquitos' database with the values recovered
# #in the loop
# mosquitos$host_richness = RichnessVector
#
# write.csv(mosquitos,file="./Analisis/Mosquito_Review.csv")
#Boxplot of landscape vs. host richness
plot(mosquitos$Landscape1, mosquitos$host_richness)
#Boxplot of landscape (disturbed or wild)
#vs host richness:
plot(mosquitos$Landscape2, mosquitos$host_richness)
#_____________________________________________
#Number of hosts observations in disturbed: 118
length(mosquitos$host_richness[mosquitos$Landscape2=="disturbed"])
#Mean host observations in disturbed: 5.466102
mean(mosquitos$host_richness[mosquitos$Landscape2=="disturbed"])
#Median host observations in disturbed: 5
median(mosquitos$host_richness[mosquitos$Landscape2=="disturbed"])
#Standard deviation host observations in disturbed: 5.503196
sd(mosquitos$host_richness[mosquitos$Landscape2=="disturbed"])
#Standard error host observations in disturbed: 0.5087706
sd(mosquitos$host_richness[mosquitos$Landscape2=="disturbed"])/
sqrt(length(mosquitos$host_richness[mosquitos$Landscape2=="disturbed"])-1)
#Selecting subgroup of the dataset that only
#encompasses "disturbed environment" values
disturbed = mosquitos[mosquitos$Landscape2=="disturbed",]
#Naming that subgroup "disturbed frequencies" (distFreq)
distFreq = data.frame(disturbed$MosquitoSpecies1,disturbed$host_richness)
#Assigning column names to distFreq
colnames(distFreq) = c("mosquito","host")
#Dropping non-used levels of mosquito
#species in distFreq table
distFreq$mosquito = droplevels(distFreq$mosquito)
#Sorting distFreq by alphabetical order
#of mosquito species and from max to min
#host number (VERY IMPORTANT STEP!!!)
distFreq = distFreq[order(distFreq$mosquito,-distFreq$host),]
#Eliminate duplicated species rows
#If the distFreq data table hasn't been
#arranged by max to min host number, then
#you're going to LOOSE HOST RICHNESS
#so be careful about this step and
#the previous one
distMaxHost = distFreq[!(duplicated(distFreq$mosquito)),]
#Sort distMaxHost (the table only with
#the maximum amount possible of bloodmeal source
#hosts per mosquito species) from max to min
distMaxHost = distMaxHost[order(-distMaxHost$host),]
#Number of mosquito species in disturbed environments
length(distMaxHost$mosquito)
# 32 spp.
#Disturbed environment mosquito host frequencies represented in a barplot
#Save as image
png("Disturbed_Hosts.png", units="in", width=12, height=15, res=300)
#Overall plot settings
par(mai=c(1.5,3,0,1.5), cex=1.2)
#Mosquito species names vector
mosquitoSpecies<-gsub("_"," ",distMaxHost$mosquito)
#Barplot
barplot(sort(distMaxHost$host),horiz = T,
xlab= "",
xlim=c(0,35),
ylab="",
xaxt="n",
names.arg = rev(mosquitoSpecies),
las=1,
font=3,
cex.axis = 2,
col = "grey90")
axis(1,at=seq(from=0,to=35,by=10),labels = seq(from=0,to=35,by=10), cex.axis=1.5)
#X axis label
text(x = 26.5,
y = par("usr")[3] - 3,
labels = "Number of bloodmeal source hosts (disturbed)",
xpd = NA,
srt = 0,
cex = 2,
adj=0.7)
#Mosquito species with 1 host
text(2,1.5,labels = "1", cex=1.5)
#Mosquito species with 2 hosts
text(3,7.5,labels = "2", cex=1.5)
#Mosquito species with 3 hosts
text(4,11.5,labels = "3", cex=1.5)
#Mosquito species with 4 hosts
text(5,12.7,labels = "4", cex=1.5)
#Mosquito species with 5 hosts
text(6,18.5,labels = "5", cex=1.5)
#Mosquito species with 6 hosts
text(7,25.5,labels = "6", cex=1.5)
#Mosquito species with 7 hosts
text(8,29,labels = "7", cex=1.5)
#Mosquito species with 9 hosts
text(9.5,30.7,labels = "9", cex=1.5)
#Mosquito species with 10 hosts
text(11,32,labels = "10", cex=1.5)
#Culex erraticus with 17 hosts
text(17.5,33.2,labels = "17", cex=1.5)
#Culex restuans with 23 hosts
text(24,34.2,labels = "23", cex=1.5)
#Aedes vexans with 30 hosts
text(30.5,35.5,labels = "30", cex=1.5)
#Culex quinquefasciatus with 31 hosts
text(32,36.8,labels = "31", cex=1.5)
#Culex pipiens with 32 hosts
text(33,38,labels = "32", cex=1.5)
dev.off()
#__________________________________________
#Number of host observations in wild: 88
length(mosquitos$host_richness[mosquitos$Landscape2=="wild"])
#Mean host observations in wild: 5.397727
mean(mosquitos$host_richness[mosquitos$Landscape2=="wild"])
#Median host observations in wild: 5
median(mosquitos$host_richness[mosquitos$Landscape2=="wild"])
#Standard deviation host observations in wild: 6.114877
sd(mosquitos$host_richness[mosquitos$Landscape2=="wild"])
#Standard deviation host observations in wild: 0.6555836
sd(mosquitos$host_richness[mosquitos$Landscape2=="wild"])/
sqrt(length(mosquitos$host_richness[mosquitos$Landscape2=="wild"])-1)
#Selecting subgroup of the dataset that only
#encompasses "wild environment" values
wild = mosquitos[mosquitos$Landscape2=="wild",]
#Naming that subgroup "wild frequencies" (wildFreq)
wildFreq = data.frame(wild$MosquitoSpecies1,wild$host_richness)
#Assigning column names to wildFreq
colnames(wildFreq) = c("mosquito","host")
#Dropping non-used levels of mosquito
#species in wildFreq table
wildFreq$mosquito = droplevels(wildFreq$mosquito)
#Sorting wildFreq by alphabetical order
#of mosquito species and from max to min
#host number (VERY IMPORTANT STEP!!!)
wildFreq = wildFreq[order(wildFreq$mosquito,-wildFreq$host),]
# #Creating new column with only the maximum
# #observation number of hosts per mosquito
# #species
# wildFreq$maxHosts=ave(wildFreq$host,wildFreq$mosquito,FUN=max)
#Eliminate duplicated species rows
#If the wildFreq data table hasn't been
#arranged by max to min host number, then
#you're going to LOOSE HOST RICHNESS
#so be careful about this step and
#the previous
wildMaxHost = wildFreq[!(duplicated(wildFreq$mosquito)),]
#Sort wildMaxHost (the table only with
#the maximum amount possible of bloodmeal source
#hosts per mosquito species) from max to min
wildMaxHost = wildMaxHost[order(-wildMaxHost$host),]
#Number of mosquito species in wild environments
length(wildMaxHost$mosquito)
# 50 spp.
#Wild environment mosquito host frequencies
#represented in a barplot
#Save as image
png("Wild_Hosts.png", units="in", width=12, height=15, res=300)
#Overall plot settings
par(mai=c(1.5,3,0,1.5), cex=1.2)
#Mosquito species names vector
mosquitoSpecies<-gsub("_"," ",wildMaxHost$mosquito)
#Barplot
barplot(sort(wildMaxHost$host),horiz = T,
xlab= "",
xlim=c(0,60),
ylab="",
xaxt="n",
names.arg = rev(mosquitoSpecies),
las=1,
font=3,
cex.axis = 2,
col = "seagreen")
axis(1,at=seq(from=0,to=60,by=10),labels = seq(from=0,to=60,by=10), cex.axis=1.5)
#X axis label
text(x = 40,
y = par("usr")[3] - 5,
labels = "Number of bloodmeal source hosts (wild)",
xpd = NA,
srt = 0,
cex = 2,
adj=0.7)
#Mosquito species with 1 host
text(2,4.5,labels = "1", cex=1.5)
#Mosquito species with 2 hosts
text(3,12,labels = "2", cex=1.5)
#Mosquito species with 3 hosts
text(4,17,labels = "3", cex=1.5)
#Mosquito species with 4 hosts
text(5,24,labels = "4", cex=1.5)
#Mosquito species with 5 hosts
text(6,29.5,labels = "5", cex=1.5)
#Mosquito species with 6 hosts
text(7,34.5,labels = "6", cex=1.5)
#Mosquito species with 7 hosts
text(8,43.5,labels = "7", cex=1.5)
#Mosquito species with 8 hosts
text(9,52,labels = "8", cex=1.5)
#Mosquito species with 9 hosts
text(10,56,labels = "9", cex=1.5)
#Culex territans with 12 hosts
text(13,57,labels = "12", cex=1.5)
#Culex peccator with 20 hosts
text(22,58.2,labels = "20", cex=1.5)
#Culex erraticus with 56 hosts
text(58,59.5,labels = "56", cex=1.5)
dev.off()
#__________________________________________
#Wilcoxon test because host observations
#don't follow a normal distribution
wilcox.test(mosquitos$host_richness[mosquitos$Landscape2=="disturbed"],mosquitos$host_richness[mosquitos$Landscape2=="wild"])
#W = 4823, p-value = 0.3802
#Wilcoxon test for maximum amount
#of hosts of wild vs. disturbed
wilcox.test(wildMaxHost$host,distMaxHost$host)
#W = 924.5, p-value = 0.3387
#_________________________________________
disturbedVector = rep("disturbed",length(distMaxHost$mosquito))
disturbedTable = cbind(distMaxHost,disturbedVector)
colnames(disturbedTable) = c("mosquito_sp","host","landscape")
wildVector = rep("wild",length(wildMaxHost$mosquito))
wildTable = cbind(wildMaxHost,wildVector)
colnames(wildTable) = c("mosquito_sp","host","landscape")
distVSwild = rbind(disturbedTable,wildTable)
distVSwild = distVSwild[order(distVSwild$mosquito_sp),]
write.csv(distVSwild, file="./Analisis/mosquito_environment.csv", row.names = F)
#Lupita's updated map 20-Nov-20----
# Packages #
library(tidyverse)
library(ggthemes) # for a mapping theme
# if you have a more recent version of ggplot2, it seems to clash with the ggalt package
# installing this version of the ggalt package from GitHub solves it
# You might need to also restart your RStudio session
devtools::install_github("eliocamp/ggalt@new-coord-proj") # for
library(praise)
library(ggalt)
library(ggrepel) # for annotations
library(viridis) # for nice colours
library(broom) # for cleaning up models
devtools::install_github("wilkox/treemapify")
library(treemapify) # for making area graphs
library(wesanderson) # for nice colours
library(devtools)
library(ellipsis)
# Data #
dir()
mosquitos1 <- read_csv("Mosquito_map.csv")
view(mosquitos1)
str(mosquitos1)
# Get the shape of America #
america <- map_data("world", region = c("USA", "Canada", "Argentina","Bolivia", "Brazil", "Chile", "Colombia", "Costa Rica", "Cuba", "Ecuador","El Salvador", "Guatemala", "Haití", "Honduras", "Mexico", "Nicaragua", "Panama", "Paraguay","Peru","Republica Dominicana","Uruguay","Venezuela", "Puerto Rico", "Guayana Francesa", "San Bartolomé", "Guadalupe"))
# MAP without authors
(mosquitos_map1 <- ggplot() +
geom_map(map = america, data = america,
aes(long, lat, map_id = region),
color = "gray80", fill = "gray80", size = 0.3) +
coord_proj(paste0("+proj=wintri"),
xlim = c(-125, -30)) +
geom_point(data = mosquitos1,
aes(x =lon, y = lat, fill = landscape),
alpha = 0.8, size = 6, colour =
"grey30", shape = 21)+
theme_map(base_size = 20) + theme(legend.position = "left"))
ggsave(mosquitos_map1, filename = "mosquitos_map_noauthors.png",
height = 7, width = 9)
#map with authors
(mosquitos_map2 <- ggplot() +
geom_map(map = america, data = america,
aes(long, lat, map_id = region),
color = "gray80", fill = "gray80", size = 0.3) +
coord_proj(paste0("+proj=wintri"),
xlim = c(-125, -30)) +
geom_point(data = mosquitos1,
aes(x =lon, y = lat, fill = landscape),
alpha = 0.8, size = 6, colour =
"grey30", shape = 21)+
theme_map(base_size = 20) + theme(legend.position = "left") +
geom_label_repel(data = mosquitos1,
aes(x = lon, y = lat,
label = author),
box.padding = 1, size = 3, nudge_x= 0.5, nudge_y = 0.5))
ggsave(mosquitos_map2, filename = "mosquitos_map_new.png",
height = 7, width = 9)
#Disturbed vs. wild barplots 21-Nov-20----
mosq <- read.csv("./Analisis/mosquito_environment.csv",header=T)
#How many mosquito species
mosqFULL = mosq[!(duplicated(mosq$mosquito)),]
length(mosqFULL$mosquito_sp)
# 66 spp.
#How many mosquito species whose blood hosts
#observations were made in both disturbed and
#wild environments
mosqBOTH = mosq[(duplicated(mosq$mosquito)),]
length(mosqBOTH$mosquito_sp)
# 16 spp.
#create new data frame with only those
#species that appear in both environments
#(16 spp. x 2 environments = 32 rows)
mosqDW = mosq[duplicated(mosq$mosquito_sp)|duplicated(mosq$mosquito_sp, fromLast = T),]
#mosqDW = mosqDW[order(mosqDW$mosquito_sp,-mosqDW$host),]
#Barplot disturbed vs. wild host number per
#mosquito species
#Save as image
png("disturbedVSwild.png", units="in", width=12, height=15, res=300)
#Overall plot settings
par(mai=c(1.5,3,0,1.5), cex=1)
#Mosquito species names vector
mosquitoSpecies<-gsub("_"," ",mosqDW$mosquito_sp)
#Barplot
barplot(mosqDW$host,
horiz = T,
xlab= "",
xlim=c(0,60),
ylab="",
xaxt="n",
names.arg = mosquitoSpecies,
las=1,
font=3,
cex.axis = 2,
col = c("gray90","seagreen","gray90","seagreen","gray90","seagreen","gray90","seagreen","gray90","seagreen","gray90","seagreen","gray90","seagreen","gray90","seagreen","gray90","seagreen","gray90","seagreen","gray90","seagreen","gray90","seagreen","gray90","seagreen","gray90","seagreen","gray90","seagreen","gray90","seagreen"))
axis(1,at=seq(from=0,to=60,by=10),labels = seq(from=0,to=60,by=10), cex.axis=1.5)
#X axis label
text(x = 34,
y = par("usr")[3] - 3,
labels = "Number of bloodmeal source hosts",
xpd = NA,
srt = 0,
cex = 2,
adj=0.7)
dev.off()
#Corrections 23-November-20
#Make mosquito species names larger, unique
#and both breaks for disturbed and wild must
#be glued to each other
mosq <- read.csv("./Analisis/mosquito_environment.csv",header=T)
mosqDW = mosq[duplicated(mosq$mosquito_sp)|duplicated(mosq$mosquito_sp, fromLast = T),]
#Separating hosts in wild and hosts in disturbed
#for those 16 mosquito species that have hosts in
#both environments
mosqDW
wildData = mosqDW[mosqDW$landscape=="wild",]
wildData = wildData[,c(1,2)]
colnames(wildData) = c("mosquito","hostsInWild")
disturbedData = mosqDW[mosqDW$landscape=="disturbed",]
disturbedData = disturbedData[,c(1,2)]
colnames(disturbedData) = c("mosquito","hostsInDisturbed")
wildDistDataframe = merge(wildData,disturbedData, by="mosquito")
#Converting dataframe to matrix
wildDistMatrix = rbind(as.numeric(wildDistDataframe$hostsInWild),as.numeric(wildDistDataframe$hostsInDisturbed))
rownames(wildDistMatrix) = c("wild","disturbed")
colnames(wildDistMatrix) = wildDistDataframe$mosquito
wildDistMatrix=wildDistMatrix[,ncol(wildDistMatrix):1]
#Save as image
png("disturbedVSwild.png", units="in", width=15, height=15, res=300)
#Overall plot settings
par(mai=c(1.5,8,0,0), cex=2)
#Bar colors
environmentColors = c("seagreen","gray90")
#Bar names
mosquitoSpp <- gsub("_"," ",wildDistDataframe$mosquito)
barplot(wildDistMatrix,
beside=T,
horiz = T,
xlim=c(0,65),
names.arg = rev(mosquitoSpp),
xlab="",
ylab="",
xaxt="n",
las=1,
font=3,
cex.names = 1.5,
col = environmentColors)
#X axis values
axis(1,at=c(0,10,20,30,40,50,60),labels = c("0","10","20","30","40","50","60"), cex.axis=1.2)
#X axis label
text(x = 39,
y = par("usr")[3] - 4.5,
labels = "Number of bloodmeal source hosts",
xpd = NA,
srt = 0,
cex = 1.2,
adj=0.7)
dev.off()
#Rep and Pseudorep 23-Nov-20----
mosquitos <- read.csv("./Analisis/Mosquito_Review.csv",header=T)
names(mosquitos)
#Extracting mosquito species, host richness,
#host Class and landscape (disturbed or wild)
mos = mosquitos[,c(10,11,93,73:76,78,9)]
colnames(mos) = c("sp","host","landscape","mammalia","aves","amphibia","reptilia","location","id")
#How many samples per mosquito species we have?
mosSamples = as.data.frame(table(mos$sp))
mosSamples = mosSamples[order(-mosSamples$Freq),]
colnames(mosSamples) = c("mosquito","samples")
write.csv(mosSamples,file="./Analisis/pseudoreplication.csv", row.names = F)
#Aedes scapularis has 25 samples, those
#aren't true replicates for we only have
#21 studies. That means, Aedes scapularis was
#sampled more than once in, at least, one
#study. Ergo this samples are pseudoreplicated
#Join species and study ID
mos$spID = paste(mos$sp,mos$id,sep = ",")
mosReplicates = as.data.frame(table(mos$spID))
mosReplicates = mosReplicates[order(-mosReplicates$Freq),]
colnames(mosReplicates) = c("mosquito","true_replicates")
head(mosReplicates,7)
head(mosSamples,7)
write.csv(mosReplicates,file="./Analisis/replication.csv", row.names = F)
#Dist VS wild barplot with aegypti and albopictus 25-Nov-20----
# Reads the per-species / per-landscape host table and draws a horizontal
# grouped barplot comparing host counts in wild vs disturbed landscapes.
mosq <- read.csv("./Analisis/mosquito_environment.csv",header=T)
# Keep only species that appear more than once (both duplicated() passes
# together flag every copy of a repeated species, not just the later ones).
mosqDW = mosq[duplicated(mosq$mosquito_sp)|duplicated(mosq$mosquito_sp, fromLast = T),]
#Separating hosts in wild and hosts in disturbed
#for those 16 mosquito species that have hosts in
#both environments
mosqDW
wildData = mosqDW[mosqDW$landscape=="wild",]
wildData = wildData[,c(1,2)]
colnames(wildData) = c("mosquito","hostsInWild")
disturbedData = mosqDW[mosqDW$landscape=="disturbed",]
disturbedData = disturbedData[,c(1,2)]
colnames(disturbedData) = c("mosquito","hostsInDisturbed")
# One row per species with both landscape counts side by side
wildDistDataframe = merge(wildData,disturbedData, by="mosquito")
#Adding Aedes aegypti and Aedes albopictus to
#the dataframe (0 hosts in wild; their disturbed count comes from mosq).
# NOTE(review): this assumes each species has exactly ONE matching row in
# mosq -- if several rows matched, c() would build an over-long vector and
# the rbind below would fail. Confirm against the CSV.
# NOTE(review): rbind()-ing character vectors coerces all columns to
# character; the as.numeric() calls below undo that on purpose.
AeAegypti = c("Aedes_aegypti",0,mosq$host[mosq$mosquito_sp=="Aedes_aegypti"])
AeAlbopictus = c("Aedes_albopictus",0,mosq$host[mosq$mosquito_sp=="Aedes_albopictus"])
wildDistDataframe2 = rbind(wildDistDataframe,AeAegypti,AeAlbopictus)
wildDistDataframe2 = wildDistDataframe2[order(wildDistDataframe2$mosquito),]
#Converting dataframe to matrix (rows = landscape, cols = species)
wildDistMatrix = rbind(as.numeric(wildDistDataframe2$hostsInWild),as.numeric(wildDistDataframe2$hostsInDisturbed))
rownames(wildDistMatrix) = c("wild","disturbed")
colnames(wildDistMatrix) = wildDistDataframe2$mosquito
# Reverse the column order so the alphabetical list reads top-to-bottom
# once drawn as horizontal bars.
wildDistMatrix=wildDistMatrix[,ncol(wildDistMatrix):1]
#Save as image
png("distVSwild_aegypti_albopictus.png", units="in", width=15, height=15, res=300)
#Overall plot settings (wide left margin for species names)
par(mai=c(1.5,8,0,0), cex=2)
#Bar colors
environmentColors = c("seagreen","gray90")
#Bar names: underscores replaced by spaces for display
mosquitoSpp <- gsub("_"," ",wildDistDataframe2$mosquito)
barplot(wildDistMatrix,
        beside=T,
        horiz = T,
        xlim=c(0,65),
        names.arg = rev(mosquitoSpp),
        xlab="",
        ylab="",
        xaxt="n",
        las=1,
        font=3,
        cex.names = 1.4,
        col = environmentColors)
#X axis values
axis(1,at=c(0,10,20,30,40,50,60),labels = c("0","10","20","30","40","50","60"), cex.axis=1.2)
#X axis label (placed manually below the axis)
text(x = 39,
     y = par("usr")[3] - 4.5,
     labels = "Number of bloodmeal source hosts",
     xpd = NA,
     srt = 0,
     cex = 1.2,
     adj=0.7)
dev.off()
#Host class stacked 25-Nov-20----
#Importing database and extracting the top 8
#mosquito records with the most host richness
#reported, with their respective number of mammal,
#bird, amphibian and reptile hosts
mosquitos <- read.csv("./Analisis/Mosquito_Review.csv",header=T)
# Columns selected by position -- fragile if the CSV layout ever changes.
mos = mosquitos[,c(10,11,93,73:76,78,9)]
colnames(mos) = c("sp","host","landscape","mammalia","aves","amphibia","reptilia","location","id")
# Keep one row per species: sort hosts descending within species, then drop
# later duplicates, so the retained row is each species' maximum.
mos = mos[order(mos$sp,-mos$host),]
mos = mos[!(duplicated(mos$sp)),]
mos = mos[order(-mos$host),]
mos[1:8,]
#Transforming dataframe to a matrix to barplot it (rows = host class)
mosMatrix = rbind(as.numeric(mos$aves),as.numeric(mos$mammalia),as.numeric(mos$amphibia),as.numeric(mos$reptilia))
rownames(mosMatrix) = c("Aves","Mammalia","Amphibia","Reptilia")
colnames(mosMatrix) = mos$sp
mosMatrix[,1:8]
#Save as image
png("hostClass.png", units="in", width=28, height=15, res=300)
#Overall plot settings (deep bottom margin for slanted species names)
par(mai=c(6,2,0,0), cex=2)
#Bar colors (one per host class, matching the rownames order above)
hostClassColors = c("#00BFC4","#F8766D","#7CAE00","#C77CFF")
#Bar names: only the 20 most host-rich species are plotted
mosquitoSpp <- gsub("_"," ",colnames(mosMatrix[,1:20]))
hostClassbp = barplot(mosMatrix[,1:20],
                      horiz = F,
                      ylim=c(0,70),
                      names.arg = mosquitoSpp,
                      xlab="",
                      ylab="",
                      xaxt="n",
                      yaxt="n",
                      las=2,
                      font=3,
                      col = hostClassColors)
# #X axis values if horizontal barplots is true
# axis(1,at=c(0,10,20,30,40,50,60),labels = c("0","10","20","30","40","50","60"), cex.axis=1.2)
#
# #X axis label if horizontal barplots is true
# text(x = 39,
#      y = par("usr")[3] - 4.5,
#      labels = "Number of bloodmeal source hosts",
#      xpd = NA,
#      srt = 0,
#      cex = 1.2,
#      adj=0.7)
#Y axis values if vertical barplots is true
axis(2,at=c(0,10,20,30,40,50,60),labels = c("0","10","20","30","40","50","60"), cex.axis=1.2, las=1)
#Y axis label if vertical barplots is true
text(x = par("usr")[3] - 2.5,
     y = 35,
     labels = "Host richness",
     xpd = NA,
     srt = 90,
     cex = 1.7,
     adj=0.7)
#X axis values if vertical barplots is true (slanted species names placed
#manually at the bar midpoints returned by barplot())
text(x = hostClassbp,
     y = par("usr")[3] - 3,
     labels = mosquitoSpp,
     xpd = NA,
     srt = 50,
     cex = 1.7,
     adj = 0.99,
     font = 3)
legend(20,65,
       legend = rownames(mosMatrix),
       col = hostClassColors,
       bty = "n",
       pch = 16,
       y.intersp = 1.4,
       pt.cex=3,
       cex=2)
dev.off()
#Checking Homo sapiens predominance 1-Dec-20----
# Goal: list the 20 mosquito species with the largest reported host richness.
mosquitos <- read.csv("./Analisis/Mosquito_Review.csv", header = TRUE)
# Columns of interest, selected by position in the review spreadsheet.
keepCols <- c(10, 11, 93, 73:76, 78, 9)
mos <- mosquitos[, keepCols]
names(mos) <- c("sp", "host", "landscape", "mammalia", "aves",
                "amphibia", "reptilia", "location", "id")
# Retain, per species, only the record with the highest host richness:
# sort species alphabetically with hosts DESCENDING, then drop the later
# duplicates so the kept row is the maximum.
mos <- mos[order(mos$sp, -mos$host), ]
mos <- mos[!duplicated(mos$sp), ]
# Rank the one-row-per-species table from most to fewest hosts.
mos <- mos[order(-mos$host), ]
head(mos, 20)
#For each of the 20 mosquito species the number
#of human bloodmeals in relation to the entirety of
#mammal and total bloodmeals was manually retrieved
#from the cited source. Vector layout per species:
#(species, human bloodmeals, mammal bloodmeals, total bloodmeals)
CxErra = c("Culex_erraticus",31,691,1162) #Burkett-Cadena2011
CxPipi = c("Culex_pipiens",100,121,636) #Hamer2009
CxQuin = c("Culex_quinquefasciatus",0,19,96) #Greenberg2013
AeVexa = c("Aedes_vexans",11,206,213) #Greenberg2013
CxRest = c("Culex_restuans",31,37,221) #Hamer2009
CxPecc = c("Culex_peccator",5,8,158) #Burkett-Cadena2011
CxTerr = c("Culex_territans",0,0,118) #Burkett-Cadena2011
CqVene = c("Coquillettidia_venezuelensis",1,201,212) #Navia-Gine2013
AeAegy = c("Aedes_aegypti",119,135,136) #Stenn2018
AeMedi = c("Aedes_mediovittatus",107,217,218) #Barrera2012
AeScap = c("Aedes_scapularis",2,16,27) #Alencar2015
CxBast = c("Culex_bastagarius",3,26,34) #Alencar2015
CxDecl = c("Culex_declarator",2,16,28) #Alencar2015
CxPleu = c("Culex_pleuristriatus",3,13,26) #Alencar2015
AeSerr = c("Aedes_serratus",2,5,11) #Alencar2015
AnAlbi = c("Anopheles_albitarsis",3,15,32) #Alencar2015
AnEvan = c("Anopheles_evansae",3,19,44) #DosSantosSilva2012
ChFaja = c("Chagasia_fajardi",8,30,40) #DosSantosSilva2012
CxUsqu = c("Culex_usquatus",0,6,12) #Alencar2015
PsAlbi = c("Psorophora_albigenu",75,93,93) #Mucci2015
#Stack the per-species vectors into a character matrix (c() coerced the
#counts to character) and build a properly typed data frame in one step.
#This replaces the earlier as.numeric(as.character(...)) round-trip per
#column, whose outcome depended on the R version's stringsAsFactors default.
bmMatrix = rbind(CxErra,CxPipi,CxQuin,AeVexa,CxRest,CxPecc,CxTerr,CqVene,AeAegy,AeMedi,AeScap,CxBast,CxDecl,CxPleu,AeSerr,AnAlbi,AnEvan,ChFaja,CxUsqu,PsAlbi)
MosBM = data.frame(Mosquito = bmMatrix[, 1],
                   HumanBM  = as.numeric(bmMatrix[, 2]),
                   MammalBM = as.numeric(bmMatrix[, 3]),
                   TotalBM  = as.numeric(bmMatrix[, 4]),
                   stringsAsFactors = FALSE)
#Adding column of human bloodmeal proportion respective
#to mammal bloodmeal. NOTE: Culex_territans has 0 mammal
#bloodmeals, so its HuMam is NaN (0/0) and sorts last below.
MosBM$HuMam = round((MosBM$HumanBM/MosBM$MammalBM),2)
#Adding column of human bloodmeal proportion respective
#to total bloodmeal
MosBM$HuTot = round((MosBM$HumanBM/MosBM$TotalBM),2)
# #Saving the data frame as "Bloodmeal Proportions
# #of 20 mosquito species"
# write.csv(MosBM, file = "./Analisis/BloodmealProportions_20spp.csv", row.names = F)
#Table ordered so to see those mosquito species with
#50% or more mammal bloodmeals being human
MosBM[order(-MosBM$HuMam),]
#Aedes aegypti 88%
#Culex restuans 84%
#Culex pipiens 83%
#Psorophora albigenu 81%
#Culex peccator 62%
#Aedes mediovittatus 49%
#litsearchr Eliza Grames 18-Dec-20----
#More info about this section in the article:
# Grames EM, Stillman AN, Tingley MW, Elphick CS.
# An automated approach to identifying search
# terms for systematic reviews using keyword
# cooccurrence networks. Methods Ecol Evol.
# 2019;10:1645–1654.
# https ://doi.org/10.1111/2041-210X.13268
#Check this website for templates:
#https://elizagrames.github.io/litsearchr/introduction_vignette_v010.html
#https://elizagrames.github.io/litsearchr/litsearchr_vignette_v041.html
#Installation issues:
# NOTE(review): the two GitHub install attempts below are kept as a log of
# what failed (captured output preserved verbatim); the package functions
# were ultimately loaded by source()-ing the scripts further down.
devtools::install_github("elizagrames/litsearchr")
# Error: Failed to install 'unknown package' from GitHub:
#   HTTP error 404.
#   No commit found for the ref master
#
#   Did you spell the repo owner (`elizagrames`) and repo name (`litsearchr`) correctly?
#   - If spelling is correct, check that you have the required permissions to access the repo.
remotes::install_github("elizagrames/litsearchr", ref="main")
# Installing package into ‘C:/Users/Damián/Documents/R/win-library/3.6’
# (as ‘lib’ is unspecified)
# * installing *source* package 'litsearchr' ...
# ** using staged installation
# Error in file(file, if (append) "a" else "w") :
# (convertido del aviso) cannot open file 'C:/Users/Damian/Documents/R/win-library/3.6/00LOCK-litsearchr/00new/litsearchr/DESCRIPTION': No such file or directory
# ERROR: installing package DESCRIPTION failed for package 'litsearchr'
# * removing 'C:/Users/Damián/Documents/R/win-library/3.6/litsearchr'
# Error: Failed to install 'litsearchr' from GitHub:
# (convertido del aviso) installation of package ‘C:/Users/DAMIN~1/AppData/Local/Temp/RtmpUnSQqg/file46f495a3faa/litsearchr_1.0.0.tar.gz’ had non-zero exit status
#litsearchr package still hasn't been uploaded
#to CRAN, so it isn't an R official package.
#In order to use it one will need to individually
#download the scripts where her functions have
#been written. Download Grames' scripts from:
#https://zenodo.org/record/2551701#.X9zWk9hKhaQ
# Load the litsearchr functions by sourcing the individual script files
# downloaded from Zenodo (the package could not be installed; see above).
# The trailing comment on each line lists the functions that file provides.
source("litsearchrZENODO/R/import_and_clean_data.R") #detect_database() usable_databases() import_results() deduplicate() clean_keywords()
source("litsearchrZENODO/R/term_selection.R") #make_corpus() add_stopwords() extract_terms() make_dictionary() create_dfm() create_network() make_importance() select_ngrams() select_unigrams() find_knots() fit_splines() find_cutoff() get_keywords() reduce_graph() make_ngram_graph() condense_network() get_condensed_terms() get_similar_terms()
source("litsearchrZENODO/R/write_scrape_test_searches.R") #get_language_data() choose_languages() language_graphs() translate_search() should_stem() write_search() available_languages() write_title_search() scrape_hits() scrape_oatd() scrape_ndltd() scrape_openthesis() check_recall() search_performance()
source("litsearchrZENODO/R/pretty_plots.R") #make_wordle() plot_full_network()
source("litsearchrZENODO/R/data.R") #data examples
## About the package
# The *litsearchr* package for R is designed to partially automate search term selection and writing search strategies for systematic reviews. This vignette demonstrates its utility through a mock, example review examining the effects of fire on black-backed woodpeckers by demonstrating how the package: (1) Identifies potential keywords through the naive search input, (2) Builds a keyword co-occurence network to assist with building a more precise search strategy, (3) Uses a cutoff function to identify important changes in keyword importance, (4) Assists with grouping terms into concepts, and (5) Writes a Boolean search as a result of completion of the four previous steps.
usable_databases()
#> Platform Database
#> 1 Web of Science BIOSIS Citation Index
#> 2 Web of Science Zoological Record
#> 3 Scopus Scopus
#> 4 EBSCO Academic Search Premier
#> 5 EBSCO Agricola
#> 6 EBSCO GreenFILE
#> 7 EBSCO OpenDissertations
#> 8 EBSCO CAB Abstracts
#> 9 EBSCO MEDLINE
#> 10 EBSCO Science Reference Center
#> 11 ProQuest Earth, Atmospheric & Aquatic Science Database?
#> 12 ProQuest ProQuest Dissertations & Theses Global?
#> 13 ProQuest NTIS Database (National Technical Information Service)
#> 14 NDLTD Networked Digital Library of Theses and Dissertations
#> 15 OATD Open Access Theses and Dissertations
#> 16 OpenThesis OpenThesis
#> 17 CAB Direct (all databases)
#> 18 WorldCat OAIster
#> 19 WorldCat WorldCat
#> 20 Science.gov Science.gov
#> 21 IngentaConnect IngentaConnect
#> 22 PubMed PubMed
## Write and conduct naive search
# In our empirical example, we begin with a naive search intended to capture a set of relevant articles. Naive search terms: (("black-backed woodpecker" OR "picoides arcticus" OR "picoides tridactylus" AND (burn\* OR fire\*)). We ran the search in Scopus and Zoological Record (Web of Science), exporting results in .ris and .txt, respectively. These exported search results are then imported to litsearchr using the *import_results* function and next deduplicated using the *remove_duplicates* function. In some cases, it is best to run the *remove_duplicates* function two or more times, for example starting with exact matches and moving on to fuzzy matching.
# When writing a naive search, the first step is to clearly articulate the research question. This serves as the basis for identifying concept groups and naive search terms. In our case, the research question is "What processes lead to the decline in black-backed woodpecker occupancy of post-fire forest systems with time since fire?" Although the exact concept groups needed for a review will vary on a case-by-case basis, the PICO (Population Intervention Control Outcome) model used in public health and medical reviews can be transformed to work for ecology. Instead of a population, we have a study system; intervention becomes predictor variables; outcome becomes response variables. The control category doesn't translate well to ecological reviews and can generally be omitted from the search. In our case, we are interested in either the predictor (processes) or response (occupancy) variables in our system (woodpeckers in post-fire forest systems), so our search will combine the concept groups as ( (processes OR occupancy) AND fire AND woodpecker ). The "OR" operator will include all hits that have either a process term or an occupancy term. The "AND" operator will require all hits to also have a term both the fire and woodpecker category. The parentheses work just like basic order of operations; items inside parentheses are considered before items outside of parentheses.
#
# We truncated terms to include word forms by adding an asterisk (\*) to the end of a word stem. For example, occup\* will pick up occupancy, occupance, occupied, occupy, occupying, etc... We included alternate spellings (i.e. colonization and colonisation) when possible, though we did not truncate one letter earlier because coloni\* would also pick up colonies or colonial, which has a different meaning altogether. Because there are multiple ways to describe nest success, we represented this concept with two groups of terms separated by W/3. This operator forces a word to occur within a certain number of words to another word (in this case, 3 words). By combining the OR operator with W/3, we can get any articles that include the concept of nesting and success next to each other. For example, an article about "success of nestlings" would be captured because the terms occur within three words of each other and nest* captures nestlings. Because we want our naive search to be discrete (i.e. only capture results most relevant to our question to yield better keyword suggestions), we decided to only include birds in the tribe Dendropicini. We included both common names (woodpecker, sapsucker) and genus names to capture studies which used only latin species names. The bird terms were only searched in the full text because study systems are often not specified in the title, abstract, or keywords. Genus names were truncated to account for studies that refer to groups with the suffix "-ids".
#
# Naive search: (
#(occup\* OR occur\* OR presen\* OR coloniz\* OR colonis\* OR abundan\* OR "population size" OR "habitat suitability" OR "habitat selection" OR persist\*) OR ( (nest\* OR reproduct* OR breed\* OR fledg\*) W/3 (succe\* OR fail\* OR surviv\*) ) OR ( surviv\* OR mortalit\* OR death\* ) OR ( "food availab\*" OR forag\* OR provision\*) OR ( emigrat\* OR immigrat\* OR dispers\*) )
#AND (fire\* OR burn\* OR wildfire\*) )
#AND (woodpecker\* OR sapsucker\* OR Veniliorn\* OR Picoid\* OR Dendropic\* OR Melanerp\* OR Sphyrapic\*)
#
# Searches were conducted on 10/22/18 with no date restrictions. We searched two databases on Web of Science (BIOSIS Citation Index and Zoological Record) and Scopus. Number of hits were as follows: BIOSIS (212), Zoological Record (179), and Scopus (592).
#
# Although other databases could also be used, the import functions of this package are set up to work with commonly used databases and platforms in ecology or with .bib or .ris files from other databases. Instructions on how to export files to match what litsearchr is expecting are viewable with usable_databases().
#
# The original export files should not be altered at all - none of the columns need to be removed and default headers should be left alone. These are used as signatures to detect which database a file originated from. If one of your naive searches results in more than 500 hits and you need to export multiple files from BIOSIS or Zoological Record, they can be left as separate files and don't need to be manually combined -litsearchr will do this for you. However, note that if your naive search returns more than 500 hits, the search terms are likely too broad. This lack of specificity may mean that the updated search terms returned by litsearchr will not adequately capture the desired level of inference.
#
# Optionally, if you want to return extremely specific keywords, you can conduct a critical appraisal of your naive search results to remove articles that you know aren't relevant to your question. However, if these articles are relatively rare, their keywords should be filtered out by litsearchr as unimportant.
# Load Grames' import/deduplication helpers, read in every export file from
# the naive search, and collapse near-identical titles.
source("litsearchrGitHub/R/import_and_clean_data.R")
naiveimport <- import_results(
  directory = "litsearchrGitHub/inst/extdata/",
  verbose = TRUE
)
# How many records came from each database?
table(naiveimport$database)
# Drop records whose titles match under optimal string alignment distance.
naiveresults <- remove_duplicates(naiveimport, field = "title", method = "string_osa")
#19-Dec-20----
# Import and deduplicate the exports of the 19-Dec-2020 naive search.
# Naive search string:
#   “mosquito*”
#   AND (“landscape” OR “deforestation” OR “soil use change” OR “logging”)
#   AND (“blood*” OR “blood meal” OR “blood meal source*” OR “host” OR “blood feeding” OR “feed*” OR “forag*”)
# Hits: WoS 160 (title/abstract), Scopus 227 (title/abstract),
# PubMed 128 (all fields) = 515 records in total.
source("litsearchrGitHub/R/import_and_clean_data.R") # provides import_results() and remove_duplicates()
mosquitoimport <- import_results(directory = "Analisis/naive_19Dec20/", verbose = TRUE) # 429 rows read, 86 records missing
mosquitoresults <- remove_duplicates(mosquitoimport, field = "title", method = "string_osa") # 299 rows kept, 130 duplicates
# Persist the deduplicated record set
write.csv(mosquitoresults, file = "naiveDeduplicated_19Dec20.csv", row.names = FALSE)
#23-Dec-20 Sorting december articles----
#All articles pooled:
mosLup = read.csv("./Analisis/FullDatabaseMosquito_21-12-20.csv")
length(mosLup$Title) #2137 articles recovered
mosLup$Database = as.factor(mosLup$Database)
length(mosLup$Database[mosLup$Database=="SCOPUS"]) #1011 scopus articles
length(mosLup$Database[mosLup$Database=="WoS"]) #1098 WoS articles
length(mosLup$Database[mosLup$Database=="SciELO"]) #28 SciELO articles
#To deduplicate the database check
#https://cran.r-project.org/web/packages/synthesisr/vignettes/synthesisr_vignette.html:
library(synthesisr)
#Use function deduplicate to eliminate duplicate articles by title.
#The method is exact, which will remove articles that have identical
# titles. This is a fairly conservative approach, so we will remove
# them without review.
exactMosLup = deduplicate(mosLup, match_by = "Title", method = "exact")
#It now says that there're 1682 unique articles. Which implies
#455 records are duplicates.
#But we still need to eliminate highly similar titles: those that only
#differ in an extra space or the use of a dash-line. For this we'll use
#string distance:
stringMosLup = find_duplicates(exactMosLup$Title, method = "string_osa", to_lower=T, rm_punctuation = T, threshold = 7)
# We can extract the line numbers from the dataset that are likely
# duplicated; this lets us manually review those titles to confirm
# they are duplicates:
manual_checks <- review_duplicates(exactMosLup$Title, stringMosLup)
length(manual_checks$title) #453 duplicates
# Now we can extract unique references from our dataset: we need to
#pass it the exact method dataset (exactMosLup) and the matching
#articles (stringMosLup)
uniqueMosLup = extract_unique_references(exactMosLup, stringMosLup)
#1455 unique articles (914 scopus, 515 WoS, 26 SciELO).
#682 duplicates.
#______________________________________________
#Screened 177 title and abstract Lupita's articles:
data = read.csv("./Analisis/177articulos.csv")
names(data)
#Spot duplicates by string distance
ddata = find_duplicates(data$Title, method = "string_osa", to_lower=T, rm_punctuation = T, threshold = 7)
manual_checks <- review_duplicates(data$Title, ddata)
length(manual_checks$title) #12 duplicates
# NOTE(review): unlike the earlier saves, these write.csv() calls omit
# row.names = FALSE, so the output files gain an extra index column.
write.csv(manual_checks,"./Analisis/177duplicates.csv")
#Extract unique records (eliminate duplicates)
udata = extract_unique_references(data, ddata)
write.csv(udata,"./Analisis/todos2.csv")
#_____________________________________________
#To the 164 unique new records the 21 records already
#selected since september 2020 were added, for a total
#of 185 records:
data = read.csv("./Analisis/todos3.csv")
names(data)
length(data$Title)
#Spot duplicates by string distance
ddata = find_duplicates(data$Title, method = "string_osa", to_lower=T, rm_punctuation = T, threshold = 7)
manual_checks <- review_duplicates(data$Title, ddata)
length(manual_checks$title) #12 duplicates
write.csv(manual_checks, "./Analisis/185duplicates.csv")
#Extract unique records (eliminate duplicates)
udata = extract_unique_references(data, ddata)
# NOTE(review): todos3.csv is read above and overwritten in place here --
# rerunning this section operates on the already-deduplicated file.
write.csv(udata,"./Analisis/todos3.csv")
#14-Jan-21 Disturbed vs. wild barplots ----
mosquitos <- read.csv("./Analisis/Mosquito_Review_39articles.csv",header=T)
#Number of mosquito species
length(levels(factor(mosquitos$MosquitoSpecies)))
#91 spp.
#__________________________________________
#The disturbed-landscape and wild-landscape extractions below were
#previously two identical copy-pasted pipelines; they are now a single
#helper applied twice.
#For one landscape type, keep a single row per mosquito species: the record
#with the maximum reported host richness.
#IMPORTANT: sorting by species with host DESCENDING *before* dropping
#duplicates is what guarantees the retained row is the maximum -- do not
#reorder those two steps.
max_host_per_species = function(data, landscape_type) {
  sub = data[data$Landscape == landscape_type, ]
  freq = data.frame(sub$MosquitoSpecies, sub$HostRichness, sub$Landscape, sub$AuthorKey)
  colnames(freq) = c("mosquito", "host", "landscape", "ID")
  #Drop non-used species levels carried over from the full dataset
  freq$mosquito = as.factor(freq$mosquito)
  freq = freq[order(freq$mosquito, -freq$host), ]
  maxHost = freq[!(duplicated(freq$mosquito)), ]
  #Return the one-row-per-species table sorted from max to min host richness
  maxHost[order(-maxHost$host), ]
}
distMaxHost = max_host_per_species(mosquitos, "disturbed")
#Number of mosquito species in disturbed environments
length(distMaxHost$mosquito)
# 63 spp.
wildMaxHost = max_host_per_species(mosquitos, "wild")
#Number of mosquito species in wild environments
length(wildMaxHost$mosquito)
# 57 spp.
#Joining distMaxHost and wildMaxHost (the tables for both disturbed
#and wild environments with only the maximum amount possible of
#bloodmeal source hosts per mosquito species)
dw = rbind(distMaxHost,wildMaxHost)
#Obtaining mosquito species in both landscape types (disturbed and wild):
#both duplicated() passes together flag every copy of a repeated species.
dw2 = dw[duplicated(dw$mosquito)|duplicated(dw$mosquito, fromLast = T),]
dw2[order(dw2$mosquito),]
length(dw2$mosquito)
#There're 23 mosquito species that feed from bloodhosts in both landscapes
#Reformat the database so that it has 3 columns: mosquito species,
#bloodhosts in disturbed landscape and bloodhosts in wild landscapes
wildData = dw2[dw2$landscape=="wild",]
wildData = wildData[,c(1,2)]
colnames(wildData) = c("mosquito","hostWild")
disturbedData = dw2[dw2$landscape=="disturbed",]
disturbedData = disturbedData[,c(1,2)]
colnames(disturbedData) = c("mosquito","hostDist")
# Column order after merge: mosquito, hostDist, hostWild
dw3 = merge(disturbedData,wildData, by="mosquito")
#Adding Aedes aegypti to the dataframe (disturbed count from dw, 0 wild).
# NOTE(review): assumes Aedes_aegypti has exactly one row in dw; rbind()
# of a character vector coerces columns to character, undone by the
# as.numeric() calls below.
AeAegypti = c("Aedes_aegypti",dw$host[dw$mosquito=="Aedes_aegypti"],0)
dw4 = rbind(dw3,AeAegypti)
dw4 = dw4[order(dw4$mosquito),]
#Converting dataframe to matrix to plot it easier (rows = landscape)
dw4.matrix = rbind(as.numeric(dw4$hostWild),as.numeric(dw4$hostDist))
rownames(dw4.matrix) = c("wild","disturbed")
colnames(dw4.matrix) = dw4$mosquito
# Reverse column order so species read alphabetically top-to-bottom in the
# horizontal barplot.
dw4.matrix=dw4.matrix[,ncol(dw4.matrix):1]
#________________________________________
#Save as image the barplot of the landscape types
png("LandscapeBarplot_24spp.png", units="in", width=15, height=15, res=300)
#Overall plot settings (wide left margin for species names)
par(mai=c(1.5,8,0,0), cex=2)
#Bar colors
environmentColors = c("seagreen","gray90")
#Bar names
mosquitoSpp <- gsub("_"," ",dw4$mosquito)
barplot(dw4.matrix,
        beside=T,
        horiz = T,
        xlim=c(0,65),
        names.arg = rev(mosquitoSpp),
        xlab="",
        ylab="",
        xaxt="n",
        las=1,
        font=3,
        cex.names = 1,
        col = environmentColors)
#X axis values
axis(1,at=c(0,10,20,30,40,50,60),labels = c("0","10","20","30","40","50","60"), cex.axis=1.2)
#X axis label (placed manually below the axis)
text(x = 39,
     y = par("usr")[3] - 6,
     labels = "Number of bloodmeal source hosts",
     xpd = NA,
     srt = 0,
     cex = 1.2,
     adj=0.7)
#Landscape type legend
legend(42,72,
       legend = c("Wild","Disturbed"),
       pch = 22,
       col = "black",
       pt.bg = environmentColors,
       pt.cex = 1.5)
dev.off()
#_________________________________________
#Extracting mosquito species, host richness, host class and
#landscape (disturbed or wild)
mosquitos <- read.csv("./Analisis/Mosquito_Review_39articles.csv",header=T)
mos = mosquitos[,c("MosquitoSpecies","HostRichness","Landscape","Mammalia","Aves","Reptilia","Amphibia","AuthorKey")]
colnames(mos) = c("sp","host","landscape","mammalia","aves","reptilia","amphibia","id")
head(mos)
#Find true replicates: how many distinct studies researched each of
#the 91 mosquito species. Count unique (species, study) pairs directly
#with unique() instead of the previous paste-with-","-then-strsplit
#round trip, which would miscount if either field ever contained a comma.
studyPairs = unique(mos[, c("sp", "id")])
mos5 = as.data.frame(table(factor(studyPairs$sp)))
colnames(mos5) = c("sp","studies")
mos5[order(-mos5$studies),]
#Checking which studies had data for both disturbed and wild landscapes
mos$id=factor(mos$id)
mos$landscape=factor(mos$landscape)
#Study IDs that contributed disturbed-landscape records
md = mos$id[mos$landscape=="disturbed"]
md = factor(md) # drop unused levels
mdl = levels(md)
#Study IDs that contributed wild-landscape records
mw = mos$id[mos$landscape=="wild"]
mw = factor(mw)
mwl = levels(mw)
#A study with Freq == 2 below occurs in both landscape types
wdtab = data.frame(table(c(mdl,mwl)))
wdtab[order(-wdtab$Freq),]
#19-Jan-21 Host class stacked barplots----
#Import the 39-article database and keep, per mosquito species, the single
#record with the most bloodmeal-source hosts reported, together with the
#per-class (mammal / bird / amphibian / reptile) host counts.
mosquitos <- read.csv("./Analisis/Mosquito_Review_39articles.csv", header = TRUE)
wanted <- c("MosquitoSpecies", "HostRichness", "BloodengorgedMosquitoes",
            "Landscape", "Mammalia", "Aves", "Amphibia", "Reptilia", "AuthorKey")
mos <- mosquitos[, wanted]
names(mos) <- c("sp", "host", "engorged", "landscape",
                "mammalia", "aves", "amphibia", "reptilia", "id")
#Sort hosts DESCENDING within species before dropping duplicates, so the
#retained row is each species' maximum host richness.
mos <- mos[order(mos$sp, -mos$host), ]
mos <- mos[!duplicated(mos$sp), ]
mos <- mos[order(-mos$host), ]
#Top 20 species by host richness
mos[1:20, ]
#Transforming dataframe to a matrix to barplot it (rows = host class)
mosMatrix = rbind(as.numeric(mos$aves),as.numeric(mos$mammalia),as.numeric(mos$amphibia),as.numeric(mos$reptilia))
rownames(mosMatrix) = c("Aves","Mammalia","Amphibia","Reptilia")
colnames(mosMatrix) = mos$sp
mosMatrix[,1:20]
#Save as image
# NOTE(review): filename says "17mos" but 20 species columns are plotted
# below -- confirm which is intended.
png("hostClass17mos.png", units="in", width=28, height=15, res=300)
#Overall plot settings (deep bottom margin for slanted species names)
par(mai=c(6,2,0,0), cex=2)
#Bar colors (one per host class, matching the rownames order above)
hostClassColors = c("#00BFC4","#F8766D","#7CAE00","#C77CFF")
#Bar names: only the 20 most host-rich species are plotted
mosquitoSpp <- gsub("_"," ",colnames(mosMatrix[,1:20]))
hostClassbp = barplot(mosMatrix[,1:20],
                      horiz = F,
                      ylim=c(0,70),
                      names.arg = mosquitoSpp,
                      xlab="",
                      ylab="",
                      xaxt="n",
                      yaxt="n",
                      las=2,
                      font=3,
                      col = hostClassColors)
# #X axis values if horizontal barplots is true
# axis(1,at=c(0,10,20,30,40,50,60),labels = c("0","10","20","30","40","50","60"), cex.axis=1.2)
#
# #X axis label if horizontal barplots is true
# text(x = 39,
#      y = par("usr")[3] - 4.5,
#      labels = "Number of bloodmeal source hosts",
#      xpd = NA,
#      srt = 0,
#      cex = 1.2,
#      adj=0.7)
#Y axis values if vertical barplots is true
axis(2,at=c(0,10,20,30,40,50,60),labels = c("0","10","20","30","40","50","60"), cex.axis=1.2, las=1)
#Y axis label if vertical barplots is true
text(x = par("usr")[3] - 2.5,
     y = 35,
     labels = "Host richness",
     xpd = NA,
     srt = 90,
     cex = 1.7,
     adj=0.7)
#X axis values if vertical barplots is true (slanted species names placed
#manually at the bar midpoints returned by barplot())
text(x = hostClassbp,
     y = par("usr")[3] - 3,
     labels = mosquitoSpp,
     xpd = NA,
     srt = 50,
     cex = 1.7,
     adj = 0.99,
     font = 3)
legend(20,65,
       legend = rownames(mosMatrix),
       col = hostClassColors,
       bty = "n",
       pch = 16,
       y.intersp = 1.4,
       pt.cex=3,
       cex=2)
dev.off()
#Studying the blood hosts labeling: tally how often each raw blood-host
#label appears across the host columns (14:69) of the review spreadsheet.
bloodsources = mosquitos[,14:69]
names(bloodsources)
b1=unlist(bloodsources) #transform data frame into vector by columns
b2=data.frame(table(b1)) #obtain the frequency of each blood source host label as a data frame
b3 = b2[order(-b2$Freq),]
colnames(b3) = c("bloodhost","replicates")
head(b3)
length(b3$bloodhost)
#317 not-curated blood host labels
#1899 blood host replicates
#Extract all the blood host labels that aren't a species names
#and are probably wrongly written (no underscore = not Genus_species form)
b4=subset(b3, !(grepl("_", bloodhost)))
b4
sum(b4$replicates) #232 hosts labeled NOT to species level
# NOTE(review): the hard-coded row indices below are positions in THIS
# sorted snapshot of b4/b5 -- they silently break if the data or ordering
# changes. Recheck them whenever the spreadsheet is updated.
sum(b4$replicates[c(1,5,6,10,12,17,20,27)]) #8 labels are genera names, its total replicate sum is equal to 35
#Extract all labels identified to genus level (ending in "sp.").
# NOTE(review): grepl("_sp", ...) also matches full species names such as
# Lithobates_sphenocephalus, hence the manual index selection below.
b5=subset(b3, (grepl("_sp", bloodhost)))
b5
sum(b5$replicates[c(4,7,8)]) #7 hosts labeled only to genus level
#Extract all the blood host labels that ARE species names
b6=subset(b3, (grepl("_", bloodhost)))
b6
sum(b6$replicates) #1667 hosts labeled to species level
#22-Jan-21 correcting labels in database----
mosquitos <- read.csv("./Analisis/Mosquito_Review_39articles.csv",header=T)
mc = mosquitos
#Studying the blood hosts labeling
bloodsources = mc[,14:69]
b1=unlist(bloodsources) #transform data frame into vector by columns
b2=data.frame(table(b1)) #obtain the frequency of each blood source host label as a data frame
b3 = b2[order(b2$b1),]
colnames(b3) = c("bloodhost","replicates") #length 317
#Correcting records
mc=replace(mc, mc=="Alligator_missisippiensis", "Alligator_mississippiensis")
mc=replace(mc, mc=="Alligator_mississippiens", "Alligator_mississippiensis")
mc=replace(mc, mc=="Alligator_mississippiens", "Alligator_mississippiensis")
mc=replace(mc, mc=="Anas_platyrhynchos_domesticus", "Anas_platyrhynchos")
mc=replace(mc, mc=="Anhinga", "Anhinga_anhinga")
mc=replace(mc, mc=="Anolis_caronilensis", "Anolis_carolinensis")
mc=replace(mc, mc=="Ardea_herdias", "Ardea_herodias")
mc=replace(mc, mc=="Bare_faced_ibis", "Phimosus_infuscatus")
mc=replace(mc, mc=="Bird", "bird")
mc=replace(mc, mc=="Black_crowdned_night_heron", "Nycticorax_nycticorax")
mc=replace(mc, mc=="bos_taurus", "Bos_taurus")
mc=replace(mc, mc=="Branta_canadenis", "Branta_canadensis")
mc=replace(mc, mc=="Canis_familiaris", "Canis_lupus_familiaris")
mc=replace(mc, mc=="Cannis_lupus", "Canis_lupus_familiaris")
mc=replace(mc, mc=="Capra_aegagrus_hircus", "Capra_hircus")
mc=replace(mc, mc=="Capybara", "Hydrochoerus_hydrochaeris")
mc=replace(mc, mc=="Carolina", "Carolina_sp")
mc=replace(mc, mc=="cat", "Felis_catus")
mc=replace(mc, mc=="Chatarus_gattatus", "Catharus_guttatus")
mc=replace(mc, mc=="Chicken", "Gallus_gallus")
mc=replace(mc, mc=="Cocoi_heron", "Ardea_cocoi")
mc=replace(mc, mc=="Common_moorhen", "Gallinula_chloropus")
mc=replace(mc, mc=="Common_opossum", "Didelphis_sp")
mc=replace(mc, mc=="Corvus_brachyrhyncho", "Corvus_brachyrhynchos")
mc=replace(mc, mc=="Cow", "Bos_taurus")
mc=replace(mc, mc=="dasypus_novemcinctus", "Dasypus_novemcinctus")
mc=replace(mc, mc=="Didelphis", "Didelphis_sp")
mc=replace(mc, mc=="Dog", "Canis_lupus_familiaris")
mc=replace(mc, mc=="Equus", "Equus_caballus")
mc=replace(mc, mc=="Equus_ferus_caballus", "Equus_caballus")
mc=replace(mc, mc=="Felis_silvestris_catus", "Felis_catus")
mc=replace(mc, mc=="Frog", "Anura")
mc=replace(mc, mc=="gallus_gallus", "Gallus_gallus")
mc=replace(mc, mc=="gallu_gallus", "Gallus_gallus")
mc=replace(mc, mc=="gallus_gallus_domesticus", "Gallus_gallus")
mc=replace(mc, mc=="Gallus_gallus_domesticus", "Gallus_gallus")
mc=replace(mc, mc=="Goat", "Capra_hircus")
mc=replace(mc, mc=="Goat(sheep)", "Capra_hircus")
mc=replace(mc, mc=="Homo", "Homo_sapiens")
mc=replace(mc, mc=="homo_sapiens", "Homo_sapiens")
mc=replace(mc, mc=="Homo_sapiens_sapiens", "Homo_sapiens")
mc=replace(mc, mc=="Human", "Homo_sapiens")
mc=replace(mc, mc=="Horse", "Equus_caballus")
mc=replace(mc, mc=="Iguana", "Iguana_iguana")
mc=replace(mc, mc=="Least_bittern", "Ixobrychus_exilis")
mc=replace(mc, mc=="Limpkin", "Aramus_guarauna")
mc=replace(mc, mc=="Lithobates_sp.", "Lithobates_sp")
mc=replace(mc, mc=="Lithobates_sphenocephelus", "Lithobates_sphenocephalus")
mc=replace(mc, mc=="Melospize_melodia", "Melospiza_melodia")
mc=replace(mc, mc=="Memphitis_memphitis", "Mephitis_mephitis")
mc=replace(mc, mc=="Myprocta_pratti", "Myoprocta_prattis")
mc=replace(mc, mc=="Nerodia_erythrogaster,", "Nerodia_erythrogaster")
mc=replace(mc, mc=="Nyctanassa_violace", "Nyctanassa_violacea")
mc=replace(mc, mc=="Nycticorax_cycticorax", "Nycticorax_nycticorax")
mc=replace(mc, mc=="Nycticorax_myticorax", "Nycticorax_nycticorax")
mc=replace(mc, mc=="Nyctiocorax_nyctiocorax", "Nycticorax_nycticorax")
mc=replace(mc, mc=="Odocoileus_virginiamus", "Odocoileus_virginianus")
mc=replace(mc, mc=="Odocoileus_virginuanus", "Odocoileus_virginianus")
mc=replace(mc, mc=="Opossum", "Didelphis_sp")
mc=replace(mc, mc=="Pig", "Sus_scrofa")
mc=replace(mc, mc=="Pinneated_bittern", "Botaurus_pinnatus")
mc=replace(mc, mc=="Poecile_atricapilla", "Poecile_atricapillus")
mc=replace(mc, mc=="Poecile_atricapilla", "Poecile_atricapillus")
mc=replace(mc, mc=="primate", "Primates")
mc=replace(mc, mc=="Rattus_norvergicus", "Rattus_norvegicus")
mc=replace(mc, mc=="Rattus_norvergicus,", "Rattus_norvegicus")
mc=replace(mc, mc=="Rattus_sp.", "Rattus_sp")
mc=replace(mc, mc=="Rattus", "Rattus_sp")
mc=replace(mc, mc=="Rodent", "Rodentia")
mc=replace(mc, mc=="Sheep", "Ovis_aries")
mc=replace(mc, mc=="Straited_heron", "Butorides_striata")
mc=replace(mc, mc=="Sus_scrofa_domesticus", "Sus_scrofa")
mc=replace(mc, mc=="Sylvialagus_floridanus", "Sylvilagus_floridanus")
mc=replace(mc, mc=="Terrapene_carolina_carolina", "Terrapene_carolina")
mc=replace(mc, mc=="Tiarsis_bicolor", "Tiaris_bicolor")
mc=replace(mc, mc=="Toxostoma_curviroste", "Toxostoma_curvirostre")
mc=replace(mc, mc=="Tropidurus_sp.", "Tropidurus_sp")
mc=replace(mc, mc=="turtle", "turtle")
mc=replace(mc, mc=="Turtle", "turtle")
mc=replace(mc, mc=="Zanaida_asiatica", "Zenaida_asiatica")
#Studying the blood hosts labeling after correcting errors.
#NOTE(review): columns 14:69 are assumed to be the blood-host columns of
#`mc` -- confirm against the spreadsheet layout.
b.corr = mc[,14:69]
#Flatten all host columns into one vector and tabulate label frequencies.
b.corr1=unlist(b.corr)
b.corr2=data.frame(table(b.corr1))
b.corr3 = b.corr2[order(b.corr2$b.corr1),]
colnames(b.corr3) = c("bloodhost","replicates")
length(b.corr3$bloodhost) #252 bloodhost labels
sum(b.corr3$replicates) #1899 bloodhost records
#Re-sort by frequency, most common label first.
b.corr3 = b.corr3[order(-b.corr3$replicates),]
#Extract all the blood host labels that aren't a species names
#and are probably wrongly written (canonical labels contain "_").
b.corr4=subset(b.corr3, !(grepl("_", bloodhost)))
b.corr4
sum(b.corr4$replicates) #174 non-bloodhost records plus...
#Extract all labels identified only to genus level (ending in "_sp").
b.corr5=subset(b.corr3, (grepl("_sp", bloodhost)))
b.corr5
#NOTE(review): the excluded positions c(-1,-4,-5,-7) are hard-coded row
#indices into the frequency-sorted table; they will silently break if the
#data change -- verify which labels they drop.
sum(b.corr5$replicates[c(-1,-4,-5,-7)]) #37 bloodhost genera not id to species level
# #Save as CSV the table with the 252 bloodhost labels and its replicates
# write.csv(b.corr3, file="Hosts.csv", row.names=F)
# #Save as CSV the database with the 252 corrected bloodhost labels
# write.csv(mc, file="Mosquito_Review_39articles.csv", row.names = F)
#31 May 2021. Labeled MAP (for reference)----
#Draws a reference map of the Americas with each study site labeled by its
#record ID, and saves it to MapAuthorKey.png.
#Packages
library(maps) #map functions
library(mapdata) #basic world map data
library(maptools)#for shapefiles (NOTE(review): not used below and now retired from CRAN -- confirm it is still needed)
library(scales) #for transparency
#Importing most updated database:
msqt = read.csv("./Analisis/Mosquito_Review_39articles.csv", header=T, sep=",")
head(msqt)
#Extracting columns of interest: Title, LongW, LatN, ID, AuthorKey, MosquitoSpecies, Landscape, HostRichness
msqt2 = msqt[,c("Title","LongW","LatN","ID","AuthorKey","MosquitoSpecies","Landscape","HostRichness")]
head(msqt2)
#Obtaining unique coordinates. Currently we have 343 rows, but only 146 unique coordinates. Duplicates are because more than one mosquito species was identified in a single site.
#NOTE(review): deduplication keys on longitude alone; two distinct sites
#sharing the same LongW would be collapsed -- consider duplicated() on the
#LongW/LatN pair.
length(msqt2$LongW) #343 rows
msqt3 = msqt2[!(duplicated(msqt2$LongW)),]
length(msqt3$LongW) #146 rows
#Plotting map with labels (16x16 in at 600 dpi)
png("MapAuthorKey.png", units="in", width=16, height=16, res=600)
map(database="worldHires",
regions = c("USA",
"Mexico",
"Guatemala",
"Belize",
"El Salvador",
"Honduras",
"Nicaragua",
"Panama",
"Costa Rica",
"Venezuela",
"Colombia",
"Guyana",
"French Guiana",
"Suriname",
"Brazil",
"Ecuador(?!:Galapagos)", #NOTE(review): looks like an attempt to exclude the Galapagos; map() regions are not perl regexes -- confirm this pattern matches anything
"Peru",
"Bolivia",
"Paraguay",
"Uruguay",
"Chile",
"Argentina"),
xlim=c(-124,-35), #longitude (left, right)
ylim=c(-35,49), #latitude (bottom, top)
col="gray90",
fill=T)
# #plotting mosquito study sites
# points(msqt3$LongW, msqt3$LatN,
#        pch=21,
#        col="white",
#        bg="black",
#        lwd=2,
#        cex=5)
#Labels for each study (each point), drawn in red using the record ID
text(msqt3$LongW, msqt3$LatN, labels = msqt3$ID, cex=0.5, col="red")
dev.off()
|
92c35bd56bb412d7ac6e19dcc5e0d40d4c75d7d8
|
55efbadb0cc8e772639aa3e2aa89317385615711
|
/encounter.metro_s_meanonly_sim.R
|
56ed56ae2037ab1dd783dd0018ed860f2ed0afe7
|
[
"MIT"
] |
permissive
|
Sandy4321/hierarchical-association-rules
|
ee53e2a4c22e58b01e806e4d25dd0abf11cbf9f2
|
820adbe222c50584e40f9868cc1533f6a57133ff
|
refs/heads/master
| 2020-04-25T17:35:59.707778
| 2018-02-02T00:14:08
| 2018-02-02T00:14:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,072
|
r
|
encounter.metro_s_meanonly_sim.R
|
#only keep the mean to try to use all rules
#######this version only keeps probabilities.
# Shape parameter of the beta distribution on the natural scale: the linear
# predictor x.matrix %*% beta.tmp plus the random effect g, exponentiated.
# `x.matrix` defaults to the global `x.mat` used throughout this sampler.
pt.tmp.fcn <- function(beta.tmp, g, x.matrix = x.mat) {
  eta <- (x.matrix %*% beta.tmp) + g
  exp(eta)
}
# Metropolis-within-Gibbs sampler for a hierarchical beta-binomial-style
# model of rule usage across users.
#
# Arguments:
#   y, n    : n.users x n.rules matrices of successes and trial counts.
#   x.mat   : n.users x k covariate matrix. Required -- intercept-only mode
#             is not implemented (stops if NULL).
#   n.iter  : iterations per chain; n.thin : keep every n.thin-th draw;
#   n.burn  : burn-in iterations discarded; n.chain : number of chains.
#
# Returns list(sims, ord, n.iter, n.thin, n.burn). NOTE: `sims` holds the
# per-chain RUNNING SUM of the kept draws (order: p, exp(tau), pi), so
# posterior means are sims / ((n.iter - n.burn) / n.thin) -- hence
# "meanonly" in the file name. Depends on f.update() and pt.tmp.fcn().
f.metro<-function(y,n, x.mat=NULL, n.iter=5000,n.thin=10,n.burn=1000,n.chain=3){
#beta is n.rules*ncol(x)+1
#gamma is n.users long
#tau is n.rules*n.users
print(date())
if(is.null(x.mat)){stop("havent coded intercept only mode yet! Need at least one covariate")}
#n.iter=5000
#n.thin=10
#n.burn=1000
#n.chain=3
#x.mat=x.true
#dimensions
n.rules<-ncol(y)
n.users<-nrow(y)
k=ncol(x.mat)
# total number of accumulated quantities: p block + exp(tau) block + pi block
n.par<-(n.users*n.rules)+n.users+(n.users*n.rules)
n.keep<-(n.iter-n.burn)/n.thin
sims<-array(0, c(n.chain,n.par))
for(ccc in 1:n.chain){
## initialization: dispersed random starting values; hyperparameters are
## then seeded from those draws (later assignments overwrite earlier ones)
mc.v.p<-abs(y/ifelse(n>0,n,1)+runif(length(y),-.1,.1))
mc.v.beta<-matrix(rnorm((n.rules*(k)),0,sqrt(8)),n.rules,k)
mc.v.tbeta<-matrix(rnorm((n.rules*(k)),0,sqrt(8)),n.rules,k)
mc.v.gamma<-rnorm(n.users,0,sqrt(8))
mc.v.tau<-rnorm(n.users,0,sqrt(4))
mc.v.mu.beta<-colMeans(mc.v.beta)
mc.v.mu.tbeta<-colMeans(mc.v.tbeta)
mc.v.mu.beta<-runif(k,-2,2)
mc.v.mu.tbeta<-rep(0,k)
mc.v.mu.gamma<-mean(mc.v.gamma)
#mc.v.mu.tau<-mean(mc.v.tau)
#mc.v.mu.gamma=0
mc.v.mu.tau=0
mc.v.sigma.beta<-apply(mc.v.beta,2,'var')
mc.v.sigma.tbeta<-apply(mc.v.tbeta,2,'var')
mc.v.sigma.gamma<-var(mc.v.gamma)
mc.v.sigma.tau<-var(mc.v.tau)
# Metropolis proposal variances, adapted during the run (see below)
bd.jump<-matrix(.1,n.rules,k)
tbd.jump<-matrix(.1,n.rules,k)
gd.jump<-rep(.1,n.users)
td.jump<-rep(.1,n.users)
### jump's are the variance of the jump distribution.
# Buffer of acceptance probabilities over the last 50 iterations, used to
# tune each proposal variance.
last.20p<-array(NA, c((n.users)+n.users+(2*n.rules*(k)),50))
p.ct<-0
for(t in 1:n.iter){
# one full sweep over all parameters
temp<-f.update(y=y,n=n, x.mat=x.mat, v.p=mc.v.p, v.tbeta=mc.v.tbeta,v.beta=mc.v.beta, v.gamma=mc.v.gamma,v.tau=mc.v.tau, v.mu.tbeta=mc.v.mu.tbeta,v.mu.beta=mc.v.mu.beta,v.mu.gamma=mc.v.mu.gamma,v.mu.tau=mc.v.mu.tau,v.sigma.beta=mc.v.sigma.beta,v.sigma.gamma=mc.v.sigma.gamma,v.sigma.tbeta=mc.v.sigma.tbeta,v.sigma.tau=mc.v.sigma.tau,bd.jump=bd.jump,td.jump=td.jump,gd.jump=gd.jump,tbd.jump=tbd.jump)
mc.v.p<-temp$v.p
mc.v.beta<-temp$v.beta
mc.v.gamma<-temp$v.gamma
mc.v.tau<-temp$v.tau
mc.v.tbeta<-temp$v.tbeta
mc.v.mu.beta<-temp$v.mu.beta
mc.v.mu.tbeta<-temp$v.mu.tbeta
mc.v.mu.gamma<-temp$v.mu.gamma
mc.v.mu.tau<-temp$v.mu.tau
mc.v.sigma.beta<-temp$v.sigma.beta
mc.v.sigma.tbeta<-temp$v.sigma.tbeta
mc.v.sigma.gamma<-temp$v.sigma.gamma
mc.v.sigma.tau<-temp$v.sigma.tau
#if(t<5001){
p.ct<-p.ct+1
last.20p[,p.ct]<-temp$jump.p
#order for jumping probabilities
#c(t.p,g.p,b.p)
# Every 50 iterations, adapt each proposal variance toward a ~0.23
# acceptance rate, clamped to sensible bounds.
if(p.ct==50){
p.ct<-0
jmp.t.mean<-apply(last.20p[1:(n.users),],1,mean)
jmp.g.mean<-apply(last.20p[((n.users)+1):(n.users+n.users),],1,mean)
jmp.b.mean<-apply(last.20p[(n.users+n.users+1):(n.users+n.users+((k)*n.rules)),],1,mean)
jmp.tb.mean<-apply(last.20p[(n.users+n.users+((k)*n.rules)+1):(n.users+n.users+(((k)*n.rules)+((k)*n.rules))),],1,mean)
td.jump<-pmax(pmin(log(0.23)*td.jump/log(jmp.t.mean), pmin(10*td.jump,2)), 0.2)
gd.jump<-pmax(pmin(log(0.23)*gd.jump/log(jmp.g.mean), pmin(10*gd.jump,2)), 0.2)
bd.jump<-pmax(pmin(log(0.23)*bd.jump/log(jmp.b.mean), pmin(10*bd.jump,2)), .01)
tbd.jump<-pmax(pmin(log(0.23)*tbd.jump/log(jmp.tb.mean), pmin(10*tbd.jump,2)), .01)}
#}
# progress report every 500 iterations
if (t%%500==0){
#print("gamma 0 mean")
print(c(t,date()))}
# After burn-in, accumulate every n.thin-th draw: p, then exp(tau), then
# pi = exp(X beta + gamma) built column-by-column per rule.
if(t%%n.thin==0 & t>n.burn){
sims[ccc,(1:(n.users*n.rules))] <-sims[ccc,(1:(n.users*n.rules))]+mc.v.p
sims[ccc,((n.users*n.rules)+1):(((n.users*n.rules)+n.users))] <-sims[ccc,((n.users*n.rules)+1):(((n.users*n.rules)+n.users))]+exp(mc.v.tau)
b.tmp<-exp((x.mat%*%mc.v.beta[1,])+mc.v.gamma)
for(u in 2:n.rules){
b.tmp<-cbind(b.tmp,exp((x.mat%*%mc.v.beta[u,])+mc.v.gamma))}
sims[ccc,(((n.users*n.rules)+n.users+1):((n.users*n.rules)+n.users+(n.users*n.rules)))]<-sims[ccc,(((n.users*n.rules)+n.users+1):((n.users*n.rules)+n.users+(n.users*n.rules)))]+as.vector(b.tmp)
# c(mc.v.p, mc.v.beta, mc.v.tbeta,mc.v.gamma,mc.v.tau, #mc.v.mu.beta,mc.v.mu.tbeta,mc.v.mu.gamma,mc.v.mu.tau,mc.v.sigma.beta,mc.v.sigma.tbeta,mc.v.sigma.gamma,mc.v.sigma.tau)
}
}
}
ord<-"mc.v.p, gamma, pi"
#return(list(sims=sims,ord=ord,b.out=b.out,p.out=p.out,g.out=g.out,t.out=t.out))
return(list(sims=sims,ord=ord,n.iter=n.iter,n.thin=n.thin,n.burn=n.burn))
}
# One full Gibbs/Metropolis sweep over all model parameters; called once per
# iteration by f.metro().
#
# Update order: the per-user/per-rule probabilities p (conjugate beta draw),
# the user effects gamma and tau (random-walk Metropolis with per-user
# proposal variances gd.jump / td.jump) plus their hyperparameters, and the
# regression coefficients beta (componentwise random-walk Metropolis with
# proposal variances bd.jump). In this "meanonly" variant tbeta and its
# hyperparameters are pinned to zero rather than sampled.
#
# Returns the updated parameters plus jump.p, the vector of Metropolis
# acceptance probabilities in the order c(t.p, g.p, b.p, tb.p), which
# f.metro() uses to adapt the proposal variances.
f.update<-function(y,n, x.mat, v.p, v.beta, v.gamma,v.tau,v.tbeta,v.mu.tbeta, v.mu.beta,v.mu.gamma,v.mu.tau,v.sigma.tbeta,v.sigma.beta,v.sigma.gamma,v.sigma.tau,bd.jump,td.jump,gd.jump,tbd.jump){
n.users<-nrow(y)
n.rules<-ncol(y)
k=ncol(x.mat)
# # #####comment this out unless debugging###
#	v.gamma=mc.v.gamma
#	v.beta=mc.v.beta
#	v.tau=mc.v.tau
#	v.mu.tau=mc.v.mu.tau
#	v.sigma.tau=mc.v.sigma.tau
#	td.jump=td.jump
#	gd.jump=gd.jump
#	bd.jump=bd.jump
#	v.mu.gamma=mc.v.mu.gamma
#	v.sigma.gamma=mc.v.sigma.gamma
#	v.mu.beta=mc.v.mu.beta
#	v.sigma.beta=mc.v.sigma.beta
###
#
### updating p
# Conjugate beta draw per rule; then nudge draws away from exactly 0 or 1
# so the log-likelihood terms below stay finite.
v.p<-matrix(NA,n.users,n.rules)
for(r in 1:n.rules){
pi.tmp<-exp((x.mat%*%v.beta[r,])+v.gamma)
tau.tmp<-exp((x.mat%*%v.tbeta[r,])+v.tau)
v.p[,r]<-rbeta(n.users,(y[,r]+pi.tmp),(n[,r]-y[,r]+tau.tmp))
v.p[v.p[,r]==1,r]<-v.p[v.p[,r]==1,r]-runif(1,1e-16,1e-15)
v.p[v.p[,r]<1e-15,r]<-1e-15}
###updating gamma
# Random-walk Metropolis per user; the 1e-320 offset guards log(0) when
# beta(pi, tau) underflows.
g.old<-(v.gamma)
pi.tmp<-apply(v.beta,1,pt.tmp.fcn,g=g.old,x.matrix=x.mat)
tau.tmp<-apply(v.tbeta,1,pt.tmp.fcn,g=v.tau,x.matrix=x.mat)
lik.old<-rowSums((-log(beta(pi.tmp,tau.tmp)+1e-320))+((y+pi.tmp-1)*log(v.p))+(log(1-v.p)*(n-y+tau.tmp-1)))+log(dnorm(g.old,v.mu.gamma,sqrt(v.sigma.gamma)))
g.star<-g.old+rnorm(n.users,0,sqrt(gd.jump))
pi.tmp<-apply(v.beta,1,pt.tmp.fcn,g=g.star,x.matrix=x.mat)
lik.star<-rowSums((-log(beta(pi.tmp,tau.tmp)+1e-320))+((y+pi.tmp-1)*log(v.p))+(log(1-v.p)*(n-y+tau.tmp-1)))+log(dnorm(g.star,v.mu.gamma,sqrt(v.sigma.gamma)))
prob.diff<-lik.star-lik.old
# accept only where the log-ratio is finite and defined
g.valid<-(!is.infinite(prob.diff))&(!(is.na(prob.diff)))
jump.new<-rep(0,length(v.gamma))
jump.new[g.valid]<-rbinom(sum(g.valid),1,exp(pmin(prob.diff[g.valid],0)))
v.gamma[jump.new==1]<-g.star[jump.new==1]
v.gamma[jump.new<1]<-g.old[jump.new<1]
# g.p records the acceptance probabilities for proposal-variance tuning
g.p<-rep(0,length(v.gamma))
g.p[g.valid]<-exp(pmin(prob.diff[g.valid],0))
### updating v.mu.gamma
v.mu.gamma<-rnorm(1, mean(v.gamma, na.rm=T), sqrt(v.sigma.gamma/n.users))
###updating v.sigma.gamma
v.sigma.gamma<-sum((v.gamma-v.mu.gamma)^2, na.rm=T)/rchisq(1, n.users-1)
###updating tau
# Same random-walk Metropolis scheme as gamma, with prior mean fixed at 0.
t.old<-(v.tau)
pi.tmp<-apply(v.beta,1,pt.tmp.fcn,g=v.gamma,x.matrix=x.mat)
tau.tmp<-apply(v.tbeta,1,pt.tmp.fcn,g=t.old,x.matrix=x.mat)
lik.old<-rowSums((-log(beta(pi.tmp,tau.tmp)+1e-320))+((y+pi.tmp-1)*log(v.p))+(log(1-v.p)*(n-y+tau.tmp-1)))+(log(dnorm(t.old,0,sqrt(v.sigma.tau))))
t.star<-t.old+rnorm(n.users,0,sqrt(td.jump))
tau.tmp<-apply(v.tbeta,1,pt.tmp.fcn,g=t.star,x.matrix=x.mat)
lik.star<-rowSums((-log(beta(pi.tmp,tau.tmp)+1e-320))+((y+pi.tmp-1)*log(v.p))+(log(1-v.p)*(n-y+tau.tmp-1)))+(log(dnorm(t.star,0,sqrt(v.sigma.tau))))
prob.diff<-t(lik.star-lik.old)
t.valid<-(!is.infinite(prob.diff))&(!(is.na(prob.diff)))
jump.new<-rep(0,length(v.tau))
jump.new[t.valid]<-rbinom(sum(t.valid),1,exp(pmin(prob.diff[t.valid],0)))
v.tau[jump.new==1]<-t.star[jump.new==1]
v.tau[jump.new<1]<-t.old[jump.new<1]
t.p<-rep(0,length(v.tau))
t.p[t.valid]<-exp(pmin(prob.diff[t.valid],0))
### updating v.mu.tau
#v.mu.tau<-rnorm(1, mean(v.tau, na.rm=T), sqrt(v.sigma.tau/n.users))
v.mu.tau=0
###updating v.sigma.tau
v.sigma.tau<-sum((v.tau-v.mu.tau)^2, na.rm=T)/rchisq(1, n.users-1)
# "meanonly" variant: tbeta and its hyperparameters are held at zero,
# and its acceptance probabilities are reported as 0.
tb.p<-matrix(0,nrow(v.beta),ncol(v.beta))
v.tbeta<-matrix(0,nrow(v.beta),ncol(v.beta))
v.mu.tbeta<-rep(0,k)
v.sigma.tbeta<-rep(0,k)
###updating beta
# Componentwise random-walk Metropolis over each (rule a, covariate kkk).
b.p<-matrix(0,nrow(v.beta),ncol(v.beta))
for(a in 1:n.rules){
for(kkk in 1:k){
b.old<-v.beta[a,]
pi.tmp<-exp((x.mat%*%b.old)+v.gamma)
tau.tmp<-exp((x.mat%*%v.tbeta[a,])+v.tau)
lik.old<-sum((-log(beta(pi.tmp,tau.tmp)+1e-320))+((y[,a]+pi.tmp-1)*log(v.p[,a]))+(log(1-v.p[,a])*(n[,a]-y[,a]+tau.tmp-1)))+((log(dnorm(b.old[kkk],v.mu.beta[kkk],sqrt(v.sigma.beta[kkk])))))
b.star<-b.old
b.star[kkk]<-b.old[kkk]+rnorm(1,0,sqrt(matrix(bd.jump,n.rules,(k))[a,kkk]))
pi.tmp<-exp((x.mat%*%b.star)+v.gamma)
lik.star<-sum((-log(beta(pi.tmp,tau.tmp)+1e-320))+((y[,a]+pi.tmp-1)*log(v.p[,a]))+(log(1-v.p[,a])*(n[,a]-y[,a]+tau.tmp-1)))+((log(dnorm(b.star[kkk],v.mu.beta[kkk],sqrt(v.sigma.beta[kkk])))))
prob.diff<-lik.star-lik.old
#print(prob.diff)
b.valid<-(!is.infinite(prob.diff))&(!(is.na(prob.diff)))
if(b.valid>0){
jump.new<-rbinom(1,1,exp(pmin(prob.diff,0)))
if(jump.new>0){
v.beta[a,kkk]<-b.star[kkk]
b.p[a,kkk]<-exp(pmin(prob.diff,0))}
if(jump.new<1){
v.beta[a,kkk]<-b.old[kkk]
b.p[a,kkk]<-exp(pmin(prob.diff,0))}}
# invalid log-ratio: keep the old value, report acceptance prob 0
if(b.valid==0){
#print('drrrr')
ind<-rbinom(1,1,0)
v.beta[a,kkk]<-(1-ind)*b.old[kkk]+ind*b.star[kkk]
b.p[a,kkk]<-0
}}}
b.p<-as.vector(b.p)
for(aa in 1:k){
###updating v.mu.beta
v.mu.beta[aa]<-rnorm(1, mean(v.beta, na.rm=T), sqrt(v.sigma.beta[aa]/(k*n.rules)))
#v.mu.beta[aa]<-0
###updating v.sigma.beta
v.sigma.beta[aa]<-sum((v.beta-v.mu.beta)^2, na.rm=T)/rchisq(1, (k*n.rules)-1)}
return(list(v.p=v.p, v.tau=v.tau,v.gamma=v.gamma,v.beta=v.beta,v.tbeta=v.tbeta,v.mu.tbeta=v.mu.tbeta,v.mu.tau=v.mu.tau,v.mu.gamma=v.mu.gamma,v.mu.beta=v.mu.beta,v.sigma.tau=v.sigma.tau,v.sigma.gamma=v.sigma.gamma,v.sigma.beta=v.sigma.beta, v.sigma.tbeta=v.sigma.tbeta,jump.p=c(t.p,g.p,b.p,tb.p)))
}
|
c8b7e9cea247f87849889c7e3fe28cf9bacab709
|
c750c1991c8d0ed18b174dc72f3014fd35e5bd8c
|
/pkgs/bayesm/man/bank.Rd
|
3155e72f47c60aa219a170c5346f9d3692a3c5df
|
[] |
no_license
|
vaguiar/EDAV_Project_2017
|
4b190e66fe7a6b4078cfe1b875bccd9b5a594b25
|
288ffaeec1cfdd873fe7439c0fa0c46a90a16a4f
|
refs/heads/base
| 2021-01-23T02:39:36.272851
| 2017-05-01T23:21:03
| 2017-05-01T23:21:03
| 86,010,131
| 1
| 0
| null | 2017-05-01T23:43:04
| 2017-03-24T00:21:20
|
HTML
|
UTF-8
|
R
| false
| false
| 4,021
|
rd
|
bank.Rd
|
\name{bank}
\alias{bank}
\docType{data}
\title{ Bank Card Conjoint Data of Allenby and Ginter (1995)}
\description{
Data from a conjoint experiment in which two partial profiles of
credit cards were presented to 946 respondents. The variable
bank$choiceAtt$choice indicates which profile was chosen. The
profiles are coded as the difference in attribute levels. Thus,
a "-1" means the profile coded as a choice of "0" has the attribute.
A value of 0 means that the attribute was not present in the
comparison.
Demographic data on age, income and gender (female = 1) are also recorded in
bank$demo.
}
\usage{data(bank)}
\format{
This R object is a list of two data frames, list(choiceAtt,demo).
List of 2
$ choiceAtt:`data.frame': 14799 obs. of 16 variables:\cr
\ldots$ id : int [1:14799] 1 1 1 1 1 1 1 1 1 1 \cr
\ldots$ choice : int [1:14799] 1 1 1 1 1 1 1 1 0 1 \cr
\ldots$ Med_FInt : int [1:14799] 1 1 1 0 0 0 0 0 0 0 \cr
\ldots$ Low_FInt : int [1:14799] 0 0 0 0 0 0 0 0 0 0 \cr
\ldots$ Med_VInt : int [1:14799] 0 0 0 0 0 0 0 0 0 0 \cr
\ldots$ Rewrd_2 : int [1:14799] -1 1 0 0 0 0 0 1 -1 0 \cr
\ldots$ Rewrd_3 : int [1:14799] 0 -1 1 0 0 0 0 0 1 -1 \cr
\ldots$ Rewrd_4 : int [1:14799] 0 0 -1 0 0 0 0 0 0 1 \cr
\ldots$ Med_Fee : int [1:14799] 0 0 0 1 1 -1 -1 0 0 0 \cr
\ldots$ Low_Fee : int [1:14799] 0 0 0 0 0 1 1 0 0 0 \cr
\ldots$ Bank_B : int [1:14799] 0 0 0 -1 1 -1 1 0 0 0 \cr
\ldots$ Out_State : int [1:14799] 0 0 0 0 -1 0 -1 0 0 0 \cr
\ldots$ Med_Rebate : int [1:14799] 0 0 0 0 0 0 0 0 0 0 \cr
\ldots$ High_Rebate : int [1:14799] 0 0 0 0 0 0 0 0 0 0 \cr
\ldots$ High_CredLine: int [1:14799] 0 0 0 0 0 0 0 -1 -1 -1 \cr
\ldots$ Long_Grace : int [1:14799] 0 0 0 0 0 0 0 0 0 0
$ demo :`data.frame': 946 obs. of 4 variables:\cr
\ldots$ id : int [1:946] 1 2 3 4 6 7 8 9 10 11 \cr
\ldots$ age : int [1:946] 60 40 75 40 30 30 50 50 50 40 \cr
\ldots$ income: int [1:946] 20 40 30 40 30 60 50 100 50 40 \cr
\ldots$ gender: int [1:946] 1 1 0 0 0 0 1 0 0 0 \cr
}
\details{
Each respondent was presented with between 13 and 17 paired comparisons. Thus, this
dataset has a panel structure.
}
\source{
Allenby and Ginter (1995), "Using Extremes to Design Products and Segment
Markets," \emph{JMR}, 392-403.
}
\references{ Appendix A, \emph{Bayesian Statistics and Marketing}
by Rossi, Allenby and McCulloch. \cr
\url{http://www.perossi.org/home/bsm-1}
}
\examples{
data(bank)
cat(" table of Binary Dep Var", fill=TRUE)
print(table(bank$choiceAtt[,2]))
cat(" table of Attribute Variables",fill=TRUE)
mat=apply(as.matrix(bank$choiceAtt[,3:16]),2,table)
print(mat)
cat(" means of Demographic Variables",fill=TRUE)
mat=apply(as.matrix(bank$demo[,2:3]),2,mean)
print(mat)
## example of processing for use with rhierBinLogit
##
if(0)
{
choiceAtt=bank$choiceAtt
Z=bank$demo
## center demo data so that mean of random-effects
## distribution can be interpreted as the average respondent
Z[,1]=rep(1,nrow(Z))
Z[,2]=Z[,2]-mean(Z[,2])
Z[,3]=Z[,3]-mean(Z[,3])
Z[,4]=Z[,4]-mean(Z[,4])
Z=as.matrix(Z)
hh=levels(factor(choiceAtt$id))
nhh=length(hh)
lgtdata=NULL
for (i in 1:nhh) {
y=choiceAtt[choiceAtt[,1]==hh[i],2]
nobs=length(y)
X=as.matrix(choiceAtt[choiceAtt[,1]==hh[i],c(3:16)])
lgtdata[[i]]=list(y=y,X=X)
}
cat("Finished Reading data",fill=TRUE)
fsh()
Data=list(lgtdata=lgtdata,Z=Z)
Mcmc=list(R=10000,sbeta=0.2,keep=20)
set.seed(66)
out=rhierBinLogit(Data=Data,Mcmc=Mcmc)
begin=5000/20
end=10000/20
summary(out$Deltadraw,burnin=begin)
summary(out$Vbetadraw,burnin=begin)
if(0){
## plotting examples
## plot grand means of random effects distribution (first row of Delta)
index=4*c(0:13)+1
matplot(out$Deltadraw[,index],type="l",xlab="Iterations/20",ylab="",
main="Average Respondent Part-Worths")
## plot hierarchical coefs
plot(out$betadraw)
## plot log-likelihood
plot(out$llike,type="l",xlab="Iterations/20",ylab="",main="Log Likelihood")
}
}
}
\keyword{datasets}
|
2acf26e6b3e405253680b65580da9cc9461af8a9
|
bac2ab705e25265df85dade5af77b501267abe0d
|
/R-Programming for Absolute Beginers-Udemy/24-The ifelse() Function.R
|
6d7b239875f5ba88853f17d5f0018a1953b141a8
|
[] |
no_license
|
manasohara/R-Programming
|
ee7ec51ab3d2b8019f90c5869b228687e77eea12
|
869212761fcc5da751f9631723951a35b9ac5885
|
refs/heads/main
| 2023-08-18T08:10:01.410635
| 2021-09-20T06:39:53
| 2021-09-20T06:39:53
| 399,898,235
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 626
|
r
|
24-The ifelse() Function.R
|
#Vectorized conditional: ifelse(condition, value if true, value if false)
x<-c(6,10,9,5,20,7,16)
y<-ifelse(x%%2 == 0,x/2,x)
y
#explanation
#where x%%2==0 (x is even)
#the TRUE branch x/2 is used, which means
#divide the value by 2, so 6 became 3
#where x is odd
#the FALSE branch is x itself,
#so the value is copied as-is
## e.g. 9 stays 9
#ex2
x<-c(2,6,15,10,20,14)
y<- ifelse(x<12,x*5,x*13)
#this means: where x < 12
# do x*5
#otherwise do x*13
y
#ex3: absolute-value square root
x<-c(25,-36,100,0,-1,49,4,-68)
y<-ifelse(x>=0,sqrt(x),sqrt(-x))
y
#we'll get NaN warnings because ifelse() evaluates both branches on the
#whole vector; to avoid them
#we can do this instead (negate first, then take one sqrt)
y<-sqrt(ifelse(x>=0,x,-x))
y
#ex4: 0/1 indicator of whether x^2 exceeds 100
x<-c(15,6,10,30,4)
y<-ifelse(x^2>100,1,0)
y
|
5366cd52d4e5c4c4f9a1f245ed8853603efa5b3a
|
c211370f32539ab7a746d0ce69b3b39098f604cf
|
/Scripts/ANOVA.R
|
f0411dbfbb494f40e77bfe2538b2121b55798837
|
[
"MIT"
] |
permissive
|
grvsrm/My-Tidy-Stats
|
3f038e741fffeef93f7162f8c27b0d5db8e56ab3
|
580765febc48602de75ba38e568e2127dff85f5c
|
refs/heads/main
| 2023-01-24T08:00:38.319485
| 2020-12-07T10:19:12
| 2020-12-07T10:19:12
| 318,995,935
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,790
|
r
|
ANOVA.R
|
# One-way ANOVA of penguin flipper length by species, with assumption checks
# (independence, normality of residuals, homogeneity of variance) and
# post-hoc comparisons.

# Pre-requisites
library(palmerpenguins)
library(tidyverse)
library(rstatix)
library(car)

# Data: grouping factor and response only, complete cases
dat <- penguins %>%
  select(species, flipper_length_mm) %>%
  drop_na()

dat %>%
  ggplot(aes(species, flipper_length_mm, color = species)) +
  geom_jitter()

# Fit the ANOVA model up front so the assumption checks below can use its
# residuals. Fitting on `dat` rather than `penguins` is equivalent (aov()
# drops incomplete rows itself) but keeps the script consistent.
penguin_anova <- aov(flipper_length_mm ~ species, data = dat)

# Test independence of residuals (not of the raw response).
# Interpretation: a Durbin-Watson statistic near 2 indicates NO
# autocorrelation; values near 0 indicate strong positive autocorrelation.
durbinWatsonTest(penguin_anova$residuals)

# Test normality of residuals
# Histogram
penguin_anova$residuals %>% as_tibble() %>%
  ggplot(aes(value)) +
  geom_histogram()

# Q-Q plot
qqPlot(penguin_anova$residuals, id = FALSE)
# From the histogram and Q-Q plot above, the normality assumption seems met.
# Still, confirm with a Shapiro-Wilk test (H0: residuals are normal).
shapiro.test(penguin_anova$residuals)
# Large p-value: we cannot reject normality.

# Kolmogorov-Smirnov test against the standard normal. The residuals must be
# standardized first; comparing raw residuals with "pnorm" (mean 0, sd 1)
# would almost always "reject" simply because of the scale.
ks.test(scale(penguin_anova$residuals), "pnorm")

# Normality can also be inspected on the raw data within each group.
dat %>%
  ggplot(aes(flipper_length_mm, fill = species)) +
  geom_histogram() +
  facet_wrap(~species, scales = "free")

# Testing homogeneity of variance (homoscedasticity)
dat %>%
  ggplot(aes(species, flipper_length_mm, color = species)) +
  geom_boxplot(show.legend = FALSE)
dat %>%
  ggplot(aes(species, flipper_length_mm)) +
  geom_jitter()
# Dispersion looks similar across groups; confirm with a Levene test
# (H0: equal variances across groups).
leveneTest(flipper_length_mm ~ species, data = dat)
# Large p-value: we cannot reject homogeneity of variance.
# (Note: this test is about equal variances, not normality.)

# Group summaries
dat %>%
  group_by(species) %>%
  summarise(mean = mean(flipper_length_mm),
            sd = sd(flipper_length_mm)) %>%
  ungroup()

# One-way ANOVA, three equivalent routes
anova_1 <- oneway.test(flipper_length_mm ~ species, data = dat, var.equal = TRUE)
anova_2 <- aov(flipper_length_mm ~ species, data = dat)
summary(anova_2)
anova_3 <- anova_test(flipper_length_mm ~ species, data = dat)
# p-value is tiny: reject the null hypothesis; the species means differ.

# Post-hoc tests
# TukeyHSD() requires an aov fit (anova_2); the oneway.test/anova_test
# objects (anova_1, anova_3) are not valid inputs.
TukeyHSD(anova_2)
tukey_hsd(anova_2)
# rstatix::dunn_test() takes data + formula, not a fitted-model object.
dunn_test(dat, flipper_length_mm ~ species)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.