blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
709e6add836b8e182d091a935088dca81cca8bd2
|
b00e4ed446bd69731e01bdbbdda962e6a98f4d1b
|
/man/compute_contrast.Rd
|
fed4f33b38b4022c7ed96235511697d6e82af65e
|
[] |
no_license
|
petershan1119/conText
|
63fd3aef2e46b6d5243c1b8901e03ba0d13d4e35
|
46e80d84c9d5d762a56928c55453cfa5f56c17f4
|
refs/heads/master
| 2023-07-08T10:54:07.817912
| 2021-08-25T21:21:54
| 2021-08-25T21:21:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,056
|
rd
|
compute_contrast.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/contrast_nns.R
\name{compute_contrast}
\alias{compute_contrast}
\title{Compute similarity and similarity ratios}
\usage{
compute_contrast(
target_embeddings1 = NULL,
target_embeddings2 = NULL,
pre_trained = NULL,
candidates = NULL,
norm = NULL
)
}
\arguments{
\item{target_embeddings1}{ALC embeddings for group 1}
\item{target_embeddings2}{ALC embeddings for group 2}
\item{pre_trained}{a V x D matrix of numeric values - pretrained embeddings with V = size of vocabulary and D = embedding dimensions}
\item{candidates}{character vector defining the candidates for nearest neighbors - e.g. output from \code{get_local_vocab}}
\item{norm}{character = c("l2", "none") - set to 'l2' for cosine similarity and to 'none' for inner product (see ?sim2 in text2vec)}
}
\value{
a list with three elements: the nearest neighbors for group 1, the nearest neighbors for group 2, and nns_ratio, the ratios of similarities between the two groups
}
\description{
Compute similarity and similarity ratios
}
|
0fc2a2f92405f2fa9b4899f4d3ef9264a10454a2
|
9963ec3df3ee5b86bfae64ac5a026fda00f03500
|
/Proyecto/proyecto.R
|
992231b6eed5460ac19d3b8b3a08d52bf25fe8d6
|
[] |
no_license
|
yanelyluna/EBayesiana
|
e74859b403099c78577fa70494f40dd65a91c77f
|
b8a21cf39a6c388a594766d2b705f922f5c2c1f4
|
refs/heads/master
| 2023-07-04T11:05:11.450803
| 2021-07-29T17:04:00
| 2021-07-29T17:04:00
| 364,960,276
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,838
|
r
|
proyecto.R
|
########### BAYESIAN STATISTICS ########################
############### PROJECT ################################
# Exploratory analysis of the heart-failure clinical records data set.

# Load libraries -------------
library(ggplot2)

# Load data ---------------
heart_failure <- read.csv("Proyecto/datos/heart_failure_clinical_records_dataset.csv")
str(heart_failure) # 299 obs. of 13 variables
# Variable dictionary:
#  1. age                      -- patient age
#  2. anaemia                  -- decrease of red blood cells (boolean)
#  3. creatinine_phosphokinase -- level of the CPK enzyme in the blood
#  4. diabetes                 -- whether the patient has diabetes (boolean)
#  5. ejection_fraction        -- % of blood leaving the heart on each contraction
#  6. high_blood_pressure      -- whether the patient has hypertension (boolean)
#  7. platelets                -- platelets in the blood
#  8. serum_creatinine         -- serum creatinine level in the blood
#  9. serum_sodium             -- serum sodium level in the blood
# 10. sex                      -- woman or man (factor)
# 11. smoking                  -- whether the patient smokes (boolean)
# 12. time                     -- follow-up days for the patient
# 13. DEATH_EVENT              -- whether the patient died during follow-up

# Convert the 0/1 indicator columns to factors in one pass instead of
# six copy-pasted as.factor() assignments.
factor_cols <- c("anaemia", "diabetes", "high_blood_pressure",
                 "sex", "smoking", "DEATH_EVENT")
heart_failure[factor_cols] <- lapply(heart_failure[factor_cols], as.factor)

####### DESCRIPTIVE ANALYSIS ###############
# with() replaces the original attach(heart_failure): same short column
# names in the expressions (so plot labels are unchanged) without
# attaching a data frame to the search path.
with(heart_failure, {
  # time --------------
  summary(time)
  hist(time)
  # DEATH_EVENT --------
  table(DEATH_EVENT)
  barplot(table(DEATH_EVENT))
  # age --------
  plot(age, DEATH_EVENT)
})
|
d592a68aeff30b1e49cf80dddee3c9fe1cbda965
|
df1580a2bc381dd96774bdb98bdee0478e4b5a56
|
/Configuratore/server.R
|
335c47e10ebe387a796e64dfac88dcaa828a653d
|
[] |
no_license
|
optimaitalia/crm-analitico
|
6c274334783ff891383f471813f5f11bde050c2e
|
cc802b10e38d38cd221b367dad50b23b0b50feb4
|
refs/heads/master
| 2020-06-14T14:59:47.945278
| 2014-05-08T07:30:31
| 2014-05-08T07:30:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 431
|
r
|
server.R
|
# Shiny server for the "Configuratore" app.
library(shiny)

# NOTE(review): hard-coded working directory; the source()/load() paths
# below are relative to it -- confirm before deploying elsewhere.
setwd("~/REPO/crm-analitico/Configuratore")
source("ScriptClass.R")   # presumably defines htmlpage1.PI and PI_constructor
load("BundleDett.Rdata")  # loads the BundleDett object referenced below

shinyServer(function(input, output) {
  # Echo back the customer id typed by the user.
  output$idCliente <- renderText({
    paste("ciao, hai inserito l'idCliente:", input$idCliente)
  })

  # Build the result page for the requested customer.
  output$prova <- reactive(
    htmlpage1.PI(PI_constructor(input$idCliente, BundleDett = BundleDett))
  )
})
|
da0708a75824039c80722cb674976557f10d2c98
|
9126d2396ce4536cfd934d9c2b09bb8511fa64a9
|
/man/OptionalInfluenceCurve-Class.Rd
|
14a61a87d27129953e879a1873a2bbd5f69ba97d
|
[] |
no_license
|
cran/RobAStBase
|
97d818333ca03d01c1eb1fa904ffddf8b8db88cb
|
63a5c3d20b6440e23f3c9787c0955bb7bf571854
|
refs/heads/master
| 2022-11-24T09:42:41.138259
| 2022-11-16T02:50:25
| 2022-11-16T02:50:25
| 17,693,378
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,087
|
rd
|
OptionalInfluenceCurve-Class.Rd
|
\name{OptionalInfluenceCurve-class}
\docType{class}
\alias{OptionalInfluenceCurve-class}
\alias{OptionalInfluenceCurveOrCall-class}
\alias{OptionalpICList-class}
\alias{StartClass-class}
\alias{pICList-class}
\alias{show,pICList-method}
\alias{show,OptionalpICList-method}
\title{Some helper Classes in package 'RobAStBase'}
\description{Some helper Classes in package 'RobAStBase':
Classes \code{OptionalInfluenceCurve}, \code{OptionalpICList},
\code{StartClass}, \code{pICList}}
\section{Class Unions}{
\code{OptionalInfluenceCurve} is a class union of classes
\code{InfluenceCurve} and \code{NULL};
\code{OptionalInfluenceCurveOrCall} is a class union of classes
\code{InfluenceCurve}, \code{call}, and \code{NULL} --- it is the slot
class of slot \code{pIC} in \code{ALEstimate};
\code{OptionalpICList} is a class union of classes
\code{pICList} and \code{NULL} --- it is the slot
class of slot \code{pICList} in \code{kStepEstimate};
\code{StartClass} is a class union of classes
\code{function}, \code{numeric} and \code{Estimate} --- it is the slot
class of slot \code{start} in \code{kStepEstimate}.
}
\section{List Classes}{
\code{pICList} is a descendant of class \code{list} which requires
its members ---if any--- to be of class \code{pIC}.
}
\section{Methods}{
\describe{
\item{show}{\code{signature(object = "OptionalpICList")}:
particular show-method. }
\item{show}{\code{signature(object = "pICList")}:
particular show-method. }
}}
\references{
Hampel et al. (1986) \emph{Robust Statistics}.
The Approach Based on Influence Functions. New York: Wiley.
Rieder, H. (1994) \emph{Robust Asymptotic Statistics}. New York: Springer.
Kohl, M. (2005) \emph{Numerical Contributions to the Asymptotic Theory of Robustness}.
Bayreuth: Dissertation.
}
\author{Peter Ruckdeschel \email{peter.ruckdeschel@uni-oldenburg.de}}
%\note{}
\seealso{\code{\link{InfluenceCurve}}, \code{\link[distrMod]{RiskType-class}}}
\concept{influence curve}
\keyword{classes}
\keyword{robust}
|
c612bc16a1c64c1c2734920aedd0afbddac65ff0
|
bc973bf2d0f74fb6cefac6027fdcec37e5bdcbe9
|
/5.Lists/Subset and extend lists3.R
|
bf41702615dfdd0f7670eefcc16924fc207081a6
|
[] |
no_license
|
jinkyukim-me/Intro_R
|
4334fe4a52274421747504df77a2029482108df1
|
81aaf7abdb525310cf82a81be92debc28e399efc
|
refs/heads/master
| 2020-08-13T15:31:41.529689
| 2019-10-24T17:20:28
| 2019-10-24T17:20:28
| 214,992,842
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 842
|
r
|
Subset and extend lists3.R
|
# Vector Subsetting vs. List Subsetting
# All these single and double square brackets to perform subsetting on vectors, matrices, factors and now also lists might lead to some confusion. As a summarizing exercise on vector subsetting, consider the following 4 commands.
#
# shining_list has been extended with even more information (source: imagination); the list is available in the workspace. Which of the following statements are correct?
#
# A) shining_list$boxoffice[1,2] gives the non-US box office of the first release.
# B) shining_list[[c(2,4)]] creates a list containing the actors vector and the box office matrix.
# C) shining_list[[c(2,4)]] returns "Scatman Crothers".
# D) shining_list$reviews[1] > shining_list[[3]][3] is invalid syntax.
# Vector Subsetting vs. List Subsetting
# FIX: spell out the full element name. The original `shining_list$box`
# only resolved to `boxoffice` through R's partial matching of `$` on
# list names, which silently returns NULL if another element ever shares
# the "box" prefix. Statement A above refers to `boxoffice` explicitly.
shining_list$boxoffice[1, 2]
shining_list[[c(2, 4)]]
|
55e1cb68d105c488a3fb5d50dd4f5d7d2e164a86
|
1839b1bc21a43384e9c169f0bf5fd0a3e4c68b0a
|
/w18/R/filterMatches.R
|
bdc3a736a5ed60511be2497cb18048ed5401e3aa
|
[] |
no_license
|
CarlosMoraMartinez/worm19
|
b592fa703896e1bbb6b83e41289674c63a046313
|
99fb3ef35d13739ee83f08b2ac1107179ea05ee2
|
refs/heads/master
| 2020-07-18T23:25:13.542031
| 2019-07-03T14:53:04
| 2019-07-03T14:53:04
| 206,333,433
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 840
|
r
|
filterMatches.R
|
#' filterMatches
#'
#' Keeps only the matched sequences whose characters fit a motif pattern.
#'
#' @param views an object of the class XStringViews
#' @param pattern a string of characters that contains the motif core
#' pattern (e.g.: "[AT]CAA") used to filter the matrix alignments. The
#' empty string "" is accepted and keeps every match.
#' @return A dataframe with the matches (all the 'views' objects in it).
#' @keywords filters, sequences, matches
#' @import Biostrings
#' @export
filterMatches <- function(views, pattern = ""){
  starts <- start(views)
  widths <- width(views)
  # One row per view: coordinates, the midpoint offset, and the sequence.
  matches <- data.frame(
    start = starts,
    end = end(views),
    width = widths,
    offsets = starts + widths / 2,
    seq = as.character(views)
  )
  # Logical mask instead of integer indices; "" matches every row.
  keep <- grepl(pattern = pattern, x = matches$seq)
  matches[keep, ]
}
|
80ccf39a2bba47f05996d1e79d71d5695fc07cd0
|
e303df5eae907f833a2cffe66c0bbcea5897104d
|
/dist/Debug/GNU-Linux/command_line.R
|
89b2d3c136d6855c01c20753fe8f4b9c76c47df7
|
[] |
no_license
|
ESapenaVentura/IchTB
|
1c9c56692fca04b9f056ad795da41fc5607bf95f
|
b1ec31b43351fb971ca49a15914566a7b4492152
|
refs/heads/master
| 2020-04-21T00:51:45.795878
| 2019-02-05T08:11:05
| 2019-02-05T08:11:05
| 169,208,103
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 631
|
r
|
command_line.R
|
#!/usr/bin/env Rscript
# Command-line summary statistics.
# Usage: command_line.R <name> <num1> [<num2> ...]
# Prints the median, mean and standard deviation of the numbers,
# one value per line.
args <- commandArgs(trailingOnly = TRUE)

# Fail fast with a clear message instead of the cryptic error that
# 2:length(args) produces when fewer than two arguments are supplied.
if (length(args) < 2) {
  stop("usage: command_line.R <name> <num1> [<num2> ...]", call. = FALSE)
}

Nombre <- as.character(args[1])             # label for the data set
Numeros <- as.numeric(args[2:length(args)]) # values to summarise
Numeros_frame <- data.frame(Numeros)        # used by the boxplot code below

mediana <- median(Numeros)
media <- mean(Numeros)
desvest <- sd(Numeros)

# Boxplot generation kept disabled, as in the original script.
#plot = ggplot2::ggplot(Numeros_frame, ggplot2::aes(x = "", y = Numeros)) + ggplot2::geom_boxplot() + ggplot2::ylab("Valores de distancia") + ggplot2::xlab(Nombre)
#dir.create(file.path(getwd(), "Boxplots"), showWarnings = FALSE)
#ggplot2::ggsave(paste0("Boxplots/", Nombre, "_boxplot_.png"), plot = plot, dpi=300)

cat(mediana)
cat("\n")
cat(media)
cat("\n")
cat(desvest)
|
4959be1514b29df9251b95c96fe77d3f3f3bd94a
|
f3d8fc1ee5f8304dd8cbf571f630b0c432a7cd99
|
/plot2.R
|
39573fd9f946409239b81f27b304aea5abc8933e
|
[] |
no_license
|
jasonfraser/ExData_Plotting1
|
6aab14ca1950adccfc3c7887c00d7ced29c49ada
|
ee45e4db10e0f064c0142c923dc41eb87c606322
|
refs/heads/master
| 2021-01-21T07:46:01.834712
| 2014-10-10T06:30:24
| 2014-10-10T06:30:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 614
|
r
|
plot2.R
|
# plot2.R -- line chart of Global Active Power for 2007-02-01 and
# 2007-02-02, written to plot2.png (480x480).

# Shared read-in step: load the full file, then keep the two target days.
power_all <- read.csv("household_power_consumption.txt", sep = ";", dec = ".",
                      stringsAsFactors = FALSE, header = TRUE)
day1 <- subset(power_all, Date == "1/2/2007")
day2 <- subset(power_all, Date == "2/2/2007")
two_days <- rbind(day1, day2)

# Open the output device before drawing.
png(filename = "plot2.png", width = 480, height = 480)

# Global active power as a numeric series with a custom weekday axis.
gap <- as.numeric(two_days$Global_active_power)
plot(gap, type = "l", xlab = "",
     ylab = "Global Active Power (kilowatts)", xaxt = "n")
axis(side = 1, at = c(0, 1450, 2900), labels = c("Thu", "Fri", "Sat"))
dev.off()
|
ab37971ee7709e5c48202a217ca4d8c54941d599
|
d09bd36acdde95c8596287fbcebb89dd4c7fb906
|
/R/LNA_chpt_slice.R
|
356392ee1568d0cec8b4dcbbb3c9f41a85444629
|
[] |
no_license
|
MingweiWilliamTang/phyloInt
|
de68a79de30880573527de3ff0700ab3cd2b9f0e
|
504b651261ed6edc5aedca4c48b656fe401f52c5
|
refs/heads/master
| 2020-04-25T14:42:46.085222
| 2019-07-29T19:11:35
| 2019-07-29T19:11:35
| 172,850,819
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 17,829
|
r
|
LNA_chpt_slice.R
|
#'
#' parId: index for parameters we want to update
#' isLog: indication for whether using proposal on log space for each parameter
#' priorId: index for prior distribution corresponds to each prior
#' proposeId: index for proposals corresponds to updating each parameter
#'
#'
#'
# Jointly propose and Metropolis-update a set of model parameters
# (initial conditions, rate parameters, optionally the hyper-parameter).
#
# MCMC_obj     : current sampler state (par, trajectories, likelihoods)
# MCMC_setting : static settings (priors, proposals, grid, data, model)
# method       : "admcmc"   = one multivariate-normal proposal with
#                covariance MCMC_setting$PCOV;
#                "jointProp" = per-parameter uniform random-walk
#                proposals with half-widths from MCMC_setting$proposal
# parId        : indices (into MCMC_obj$par) of parameters to update
# isLog        : 1 = propose that parameter on the log scale
# priorId      : prior id per parameter (1,3,4 = normal on the proposal
#                scale; 2 = uniform; 5 = gamma -- see branches below)
# proposeId    : proposal id per parameter (used only by "jointProp")
#
# Returns list(MCMC_obj = ...); accept/reject happens inside Update_Param().
update_Param_joint = function(MCMC_obj, MCMC_setting, method = "admcmc", parId, isLog, priorId,
proposeId = NULL){
# parId = unlist(parIdlist)
# islog = unlist(isLoglist)
# priorId = unlist(priorIdlist)
# proposalId = unlist(proposalIdlist)
# Every per-parameter vector must line up with parId.
if(length(parId) != length(isLog)){
stop("parameters do not match")
}
if(length(parId) != length(priorId)){
stop("priors do not match")
}
d = length(parId)
p = MCMC_obj$p
x_i = MCMC_setting$x_i
par_new = MCMC_obj$par
# Split indices: initial-state parameters (<= p) vs. model parameters.
initialId = parId[parId <= p]
paramId = parId[parId > p & parId <= (p + x_i[1] + x_i[2] + 1)]
initial_new = MCMC_obj$par[1:p]
param_new = MCMC_obj$par[-(1:p)]
# Position of the hyper-parameter in the full parameter vector.
hyperId = (p + MCMC_setting$x_i[1] + MCMC_setting$x_i[2] + 1)
if(method == "admcmc"){
par_probs = MCMC_obj$par_probs
RawTransParam = MCMC_obj$par[parId]
# Move to log scale where requested, then draw one joint MVN proposal.
RawTransParam[isLog == 1] = log(RawTransParam[isLog == 1])
RawTransParam_new = mvrnorm(1,RawTransParam, MCMC_setting$PCOV)
prior_proposal_offset = 0
# Accumulate the log prior ratio (the MVN proposal is symmetric, so no
# proposal-density correction appears here).
for(i in 1:length(parId)){
newdiff = 0
if(priorId[i] %in% c(1,3,4)){
pr = MCMC_setting$prior[[ priorId[i] ]]
newdiff = dnorm(RawTransParam_new[i], pr[1], pr[2], log = T) - dnorm(RawTransParam[i], pr[1], pr[2], log = T)
par_probs[priorId[i]] = dnorm(RawTransParam_new[i], pr[1], pr[2], log = T)
}
if(priorId[i] == 2){
pr = MCMC_setting$prior[[priorId[i]]]
newdiff = dunif(RawTransParam_new[i], pr[1], pr[2], log = T) - dunif(RawTransParam[i], pr[1], pr[2], log = T)
par_probs[priorId[i]] = dunif(RawTransParam_new[i], pr[1], pr[2], log = T)
}
if(priorId[i] == 5){
pr = MCMC_setting$prior[[priorId[i]]]
newdiff = dgamma(RawTransParam_new[i], pr[1], pr[2], log = T) - dgamma(RawTransParam[i], pr[1], pr[2], log = T)
par_probs[priorId[i]] = dgamma(RawTransParam_new[i], pr[1], pr[2], log = T)
}
prior_proposal_offset = prior_proposal_offset + newdiff
}
# Back-transform the log-scale parameters.
RawTransParam_new[isLog == 1] = exp(RawTransParam_new[isLog == 1])
# If the hyper-parameter (last entry, index d) moved, rescale the
# change-point block so the parameterization stays consistent.
if(hyperId %in% parId){
par_new[(p + x_i[2] + 1): (hyperId - 1)] = exp(log(par_new[(p + x_i[2] + 1): (hyperId - 1)]) *
RawTransParam[d] / RawTransParam_new[d])
}
par_new[parId] = RawTransParam_new
initial_new[initialId] = par_new[initialId]
param_new = par_new[-(1:p)]
# Update_Param recomputes the ODE path and likelihood and performs the
# Metropolis accept/reject using prior_proposal_offset.
update_res = Update_Param(param_new, initial_new, MCMC_setting$times, MCMC_obj$OriginTraj,
MCMC_setting$x_r, MCMC_setting$x_i, MCMC_setting$Init, MCMC_setting$gridsize, MCMC_obj$coalLog,prior_proposal_offset,
MCMC_setting$t_correct, model = MCMC_setting$model,
volz = MCMC_setting$likelihood == "volz")
if(update_res$accept){
# Accepted: commit parameters, trajectories and likelihood values.
MCMC_obj$par = par_new
MCMC_obj$FT = update_res$FT
MCMC_obj$Ode_Traj_coarse = update_res$Ode
MCMC_obj$betaN = update_res$betaN
MCMC_obj$coalLog = update_res$coalLog
MCMC_obj$LatentTraj = update_res$LatentTraj
MCMC_obj$par_probs = par_probs
}
return(list(MCMC_obj = MCMC_obj))
}else if(method == "jointProp"){
# update some parameters together, some parameters alone
if(length(parId) != length(proposeId)){
stop("propsals do not match")
}
RawTransParam = MCMC_obj$par[parId]
RawTransParam[isLog == 1] = log(RawTransParam[isLog == 1])
RawTransParam_new = RawTransParam
prior_proposal_offset = 0
for(i in 1:length(parId)){
newdiff = 0
par_probs = MCMC_obj$par_probs
if(priorId[i] %in% c(1,3,4)){
pr = MCMC_setting$prior[[ priorId[i] ]]
po = MCMC_setting$proposal[[proposeId[i]]]
# Symmetric uniform random walk of half-width po.
RawTransParam_new[i] = RawTransParam[i] + runif(1,-po, po)
newdiff = dnorm(RawTransParam_new[i], pr[1], pr[2], log = T) - dnorm(RawTransParam[i], pr[1], pr[2], log = T)
par_probs[priorId[i]] = dnorm(RawTransParam_new[i], pr[1], pr[2], log = T)
}
if(priorId[i] == 2){
pr = MCMC_setting$prior[[priorId[i]]]
po = MCMC_setting$proposal[[proposeId[i]]]
RawTransParam_new[i] = RawTransParam[i] + runif(1,-po, po)
newdiff = dunif(RawTransParam_new[i], pr[1], pr[2], log = T) - dunif(RawTransParam[i], pr[1], pr[2], log = T)
par_probs[priorId[i]] = dunif(RawTransParam_new[i], pr[1], pr[2], log = T)
}
# Gamma-prior branch intentionally disabled in this method:
# if(priorId[i] == 5){
# pr = MCMC_setting$prior[[priorId[i]]]
# po = MCMC_setting$proposal[[proposeId[i]]]
# u = runif(1,-po, po)
# RawTransParam_new[i] = RawTransParam[i] * exp(u)
# newdiff = dgamma(RawTransParam_new[i], pr[1], pr[2], log = T) - u - dgamma(RawTransParam[i], pr[1], pr[2], log = T)
# par_probs[priorId[i]] = dgamma(RawTransParam_new[i], pr[1], pr[2], log = T)
# }
prior_proposal_offset = prior_proposal_offset + newdiff
}
RawTransParam_new[isLog == 1] = exp(RawTransParam_new[isLog == 1])
# Same hyper-parameter rescaling as in the "admcmc" branch.
if(hyperId %in% parId){
par_new[(p + x_i[2] + 1): (hyperId - 1)] = exp(log(par_new[(p + x_i[2] + 1): (hyperId - 1)]) *
RawTransParam[d] / RawTransParam_new[d])
}
par_new[parId] = RawTransParam_new
initial_new[initialId] = par_new[initialId]
param_new = par_new[-(1:p)]
update_res = Update_Param(param_new, initial_new, MCMC_setting$times, MCMC_obj$OriginTraj,
MCMC_setting$x_r, MCMC_setting$x_i, MCMC_setting$Init, MCMC_setting$gridsize, MCMC_obj$coalLog,prior_proposal_offset,
MCMC_setting$t_correct, model = MCMC_setting$model,
volz = MCMC_setting$likelihood == "volz")
if(update_res$accept){
MCMC_obj$par = par_new
MCMC_obj$FT = update_res$FT
MCMC_obj$Ode_Traj_coarse = update_res$Ode
MCMC_obj$betaN = update_res$betaN
MCMC_obj$coalLog = update_res$coalLog
MCMC_obj$LatentTraj = update_res$LatentTraj
MCMC_obj$par_probs = par_probs
}
return(list(MCMC_obj = MCMC_obj))
}
}
# Check that two lists are structurally compatible: same number of
# elements and matching element lengths position by position.
# Returns TRUE/FALSE.
check_list_eq <- function(list1, list2) {
  same_length <- length(list1) == length(list2)
  lens1 <- unlist(lapply(list1, length))
  lens2 <- unlist(lapply(list2, length))
  same_shape <- isTRUE(all.equal(lens1, lens2, check.attributes = FALSE))
  same_length && same_shape
}
# Random-walk Metropolis update of each parameter group in `parIdlist`,
# one group at a time (in contrast to update_Param_joint, which proposes
# every parameter in a single move).
#
# parIdlist    : list of integer vectors; each element is a group of
#                parameter indices proposed together
# isLoglist    : parallel list; 1 = propose that parameter on log scale
# priorIdlist  : parallel list of prior ids (1-4 use a normal density on
#                the proposal scale; 5 is the log-normal hyper-parameter)
# proposeIdlist: parallel list of proposal ids into MCMC_setting$proposal
# hyper        : if TRUE, also update the hyper-parameter (prior id 5)
# joint        : if TRUE, include the incidence likelihood via
#                Update_Param_JointData; otherwise use Update_Param
# enable       : c(use coalescent likelihood, use incidence likelihood)
#
# Returns list(MCMC_obj = ..., par_probs = ...).
Update_Param_each_Norm = function(MCMC_obj, MCMC_setting, parIdlist, isLoglist, priorIdlist, proposeIdlist, hyper = TRUE, joint = FALSE, enable = c(TRUE, TRUE)){
  if(is.null((parIdlist))){
    return(list(MCMC_obj = MCMC_obj))
  }
  if(!check_list_eq(parIdlist, isLoglist)){
    stop("parameters do not match")
  }
  if(!check_list_eq(parIdlist, priorIdlist)){
    stop("priors do not match")
  }
  if(!check_list_eq(parIdlist, proposeIdlist)){
    # BUG FIX: this branch checks the proposal list but previously
    # repeated the misleading "priors do not match" message.
    stop("proposals do not match")
  }
  d = length(parIdlist)
  p = MCMC_obj$p
  x_i = MCMC_setting$x_i
  # Position of the hyper-parameter in the full parameter vector.
  hyperId = (p + MCMC_setting$x_i[1] + MCMC_setting$x_i[2] + 1)
  for(i in 1:d){
    par_probs = MCMC_obj$par_probs
    par_new = MCMC_obj$par
    initial_new = MCMC_obj$par[1:p]
    param_new = MCMC_obj$par[-(1:p)]
    subd = length(parIdlist[[i]])
    parId = parIdlist[[i]]
    isLog = isLoglist[[i]]
    priorId = priorIdlist[[i]]
    proposeId = proposeIdlist[[i]]
    initialId = parId[parId <= p]
    paramId = parId[parId > p & parId <= (p + x_i[1] + x_i[2] + 1)]
    RawTransParam = MCMC_obj$par[parId]
    # Work on the log scale where requested.
    RawTransParam[isLog == 1] = log(RawTransParam[isLog == 1])
    RawTransParam_new = RawTransParam
    prior_proposal_offset = 0
    # BUG FIX: the inner loop previously reused `i`, shadowing the outer
    # group index; renamed to `j`.
    for(j in 1:length(parId)){
      newdiff = 0
      if(priorId[j] %in% c(1:4)){
        pr = MCMC_setting$prior[[ priorId[j] ]]
        po = MCMC_setting$proposal[[proposeId[j]]]
        # Symmetric uniform random walk of half-width po.
        RawTransParam_new[j] = RawTransParam[j] + runif(1, -po, po)
        newdiff = dnorm(RawTransParam_new[j], pr[1], pr[2], log = TRUE) - dnorm(RawTransParam[j], pr[1], pr[2], log = TRUE)
        par_probs[priorId[j]] = dlnorm(exp(RawTransParam_new[j]), pr[1], pr[2], log = TRUE)
      }
      if(hyper == TRUE){
        if(priorId[j] == 5){
          pr = MCMC_setting$prior[[priorId[j]]]
          po = MCMC_setting$proposal[[proposeId[j]]]
          u = runif(1, -po, po)
          # Multiplicative proposal; `- u` is the log-Jacobian correction.
          RawTransParam_new[j] = RawTransParam[j] * exp(u)
          newdiff = dlnorm(RawTransParam_new[j], pr[1], pr[2], log = TRUE) - u - dlnorm(RawTransParam[j], pr[1], pr[2], log = TRUE)
          par_probs[priorId[j]] = dlnorm(RawTransParam_new[j], pr[1], pr[2], log = TRUE)
        }
      }
      prior_proposal_offset = prior_proposal_offset + newdiff
    }
    # Back-transform the log-scale parameters.
    RawTransParam_new[isLog == 1] = exp(RawTransParam_new[isLog == 1])
    # If the hyper-parameter (last entry of this group) moved, rescale
    # the change-point block so the parameterization stays consistent.
    if(hyperId %in% parId){
      par_new[(p + x_i[2] + 1): (hyperId - 1)] = exp(log(par_new[(p + x_i[2] + 1): (hyperId - 1)]) *
        RawTransParam[subd] / RawTransParam_new[subd])
    }
    par_new[parId] = RawTransParam_new
    initial_new[initialId] = par_new[initialId]
    param_new = par_new[-(1:p)]
    update_res = NULL
    # The Update_Param* helpers recompute the ODE path and likelihoods
    # and perform the Metropolis accept/reject step.
    if(joint){
      update_res = Update_Param_JointData(param_new, initial_new, MCMC_obj$incid_par, MCMC_setting$times, MCMC_obj$OriginTraj,
        MCMC_setting$x_r, MCMC_setting$x_i, MCMC_setting$Init, MCMC_setting$Incidence, MCMC_setting$gridsize, MCMC_obj$coalLog, MCMC_obj$IncidLog, prior_proposal_offset,
        MCMC_setting$t_correct, model = MCMC_setting$model,
        volz = MCMC_setting$likelihood == "volz", addCoal = enable[1], addIncid = enable[2])
    }else{
      update_res = Update_Param(param_new, initial_new, MCMC_setting$times, MCMC_obj$OriginTraj,
        MCMC_setting$x_r, MCMC_setting$x_i, MCMC_setting$Init, MCMC_setting$gridsize, MCMC_obj$coalLog, prior_proposal_offset,
        MCMC_setting$t_correct, model = MCMC_setting$model,
        volz = MCMC_setting$likelihood == "volz")
    }
    if(update_res$accept){
      # Accepted: commit parameters, trajectories and likelihood values.
      MCMC_obj$par = par_new
      MCMC_obj$FT = update_res$FT
      MCMC_obj$Ode_Traj_coarse = update_res$Ode
      MCMC_obj$betaN = update_res$betaN
      MCMC_obj$coalLog = update_res$coalLog
      MCMC_obj$LatentTraj = update_res$LatentTraj
      MCMC_obj$par_probs = par_probs
      if(joint){
        #print(update_res$incidLog - log_incidence(MCMC_obj$LatentTraj, MCMC_setting$Incidence,MCMC_obj$incid_par))
        MCMC_obj$IncidLog = update_res$incidLog
      }
    }
  }
  return(list(MCMC_obj = MCMC_obj, par_probs = par_probs))
}
# Elliptical-slice-sampling update of the latent (non-centered)
# trajectory, holding the model parameters fixed.
#
# i      : iteration number, used only in the diagnostic messages
# joint  : if TRUE, the incidence likelihood is included alongside the
#          coalescent likelihood
# enable : c(use coalescent likelihood, use incidence likelihood)
#
# Returns list(MCMC_obj = ...) with the refreshed trajectory and
# likelihood values.
updateTraj_general_NC = function(MCMC_obj,MCMC_setting,i, joint = F, enable = c(T,T)){
new_CoalLog = 0
if(MCMC_setting$likelihood == "structural"){
# Structured-population likelihood: the slice sampler returns the
# coalescent log-likelihood directly.
# NOTE(review): in this branch new_CoalLog keeps its initial value 0
# and still overwrites MCMC_obj$coalLog after the if/else below, which
# discards the Res$CoalLog assigned here -- confirm this is intended.
Res = ESlice_general_NC_Structural(MCMC_obj$OriginTraj, MCMC_obj$Ode_Traj_coarse,
MCMC_obj$FT, MCMC_setting$Init_Detail,
MCMC_obj$par[(MCMC_obj$p+1):(MCMC_obj$p + MCMC_setting$x_i[1] + MCMC_setting$x_i[2])],
MCMC_setting$x_r, MCMC_setting$x_i,
coal_log = MCMC_obj$coalLog,
model = MCMC_setting$model)
MCMC_obj$coalLog = Res$CoalLog
}else{
if(joint){
# Trajectory update against coalescent + incidence data.
Res = ESlice_general_NC_joint(MCMC_obj$OriginTraj,MCMC_obj$Ode_Traj_coarse,
MCMC_obj$FT, MCMC_obj$par[1:MCMC_obj$p], MCMC_obj$incid_par, MCMC_setting$Init, MCMC_setting$Incidence,
betaN = betaTs(MCMC_obj$par[(MCMC_obj$p+1):(MCMC_setting$x_i[1]+MCMC_setting$x_i[2]+MCMC_obj$p)],MCMC_obj$LatentTraj[,1], MCMC_setting$x_r,MCMC_setting$x_i),
MCMC_setting$t_correct,lambda = 1,
coal_log = MCMC_obj$coalLog, IncidLog = MCMC_obj$IncidLog, MCMC_setting$gridsize,
volz = (MCMC_setting$likelihood == "volz"), model = MCMC_setting$model,addCoal = enable[1], addIncid = enable[2])
}else{
# Trajectory update against coalescent data only.
Res = ESlice_general_NC(MCMC_obj$OriginTraj,MCMC_obj$Ode_Traj_coarse,
MCMC_obj$FT, MCMC_obj$par[1:MCMC_obj$p], MCMC_setting$Init,
betaN = betaTs(MCMC_obj$par[(MCMC_obj$p+1):(MCMC_setting$x_i[1]+MCMC_setting$x_i[2]+MCMC_obj$p)],MCMC_obj$LatentTraj[,1], MCMC_setting$x_r,MCMC_setting$x_i),
MCMC_setting$t_correct,lambda = 1,
coal_log = MCMC_obj$coalLog, MCMC_setting$gridsize,
volz = (MCMC_setting$likelihood == "volz"), model = MCMC_setting$model)
}
# Recompute the coalescent log-likelihood at the new trajectory, used
# below as a consistency check against the slice sampler's value.
if(MCMC_setting$likelihood == "volz"){
new_CoalLog = volz_loglik_nh_adj(MCMC_setting$Init, Res$LatentTraj,
betaTs(MCMC_obj$par[(MCMC_obj$p+1):(MCMC_setting$x_i[1]+MCMC_setting$x_i[2]+MCMC_obj$p)],MCMC_obj$LatentTraj[,1], MCMC_setting$x_r,MCMC_setting$x_i),
MCMC_setting$t_correct,
index = MCMC_setting$x_i[3:4],enable = enable[1])
}else{
new_CoalLog = coal_loglik(MCMC_setting$Init,LogTraj(Res$LatentTraj ),MCMC_setting$t_correct,
MCMC_obj$par[5],MCMC_setting$gridsize)
}
}
# Diagnostics: a large likelihood drop suggests a problem with the
# elliptical slice move.
if(!joint && new_CoalLog - MCMC_obj$coalLog < -20){
print(paste("problem with eslice traj" , i))
print(paste("compare list res", new_CoalLog - Res$CoalLog))
}
if(joint && new_CoalLog - MCMC_obj$coalLog + Res$IncidLog - MCMC_obj$IncidLog < -20){
print(paste("problem with eslice traj" , i))
print(paste("compare list res", new_CoalLog - Res$CoalLog))
}
# Commit the new trajectory and its likelihood contributions.
MCMC_obj$coalLog = new_CoalLog
MCMC_obj$LatentTraj = Res$LatentTraj
MCMC_obj$OriginTraj = Res$OriginTraj
MCMC_obj$logOrigin = Res$logOrigin
if(joint){
MCMC_obj$IncidLog = Res$IncidLog
}
return(list(MCMC_obj = MCMC_obj))
}
# Evaluate the cached log prior densities: log-normal priors for the
# three rate parameters par[2..4] (slots 1-3) and for the
# hyper-parameter par[hyperId] (slot 5). Slot 4 is left at 0.
# priorList elements are c(meanlog, sdlog) pairs.
prob_Eval <- function(par, priorList, hyperId) {
  log_prob <- numeric(5)
  for (k in seq_len(3)) {
    prior_k <- priorList[[k]]
    log_prob[k] <- dlnorm(par[k + 1], prior_k[1], prior_k[2], log = TRUE)
  }
  hyper_prior <- priorList[[4]]
  log_prob[5] <- dlnorm(par[hyperId], hyper_prior[1], hyper_prior[2], log = TRUE)
  log_prob
}
####
# Elliptical-slice update of the parameters (change points included)
# jointly with the trajectory, using coalescent + incidence data.
#
# priorList : list of c(meanlog, sdlog) log-normal prior parameters
#             (consumed by prob_Eval below)
# ESS_vec   : vector selecting which parameters the slice move updates
# i         : iteration number, used in the diagnostic message
# enable    : c(use coalescent likelihood, use incidence likelihood)
#
# Returns the updated MCMC_obj directly (not wrapped in a list, unlike
# the other update functions in this file).
update_Par_ESlice_combine2 = function(MCMC_obj, MCMC_setting, priorList, ESS_vec,i,enable=c(T,T)){
p = MCMC_setting$p
#print(paste("==",MCMC_obj$IncidLog - log_incidence(MCMC_obj$LatentTraj, MCMC_setting$Incidence, MCMC_obj$incid_par)))
ESlice_Result = ESlice_par_General_JointData(MCMC_obj$par, MCMC_obj$incid_par, MCMC_setting$times, MCMC_obj$OriginTraj,
priorList, MCMC_setting$x_r, MCMC_setting$x_i, MCMC_setting$Init, MCMC_setting$Incidence,
MCMC_setting$gridsize, ESS_vec, coal_log = MCMC_obj$coalLog, incid_log = MCMC_obj$IncidLog, MCMC_setting$t_correct,
addCoal = enable[1],addIncid = enable[2])
# Commit the slice-sampler output.
MCMC_obj$par = ESlice_Result$par
MCMC_obj$LatentTraj = ESlice_Result$LatentTraj
MCMC_obj$betaN = ESlice_Result$betaN
MCMC_obj$FT = ESlice_Result$FT
# Diagnostic: a big drop in the joint log-likelihood indicates a
# problem with the change-point slice move.
if(ESlice_Result$CoalLog + ESlice_Result$IncidLog - MCMC_obj$IncidLog - MCMC_obj$coalLog < -25){
print(paste("ChangePoint slice sampling problem",i," th"))
}
MCMC_obj$coalLog = ESlice_Result$CoalLog
MCMC_obj$Ode_Traj_coarse = ESlice_Result$OdeTraj
# Refresh the cached log prior densities for the updated parameters;
# the hyper-parameter sits at index sum(x_i[1:2]) + p + 1.
MCMC_obj$par_probs = prob_Eval(MCMC_obj$par, priorList, sum(MCMC_setting$x_i[1:2]) + p + 1)
MCMC_obj$IncidLog = ESlice_Result$IncidLog
return(MCMC_obj)
}
##########
# Continue a general MCMC run from an existing sampler state.
#
# MCMC_setting : static settings; PCOV * tune becomes the adaptive
#                proposal covariance
# MCMC_obj     : current state (par, LatentTraj, log-likelihoods)
# niter        : number of additional iterations
# updateVec    : indicators selecting which parameters to update
# parIdlist/isLoglist/priorIdlist : per-group settings forwarded to the
#                single-parameter updater
# updateHP     : if TRUE, also update hyper-parameters each iteration
# thin         : keep every `thin`-th latent trajectory (default 1 =
#                keep all). BUG FIX: `thin` was used below but was never
#                defined anywhere; it is now a formal argument.
#
# Returns traces (par, Trajectory, l, l1, l2, l3) plus the settings and
# the final state.
General_MCMC_cont = function(MCMC_setting, MCMC_obj, niter, updateVec = c(1,1,1,0,1,1,1),
                             PCOV, tune = 0.01, method = "admcmc",
                             parIdlist = NULL, isLoglist = NULL, priorIdlist = NULL,
                             updateHP = FALSE, thin = 1){
  p = MCMC_setting$p
  nparam = sum(MCMC_setting$x_i[1:2]) + 1
  # NOTE(review): assumes MCMC_obj$par carries a dim attribute; for a
  # plain vector dim(...)[2] is NULL and matrix() errors -- confirm the
  # shape of `par` upstream.
  params = matrix(nrow = niter, ncol = dim(MCMC_obj$par)[2])
  if(dim(PCOV)[2] != sum(updateVec[1:5])){
    stop("Proposal matrix does not have correct dim")
  }
  l = numeric(niter)   # latent-trajectory log-density trace
  l1 = l               # coalescent log-likelihood trace
  l3 = l               # change-point prior trace
  l2 = matrix(ncol = 5, nrow = niter)
  tjs = array(dim = c(dim(MCMC_obj$LatentTraj), niter / thin))
  # Map updateVec entries to parameter indices / log-scale flags.
  logIndexAll = c(1,0,1,0)
  parIndexALL = c(p:(p + MCMC_setting$x_i[2]), nparam + p)
  parId = parIndexALL[updateVec[c(1:3,5)] > 0]
  logId = logIndexAll[updateVec[c(1:3,5)] > 0]
  priorId = which(updateVec[c(1:4,5)] > 0)
  proposeId = which(updateVec[c(1:4,5)] > 0)
  MCMC_setting$PCOV = PCOV * tune
  for (i in 1:niter){
    # Joint parameter move; on error keep the current state.
    MCMC_obj = tryCatch({update_Param_joint(MCMC_obj, MCMC_setting, method, parId, logId, priorId, proposeId)$MCMC_obj},
      error = function(cond){
        message(cond)
        return(MCMC_obj)
      })
    # Per-group parameter move.
    # BUG FIX: the fifth argument was `options$priorIdlist`, which
    # subsets the base `options` function (a closure) and always errors;
    # pass priorIdlist directly. The proposal ids reuse priorIdlist, as
    # the original call did in its last argument.
    # NOTE(review): Update_Param_each is not defined in this file (which
    # defines Update_Param_each_Norm) -- confirm it exists in the package.
    MCMC_obj = tryCatch({Update_Param_each(MCMC_obj, MCMC_setting, parIdlist,
      isLoglist, priorIdlist, priorIdlist)$MCMC_obj
    }, error = function(cond){
      message(cond)
      return(MCMC_obj)
    })
    if(updateHP){
      MCMC_obj = update_hyper(MCMC_obj, MCMC_setting, i)$MCMC_obj
    }
    # Store every `thin`-th trajectory (all of them when thin == 1).
    if(i %% thin == 0){
      tjs[,,i / thin] = MCMC_obj$LatentTraj
    }
    params[i,] = MCMC_obj$par
    l[i] = MCMC_obj$logOrigin
    l1[i] = MCMC_obj$coalLog
    # NOTE(review): MCMC_obj$chpr is never assigned in this file; if it
    # is NULL this assignment errors -- confirm it is set elsewhere.
    l3[i] = MCMC_obj$chpr
  }
  return(list(par = params, Trajectory = tjs, l = l, l1 = l1, l2 = l2, l3 = l3, MX = MCMC_setting$PCOV, MCMC_setting = MCMC_setting, MCMC_obj = MCMC_obj))
}
|
6ee1fef36d9c16b05d5f88447dd9b5d1e1f80f8f
|
f80467fd390ed06c6445a638d73656c60d6a86bf
|
/Statistics and Data Analysis for Financial Engineering/ch_15-16/Rlab.R
|
5ed51a67234016757effdeab4d9dbf7e79dd80e6
|
[
"MIT"
] |
permissive
|
mlozanoqf/Statistical-Computing
|
d534baed17e8ea3510307b98fdf74cea0a73d4d5
|
606e6bb222013c38867c1aee7e79fae762e7a445
|
refs/heads/master
| 2022-11-22T15:17:05.445692
| 2020-07-24T19:18:39
| 2020-07-24T19:18:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,120
|
r
|
Rlab.R
|
#
# Written by:
# --
# John L. Weatherwax 2009-04-21
#
# email: wax@alum.mit.edu
#
# Please send comments and especially bug reports to the
# above email address.
#
#-----
if( !require('linprog') ){
install.packages('linprog', dependencies=TRUE, repos='http://cran.rstudio.com/')
}
library(linprog)
save_plots = FALSE
set.seed(0)
src.dir <- "D:/Projects/MSDS-RiskAnalytics/Module_08/"
setwd(src.dir)
source('util_fns.R')
# Test the function "efficient_frontier" using the book's example on EPage 478:
#
library(Ecdat)
data(CRSPday)
R = 100*CRSPday[,4:6] # in percents
mean_vect = apply(R, 2, mean)
cov_mat = cov(R)
sd_vect = sqrt(diag(cov_mat))
# The target portfolio returns:
muP = seq(0.05, 0.14, length.out=300)
mufree = 1.3/253
result = efficient_frontier(R, muP, mufree)
mean_vect = result$mean_vect
sd_vect = result$sd_vect
weights = result$weights
ind_ms = result$max_sharpe_ind
# Plot our results:
#
plot_efficient_frontier(result, "Duplicate Fig. 16.3")
print(weights[ind_ms, ]) # print the weights of the tangency portfolio
text(sd_vect[1], mean_vect[1], 'GE', cex=1.5)
text(sd_vect[2], mean_vect[2], 'IBM', cex=1.5)
text(sd_vect[3], mean_vect[3], 'Mobil', cex=1.5)
# Test the function "maximize_expected_utility" using the book's example on EPage 487:
#
dat = read.csv("../../BookCode/Data/Stock_Bond.csv", header=TRUE)
y = dat[, c(3, 5, 7, 9, 11, 13, 15, 17, 19, 21)]
n = dim(y)[1]
m = dim(y)[2]
r= y[-1,] / y[-n, ] - 1
nlambda = 250
loglambda_vect = seq(2, 8, length=nlambda)
meu_result = maximize_expected_utility(r, loglambda_vect)
# This code reproduces the output of Fig. 16.8:
#
par(mfrow=c(1, 3))
#
plot(loglambda_vect, meu_result$mu_vect, type='l', xlab='log(\u03BB)', ylab='E(return)') # \u03BB is the unicode character for the LaTex symbol $\lambda$
grid()
plot(loglambda_vect, meu_result$sd_vect, type='l', xlab='log(\u03BB)', ylab='SD(return)')
grid()
plot(meu_result$sd_vect, meu_result$mu_vect, type='l', xlab='SD(return)', ylab='E(return)', main='Efficient Frontier')
grid()
par(mfrow=c(1, 1))
# P 1: EPage 488-489:
# Daily percent returns for six stocks; efficient frontier with per-asset
# weight bounds of [-0.1, 0.5].
dat = read.csv("../../BookCode/Data/Stock_Bond.csv", header=TRUE)
prices = cbind(dat$GM_AC, dat$F_AC, dat$CAT_AC, dat$UTX_AC, dat$MRK_AC, dat$IBM_AC)
n = dim(prices)[1]
returns = 100 * ( prices[2:n,] / prices[1:(n-1),] - 1 )  # simple daily returns in percent
#pairs(returns)
mean_vect = colMeans(returns)
cov_mat = cov(returns)
sd_vect = sqrt(diag(cov_mat) )
mufree = 3/365  # daily risk-free rate (percent)
muP = seq(min(mean_vect), max(mean_vect), length.out=500)  # target portfolio returns
result = efficient_frontier(returns, muP, mufree, w_lower_limit=-0.1, w_upper_limit=0.5)
mean_vect = result$mean_vect
sd_vect = result$sd_vect
weights = result$weights
ind_ms = result$max_sharpe_ind  # index of the tangency (max-Sharpe) portfolio
if( save_plots ){ postscript("../../WriteUp/Graphics/Chapter16/chap_16_rlab_prob_1_ef.eps", onefile=FALSE, horizontal=FALSE) }
plot_efficient_frontier(result, 'Problem 1')
print(round(weights[ind_ms,], 4)) # print the weights of the tangency portfolio
# Label each asset at its (risk, return) coordinate, in the same order as the
# columns of `prices` above.
text(sd_vect[1], mean_vect[1], 'GM', cex=1.5)
text(sd_vect[2], mean_vect[2], 'F', cex=1.5)
text(sd_vect[3], mean_vect[3], 'CAT', cex=1.5)
text(sd_vect[4], mean_vect[4], 'UTX', cex=1.5)
text(sd_vect[5], mean_vect[5], 'MRK', cex=1.5)
# Fixed label: column 6 is dat$IBM_AC (was mislabeled 'TBM').
text(sd_vect[6], mean_vect[6], 'IBM', cex=1.5)
if( save_plots ){ dev.off() }
# P 2: EPage 489
# Mix the tangency portfolio with the risk-free asset to hit a target return.
ind = result$max_sharpe_ind
E_R_P = 0.0007 * 100 # the desired return (as a percent)
E_R_T = result$muP[ind] # the return of the tangency portfolio
c( E_R_P, E_R_T, mufree )
# omega = fraction of capital placed in the tangency portfolio;
# the remaining (1 - omega) earns the risk-free rate.
omega = ( E_R_P - mufree ) / ( E_R_T - mufree )
print(sprintf("omega= %10.6f", omega) )
print("Tangency porfolio weights:")
ind = result$max_sharpe_ind
print(round(omega*result$weights[ind,], 4))
S = 100000  # total capital to invest
S * omega * result$weights[ind, ]  # dollar amount held in each risky asset
# P 4; EPage 489-490
# Efficient frontier for four stocks with weight bounds [-0.5, 0.5].
dat = read.csv("../../BookCode/Data/FourStocks_Daily2013.csv", header=TRUE)
prices = dat[, -1]  # drop the first (date) column
n = dim(prices)[1]
returns = 100 * ( prices[-1,] / prices[-n,] - 1 )  # daily percent returns
mufree = 1.3/365
muP = seq(0.045, 0.06, length.out=500)  # target portfolio returns
result = efficient_frontier(returns, muP, mufree, w_lower_limit=-0.5, w_upper_limit=0.5)
mean_vect = result$mean_vect
sd_vect = result$sd_vect
weights = result$weights
ind_ms = result$max_sharpe_ind
if( save_plots ){ postscript("../../WriteUp/Graphics/Chapter16/chap_16_rlab_prob_4_ef.eps", onefile=FALSE, horizontal=FALSE) }
plot_efficient_frontier(result, 'Problem 4')
# Annualize the Sharpe ratio with the usual sqrt(252) trading-day scaling.
print(sprintf('max sharpe (daily)= %10.6f', result$max_sharpe) )
print(sprintf('max sharpe (yearly)= %10.6f', result$max_sharpe * sqrt(252)))
print('Tangency portfolio weights:')
print(round(weights[ind_ms,], 4)) # print the weights of the tangency portfolio
text(sd_vect[1], mean_vect[1], 'AAPL', cex=1.5)
text(sd_vect[2], mean_vect[2], 'XOM', cex=1.5)
text(sd_vect[3], mean_vect[3], 'TGT', cex=1.5)
text(sd_vect[4], mean_vect[4], 'MCD', cex=1.5)
if( save_plots ){ dev.off() }
# P 5: EPage 490
# Expected-utility maximization over a grid of risk-aversion values.
dat = read.csv("../../BookCode/Data/FourStocks_Daily2013.csv", header=TRUE)
prices = dat[, -1]
n = dim(prices)[1]
returns = 100 * ( prices[-1,] / prices[-n,] - 1 )
loglambda_vect = seq(1.e-3, 8, length.out=200)
meu_result = maximize_expected_utility(returns, loglambda_vect)
# Plain-text axis label when writing EPS (presumably because the postscript
# device cannot render the unicode lambda -- confirm).
if( save_plots ){
  postscript("../../WriteUp/Graphics/Chapter16/chap_16_rlab_prob_5.eps", onefile=FALSE, horizontal=FALSE)
  xlab='log(lambda)'
}else{
  xlab='log(\u03BB)' # \u03BB is the unicode character for the LaTex symbol $\lambda$
}
par(mfrow=c(1, 3))
plot(loglambda_vect, meu_result$mu_vect, type='l', xlab=xlab, ylab='E(return)')
abline(h=0.0506, col='red')  # reference line at the target return mu_P used below
grid()
plot(loglambda_vect, meu_result$sd_vect, type='l', xlab=xlab, ylab='SD(return)')
grid()
plot(meu_result$sd_vect, meu_result$mu_vect, type='l', xlab='SD(return)', ylab='E(return)', main='Efficient Frontier')
grid()
par(mfrow=c(1, 1))
if( save_plots ){ dev.off() }
# Find the risk-aversion value whose optimal portfolio return is closest to mu_P.
mu_P = 0.0506
indx = which.min(abs(meu_result$mu_vect - mu_P))
print(sprintf('For mu_P= %f take lambda= %f', mu_P, exp(loglambda_vect[indx])))
#indx = which.min(abs(meu_result$sd_vect - sqrt(result$min_variance) ) )
#print(sprintf('closest I can get to min_sd= %f is sf where lambda= %f', sqrt(result$min_variance), sqrt(meu_result$sd_vect[indx]), exp(loglambda_vect[indx]) ))
dat = read.csv("../../BookCode/Data/Stock_Bond.csv", header=TRUE)
prices = cbind(dat$GM_AC, dat$F_AC, dat$CAT_AC, dat$UTX_AC, dat$MRK_AC, dat$IBM_AC)
n = dim(prices)[1]
returns = 100 * ( prices[2:n,] / prices[1:(n-1),] - 1 )
# As a "test" of the routine possible_expected_returns lets verify that a long only portfolio has its min/max returns
# given by the smallest/largest returns from the assets
#
possible_rets = possible_expected_returns(returns, B1=1.0, B2=0.0)
print('colMeans(returns)= ')
print(colMeans(returns))
print(sprintf('Long only portfolio return bounds: minRet= %f; maxRet= %f', possible_rets$minRet$opt, possible_rets$maxRet$opt))
# P 6: EPage 490-491
#
possible_rets = possible_expected_returns(returns, B1=0.3, B2=0.1)
print(sprintf('Problem 6: minRet= %f; maxRet= %f', possible_rets$minRet$opt, possible_rets$maxRet$opt))
# P 7: EPage 491
# NOTE(review): with B1 = B2 = 0.15 both solveLP calls report errors --
# likely infeasible constraints; confirm against the exercise statement.
possible_rets = possible_expected_returns(returns, B1=0.15, B2=0.15)
print(possible_rets) # note both solveLP report errors
|
18e09bedb0c7705ae29791d2047c8d7ddbab71be
|
36e72e5c8c4aee47143ebd1d23ad556fbca8bcb7
|
/man/factorContainer-class.Rd
|
ace792a1d5df93033d873a521d6417b78da3627f
|
[] |
no_license
|
mmrabe/designr
|
32e8c4c5f5367505aad1b0fb78c090857d812a40
|
b462c0c44f8ab603e7a4abe3cf3538711633ca8c
|
refs/heads/master
| 2023-05-23T21:33:18.977919
| 2023-05-05T12:39:48
| 2023-05-05T12:39:48
| 171,673,427
| 6
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 686
|
rd
|
factorContainer-class.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/classes.R, R/methods.R
\docType{class}
\name{factorContainer-class}
\alias{factorContainer-class}
\alias{show,factorContainer-method}
\title{Design matrix S4 functions}
\usage{
\S4method{show}{factorContainer}(object)
}
\arguments{
\item{object}{A \code{factorDesign} object.}
}
\description{
Design matrix S4 functions
}
\section{Methods (by generic)}{
\itemize{
\item \code{show(factorContainer)}: Display a factor container
}}
\examples{
des <- factor.design()
des <- fixed.factor("Factor1", c("1A","1B")) +
fixed.factor("Factor2", c("2A","2B")) +
random.factor("Subject", c("Factor1"))
}
|
6329e018952a80e5a545237b7ccf67437cf9dfdd
|
e4834a42c49b821d233ca29276ac8ec9cd16fb57
|
/man/compute_count.Rd
|
28c388c136734ae98d45d3c7b979fd631d618de6
|
[] |
no_license
|
XiaosuTong/ggstat
|
7a5103d8d8d6f5b7c69d6e225139f94f7469a7de
|
964aa346570f4a37f172f98b5a6d3e28f549e1f7
|
refs/heads/master
| 2021-01-18T01:43:59.638877
| 2016-04-04T19:19:47
| 2016-04-04T19:19:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 810
|
rd
|
compute_count.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/count-compute.R
\name{compute_count}
\alias{compute_count}
\title{Count unique observations.}
\usage{
compute_count(x, x_var, w_var = NULL)
}
\arguments{
\item{x}{Dataset-like object to count. Built-in methods for data frames,
grouped data frames and ggvis visualisations.}
\item{x_var, w_var}{Names of x and weight variables.}
}
\description{
Count unique observations.
}
\examples{
mtcars \%>\% compute_count(~cyl)
# Weight the counts by car weight value
mtcars \%>\% compute_count(~cyl, ~wt)
# If there's one weight value at each x, it effectively just renames columns.
pressure \%>\% compute_count(~temperature, ~pressure)
}
\seealso{
\code{\link{compute_bin}} For counting cases within ranges of
a continuous variable.
}
|
29e057d003ee44691c9fbdf089a794694f592245
|
4ee1c72700e82657faa1e7661164873569dfc5cd
|
/minfi_ppswan.R
|
36bb1082c43c2064cde3dc72abc5871f0e49a0e9
|
[] |
no_license
|
kpbioteam/minfi_ppswan
|
b66fad3560a927812d4a51245de9eb27dbc3ada6
|
56fb9eb4ab9b80b77793e4aa33ddcd56ea81fa36
|
refs/heads/master
| 2021-01-25T12:08:57.165223
| 2018-05-27T18:23:37
| 2018-05-27T18:23:37
| 123,456,610
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 359
|
r
|
minfi_ppswan.R
|
# Galaxy tool wrapper: apply minfi's SWAN normalisation to a saved RGChannelSet.
# Usage: Rscript minfi_ppswan.R <input.RData> <output.RData>

# we need that to not crash galaxy with an UTF8 error on German LC settings.
loc <- Sys.setlocale("LC_MESSAGES", "en_US.UTF-8")

# library() (unlike require()) stops immediately with a clear error when minfi
# is missing, instead of failing later with "could not find function".
library("minfi", quietly = TRUE)

args <- commandArgs(trailingOnly = TRUE)
# Fail fast with a usage message rather than an opaque NA-path error downstream.
if (length(args) < 2) {
  stop("usage: minfi_ppswan.R <input.RData> <output.RData>", call. = FALSE)
}
input <- args[1]
output <- args[2]

# The input .RData file is expected to hold a single object (the RGChannelSet).
RGSet <- get(load(input))
swan <- preprocessSWAN(RGSet, mSet = NULL, verbose = FALSE)
save(swan, file = output)
|
19517d9022b993cac6ac5e5b97cb663bf0cde979
|
7b2cacf99fe488c001d09b6a51eac439bdfa5272
|
/source/rCNV2/man/plot.rects.for.highlight.Rd
|
a8db1a0e571cc0dfa1666d97795e404609999a34
|
[
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] |
permissive
|
talkowski-lab/rCNV2
|
d4fc066478db96322b7aa062f4ece268098b9de3
|
7e97d4c1562372a6edd7f67cdf36d4167da216f8
|
refs/heads/master
| 2023-04-11T08:48:40.884027
| 2023-01-25T15:59:13
| 2023-01-25T15:59:13
| 178,399,375
| 14
| 4
|
MIT
| 2022-03-16T16:42:46
| 2019-03-29T12:13:54
|
R
|
UTF-8
|
R
| false
| true
| 895
|
rd
|
plot.rects.for.highlight.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/highlight_plot.R
\name{plot.rects.for.highlight}
\alias{plot.rects.for.highlight}
\title{Plot simple rectangles for locus highlight}
\usage{
\method{plot}{rects.for.highlight}(
xlefts,
xrights,
y0,
col = blueblack,
border = blueblack,
panel.height = 0.2,
rect.height.cex = 1,
y.axis.title = NULL
)
}
\arguments{
\item{xlefts}{left-most position(s) for rectangle(s)}
\item{xrights}{right-most position(s) for rectangle(s)}
\item{col}{color(s) to fill rectangle(s)}
\item{border}{color(s) for rectangle outline(s)}
\item{panel.height}{relative height of panel [default: 0.2]}
\item{rect.height.cex}{relative vertical expansion scalar for rectangles
[default: 1]}
\item{y.axis.title}{title for Y axis [default: no title]}
}
\description{
Add panel of simple colored rectangles to locus highlight
}
|
8bdfc3e287d34ce00a054d0c18225d60ee0f0192
|
1324597126961aacd0aa1fe9e49fb81095c1a237
|
/man/find_controls_by_GoF.Rd
|
6b990b4537f49867652b8a2e2e2f89143e283b52
|
[] |
no_license
|
TeamMacLean/atacr
|
7b7de56604a089cb312d4cf34921702c6c8a5ccc
|
96e43e75a56e39acbad2022021bf7c519aedfbb6
|
refs/heads/master
| 2020-07-09T14:48:43.565265
| 2018-06-11T10:44:23
| 2018-06-11T10:44:23
| 74,016,813
| 14
| 1
| null | 2016-11-25T11:37:38
| 2016-11-17T10:35:18
|
R
|
UTF-8
|
R
| false
| true
| 809
|
rd
|
find_controls_by_GoF.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/normalisation.R
\name{find_controls_by_GoF}
\alias{find_controls_by_GoF}
\title{find control windows by convergence method in https://academic.oup.com/biostatistics/article/13/3/523/248016/Normalization-testing-and-false-discovery-rate}
\usage{
find_controls_by_GoF(atacr, which = "bait_windows")
}
\arguments{
\item{atacr}{a list of SummarizedExperiment objects from atacr::make_counts()}
\item{which}{the subdivision of the genome to calculate GoF either 'whole_genome', 'bait_windows' or 'non_bait_windows'}
}
\value{
a character vector of window names
}
\description{
find control windows by convergence method in https://academic.oup.com/biostatistics/article/13/3/523/248016/Normalization-testing-and-false-discovery-rate
}
|
cb01f4868a454beb49aeea96513c92c342a0d7f8
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/queuecomputer/examples/as.server.list.Rd.R
|
99ce3e713507e90b0f92653702a29f7fcc3c9c95
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 389
|
r
|
as.server.list.Rd.R
|
# Example script extracted from the queuecomputer package documentation
# for as.server.list(); runs the documented example verbatim.
library(queuecomputer)
### Name: as.server.list
### Title: Creates a '"server.list"' object from a list of times and
### starting availability.
### Aliases: as.server.list
### ** Examples
# Create a server.list object with the first server available anytime before time 10,
# and the second server available between time 15 and time 30.
as.server.list(list(10, c(15,30)), c(1,0))
|
25b335c5723d424f5ac3f33f0d51cee8bb5d228c
|
a3b4960a2592ea3e610a2c01137c2b55a148bf6e
|
/src/BayesianNetworks/chapters/03chapter01.R
|
2b65d29961fec260ff9b0c6c6656eed26895c528
|
[
"CC-BY-NC-SA-4.0"
] |
permissive
|
wilsonify/ThinkBayes2
|
8ed69576054387f845b6dc6e94daf8d5f52b33a8
|
53f199d81b9ebd992739513a71d7bdd63a329866
|
refs/heads/master
| 2022-09-03T02:39:28.670333
| 2021-08-15T20:25:43
| 2021-08-15T20:25:43
| 238,930,208
| 0
| 0
|
MIT
| 2021-08-15T20:39:07
| 2020-02-07T13:29:01
|
Jupyter Notebook
|
UTF-8
|
R
| false
| false
| 2,570
|
r
|
03chapter01.R
|
# Title : An Introduction to R
# Objective : a simple Monte Carlo to explore the behavior of the two-sample t statistic
# Created by: thom
# Created on: 11/26/20
library(LearnBayes)
# Exploratory look at the LearnBayes student survey data.
studentdata <- LearnBayes::studentdata
data(studentdata)
studentdata[1, ]
# NOTE(review): attach() puts the survey columns on the search path; the rest
# of this section relies on it (Drink, WakeUp, ToSleep, Gender, Haircut).
attach(studentdata)
table(Drink)
table(Drink)
barplot(table(Drink),xlab="Drink",ylab="Count")
hours.of.sleep <- WakeUp - ToSleep  # sleep duration in hours
summary(hours.of.sleep)
hist(hours.of.sleep,main="")
boxplot(hours.of.sleep~Gender, ylab="Hours of Sleep")
# Haircut values (presumably price -- confirm with the dataset docs) by gender.
female.Haircut <- Haircut[Gender=="female"]
male.Haircut <- Haircut[Gender=="male"]
summary(female.Haircut)
summary(male.Haircut)
# Bedtime vs. sleep duration, with a least-squares fit overlaid.
plot(jitter(ToSleep),jitter(hours.of.sleep))
fit <- lm(hours.of.sleep~ToSleep)
fit
abline(fit)
# Writing a Function to Compute the t Statistic
# First computed "by hand" on two simulated N(50, 10) samples of size 10.
x <- rnorm(10,mean=50,sd=10)
y <- rnorm(10,mean=50,sd=10)
m <- length(x)
n <- length(y)
alpha=0.05
sp <- sqrt(((m-1)*sd(x)^2+(n-1)*sd(y)^2)/(m+n-2)) # pooled standard deviation
t.stat <- (mean(x)-mean(y))/(sp*sqrt(1/m+1/n))
# Pooled two-sample t statistic:
#   t = (mean(x) - mean(y)) / (s_p * sqrt(1/m + 1/n))
# where s_p is the pooled standard deviation of the two samples.
tstatistic = function(x,y) {
  size_x <- length(x)
  size_y <- length(y)
  # Pooled variance: degrees-of-freedom weighted average of the sample variances.
  pooled_var <- ((size_x - 1) * var(x) + (size_y - 1) * var(y)) / (size_x + size_y - 2)
  std_err <- sqrt(pooled_var) * sqrt(1 / size_x + 1 / size_y)
  (mean(x) - mean(y)) / std_err
}
# Two-sided two-sample t test decision at significance level alpha.
#
#   t     - observed t statistic
#   alpha - significance level
#   n, m  - the two sample sizes (degrees of freedom = n + m - 2)
#
# Returns TRUE when |t| exceeds the critical value, i.e. reject H0.
ttest = function(t,alpha,n,m) {
  tcrit <- qt(1-alpha/2, n+m-2)
  # Bug fix: compare the 't' argument. The original referenced the global
  # 't.stat', silently ignoring the statistic passed in.
  result <- abs(t) > tcrit
  return(result)
}
# Sanity check: the book's worked example for the pooled t statistic.
data.x <- c(1,4,3,6,5)
data.y <- c(5,4,7,6,10)
tstatistic(data.x, data.y)
# Monte Carlo estimate of the true significance level of the two-sample t test
# when both populations have mean 10 but very different shapes
# (normal sd 2 vs. exponential with rate 1/10).
alpha=.1
m <- 10
n <- 10
N <- 10000 # sets the number of simulations
n.reject <- 0 # counter of number of rejections
for (i in 1:N) {
  # Alternative population choices explored in the book (kept for reference):
  #x=rnorm(m,mean=0,sd=1) # simulates xs from population 1
  #y=rnorm(n,mean=0,sd=1) # simulates ys from population 2
  #x=rnorm(m,mean=0,sd=1)
  #y=rnorm(n,mean=0,sd=1)
  #x=rnorm(m,mean=0,sd=1)
  #y=rnorm(n,mean=0,sd=10)
  #x=rt(m,df=4)
  #y=rt(n,df=4)
  #x=rexp(m,rate=1)
  #y=rexp(n,rate=1)
  #x=rnorm(m,mean=10,sd=2)
  #y=rexp(n,rate=1/10)
  x=rnorm(m,mean=10,sd=2)
  y=rexp(n,rate=1/10)
  statistic=tstatistic(x,y) # computes the t statistic
  tcrit <- qt(1-alpha/2, n+m-2)
  if (abs(statistic) > tcrit) { # reject if |T| exceeds critical pt
    n.reject=n.reject+1
  }
}
true.sig.level <- n.reject/N # proportion of rejections
# Single Monte Carlo replicate: draw a normal sample (N(10, 2)) and an
# exponential sample (rate 1/10), both of size 10, and return their
# pooled two-sample t statistic.
my.tsimulation <- function() {
  alpha <- .1  # nominal significance level (not used by the statistic itself)
  m <- 10      # size of the normal sample
  n <- 10      # size of the exponential sample
  normal_draws <- rnorm(m, mean = 10, sd = 2)
  exponential_draws <- rexp(n, rate = 1 / 10)
  tstatistic(normal_draws, exponential_draws)
}
# One simulated t statistic, then the empirical sampling distribution from
# 10,000 replicates plotted against the nominal t(18) reference density
# (df = m + n - 2 = 18 for two samples of size 10).
my.tsimulation()
tstat.vector <- replicate(10000, my.tsimulation())
plot(density(tstat.vector),xlim=c(-5,8),ylim=c(0,.4),lwd=3)
curve(dt(x,df=18),add=TRUE)
legend(4,.3,c("exact","t(18)"),lwd=c(3,1))
|
e2124a116546c86b8096429420cb70afd12a07a2
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/multinets/examples/mode_transformation.Rd.R
|
2b0570e354852e1ecaa6db422fa8b51876071519
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 450
|
r
|
mode_transformation.Rd.R
|
# Example script extracted from the multinets package documentation for
# mode_transformation(); runs the documented example verbatim.
library(multinets)
### Name: mode_transformation
### Title: 2-mode to 1-mode transformation
### Aliases: mode_transformation
### ** Examples
# First, extract the mesolevel of the multilevel network
affiliation <- extract_mesolevel(linked_sim)
# To obtain both transformed networks
transformed <- mode_transformation(affiliation)
# To obtain just one transformed network
high_transformed <- mode_transformation(affiliation, which = "high")
|
6996b67151dafc47f97f6e6874913ec0ddec21b8
|
2c6cad6728b4ad2181981fb0ca5d7345d69b149b
|
/R/get_region_from_coordinates.R
|
03e89205f5615f0d5e724db9b230cf907b90ecde
|
[] |
no_license
|
matteodefelice/panas
|
a765b2cca4a51b41485dbe538d55a57ba5aff6f9
|
428a8b7cd2c1560f8ef9d54533e2c8e93e344c19
|
refs/heads/master
| 2022-04-05T12:25:29.429213
| 2020-02-27T20:46:54
| 2020-02-27T20:46:54
| 111,098,601
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,770
|
r
|
get_region_from_coordinates.R
|
#' Return the geographic region of a set of coordinates according to administrative boundaries.
#'
#' The function determines in which region a set of coordinates (stored in a \code{data.frame}) lie.
#' The administrative boundaries can be chosen from a set of pre-defined shapefiles bundled with the package.
#' @param obj A data frame of coordinates stored using lat/lon coordinates. See Details section for further details.
#' @param shapefile The shapefile to be used to aggregate the grid points: one of
#'   \code{NUTS0}, \code{NUTS1}, \code{NUTS2}, \code{eh2050}, \code{hybas05},
#'   \code{hybas06} or \code{WAPP}.
#' @param path_to_shapefile Currently unused; reserved for loading a
#'   user-supplied shapefile instead of one of the bundled ones.
#' @return A vector giving, for each input coordinate pair, the identifier of
#'   the region it falls in (\code{NA} when a point lies outside every region).
#' @author Matteo De Felice
#' @export
#' @details Details
#' \code{obj} must be a data frame containing two variables:
#' 1. Latitude: a double in the range -90, 90 named as lat, Lat, latitude, latit, etc.
#' 2. Longitude: a double in the range -180, 180 named as lon, Lon, longitude, long, etc.
#'
#' The shapefiles available are the following:
#' \itemize{
#' \item \code{NUTS0-2}: Data from EUROSTAT NUTS (small islands removed) https://ec.europa.eu/eurostat/web/gisco/geodata/reference-data/administrative-units-statistical-units/nutscountries_EU
#' \item \code{eh2050}: cluster as defined in the FP7 e-Highway2050 project
#' \item \code{hybas05}: HydroBASINS Level 5 (http://www.hydrosheds.org/page/hydrobasins)
#' \item \code{hybas06}: HydroBASINS Level 6 (http://www.hydrosheds.org/page/hydrobasins)
#' \item \code{WAPP}: WAPP catchments from the JRC LISFLOOD hydrological model
#' }
get_region_from_coordinates <- function(obj, shapefile = 'NUTS0', path_to_shapefile = NULL) {
  # Lookup table mapping each supported option to its bundled .rds file and the
  # attribute holding the region identifier (replaces a repetitive if/else chain).
  shapefile_catalog <- list(
    NUTS2   = list(file = "NUTS_RG_01M_2016_4326_LEVL_2.reduced.rds", id_field = 'NUTS_ID'),
    NUTS1   = list(file = "NUTS_RG_01M_2016_4326_LEVL_1.reduced.rds", id_field = 'NUTS_ID'),
    NUTS0   = list(file = "NUTS_RG_01M_2016_4326_LEVL_0.reduced.rds", id_field = 'NUTS_ID'),
    eh2050  = list(file = "eh2050_clusters.rds",  id_field = 'NUTS_ID'),
    hybas05 = list(file = "hybas_lev05.rds",      id_field = 'HYBAS_ID'),
    hybas06 = list(file = "hybas_lev06.rds",      id_field = 'HYBAS_ID'),
    WAPP    = list(file = "wapp_catchments.rds",  id_field = 'name')
  )
  if (!shapefile %in% names(shapefile_catalog)) {
    stop('Shape option not existent')
  }
  selected <- shapefile_catalog[[shapefile]]
  eumap = read_rds(system.file(selected$file, package = "panas"))
  shapefile_id_field = selected$id_field
  # Check if obj is a well-formed data frame
  if (!is.data.frame(obj)) {
    stop("Obj must be a data frame. ")
  } else if (ncol(obj) != 2) {
    stop("Obj must have two columns")
  } else if (!any(str_detect(str_to_lower(names(obj)), 'lat'))) {
    stop('One of the two variables in obj must contain lat')
  } else if (!any(str_detect(str_to_lower(names(obj)), 'lon'))) {
    stop('One of the two variables in obj must contain lon')
  }
  # Create a canonical data frame (lat, lon): normalise the column names so the
  # latitude column is always called 'lat' and the longitude column 'lon'.
  names(obj) = str_to_lower(names(obj))
  if (str_detect(names(obj)[1], 'lat')) {
    names(obj) = c('lat', 'lon')
  } else {
    names(obj) = c('lon', 'lat')
  }
  # Wrap longitudes expressed in [0, 360] into [-180, 180].
  if (any(obj$lon > 180)) {
    obj$lon[obj$lon > 180] = obj$lon[obj$lon > 180] - 360
  }
  # Convert pts to a Spatial object
  coordinates(obj) = c("lon", "lat")
  # CHECK if all shapefiles have proj4string
  proj4string(obj) = proj4string(eumap)
  # Calculate the spatial overlay of pts points over the shapefile
  over_target = over(obj, as(eumap, "SpatialPolygons"))
  return(eumap[[shapefile_id_field]][over_target])
}
|
c3ca92c57dfb8b10ef5fa4becd6ee27057174fcc
|
e6a5b9032f7d00fa54cfd2dfb68c34a09dd1ae6e
|
/plot1.R
|
9366df400485699f804a24f92673121fa710c5fd
|
[] |
no_license
|
sgmbartl/ExData_Plotting1
|
5780df80da1a1109ea3061ba8699b926c1f45c2c
|
3e82b6abc263ba17206485399becd93a392fba27
|
refs/heads/master
| 2020-04-14T21:27:36.243449
| 2019-01-04T16:27:51
| 2019-01-04T16:27:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 486
|
r
|
plot1.R
|
# Plot 1: histogram of Global Active Power for 1-2 Feb 2007 (Electric Power
# Consumption exploratory-data-analysis assignment); writes plot1.png.
data1 <- read.table("C:\\Users\\MeganBartlett\\Documents\\Coursera\\EDA\\household_power_consumption.txt", header=TRUE, sep=";", stringsAsFactors=FALSE, dec=".")
head(data1)
# Keep only the two days of interest (dates stored as d/m/yyyy strings).
data <- data1[data1$Date %in% c("1/2/2007","2/2/2007") ,]
# Coerce to numeric; suppressWarnings() hides the "NAs introduced by coercion"
# warning (presumably missing values are coded as non-numeric strings -- confirm).
globalactivepower <- suppressWarnings(as.numeric(data$Global_active_power))
# 480x480 PNG as required by the assignment.
png("plot1.png", width = 480, height = 480)
hist(as.numeric(globalactivepower), xlab = "Global Active Power (kilowatts)", main = "Global Active Power", col = "red")
dev.off()
|
5a1a34622b91e120e9e158a2c2b07aad44cf25f5
|
265efa7ecaae327e3107d6b6f998d081c9c664e0
|
/R/bea2Tab.r
|
effe6042b35df773b4ec22b5db35021fbce18cca
|
[
"CC0-1.0"
] |
permissive
|
us-bea/bea.R
|
2d01b081a82346d7eb38aa9d0e6374cf4d2c9258
|
7f4d069d05afcb699ae568fd11e9d2d9bc726956
|
refs/heads/master
| 2022-04-05T03:32:20.868193
| 2018-03-14T20:39:12
| 2018-03-14T20:39:12
| 67,622,525
| 102
| 47
|
NOASSERTION
| 2020-02-11T12:40:33
| 2016-09-07T16:01:39
|
R
|
UTF-8
|
R
| false
| false
| 5,673
|
r
|
bea2Tab.r
|
#' Convert BEA API httr response or list payload to data.table
#'
#' @param beaPayload An object of class 'list' or httr 'response' returned from beaGet() call to BEA API
#' @param asWide Return data.table in wide format (default: TRUE)
#' @param iTableStyle If "asWide = TRUE", setting "iTableStyle = TRUE" will return data.table in same format as shown on BEA website, with dates and attributes as column headers and series as rows; otherwise, results have series codes as column headers (default: TRUE)
#' @description Convert BEA API httr response or list payload to data.table. Also, converts LONG data frame (default API format - see bea2List results) to WIDE data (with years as columns) by default
#' @return An object of class 'data.table' containing data from beaGet(...) with custom attributes(BDT)$params.
#' @import data.table
#' @export
#' @examples
#' userSpecList <- list('UserID' = 'yourKey' ,
#' 'Method' = 'GetData',
#' 'datasetname' = 'NIPA',
#' 'Frequency' = 'A',
#' 'TableID' = '68',
#' 'Year' = 'X')
#' resp <- beaGet(userSpecList)
#' BDT <- bea2Tab(resp)
bea2Tab <- function(beaPayload, asWide = TRUE, iTableStyle = TRUE) {
  requireNamespace('data.table', quietly = TRUE)
  # Accept either a raw httr response (parse it first) or an already-parsed list.
  if('response' %in% class(beaPayload)){
    beaResponse <- bea.R::bea2List(beaPayload)
  } else {
    beaResponse <- beaPayload
  }
  # Short-circuit: if the API reported an error, return its description
  # instead of a data.table.
  if('error' %in% tolower(
    attributes(beaResponse)$names
    )
  ){
    return(beaResponse$Error$APIErrorDescription)
  }
  # NULL-initialise data.table NSE column names to silence R CMD check notes.
  DataValue <- NULL
  TimePeriod <- NULL
  Year <- NULL
  LineNumber <- NULL
  beaResults <- data.table::as.data.table(beaResponse)
  attributes(beaResults)$is.wide <- FALSE
  #Some datasets use "Year" while others use "TimePeriod"; you must remove both during reshape to wide
  TimeIntersect <- intersect(attributes(beaResponse)$detail$Dimensions$Name, c('TimePeriod', 'Year'))
  if(length(TimeIntersect) > 1){
    TimeColName <- 'TimePeriod'
  } else {
    TimeColName <- TimeIntersect
  }
  #Convert wide matrix to long
  #(less common as data comes as long, but needed for beaViz)
  if('data.frame' %in% class(beaPayload)){
    if(
      attributes(beaPayload)$is.wide == TRUE &&
      !asWide
    ) {
      beaTab <- beaResults
      id <- NULL
      # Columns named 'DataValue_<period>' carry the per-period values.
      dateColNames <- sort(attributes(beaTab)$names[
        grepl(
          'DataValue_',
          attributes(beaTab)$names,
          fixed = TRUE
        )
      ])
      dateVector <- sort(gsub(
        'DataValue_',
        '',
        dateColNames
      ))
      beaResults <- try(stats::reshape(
        beaTab,
        varying = dateColNames,
        v.names = 'DataValue',
        timevar = TimeColName,
        times = dateVector,
        direction = 'long')[,
          id:=NULL
        ]
      )
      # When both Year and TimePeriod exist, rebuild Year from TimePeriod.
      if(length(TimeIntersect) > 1){
        suppressWarnings(beaResults[, Year := substr(TimePeriod, 1, 4)])
      }
      attributes(beaResults)$is.wide <- FALSE
    }
  }
  #Convert long matrix to wide (if needed)
  if(
    asWide &&
    !is.null(attributes(beaResponse)$detail)
  ){
    beaTab <- beaResults
    eval(parse(text = paste0('data.table::setkey(beaTab, key = ', TimeColName, ')')))
    # Identifier columns = everything except the value, time, and note columns.
    noDV <- attributes(beaTab)$names != 'DataValue'
    noTS <- attributes(beaTab)$names != TimeIntersect
    noNotes <- attributes(beaTab)$names != 'NoteRef'
    #A weird fix to push NA values down to bottom for reshaping
    beaTab[, DataValue := ifelse(is.na(DataValue), 0, DataValue)]
    # beaResults <- try(stats::reshape(
    # beaTab,
    # timevar = 'TimePeriod',
    # idvar = attributes(beaTab)$names[noDV & noTS & noNotes],
    # direction = 'wide')
    # )
    # melt/dcast built via parse() because the id-column set is dynamic.
    eval(
      parse(
        text=paste0(
          'beaResults <- data.table::dcast(data.table::melt(beaTab, measure = "DataValue"),',
          paste(
            attributes(beaTab)$names[noDV & noTS & noNotes],
            collapse='+'
          ),
          ' ~ variable + ', TimeColName, ')'
        )
      )
    )
    # NIPA-family tables are ordered by their table line number.
    if(
      any(
        tolower(
          attributes(beaResponse)$params$ParameterValue
        ) %in%
        c('nipa', 'niunderlyingdetail', 'fixedassets')
      )
    ){
      beaResults <- beaResults[order(as.numeric(LineNumber))]
    }
    attributes(beaResults)$is.wide <- TRUE
    # iTableStyle = FALSE: transpose so series become columns and periods rows.
    if (!iTableStyle){
      beaTrans <- beaResults
      # beaStrMatrix <- t(
      beaColHeaders <-
        eval(
          parse(
            # text = paste0('beaTrans[ , .(', paste(
            text = paste0('beaTrans[ , paste(', paste(
              attributes(beaTrans)$names[
                !grepl('DataValue_', attributes(beaTrans)$names, fixed = T)
              ], collapse = ','
            ), ')]')
          )
        )
      # )
      beaNumMatrix <- t(
        eval(
          parse(
            text = paste0('beaTrans[ , .(', paste(
              sort(attributes(beaTrans)$names[
                grepl('DataValue_', attributes(beaTrans)$names, fixed = T)
              ]), collapse = ','
            ), ')]')
          )
        )
      )
      # headRows <- data.table(beaStrMatrix)
      # dataRows <- data.table(beaNumMatrix)
      # beaResults <- rbindlist(list(headRows, dataRows))
      colnames(beaNumMatrix) <- beaColHeaders
      beaResults <- data.table(beaNumMatrix)
      # Restore the time column (stripped of the 'DataValue_' prefix) and re-key.
      eval(parse(text = paste0("beaResults[, ", TimeColName, " := gsub('DataValue_',
        '', attributes(beaTrans)$names[
        grepl('DataValue_', attributes(beaTrans)$names, fixed = T)
        ],
        fixed = TRUE
        )];
        data.table::setkey(beaResults, key = ", TimeColName, ");")))
    }
  }
  # Carry the request parameters and detail metadata over to the result.
  attributes(beaResults)$params <- attributes(beaResponse)$params
  attributes(beaResults)$detail <- attributes(beaResponse)$detail
  if(is.null(attributes(beaResults)$params)){
    warning('Request response data not found; returned values may not contain successful BEA API response.')
  }
  return(beaResults)
}
|
787fc6c74f6865fe4bb47e760f046f22841866d3
|
5cbdb30c88a51c572482678a06f5afa47481c7bc
|
/src/kc_clustering.R
|
f1b15652fdee2decacc475325cc6e2efa4005f84
|
[] |
no_license
|
tjbencomo/cs191w
|
353b682bcd50272d1ae2a878f8905e04ec08adeb
|
bcd03f20abb1da9ef5cf9a283acc14fbe9f39b87
|
refs/heads/main
| 2023-04-14T00:17:22.934224
| 2021-04-26T07:18:50
| 2021-04-26T07:18:50
| 330,042,913
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,020
|
r
|
kc_clustering.R
|
# Cluster keratinocytes from the integrated Seurat object and inspect marker
# genes for the expected subpopulations (basal, cycling, differentiating, TSK).
library(Seurat)
library(dplyr)
library(ggplot2)
library(readr)
library(patchwork)

data_dir <- "data"
fp <- file.path(data_dir, "seurat", "keratinocytes.rds")
kcs <- readRDS(fp)

# Graph-based clustering on the harmony-corrected embedding.
kcs <- FindNeighbors(kcs, reduction = "harmony", dims = 1:30)
kcs <- FindClusters(kcs, resolution = .25)
kcs <- RunUMAP(kcs, reduction = "harmony", dims = 1:30)

# Fix: original had a duplicated assignment (`p1 <- p1 <- DimPlot(...)`).
p1 <- DimPlot(kcs, reduction = "umap", label = TRUE)
print(p1)
p2 <- DimPlot(kcs, reduction = "umap", label = TRUE, split.by = "orig.ident")
print(p2)

# Positive differentially-expressed markers per cluster; keep the top 15
# significant genes (adjusted p < .05) by average log fold-change.
kcs.markers <- FindAllMarkers(kcs, only.pos = TRUE, min.pct = 0.25, logfc.threshold = 0.25)
top_markers <- kcs.markers %>%
  filter(p_val_adj < .05) %>%
  group_by(cluster) %>%
  slice_max(avg_logFC, n = 15) %>%
  ungroup()

## Andrew's Markers
basal_genes <- c("KRT15", "CCL2", "COL17A1", "CXCL14", "DST",
                 "CCN1", "FTH1", "MT2A", "IGFBP5", "THBS2")
basal_featplot <- FeaturePlot(kcs, basal_genes) +
  plot_annotation(title = "Basal Marker Genes",
                  theme = theme(plot.title = element_text(hjust = 0.5)))
print(basal_featplot)

# Fix: removed the accidental duplicate "UBE2C" entry from the original list.
cycling_genes <- c("STMN1", "HIST1H4C", "TUBA1B", "PTTG1", "HMGB2",
                   "H2AFZ", "TOP2A", "UBE2C", "NUSAP1", "PCLAF")
cycling_featplot <- FeaturePlot(kcs, cycling_genes) +
  plot_annotation(title = "Cycling Marker Genes",
                  theme = theme(plot.title = element_text(hjust = 0.5)))
print(cycling_featplot)

diff_genes <- c("KRTDAP", "KRT10", "KRT1", "S100A7", "SBSN", "DMKN",
                "LYPD3", "KRT6A", "CALML5")
diff_featplot <- FeaturePlot(kcs, diff_genes) +
  plot_annotation(title = "Differentiating Marker Genes",
                  theme = theme(plot.title = element_text(hjust = 0.5)))
print(diff_featplot)

tsk_genes <- c("MMP10", "PTHLH", "FEZ1", "IL24", "KCNMA1", "INHBA",
               "MAGEA4", "NT5E", "LAMC2", "SLITRK6")
tsk_featplot <- FeaturePlot(kcs, tsk_genes) +
  plot_annotation(title = "TSK Marker Genes",
                  theme = theme(plot.title = element_text(hjust = 0.5)))
print(tsk_featplot)
|
e2169bb9f405535e8454ed36ad2a33ba7af6d7df
|
4e31c0926273615f5e401283ff0b5060e915bf2b
|
/scripts/ROC_curve_generation_code.R
|
45fd82ed4446b87649fbe12d9d81892eedb4d8df
|
[] |
no_license
|
vanhasseltlab/MetabolomicsEtiologyCAP
|
0c31a2f2f6feef6048cbfaa2b6f32420ed5a7d6b
|
7715991f11d2accd4cd02a45f8e85b7680d10b82
|
refs/heads/master
| 2023-03-16T18:15:28.435259
| 2021-03-08T13:10:42
| 2021-03-08T13:10:42
| 294,083,435
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,664
|
r
|
ROC_curve_generation_code.R
|
## ROC curves of cross validated elastic net models ----------------
## Elastic net without covariates
# Atypical versus other
elnet.atyp.other.roc <- ElnetROC(dat, elnet.atyp.other.results,
repeats = 100, nfolds = 5)
# Spneumoniae versus other
elnet.spneu.other.roc <- ElnetROC(dat, elnet.spneu.other.results,
repeats = 100, nfolds = 5)
# Viral versus bacterial
elnet.bac.vir.roc <- ElnetROC(dat, elnet.bac.vir.results,
repeats = 100, nfolds = 5)
## Elastic net with covariates age and sex
# Atypical versus other
cov.elnet.atyp.other.roc <- ElnetROC(dat, cov.elnet.atyp.other.results,
repeats = 100, nfolds = 5)
# Spneumoniae versus other
cov.elnet.spneu.other.roc <- ElnetROC(dat, cov.elnet.spneu.other.results,
repeats = 100, nfolds = 5)
# Viral versus bacterial
cov.elnet.bac.vir.roc <- ElnetROC(dat, cov.elnet.bac.vir.results,
repeats = 100, nfolds = 5)
## Elastic net with all covariates
# Atypical versus other
cov.all.elnet.atyp.other.roc <- ElnetROC(dat, cov.all.elnet.atyp.other.results,
repeats = 100, nfolds = 5)
# Spneumoniae versus other
cov.all.elnet.spneu.other.roc <- ElnetROC(dat, cov.all.elnet.spneu.other.results,
repeats = 100, nfolds = 5)
# Viral versus bacterial
cov.all.elnet.bac.vir.roc <- ElnetROC(dat, cov.all.elnet.bac.vir.results,
repeats = 100, nfolds = 5)
## Visualization of the results ----------------------------------------------
## Plot ROC curves of all models together ----
# for the comparison atyp-other
# Stack the mean ROC curves (fpr.mean/tpr.mean) of nine models -- logistic
# regressions (LR) and elastic nets (EN) -- into one long data frame for ggplot.
# NOTE(review): the label "LR: Glycine" is paired with `logit.glycylglycine`;
# confirm whether the label should read "Glycylglycine".
roc.plotdat.atyp.other <- bind_rows(data.frame(logit.glycylglycine[[3]],
                                               model = "LR: Glycine"),
                                    data.frame(logit.sdma[[3]],
                                               model = "LR: SDMA"),
                                    data.frame(logit.lpi18.1[[3]],
                                               model = "LR: LPI(18:1)"),
                                    data.frame(logit.sigatyp[[3]],
                                               model = "LR: Glycine, SDMA & LPI(18:1)"),
                                    data.frame(logit.sigatyp.age.sex[[3]],
                                               model = "LR: Glycine, SDMA, LPI(18:1), age & sex"),
                                    data.frame(logit.sigatyp.cov[[3]],
                                               model = "LR: Glycine, SDMA, LPI(18:1) & all covariates"),
                                    data.frame(elnet.atyp.other.roc[[2]],
                                               model = "EN: All metabolites"),
                                    data.frame(cov.elnet.atyp.other.roc[[2]],
                                               model = "EN: All metabolites, age & sex"),
                                    data.frame(cov.all.elnet.atyp.other.roc[[2]],
                                               model = "EN: All metabolites & all covariates"))
# Fix the legend order explicitly.
roc.plotdat.atyp.other$model <- factor(roc.plotdat.atyp.other$model,
                                       levels = c("LR: Glycine", "LR: SDMA", "LR: LPI(18:1)",
                                                  "LR: Glycine, SDMA & LPI(18:1)",
                                                  "LR: Glycine, SDMA, LPI(18:1), age & sex",
                                                  "LR: Glycine, SDMA, LPI(18:1) & all covariates",
                                                  "EN: All metabolites",
                                                  "EN: All metabolites, age & sex",
                                                  "EN: All metabolites & all covariates"))
# Step-plot of the mean ROC curves with a dotted chance diagonal.
roc.plot.atyp.other <- ggplot()+
  geom_step(data = roc.plotdat.atyp.other, aes(x = fpr.mean, y = tpr.mean,
                                               color = model, linetype = model))+
  geom_segment(aes(x = 0, y = 0, xend = 1, yend = 1),
               linetype = "dotted", color = "gray")+
  scale_color_lei(palette = "nine")+
  scale_linetype_manual(values = c(rep(2, 3), rep(5, 3), rep(1, 3)))+
  labs(x = "False Positive Rate", y = "True Positive Rate",
       colour = "Model", linetype = "Model",
       title = "ROC curves: atypical pathogen models")+
  theme(legend.title = element_text(size = 18),
        legend.text = element_text(size = 18))+
  theme_bw()
ggexport(roc.plot.atyp.other, filename = "figures/ROC_curves_atyp_other_combined.png",
         width = 1500, height = 800, res = 250)
## S. pneumoniae-versus-other comparison (elastic net models only).
roc.plotdat.spneu.other <- bind_rows(
  data.frame(elnet.spneu.other.roc[[2]], model = "EN: All metabolites"),
  data.frame(cov.elnet.spneu.other.roc[[2]], model = "EN: All metabolites, age & sex"),
  data.frame(cov.all.elnet.spneu.other.roc[[2]], model = "EN: All metabolites & all covariates"))
roc.plotdat.spneu.other$model <- factor(
  roc.plotdat.spneu.other$model,
  levels = c("EN: All metabolites",
             "EN: All metabolites, age & sex",
             "EN: All metabolites & all covariates"))
roc.plot.spneu.other <- ggplot() +
  geom_step(data = roc.plotdat.spneu.other,
            aes(x = fpr.mean, y = tpr.mean, color = model)) +
  geom_segment(aes(x = 0, y = 0, xend = 1, yend = 1),
               linetype = "dotted", color = "gray") +
  scale_color_lei(palette = "three") +
  # NOTE(review): no linetype aesthetic is mapped in this plot, so this
  # scale has no visible effect (and it supplies 7 values for 3 curves);
  # possibly left over from the atypical-pathogen plot -- confirm.
  scale_linetype_manual(values = c(rep(2, 5), 1, 1)) +
  labs(x = "False Positive Rate", y = "True Positive Rate",
       colour = "Model", linetype = "Model",
       title = "ROC curves: S.pneumoniae models") +
  theme_bw()
ggexport(roc.plot.spneu.other,
         filename = "figures/ROC_curves_spneu_other_combined.png",
         width = 1500, height = 800, res = 250)
## Viral-versus-bacterial comparison (elastic net models only).
roc.plotdat.bac.vir <- bind_rows(
  data.frame(elnet.bac.vir.roc[[2]], model = "EN: All metabolites"),
  data.frame(cov.elnet.bac.vir.roc[[2]], model = "EN: All metabolites, age & sex"),
  data.frame(cov.all.elnet.bac.vir.roc[[2]], model = "EN: All metabolites & all covariates"))
roc.plotdat.bac.vir$model <- factor(
  roc.plotdat.bac.vir$model,
  levels = c("EN: All metabolites",
             "EN: All metabolites, age & sex",
             "EN: All metabolites & all covariates"))
roc.plot.bac.vir <- ggplot() +
  geom_step(data = roc.plotdat.bac.vir,
            aes(x = fpr.mean, y = tpr.mean, color = model)) +
  geom_segment(aes(x = 0, y = 0, xend = 1, yend = 1),
               linetype = "dotted", color = "gray") +
  scale_color_lei(palette = "three") +
  # NOTE(review): as in the S. pneumoniae plot, no linetype aesthetic is
  # mapped here, so this scale is inert -- confirm.
  scale_linetype_manual(values = c(rep(2, 5), 1, 1)) +
  labs(x = "False Positive Rate", y = "True Positive Rate",
       colour = "Model", linetype = "Model",
       title = "ROC curves: viral pathogen models") +
  theme_bw()
ggexport(roc.plot.bac.vir,
         filename = "figures/ROC_curves_vir_other_combined.png",
         width = 1500, height = 800, res = 250)
## All three comparisons side by side with a shared bottom legend.
ggarrange(plotlist = list(roc.plot.atyp.other,
                          roc.plot.spneu.other,
                          roc.plot.bac.vir),
          ncol = 3,
          align = "h",
          font.label = list(size = 17),
          common.legend = TRUE,
          legend = "bottom") %>%
  ggexport(filename = "figures/ROC_curves_all_combined.png",
           width = 2800, height = 1100, res = 250)
## Separate per-model ROC panels for the atypical comparison
## (supplementary information).
ggarrange(logit.glycylglycine[[2]], logit.sdma[[2]],
          logit.lpi18.1[[2]], logit.sigatyp[[2]],
          logit.sigatyp.age.sex[[2]], logit.sigatyp.cov[[2]],
          elnet.atyp.other.roc[[1]], cov.elnet.atyp.other.roc[[1]],
          cov.all.elnet.atyp.other.roc[[1]]) %>%
  ggexport(filename = "figures/ROC_curves_atyp_other_separate.png",
           width = 2500, height = 2250, res = 250)
|
feaed213e6856ad84e69a615879fe512d5a4262d
|
e1f093f20200ed2bd820d4ee0884c87c73e41d66
|
/man/iris.nmds.Rd
|
02c0ce4fe0d74471ebdf22f7bfe3ba1fb220982b
|
[] |
no_license
|
cran/ecodist
|
8431a5659f02211c3131e282fbd2c90765285aa0
|
a34b199c4d70d5ee21e2d6abbd54d2a9729d7dd0
|
refs/heads/master
| 2022-05-13T06:14:42.563254
| 2022-05-05T05:50:08
| 2022-05-05T05:50:08
| 17,695,709
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 777
|
rd
|
iris.nmds.Rd
|
\name{iris.nmds}
\alias{iris.nmds}
\docType{data}
\title{Example for nmds}
\description{
An object of class nmds for use in the example for \code{\link{nmds}}. Many of the functions in \code{ecodist} take a long time to run, so prepared examples have been included.
}
\usage{data(iris.nmds)}
\format{
See \code{\link{nmds}} for current format specification.
}
\author{ Sarah Goslee }
\seealso{ \code{\link{nmds}} }
\examples{
data(iris)
iris.d <- dist(iris[,1:4])
### nmds() is time-consuming, so this was generated
### in advance and saved.
### set.seed(1234)
### iris.nmds <- nmds(iris.d, nits=20, mindim=1, maxdim=4)
### save(iris.nmds, file="ecodist/data/iris.nmds.rda")
data(iris.nmds)
# examine fit by number of dimensions
plot(iris.nmds)
}
\keyword{datasets}
|
c470f9ad267d6710984897d193c8f994ee1e3cb3
|
488854749b8d6c1e5f1db64dd6c1656aedb6dcbd
|
/R/htmlParse.R
|
a22ccd776a78d8871f052605f99f7ee960753117
|
[] |
no_license
|
cran/XML
|
cd6e3c4d0a0875804f040865b96a98aca4c73dbc
|
44649fca9d41fdea20fc2f573cb516f2b12c897e
|
refs/heads/master
| 2023-04-06T18:52:11.013175
| 2023-03-19T10:04:35
| 2023-03-19T10:04:35
| 17,722,082
| 4
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,053
|
r
|
htmlParse.R
|
# TRUE when `file` is a character string naming an http/ftp resource,
# i.e. something to fetch over the network rather than read from disk.
isURL =
function(file)
{
   is.character(file) && (startsWith(file, "http") || startsWith(file, "ftp"))
}
############
#XXXXXXXXX
# This is now replaced by copying xmlTreeParse.
htmlTreeParse <-
#
# HTML parser that reads the entire document tree into memory and converts
# it to an R object, using libxml.  The live code path simply delegates to
# xmlTreeParse() in HTML mode (the if(TRUE) block below); everything after
# that early return is retained legacy code and never executes.
#
# asText: treat `file` as the HTML text itself, not a file name.
#
function(file, ignoreBlanks = TRUE, handlers = NULL,
         replaceEntities = FALSE, asText = inherits(file, "AsIs") || !isURL && grepl("^<", file), # could have a BOM
         trim = TRUE,
         isURL = is.character(file) && grepl("^(http|ftp)", file),
         asTree = FALSE, useInternalNodes = FALSE,
         encoding = character(),
         useDotNames = length(grep("^\\.", names(handlers))) > 0,
         xinclude = FALSE, addFinalizer = TRUE, error = function(...){},
         options = integer(), parentFirst = FALSE)
{
  # Current implementation: parse via xmlTreeParse() with HTML settings and
  # tag the result as an HTML document.
  if(TRUE)
  {
    doc = xmlTreeParse(file, ignoreBlanks, handlers, replaceEntities, asText,
                       trim, validate = FALSE, getDTD = FALSE, isURL, asTree,
                       addAttributeNamespaces = FALSE, useInternalNodes,
                       isSchema = FALSE, fullNamespaceInfo = FALSE, encoding,
                       useDotNames, xinclude, addFinalizer, error,
                       isHTML = TRUE, options = options)
    class(doc) = c("HTMLInternalDocument", class(doc)[1])
    return(doc)
  }
  # ---- Unreachable legacy implementation below this point. ----------------
  # Multiple inputs only make sense when they are the document text itself.
  if(length(file) > 1) {
    file = paste(file, collapse = "\n")
    if(!missing(asText) && !asText)
      stop("multiple URIs passed to xmlTreeParse. If this is the content of the file, specify asText = TRUE")
    asText = TRUE
  }
  # Content starting with "<" is taken to be the document text.
  if(missing(asText) && substring(file, 1, 1) == "<")
    asText = TRUE
  if(!asText && missing(isURL)) {
    isURL <- length(grep("^(http|ftp)://", file, useBytes = TRUE, perl = TRUE))
  }
  # Treating `file` as a local file name: verify it exists before parsing.
  if(asText == FALSE && isURL == FALSE) {
    if(file.exists(file) == FALSE)
      stop(paste("File", file, "does not exist "))
  }
  if(!asText && !isURL)
    file = path.expand(file)
  # Toggle entity substitution for the duration of the parse.
  old = setEntitySubstitution(replaceEntities)
  on.exit(setEntitySubstitution(old))
  if(!is.logical(xinclude)) {
    if(inherits(xinclude, "numeric"))
      xinclude = bitlist(xinclude)
    else
      xinclude = as.logical(xinclude)
  }
  # Install the caller's error handler, restoring the previous one on exit.
  .oldErrorHandler = setXMLErrorHandler(error)
  on.exit(.Call("RS_XML_setStructuredErrorHandler", .oldErrorHandler, PACKAGE = "XML"), add = TRUE)
  ans <- .Call("RS_XML_ParseTree", as.character(file), handlers,
               as.logical(ignoreBlanks), as.logical(replaceEntities),
               as.logical(asText), as.logical(trim),
               FALSE, FALSE, as.logical(isURL), FALSE,
               as.logical(useInternalNodes), TRUE, FALSE, FALSE,
               as.character(encoding), as.logical(useDotNames), xinclude,
               error, addFinalizer, options, as.logical(parentFirst),
               PACKAGE = "XML")
  # Event-driven use: hand the (mutated) handlers back to the caller.
  if(!missing(handlers) & !as.logical(asTree))
    return(handlers)
  if(inherits(ans, "XMLInternalDocument")) {
    addDocFinalizer(ans, addFinalizer)
    class(ans) = c("HTMLInternalDocument", class(ans))
  }
  ans
}
#XXXXXX
# This is another version that doesn't seem to release the document. Weird. I can't seem to find
# out who is holding onto it.
myHTMLParse =
function(file, ignoreBlanks = TRUE, handlers = NULL,
         replaceEntities = FALSE,
         asText = inherits(file, "AsIs") || !isURL && grepl("^<", file), # could have a BOM
         trim = TRUE,
         isURL = is.character(file) && grepl("^(http|ftp)", file),
         asTree = FALSE, useInternalNodes = FALSE,
         encoding = character(),
         useDotNames = length(grep("^\\.", names(handlers))) > 0,
         xinclude = FALSE, addFinalizer = TRUE, error = function(...){})
{
  # Delegate to xmlTreeParse() in HTML mode and tag the result as an HTML
  # document.
  parsed = xmlTreeParse(file, ignoreBlanks, handlers, replaceEntities, asText,
                        trim, validate = FALSE, getDTD = FALSE, isURL, asTree,
                        addAttributeNamespaces = FALSE, useInternalNodes,
                        isSchema = FALSE, fullNamespaceInfo = FALSE, encoding,
                        useDotNames, xinclude, addFinalizer, error,
                        isHTML = TRUE)
  # NOTE(review): this keeps class(parsed)[2], whereas htmlTreeParse above
  # keeps class(doc)[1] -- confirm which element is intended.
  class(parsed) = c("HTMLInternalDocument", class(parsed)[2])
  return(parsed)
}
# Error handler that silently discards all parser errors.
hideParseErrors = function (...) NULL
# htmlTreeParse/htmlParse are xmlTreeParse specialized for HTML: identical
# formals, but with an HTML-aware error handler and isHTML = TRUE.
htmlTreeParse = xmlTreeParse
formals(htmlTreeParse)$error = as.name("htmlErrorHandler") # as.name("hideParseErrors")
formals(htmlTreeParse)$isHTML = TRUE
# htmlParse additionally returns internal (C-level) nodes by default.
htmlParse = htmlTreeParse
formals(htmlParse)$useInternalNodes = TRUE
# Split a URI string into its components (scheme, user, server, port, path,
# query, fragment) via libxml's URI parser.  Returns an object of class "URI".
parseURI =
function(uri)
{
   # An NA input is passed straight through, still tagged with the class.
   if(is.na(uri))
      return(structure(as.character(uri), class = "URI"))
   parts = .Call("R_parseURI", as.character(uri), PACKAGE = "XML")
   # libxml reports "no port" as 0; normalize that to NA.
   if(parts$port == 0)
     parts$port = as.integer(NA)
   class(parts) = "URI"
   parts
}
# Register the S3 classes so they can participate in S4 dispatch.
setOldClass("URI")
setOldClass("URL")
# Reassemble a parsed URI back into a single string.  A URI without a scheme
# is rendered as path[?query][#fragment]; otherwise the full
# scheme://[user@]server[:port]path[?query][#fragment] form is produced.
setAs("URI", "character",
      function(from) {
        qry <- if(from[["query"]] != "") sprintf("?%s", from[["query"]]) else ""
        frag <- if(from[["fragment"]] != "") sprintf("#%s", from[["fragment"]]) else ""
        if(from$scheme == "")
          sprintf("%s%s%s", from["path"], qry, frag)
        else
          sprintf("%s://%s%s%s%s%s%s%s",
                  from[["scheme"]],
                  from[["user"]],
                  if(from[["user"]] != "") "@" else "",
                  from[["server"]],
                  if(!is.na(from[["port"]])) sprintf(":%d", as.integer(from[["port"]])) else "",
                  from["path"], qry, frag)
      })
|
8654f6d6b35ceb17dc13a66bd30d51e3d28009df
|
b132a7b1db5a5c03565d9ceeda6f064e9761da3f
|
/R CODE FOR ECON494.R
|
22f21c6ff01db27d3417921e5f7a7b30f553a23d
|
[] |
no_license
|
pfoley1999/PROJECT
|
bd5838762fcdd109228590ff92a6fd46ac1025c6
|
ae222c4db3ca0b9d00f1e0c4beaba22899beb8a1
|
refs/heads/main
| 2023-04-05T23:39:08.617060
| 2021-04-19T05:19:46
| 2021-04-19T05:19:46
| 359,336,522
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,218
|
r
|
R CODE FOR ECON494.R
|
# Exploratory analysis of practice income data (ECON 494 project).
df <- Business.Analytics.Data...Sheet1
# Indicator: 1 when the practice is owned by a doctor, 0 otherwise.
df$isDoc <- as.integer(df$Ownership.Model == "Doc")
View(df)
summary(df)
# Pairwise correlations of total income (column 2) with selected columns.
cor(df[2], df[3])
cor(df[2], df[4])
cor(df[2], df[6])
cor(df[2], df[7])
cor(df[2], df[9])
# ...and the corresponding covariances.
cov(df[2], df[3])
cov(df[2], df[4])
cov(df[2], df[6])
cov(df[2], df[7])
cov(df[2], df[9])
# Histogram of total income with a fitted normal density overlaid.
hist(df$Total.Income, prob = TRUE, main = " ", xlab = " ", ylab = " ")
curve(dnorm(x, mean = mean(df$Total.Income), sd = sd(df$Total.Income)),
      col = "darkblue", lwd = 2, add = TRUE)
title(main = "Histogram of Total Income",
      xlab = "Total Income", ylab = "Density")
# NOTE(review): installing a package from inside an analysis script re-runs
# on every execution; consider removing once 'sm' is installed.
install.packages('sm')
library(sm)
options(scipen = 5)
# Income densities compared across ownership models.
sm.density.compare(df$Total.Income, df$Ownership.Model,
                   main = " ", xlab = " ", ylab = " ")
legend("topright",
       legend = c("Owned by Doctor", "Owned by Investor", "Owned by Company"),
       col = c("red", "green", "blue"), lty = 1:2, cex = 0.8,
       box.lty = 2, box.lwd = 2, box.col = "black")
title(main = "Total Income Density",
      xlab = "Total Income", ylab = "Density")
# Income versus chiropractic visits, colored by ownership, with an OLS fit.
plot(df$Total.Income ~ df$Chiropractic.Visits,
     main = " ", xlab = " ", ylab = " ",
     col = factor(df$Ownership.Model))
legend("topleft",
       legend = c("Owned by Doctor", "Owned by Investor", "Owned by Company"),
       col = c("red", "green", "black"), pch = 1, cex = 0.8,
       box.lty = 2, box.lwd = 2, box.col = "black")
abline(lm(df$Total.Income ~ df$Chiropractic.Visits), col = "blue")
title(main = "Total Income vs. Chiropractic Visits",
      xlab = "Chiropractic Visits", ylab = "Total Income")
# Boxplots of income faceted by the doctor-ownership indicator.
library(ggplot2)
p <- ggplot(df, aes(Total.Income)) +
  geom_boxplot() +
  facet_wrap(~isDoc)
theme_update(plot.title = element_text(hjust = 0.5))
p + ggtitle("Income by Ownership") +
  xlab("Total Income($)")
# Chiropractic visits versus patient referrals with an OLS fit.
plot(df$Chiropractic.Visits ~ df$Patient.Referrals,
     main = " ", xlab = " ", ylab = " ", xlim = c(0, 80))
abline(lm(df$Chiropractic.Visits ~ df$Patient.Referrals), col = "red")
title(main = "Chiropractic Visits vs. Patient Referrals",
      xlab = "Patient Referrals", ylab = "Chiropractic Visits")
# NOTE(review): hist() has no formula interface, so this call errors as
# written; perhaps plot() or boxplot() was intended -- confirm.
hist(df$Ownership.Model ~ df$Chiropractic.Visits, prob = TRUE,
     main = " ", xlab = " ", ylab = " ")
title(main = "Ownership Model vs Chiropractic Visits",
      xlab = "Ownership Model ", ylab = "Chiropractic Visits")
|
ea59710cf9ca18db92450c74f90bc138e856d5e2
|
e1986ad57cf85a086abb699dcb1a0ae23dd54be7
|
/inst/examples/data/transformation/boxcox_yeo.R
|
853d6f923471a7dab64bab4b2ec563964e175dd8
|
[] |
no_license
|
Kale14/mmstat4
|
4fb108216f768bc404a7f353621f4f129258ba0a
|
5ee81b9f5452e043b3a43708801997c72af3cda2
|
refs/heads/main
| 2023-03-29T22:25:45.841324
| 2021-04-07T09:15:41
| 2021-04-07T09:15:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 768
|
r
|
boxcox_yeo.R
|
# Side-by-side illustration of the Box-Cox and Yeo-Johnson power transforms
# for lambda in -5..5.  Box-Cox is only defined for positive inputs.
library("car")
x <- (-400:400)/200
xp <- x[x > 0]   # positive support for Box-Cox
mp <- 5          # largest |lambda| drawn
#
pdf("boxcox_yeo.pdf", width = 10, height = 7)
par(mfrow = c(1, 2))
# Left panel: Box-Cox, lambda = 0 first, then +/- 1..mp.  `val`/`col` record
# the lambdas and colors in legend order (negative, 0, positive).
plot(xp, bcPower(xp, 0), type = "l", xlim = range(x), main = "Box-Cox",
     ylim = range(x), xlab = "Original value", ylab = "Transformed value")
val <- 0
col <- 1
for (i in seq_len(mp)) {
  val <- c(-i, val)
  col <- c(i + 1, col)
  lines(xp, bcPower(xp, i), col = i + 1)
  val <- c(val, i)
  col <- c(col, i + 1)
  lines(xp, bcPower(xp, -i), col = i + 1)
}
legend("topleft", legend = val, col = col, lwd = 2)
# Right panel: Yeo-Johnson, defined on the whole real line, same lambdas.
plot(x, yjPower(x, 0), type = "l", xlim = range(x), main = " Yeo-Johnson",
     ylim = range(x), xlab = "Original value", ylab = "Transformed value")
for (i in seq_len(mp)) {
  lines(x, yjPower(x, i), col = i + 1)
  lines(x, yjPower(x, -i), col = i + 1)
}
legend("topleft", legend = val, col = col, lwd = 2)
dev.off()
|
92a14bbf0bf81e080ed0af51d841ab8f3823d91e
|
7c6f801419b0c8b8e6add9bf5702fe5687d4e882
|
/R/fileVersion.R
|
e318fda8d49644fe4be88732eca9ee6b2a8eef13
|
[] |
no_license
|
dsidavis/Rqpdf
|
777276db9d03369328896c7731f376a632de1075
|
59e17c9b4f48be7863bed32a328d92cc828696fc
|
refs/heads/master
| 2023-04-03T11:49:23.020859
| 2023-03-25T18:04:05
| 2023-03-25T18:04:05
| 187,417,186
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 234
|
r
|
fileVersion.R
|
# Return the PDF specification version of a document, given either a QPDF
# object or a path to a PDF file on disk.
pdfVersion =
function(doc)
{
   # Resolve the argument to an on-disk file name.
   if(is(doc, "QPDF"))
     filename = getDocname(doc)
   else
     filename = path.expand(doc)
   if(!file.exists(filename))
     stop("no such file ", filename)
   .Call("R_get_pdf_version", filename)
}
|
0c2ed3f113b648969af337915cefa936741b4f6a
|
694a0269e4e4fadb7f75a190fa77bdc8d882c0e5
|
/Boostrap/run_area_sp.R
|
e9d7059b9a71a29b446b470880de677b44dc52c1
|
[] |
no_license
|
sekigakuyo/ARE
|
3ce09854cb583909c4f3cd8767d6afd19cdb5e61
|
b0b9c4eaa8d771a1733c8da3f3e6fc67cc90c194
|
refs/heads/master
| 2021-08-23T06:33:49.430910
| 2017-12-03T22:42:30
| 2017-12-03T22:42:30
| 112,845,245
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 263
|
r
|
run_area_sp.R
|
# Fit the spatial-area Stan model to the bootstrap data.
library(rstan)
library(ggmcmc)
# Cache compiled models and run chains in parallel across available cores.
rstan_options(auto_write = TRUE)
options(mc.cores = parallel::detectCores())
# bootstrap.R is expected to define `data` for the sampler.
source("bootstrap.R")
model <- stan_model(file = "area_sp.stan")
fit <- sampling(model,
                data = data,
                seed = 123)
|
4c481c6528abfd21d670840ce99ea606ce9a05b1
|
9fdc71517ba832b251a8401c05aee9843ce7acfd
|
/R/TabaPartial.R
|
45c921cbd17d8fb618e22c7244213d1cb6520d42
|
[] |
no_license
|
cran/Taba
|
4437b442a8f89ef4667657ec869316b6d2c8d893
|
629a01094c15f0a2b5619018ba4aba83d5ad48b5
|
refs/heads/master
| 2021-07-10T07:23:13.297977
| 2021-03-31T18:50:02
| 2021-03-31T18:50:02
| 236,904,390
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,104
|
r
|
TabaPartial.R
|
#'
#' Robust Partial and Semipartial Correlation
#'
#' @description Calculates a partial or semipartial correlation using one of the
#' specified robust methods Taba linear or Taba rank correlation.
#' @usage taba.partial(x, y, ..., regress, method = c("taba", "tabarank", "tabwil", "tabwilrank"),
#' alternative = c("less", "greater", "two.sided"),
#' semi = c("none", "x", "y"), omega)
#' @param x A numeric vector of length greater than 2 must be same length as y and covariates
#' listed in ...
#' @param y A numeric vector of length greater than 2 must be same length as x and covariates
#' listed in ...
#' @param ... Numeric vectors used as covariates of length equal to x and y
#' @param regress A string variable "\code{linear}" for linear regression, "\code{logistic}" for binary
#' logistic regression, and "\code{poisson}" for Poisson regression
#' @param method A character string of \code{"taba"}, \code{"tabarank"}, \code{"tabwil"}, or
#' \code{"tabwilrank"} determining if one wants to calculate Taba linear, Taba rank
#' (monotonic), TabWil, or TabWil rank correlation, respectively. If no method is specified,
#' the function will output Taba Linear correlation.
#' @param alternative Character string specifying the alternative hypothesis must be one
#' of \code{"less"} for negative association, \code{"greater"} for
#' positive association, or \code{"two.sided"} for difference in association.
#' If the alternative is not specified, the function will default to a two sided test.
#' @param semi A character string specifying which variable (x or y) should be adjusted.
#' @param omega Numeric allowing the user to alter the tuning constant. If one is not specified,
#' the function will default to 0.45 for Taba and Taba rank, and 0.1 for TabWil and TabWil rank.
#' Range is between 0 and 1.
#' @details This function calculates the partial or semipartial association of two
#' numeric vectors, or columns of a matrix or data frame composed
#' of more than two numeric elements, adjusting for covariates of length equal to
#' x and y. Covariates are combined column-wise and can be numeric vectors, matrices,
#' or data frames with numeric cells. Each column in the matrix or data frame will be
#' treated as a different covariate, and must have different names from x and y.
#' Missing values in x, y, or any of the covariates are deleted row-wise.
#' The default for this function is a two sided test using Taba linear partial
#' correlation, with the tuning constant \code{omega} equal to 0.45 for Taba and
#' Taba rank, and 0.1 for TabWil and TabWil rank. Range is between 0 and 1.
#' The variable you are not controlling must be continuous when using semipartial correlation.
#' @return This function returns the robust association
#' between two numeric vectors, adjusting for specified covariates. In addition,
#' this function can provide the semipartial correlation, if specified.
#' @seealso
#' \code{\link{taba}} for calculating Taba linear or Taba rank (monotonic) correlations
#' \cr\code{\link{taba.test}} for testing Taba linear or Taba rank (monotonic) correlations
#' \cr\code{\link{taba.gpartial}} for generalized partial correlations
#' \cr\code{\link{taba.matrix}} for calculating correlation, p-value, and distance matrices
#' @references Tabatabai, M., Bailey, S., Bursac, Z. et al. An introduction to new robust linear
#' and monotonic correlation coefficients. BMC Bioinformatics 22, 170 (2021). https://doi.org/10.1186/s12859-021-04098-4
#' \cr{\cr{\doi{https://doi.org/10.1186/s12859-021-04098-4}}}
#' @examples
#' x = rnorm(100)
#' y = rnorm(100)
#' z1 = rnorm(100)
#' z2 = rnorm(100)
#' z3 = rnorm(100)
#' taba.partial(x, y, z1, z2, z3, method = "tabwilrank")
#' taba.partial(x, y, z2, alternative = "less", semi = "x")
#' @import robustbase
#' stats
#' @export taba.partial
taba.partial = function(x, y, ..., regress, method = c("taba", "tabarank", "tabwil", "tabwilrank"),
                        alternative = c("less", "greater", "two.sided"),
                        semi = c("none", "x", "y"), omega) {
  ## Robust partial / semipartial correlation between x and y, adjusting for
  ## the covariates supplied in `...` (full contract in the roxygen header).
  ## Returns list(correlation, t.statistic, p.value).
  ## ---- Resolve and validate arguments -----------------------------------
  # BUG FIX: in each validation branch below, statements that followed
  # stop() were unreachable and have been removed; the first message also
  # referred to a nonexistent 'methods' argument.
  if (missing(method)) {
    method <- "taba"
  }
  # pmatch() yields NA when the value does not (partially) match a choice.
  na.method <- pmatch(method, c("taba", "tabarank", "tabwil", "tabwilrank"))
  if (is.na(na.method)) {
    stop("invalid 'method' argument")
  }
  if (missing(regress)) {
    regress <- "linear"
  }
  na.regress <- pmatch(regress, c("linear", "logistic", "poisson"))
  if (is.na(na.regress)) {
    stop("invalid 'regress' argument")
  }
  if (missing(alternative)) {
    alternative <- "two.sided"
  }
  na.alternative <- pmatch(alternative, c("less", "greater", "two.sided"))
  if (is.na(na.alternative)) {
    stop("invalid 'alternative' argument")
  }
  if (missing(semi)) {
    semi <- "none"
  }
  na.semi <- pmatch(semi, c("none", "x", "y"))
  if (is.na(na.semi)) {
    stop("invalid 'semi' argument")
  }
  if (missing(omega)) {
    # NOTE(review): the documentation states a default of 0.1 for TabWil /
    # TabWil rank, but the code uses 0.05; kept at 0.05 to preserve
    # behavior -- confirm which value is intended.
    if (method == "taba" || method == "tabarank") {
      omega <- 0.45
    } else {
      omega <- 0.05
    }
  }
  if (omega > 1 || omega < 0) {
    stop("'omega' must be between 0 and 1")
  }
  ## ---- Coerce and sanity-check the inputs -------------------------------
  if (is.data.frame(y) || is.numeric(y)) {
    y <- as.matrix(y)
  }
  if (is.data.frame(x) || is.numeric(x)) {
    x <- as.matrix(x)
  }
  if (!is.matrix(x) && is.null(y)) {
    stop("supply both 'x' and 'y' or a matrix-like 'x'")
  }
  if (!(is.numeric(x) || is.logical(x))) {
    stop("'x' must be numeric")
  }
  stopifnot(is.atomic(x))
  if (!is.null(y)) {
    if (!(is.numeric(y) || is.logical(y)))
      stop("'y' must be numeric")
    stopifnot(is.atomic(y))
  }
  ## ---- Assemble the covariates ------------------------------------------
  Covariates <- cbind.data.frame(...)
  covlen <- length(Covariates)   # number of covariate columns
  if (covlen == 0) {
    stop("No covariates entered")
  }
  if (sum(sapply(Covariates, is.numeric)) != covlen) {
    stop("All covariates must be numeric")
  }
  ## ---- Row-wise deletion of missing data --------------------------------
  if (sum(is.na(x)) > 0 || sum(is.na(y)) > 0 || sum(is.na(Covariates)) > 0) {
    warning("Missing data included in dataset was removed row-wise. Results may not be accurate.")
    miss <- which(complete.cases(x, y, Covariates) == FALSE)
    x <- x[-miss]
    y <- y[-miss]
    # BUG FIX: use drop = FALSE so a single covariate keeps its column name
    # (the old as.data.frame(Covariates[-miss, ]) renamed a lone column).
    Covariates <- Covariates[-miss, , drop = FALSE]
  }
  k = length(x)
  if (k != length(y)) {
    # Typo fixed: message previously said 'Covariares'.
    stop("'x','y', and 'Covariates' must have the same length")
  }
  ## ---- Residualize x and/or y on the covariates -------------------------
  # For a semipartial correlation, the variable named in `semi` is left
  # unadjusted; otherwise each is replaced by its regression residuals.
  if (semi != "y") {
    xres <- switch(regress,
                   "linear" = lm(x ~ ., data = Covariates)$residuals,
                   "logistic" = glm(x ~ ., family = binomial(link = "logit"),
                                    data = Covariates)$residuals,
                   "poisson" = glm(x ~ ., family = poisson(link = "log"),
                                   data = Covariates)$residuals)
  } else {
    xres <- x
  }
  if (semi != "x") {
    yres <- switch(regress,
                   "linear" = lm(y ~ ., data = Covariates)$residuals,
                   "logistic" = glm(y ~ ., family = binomial(link = "logit"),
                                    data = Covariates)$residuals,
                   "poisson" = glm(y ~ ., family = poisson(link = "log"),
                                   data = Covariates)$residuals)
  } else {
    yres <- y
  }
  # The rank-based variants operate on the ranks of the residuals.
  if (method == "tabarank" || method == "tabwilrank") {
    xres <- rank(xres)
    yres <- rank(yres)
  }
  ## ---- Robust scale estimates (robustbase::Sn); guard against zeros -----
  if (Sn(xres) == 0 || Sn(yres) == 0) {
    s1 <- 1
    s2 <- 1
  } else {
    s1 <- Sn(xres)
    s2 <- Sn(yres)
  }
  ## ---- Correlation ------------------------------------------------------
  if (method == "taba" || method == "tabarank") {
    # Taba (rank) correlation: sech-weighted standardized deviations.
    medx <- median(xres)
    medy <- median(yres)
    a <- sum( ((1 / cosh(omega * ((xres - medx) / s1))) * ((xres - medx) / s1)) *
                ((1 / cosh(omega * ((yres - medy) / s2))) * ((yres - medy) / s2)) )
    b <- sum( ((1 / cosh(omega * ((xres - medx) / s1))) * ((xres - medx) / s1))**2 )
    c <- sum( ((1 / cosh(omega * ((yres - medy) / s2))) * ((yres - medy) / s2))**2 )
    tcor <- a / sqrt(b * c)
  } else {
    # TabWil (rank) correlation via medians of the rotated coordinates.
    u <- (xres - median(xres))/s1 + (yres - median(yres))/s2
    v <- (xres - median(xres))/s1 - (yres - median(yres))/s2
    a <- ((1 / cosh(omega * (median(abs(u))**2))) * (median(abs(u))**2)) - ((1 / cosh(omega * (median(abs(v))**2))) * (median(abs(v))**2))
    b <- ((1 / cosh(omega * (median(abs(u))**2))) * (median(abs(u))**2)) + ((1 / cosh(omega * (median(abs(v))**2))) * (median(abs(v))**2))
    tcor <- a / b
  }
  ## ---- t statistic and p-value (df = k - 2 - number of covariates) ------
  tTaba <- ( tcor * sqrt((k - 2 - covlen) / (1 - tcor**2)) )
  # NOTE(review): both one-sided branches use -abs(tTaba), so the sign of
  # the statistic does not influence the one-sided p-values -- behavior
  # preserved here, but confirm this is intended.
  if (alternative == "two.sided") {
    p <- 2 * pt(-abs(tTaba), (k - 2 - covlen))
  } else {
    if (alternative == "greater") {
      p <- pt(-abs(tTaba), (k - 2 - covlen), lower.tail = TRUE)
    } else {
      p <- pt(-abs(tTaba), (k - 2 - covlen), lower.tail = FALSE)
    }
  }
  TabaC <- list(correlation = tcor,
                t.statistic = tTaba,
                p.value = p )
  return(TabaC)
}
|
753a8e78c2b8cdb7d6da92e6ba93466346a17699
|
2a5f19d83ef4dc8ea9059743dd0f20789a9ae2d6
|
/R/template_find_approx.R
|
7d59bf46d9bc81d6ba40a9408f0722c42c8d1c6b
|
[] |
no_license
|
mimi3421/BiocNeighbors
|
55ad54e411b69f4ff7c36cabc3289f6bc96d25ef
|
be4372146b1659c226fdfa89bccb57ee858d34f8
|
refs/heads/master
| 2020-07-04T16:54:47.579645
| 2019-07-19T16:16:12
| 2019-07-19T16:16:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,658
|
r
|
template_find_approx.R
|
#' @importFrom BiocParallel SerialParam bpmapply
.template_find_approx <- function(X, k, get.index=TRUE, get.distance=TRUE,
    BPPARAM=SerialParam(), precomputed=NULL, subset=NULL,
    buildFUN, pathFUN, searchFUN, searchArgsFUN, distFUN, ...)
# Shared R template for approximate nearest-neighbour searching; assumes
# every backend uses a file-backed index.
#
# written by Aaron Lun
# created 14 December 2018
{
    # Build a temporary index when the caller did not precompute one; its
    # backing file is deleted when this function exits.
    if (is.null(precomputed)) {
        precomputed <- buildFUN(X, ...)
        on.exit(unlink(pathFUN(precomputed)))
    }
    # Rows (queries) to process; converted to zero-based for the workers.
    if (is.null(subset)) {
        row.ids <- seq_len(nrow(precomputed))
    } else {
        row.ids <- .subset_to_index(subset, precomputed, byrow=TRUE)
    }
    worker.jobs <- .assign_jobs(row.ids - 1L, BPPARAM)
    k <- .refine_k(k, precomputed, query=FALSE)
    # Arguments shared by every worker invocation.
    shared.args <- c(searchArgsFUN(precomputed),
                     list(dtype=bndistance(precomputed), nn=k))
    if (get.distance || get.index) {
        per.worker <- bpmapply(FUN=searchFUN, worker.jobs,
            MoreArgs=c(shared.args, list(get_index=get.index, get_distance=get.distance)),
            BPPARAM=BPPARAM, SIMPLIFY=FALSE)
        # Stitch the per-worker chunks back together.
        out <- list()
        if (get.index) {
            out$index <- .combine_matrices(per.worker, i=1, reorder=NULL)
        }
        if (get.distance) {
            out$distance <- .combine_matrices(per.worker, i=2, reorder=NULL)
        }
    } else {
        # Neither index nor distance matrix requested: report the distances
        # from distFUN as a flat vector.
        per.worker <- bpmapply(FUN=distFUN, worker.jobs, MoreArgs=shared.args,
            BPPARAM=BPPARAM, SIMPLIFY=FALSE)
        out <- unlist(per.worker, use.names=FALSE)
    }
    out
}
|
cd13350f99b5422ea88b3dd6d7942bef6ae72ee1
|
90b0dfc49246ee9caa3da88f07eccc6d21c655d4
|
/Rmdies/R/dataframe_2_csv_2_dataframe.R
|
184ea27417dcfa7c76bd4dbcac0af53226a24c1f
|
[] |
no_license
|
cgpu/staries
|
f74a96b71014041cb3b9f2033ab297a6c8d6838c
|
e89f0844121a3941189598b57be6051286bebd32
|
refs/heads/main
| 2023-09-03T15:56:15.601035
| 2023-06-09T05:22:35
| 2023-06-09T05:22:35
| 228,875,457
| 5
| 1
| null | 2023-09-04T15:36:18
| 2019-12-18T16:03:11
|
R
|
UTF-8
|
R
| false
| false
| 1,035
|
r
|
dataframe_2_csv_2_dataframe.R
|
# Round-trip check: write a data frame to CSV, read it back, and verify the
# dimensions and header survived.
# Preview the data frame about to be written:
head(my_fav_df)
# Record its dimensions for comparison after the round trip:
dim(my_fav_df)
# Output location: directory plus file name.
# NOTE(review): "home/..." has no leading "/", so this resolves relative to
# the working directory -- confirm the intended absolute path.
savedir = "home/cgpu/favorite_dir/"
FILE = paste0(savedir, 'favorite_filename', '.csv')
# Write as comma-separated text with a header row and no row names.
write.table(my_fav_df,
            file = FILE,
            append = FALSE,
            quote = FALSE,
            sep = ",",
            row.names = FALSE,
            col.names = TRUE)
# Reload the file to confirm it reads back cleanly.
temp_df <- read.csv(FILE,
                    header = TRUE,
                    stringsAsFactors = FALSE,
                    check.names = FALSE)
# This should equal dim(my_fav_df):
dim(temp_df)
# Visual check: header intact, no column shifted into row names, etc.
head(temp_df)
|
01248153d4da550d6b1551596c8281df2e5a8379
|
7b3bd95c45bc48a6a06fa7f31511c7818e62425a
|
/section-10/sec-10.R
|
69eddd625674a6f79a322445cda38d29d034d6fb
|
[] |
no_license
|
danhammer/ARE212
|
032d9f80f6522d1d0b96cff7f38080b72db73fbe
|
a2c43d556441c7bded8fb3299d568e51594daa2a
|
refs/heads/master
| 2020-05-17T17:37:58.000259
| 2013-04-28T17:00:01
| 2013-04-28T17:00:01
| 5,385,445
| 4
| 4
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,939
|
r
|
sec-10.R
|
# Harvest all journal set names from the CrossRef OAI-PMH interface,
# following resumption tokens until the server reports no more data.
library(XML)
library(RCurl)
library(stringr)
# NOTE(review): globally silencing error messages hides real failures, and
# the loop below deliberately ends via stop("no more data") -- this is
# notebook-style flow; confirm before reusing non-interactively.
options(show.error.messages = FALSE)
token <- "characters"   # sentinel for the first, token-less request
nameslist <- list()
while (is.character(token) == TRUE) {
  baseurl <- "http://oai.crossref.org/OAIHandler?verb=ListSets"
  if (token == "characters") {
    tok.follow <- NULL
  } else {
    tok.follow <- paste0("&resumptionToken=", token)
  }
  query <- paste0(baseurl, tok.follow)
  xml.query <- xmlParse(getURL(query))
  set.res <- xmlToList(xml.query)
  # Extract the human-readable set names from this page of results.
  names <- as.character(sapply(set.res[["ListSets"]], function(x) x[["setName"]]))
  nameslist[[token]] <- names
  # The final page carries no resumption token: extraction fails and the
  # try() returns a "try-error", ending the harvest.
  if (class(try(set.res[["request"]][[".attrs"]][["resumptionToken"]])) == "try-error") {
    stop("no more data")
  } else {
    token <- set.res[["request"]][[".attrs"]][["resumptionToken"]]
  }
}
# Flatten the per-page results and inspect economics-related titles.
allnames <- do.call(c, nameslist)
length(allnames)
econtitles <- as.character(allnames[str_detect(allnames, "^[Ee]conomic|\\s[Ee]conomic")])
length(econtitles)
sample(econtitles, 10)
# Count the journal titles in the global `allnames` vector matching `regex`.
countJournals <- function(regex) {
  titles <- as.character(allnames[str_detect(allnames, regex)])
  length(titles)
}
# Compare title counts across a handful of subject areas.
subj <- c("economic", "business", "politic", "environment", "engineer", "history")
regx <- c("^[Ee]conomic|\\s[Ee]conomic", "^[Bb]usiness|\\s[Bb]usiness",
          "^[Pp]olitic|\\s[Pp]olitic", "^[Ee]nvironment|\\s[Ee]nvironment",
          "^[Ee]ngineer|\\s[Ee]ngineer", "^[Hh]istory|\\s[Hh]istory")
subj.df <- data.frame(subject = subj, regex = regx)
subj.df[["count"]] <- sapply(as.character(subj.df[["regex"]]), countJournals)
# NOTE(review): ggplot2 is not attached by this script's library() calls --
# confirm it is loaded elsewhere before this line runs.
(g <- ggplot(data = subj.df, aes(x = subject, y = count)) + geom_bar())
convertTopic <- function(s) {
  ## Accepts a topic as a string and returns a regular expression that
  ## matches it with the first letter in either case,
  ## e.g. "economic" -> "[Ee]conomic".
  first.letter <- substr(s, 1, 1)
  ## BUG FIX: the original built the bracket class as [<upper><original>],
  ## so an already-capitalized input like "Economic" produced "[EE]conomic",
  ## which never matches the lowercase form.  Use an explicit upper/lower
  ## pair instead; lowercase inputs produce exactly the same result as
  ## before.
  replacement.str <- paste("[", toupper(first.letter), tolower(first.letter), "]", sep="")
  res <- paste(replacement.str, substring(s, 2), sep="")
  return(res)
}
|
a3976007d9ad554386b916ccbaa6dbb98f496d84
|
2bbdd4b17b79c42c8f94553a28b2e6a33ab10be1
|
/helpers/date_format.R
|
81c2ccd4811e084e5c815bf89c370b5fd9d80f54
|
[] |
no_license
|
Samantha-Lui/EZRecords_App
|
03fb3da8f09bd97e28083ec70a21a96156a773e6
|
175959f363ab70642da2f243571e78d97c1e2ebf
|
refs/heads/master
| 2021-01-23T21:11:37.221956
| 2017-06-16T00:09:37
| 2017-06-16T00:09:37
| 90,569,897
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 593
|
r
|
date_format.R
|
# Detects which of three supported layouts (YYYY-MM-DD, MM/DD/YYYY,
# MM/DD/YY) a date string uses.
# @param d A string representation of a date in one of the formats above.
# @return The matching strptime-style format string ("%Y-%m-%d",
#   "%m/%d/%Y", or "%m/%d/%y"), or NULL (invisibly) when none match.
date_format <- function(d){
  iso      <- '[1-2][0-9][0-9][0-9]-[0-1]?[0-9]-[0-2]?[0-9]'
  us_long  <- '[0-1]?[0-9]/[0-2]?[0-9]/[1-2][0-9][0-9][0-9]'
  us_short <- '[0-1]?[0-9]/[0-2]?[0-9]/[0-9][0-9]'
  if (grepl(iso, d)) {
    '%Y-%m-%d'
  } else if (grepl(us_long, d)) {
    '%m/%d/%Y'
  } else if (grepl(us_short, d)) {
    '%m/%d/%y'
  }
}
|
1ff63fedb7a9f2700d95dd63b53d3515d45a0fd5
|
6e4f004782186082b73025cda95f31bcae76afcf
|
/man/gl2bayescan.Rd
|
5d1eaad0c9e6aa9c1a3b418fe178bc7918454f06
|
[] |
no_license
|
carlopacioni/dartR
|
319fbff40a385ca74ab7490b07857b0b027c93a8
|
06614b3a328329d00ae836b27616227152360473
|
refs/heads/master
| 2023-08-23T00:32:10.850006
| 2021-09-08T06:52:44
| 2021-09-08T06:52:44
| 262,468,788
| 0
| 0
| null | 2020-05-09T02:07:08
| 2020-05-09T02:07:07
| null |
UTF-8
|
R
| false
| true
| 1,301
|
rd
|
gl2bayescan.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gl2bayescan.r
\name{gl2bayescan}
\alias{gl2bayescan}
\title{Convert a genlight object to format suitable for input to Bayescan}
\usage{
gl2bayescan(x, outfile = "bayescan.txt", outpath = tempdir(), verbose = NULL)
}
\arguments{
\item{x}{-- name of the genlight object containing the SNP data [required]}
\item{outfile}{-- file name of the output file (including extension) [default bayescan.txt]}
\item{outpath}{-- path where to save the output file [default tempdir(), mandated by CRAN]. Use outpath=getwd() or outpath="." when calling this function to direct output files to your working directory.}
\item{verbose}{-- verbosity: 0, silent or fatal errors; 1, begin and end; 2, progress log ; 3, progress and results summary; 5, full report [default 2 or as specified using gl.set.verbosity]}
}
\description{
The output text file contains the SNP data and relevant Bayescan command lines to guide input.
}
\examples{
gl2bayescan(testset.gl)
}
\references{
Foll M and OE Gaggiotti (2008) A genome scan method to identify selected loci appropriate for both dominant and codominant markers: A Bayesian perspective. Genetics 180: 977-993.
}
\author{
Arthur Georges (Post to \url{https://groups.google.com/d/forum/dartr})
}
|
86ce367de04e886829dfad4beff0b1f7e3d82e32
|
2678a37459391d54315cb926e73a5049e7f7e8b7
|
/src/combine_pr_files.R
|
80ba4e35b6be1e0c6e810d0deec3fb702637f5f9
|
[
"MIT"
] |
permissive
|
adolgert/linearstep.jl
|
296d402ea6e4c380c8ff86a4b388ee9ac6dfdbff
|
a3db330df66e04c1dff2ffa478c12f3f4da797e1
|
refs/heads/master
| 2022-10-12T00:09:12.467187
| 2020-06-12T03:58:43
| 2020-06-12T03:58:43
| 271,721,047
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,476
|
r
|
combine_pr_files.R
|
# We made monthly AR from monthly PR for districts, with draws, or just the median.
# This script combines the monthly data into yearly data.
# It also adds values for reporting.
library(data.table)
library(sf)
## Combine individual files
data_dir <- "/ihme/malaria_modeling"
project_dir <- fs::path(data_dir, "projects/uganda2020/outputs")
proj_in_dir <- fs::path(data_dir, "projects/uganda2020/inputs")
# The canonical ID numbers come from the subcounties map, not the districts map.
sc_map <- "/ihme/malaria_modeling/projects/uganda2020/outputs/uganda_subcounties_2019_topology_fix/uganda_subcounties_2019_topology_fix.shp"
sc_dt <- as.data.table(sf::st_set_geometry(sf::st_read(sc_map), NULL))
dist_dict <- unique(sc_dt[, .(District)])
dist_dict <- dist_dict[, .(name = District, id = .I)]
# Inputs from the draws_dir, generated by gen_scaled_ar.R.
shp_path <- fs::path(proj_in_dir, "uganda_districts_2019-wgs84/uganda_districts_2019-wgs84.shp")
draws_dir <- fs::path(project_dir, "adam_median_draws")
# All results of this script go into the out_dir.
out_dir <- fs::path(project_dir, "adam_median_summary")
# Get the district names for the summary from the canonical source, this random shapefile.
# There is another canonical source, the subcounties file.
ug_dt <- as.data.table(sf::st_set_geometry(sf::st_read(shp_path), NULL))
# Use unique because some districts have more than one polygon in the shapefile.
# dist_dict <- unique(ug_dt[, "DName2019"])
# dist_dict <- dist_dict[, .(name = DName2019, id = .I)]
file_list <- list.files(draws_dir)
dir.create(out_dir, showWarnings = FALSE, recursive = TRUE)
# dt <- rbindlist(lapply(file_list, function(file) {
# split <- strsplit(gsub(".csv", "", file), "_")[[1]]
# dist <- as.integer(split[1])
# draw <- as.integer(split[2])
# data.table(dist, draw)
# }))
#
# dplyr::setdiff(dist_draw_table, dt)
# Read in every single file at once with all data.
dt <- rbindlist(lapply(file_list, function(filename) {
dt <- fread(fs::path(draws_dir, filename))
}))
# This adds a NAME column where the name is the Dname2019 for the district.
dt <- merge(dt, dist_dict, by.x = "dist_id", by.y = "id", sort = FALSE)
all_output_in_one_file <- fs::path(out_dir, "adjusted_district_draws.csv")
write.csv(dt, all_output_in_one_file, row.names = FALSE)
## Generate annual PR summary by district and by year
dt[, y := as.integer(substring(date, 1, 4))]
sum_dt <- dt[, .(pr_median = median(pr), pr_lower = quantile(pr, 0.025), pr_upper = quantile(pr, 0.975)), by = .(name, y)]
write.csv(sum_dt, fs::path(out_dir, "annual_adjusted_district_pr_summary.csv"), row.names = FALSE)
## Generate bites per year by district and by year
bites_dt <- dt[, .(ar_sum = sum(ar)), by = .(name, y)]
write.csv(bites_dt, fs::path(out_dir, "annual_bites_per_year.csv"), row.names = FALSE)
## Generate annual AR summary by district
hold_dt <- copy(dt)
hold_dt[, c("date", "pr") := NULL]
hold_dt[, id := seq_len(.N), by = .(y, draw, dist_id)]
cast_dt <- dcast(hold_dt, y + draw + dist_id ~ id, value.var = "ar")
mat <- 1 - as.matrix(cast_dt[, 4:ncol(cast_dt)])
# This uses products of probabilities to get a correct yearly attack rate.
cast_dt[, annual_ar := 1 - apply(t(mat), 2, prod, na.rm = T)]
sum_ar <- cast_dt[, .(ar_median = median(annual_ar), ar_lower = quantile(annual_ar, 0.025), ar_upper = quantile(annual_ar, 0.975)), by = .(dist_id, y)]
write.csv(sum_ar, fs::path(out_dir, "annual_adjusted_district_ar_summary.csv"), row.names = F)
|
e9daa48cde412c07ad5e976328f78d5282bc96da
|
2656b0d7111005a8e2c5ecbb04c490d155768e6f
|
/tests/testthat/tests-decision_tree_cluster.R
|
c8d9ce5acbbd80958029b23a5760eb4a84c785cd
|
[] |
no_license
|
lnellums/LTBIscreeningproject
|
26e78ac1de874b348882eaffb10e0f1fdfe94c33
|
c08daa01ce82618dff21c523db89b8e84bfee2f6
|
refs/heads/master
| 2021-04-12T09:09:51.592708
| 2018-03-15T10:48:02
| 2018-03-15T10:48:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 817
|
r
|
tests-decision_tree_cluster.R
|
##to debug
# res <- lapply(scenario_parameters,
# decision_tree_cluster,
# N.mc = N.mc,
# n.uk_tb = n.uk_tb,
# n.exit_tb = n.exit_tb)
#
# xx <- decision_tree_cluster(parameters = scenario_parameters[[1]],
# n.uk_tb = 10,
# n.exit_tb = 10,
# cost_dectree = "osNode_cost_2009.Rds",
# health_dectree = "osNode_health_2009.Rds")
#
# xx <- decision_tree_cluster(parameters = scenario_parameters[[1]][1:3, ],
# n.uk_tb = 10,
# n.exit_tb = 10,
# cost_dectree = "osNode_cost_2009_pdistn.Rds",
# health_dectree = "osNode_health_2009_pdistn.Rds")
|
49401413179f9c42ce6d6839e15979dc37c2ef0e
|
4796f61e1d6772f6d6fc89314037b4a8af8de79a
|
/man/string_format_.Rd
|
c4c15b37c3441bbeccb192f1c71e72d44dd918df
|
[] |
no_license
|
cran/sprintfr
|
831fab8829f85fff127a1c93106a4caee7fae765
|
28ad4962033ff4855bd4e0cf80ca8704b99d09b1
|
refs/heads/master
| 2021-01-10T13:18:34.977536
| 2016-01-05T21:12:21
| 2016-01-05T21:12:21
| 49,090,167
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 520
|
rd
|
string_format_.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sprintfr.R
\name{string_format_}
\alias{string_format_}
\title{Standard evaluation version of string_format}
\usage{
string_format_(args, sep = "")
}
\arguments{
\item{args}{A list of unevaluated time format pieces}
\item{sep}{A separator for pasting pieces together.}
}
\description{
Useful for interactively building time formats.
}
\examples{
base_list = c("double", "integer")
string_format_(list(base_list[1], "' '", base_list[2]) )
}
|
b88e4f2e1302f476da14630469d6164c36568084
|
2d34708b03cdf802018f17d0ba150df6772b6897
|
/googleyoutubev3.auto/man/captions.list.Rd
|
83de3e98747402d763f7b41692d47fd46d56e65b
|
[
"MIT"
] |
permissive
|
GVersteeg/autoGoogleAPI
|
8b3dda19fae2f012e11b3a18a330a4d0da474921
|
f4850822230ef2f5552c9a5f42e397d9ae027a18
|
refs/heads/master
| 2020-09-28T20:20:58.023495
| 2017-03-05T19:50:39
| 2017-03-05T19:50:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,702
|
rd
|
captions.list.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/youtube_functions.R
\name{captions.list}
\alias{captions.list}
\title{Returns a list of caption tracks that are associated with a specified video. Note that the API response does not contain the actual captions and that the captions.download method provides the ability to retrieve a caption track.}
\usage{
captions.list(part, videoId, id = NULL, onBehalfOf = NULL,
onBehalfOfContentOwner = NULL)
}
\arguments{
\item{part}{The part parameter specifies a comma-separated list of one or more caption resource parts that the API response will include}
\item{videoId}{The videoId parameter specifies the YouTube video ID of the video for which the API should return caption tracks}
\item{id}{The id parameter specifies a comma-separated list of IDs that identify the caption resources that should be retrieved}
\item{onBehalfOf}{ID of the Google+ Page for the channel that the request is on behalf of}
\item{onBehalfOfContentOwner}{Note: This parameter is intended exclusively for YouTube content partners}
}
\description{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
}
\details{
Authentication scopes used by this function are:
\itemize{
\item https://www.googleapis.com/auth/youtube.force-ssl
\item https://www.googleapis.com/auth/youtubepartner
}
Set \code{options(googleAuthR.scopes.selected = c(https://www.googleapis.com/auth/youtube.force-ssl, https://www.googleapis.com/auth/youtubepartner)}
Then run \code{googleAuthR::gar_auth()} to authenticate.
See \code{\link[googleAuthR]{gar_auth}} for details.
}
\seealso{
\href{https://developers.google.com/youtube/v3}{Google Documentation}
}
|
039fe5baab0dc02259707dc8ea38958c168c6fdf
|
d3e06ba4c5ea75d81ea69edd4caceb32ed1016e9
|
/R/was_sim.R
|
1704e838ce36a0002f4ec1cf3c8dd5fa6d2da7b9
|
[] |
no_license
|
federicoandreis/wasim
|
93d33e1c60188a843f6eba5e47f4d7b2eae5da40
|
21e5624310015db837b1e021d623d91bfa54802e
|
refs/heads/master
| 2020-03-08T15:38:42.602115
| 2020-01-15T16:15:39
| 2020-01-15T16:15:39
| 128,215,803
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,948
|
r
|
was_sim.R
|
#' Runs the WAS simulation.
#'
#' @param J The number of runs.
#' @param S The number of steps.
#' @param des A list containing the sampling designs for each step.
#' @param des_pars A list containing the parameters for the sampling designs in \code{des}.
#' @param pre A list containing the predictive tools to compare.
#' @param pre_pars A list containing the parameters for the predictive tools in \code{pre}.
#' @param tar A list containing the targeting functions for each step/to compare?.
#' @param tar_pars A list containing the parameters for the targeting functions in \code{tar}.
#' @param B The number of Bootstrap runs to estimate the inclusion probabilities.
#' @param est_var Should the variance be also estimated via bootstrapped second order inclusion probabilities? Defaults to FALSE.
#' @param pop The population [...].
#' @return The simulation.
#' @examples
#' set.seed(42)
was_sim <- function(J, # number of sims (scalar)
S, # number of steps (scalar)
des, # sampling designs for each step (list)
des_pars, # parameters for the designs (list, specify structure)
pre, # predictive tools (list)
pre_pars, # parameters for the predictions (list)
tar, # targeting functions (list)
tar_pars, # parameters for the targeting functions (list)
B, # number or Bootstrap runs to estimate pis
est_var=FALSE, # if FALSE, use estimated second order inclusion probabilities, else use approximations
pop # population
) {
# test J, S, B, N, pop
N <- length(des_pars[[1]]$pik)
# test designs
# test prediction functions
# test targeting functions
# text message to recap sim parameters
# actual sim
res <- replicate(J,{
pik0 <- rep(ni[1]/N,N)
ss <- sample(N,size=ni[1],prob=pik0)
ss1 <- numeric(N)
ss1[ss] <- 1
spips <- par_sample(pik)
#spips <- UPsampford(pik)
ss.pop <- cbind(x,NA)
ss.pop[ss1==1,2] <- dd[ss1==1,3]
hd.pop <- impute.NN_HD(ss.pop)
hd.pop <- cbind(hd.pop,ss1,0,0,0)
uu1 <- target_foo(hd.pop[ss1!=1,2],tthr=thr,boost=c1)
uu2 <- target_foo(hd.pop[ss1!=1,2],tthr=thr,boost=c2)
uu3 <- target_foo(hd.pop[ss1!=1,2],tthr=thr,boost=c3)
hd.pop[ss1!=1,4] <- check_pik(ni[2]*uu1/sum(uu1),ni[2])
hd.pop[ss1!=1,5] <- check_pik(ni[2]*uu2/sum(uu2),ni[2])
hd.pop[ss1!=1,6] <- check_pik(ni[2]*uu3/sum(uu3),ni[2])
ss2 <- par_sample(hd.pop[,4])
ss2b <- par_sample(hd.pop[,5])
ss2c <- par_sample(hd.pop[,6])
# ss2 <- UPsampford(hd.pop[,4])
# ss2b <- UPsampford(hd.pop[,5])
hd.pop <- data.frame(hd.pop,ss2,ss2b,ss2c)
names(hd.pop) <- c('x','yhat1','ss1','pikk','pikb','pikc','ss2','ss2b','ss2c')
ssrs <- numeric(N)
ssrs[sample(N,n)] <- 1
list(cbind(hd.pop,ssrs,spips,pik0))
})
}
|
3f40d3ec9439e250dba0fa32736de1ea38485b67
|
0be682ee70c8c1f693030fc1d64238a2aecf2dca
|
/man/rm.col.Rd
|
dcd2c053f3382c5a538fad13ac518f390b48e853
|
[
"MIT"
] |
permissive
|
ahmeduncc/visdom-1
|
70c68306dd5f04a94d75c67d6404110f1def0501
|
b980b2bc9b79b6ae970ec07a42247b012f9b8241
|
refs/heads/master
| 2020-03-25T07:54:47.864245
| 2018-06-11T16:06:55
| 2018-06-11T16:06:55
| 143,587,878
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 226
|
rd
|
rm.col.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/util-base.R
\name{rm.col}
\alias{rm.col}
\title{remove named, index, or logical columns, if present, from a data.frame}
\usage{
rm.col(df, cols)
}
|
23ee8384ff1f23a3e15f023fa965153ea39fd795
|
0500ba15e741ce1c84bfd397f0f3b43af8cb5ffb
|
/cran/paws.analytics/man/quicksight_describe_topic_refresh_schedule.Rd
|
d6a647599160e4fc7d8d722836b0229aa7b7671c
|
[
"Apache-2.0"
] |
permissive
|
paws-r/paws
|
196d42a2b9aca0e551a51ea5e6f34daca739591b
|
a689da2aee079391e100060524f6b973130f4e40
|
refs/heads/main
| 2023-08-18T00:33:48.538539
| 2023-08-09T09:31:24
| 2023-08-09T09:31:24
| 154,419,943
| 293
| 45
|
NOASSERTION
| 2023-09-14T15:31:32
| 2018-10-24T01:28:47
|
R
|
UTF-8
|
R
| false
| true
| 843
|
rd
|
quicksight_describe_topic_refresh_schedule.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/quicksight_operations.R
\name{quicksight_describe_topic_refresh_schedule}
\alias{quicksight_describe_topic_refresh_schedule}
\title{Deletes a topic refresh schedule}
\usage{
quicksight_describe_topic_refresh_schedule(AwsAccountId, TopicId, DatasetId)
}
\arguments{
\item{AwsAccountId}{[required] The Amazon Web Services account ID.}
\item{TopicId}{[required] The ID of the topic that contains the refresh schedule that you want to
describe. This ID is unique per Amazon Web Services Region for each
Amazon Web Services account.}
\item{DatasetId}{[required] The ID of the dataset.}
}
\description{
Deletes a topic refresh schedule.
See \url{https://www.paws-r-sdk.com/docs/quicksight_describe_topic_refresh_schedule/} for full documentation.
}
\keyword{internal}
|
6423c1d864ed38526b176b8c681fca4b5289eb80
|
ff6198c86808f03b83d0476750f2ae79de2d9c85
|
/abcstats/man/dnn_grad.Rd
|
2c07a4a5efeee8fa698d3d4d0db7b9a429a5743b
|
[] |
no_license
|
snarles/abc
|
5b2c727fd308591be2d08461add2ae4e35c7a645
|
fefa42cf178fd40adca88966c187d0cd41d36dcb
|
refs/heads/master
| 2020-12-24T17:35:48.864649
| 2015-07-23T06:07:14
| 2015-07-23T06:07:14
| 39,470,434
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 400
|
rd
|
dnn_grad.Rd
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/RcppExports.R
\name{dnn_grad}
\alias{dnn_grad}
\title{Comput the gradient given a DNN and input matrix x, target y}
\usage{
dnn_grad(Wbs, x, y)
}
\arguments{
\item{Wbs}{Weight matrics of DNN, in a list}
\item{x}{Input, n x p matrix}
}
\description{
Comput the gradient given a DNN and input matrix x, target y
}
|
b6dbd770d50908f59e708318aca2bfa81f4f9708
|
7137ce8e9a50e13328bb5bf455e18c94ed9b9bcb
|
/man/weekly_frequency_table.Rd
|
bf5a23d0ae1e230dd0931075e340b98faa048da3
|
[
"LicenseRef-scancode-warranty-disclaimer",
"CC0-1.0",
"LicenseRef-scancode-public-domain"
] |
permissive
|
PatrickEslick/HASP
|
88ac4bc42408d6910dccc3e30531f627d6cb0a9e
|
98999b1d8435ac529353200bce368fbf729bcf74
|
refs/heads/master
| 2022-12-15T11:19:18.874970
| 2020-08-20T13:35:41
| 2020-08-20T13:35:41
| 287,529,519
| 0
| 0
|
NOASSERTION
| 2020-08-14T12:37:46
| 2020-08-14T12:37:46
| null |
UTF-8
|
R
| false
| true
| 1,137
|
rd
|
weekly_frequency_table.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/frequency_analysis.R
\name{weekly_frequency_table}
\alias{weekly_frequency_table}
\title{Create a table of weekly frequency analysis}
\usage{
weekly_frequency_table(gw_level_dv, date_col, value_col, approved_col)
}
\arguments{
\item{gw_level_dv}{daily groundwater level data
from readNWISdv}
\item{date_col}{name of date column.}
\item{value_col}{name of value column.}
\item{approved_col}{name of column to get provisional/approved status.}
}
\value{
a data frame of weekly frequency analysis
}
\description{
The weekly frequency analysis is based on daily values
}
\examples{
# site <- "263819081585801"
p_code_dv <- "62610"
statCd <- "00001"
# gw_level_dv <- dataRetrieval::readNWISdv(site, p_code_dv, statCd = statCd)
gw_level_dv <- L2701_example_data$Daily
weekly_frequency <- weekly_frequency_table(gw_level_dv,
date_col = "Date",
value_col = "X_62610_00001",
approved_col = "X_62610_00001_cd")
head(weekly_frequency)
}
|
c3b8b49d24645b22e81a14b4024fcfe89dbef9a0
|
6e5efc0b6b6b37c735c1c773531c41b51675eb10
|
/man/CleanDataMatrix.Rd
|
84bd5b76e1658bee8a0fa4251f5ef8ed2aa8c686
|
[
"GPL-2.0-or-later"
] |
permissive
|
xia-lab/MetaboAnalystR
|
09aa09c9e57d7da7d73679f5a515eb68c4158e89
|
9edbbd1e2edda3e0796b65adf440ad827abb7beb
|
refs/heads/master
| 2023-08-10T06:08:56.194564
| 2023-08-01T15:13:15
| 2023-08-01T15:13:15
| 109,994,826
| 268
| 165
|
MIT
| 2023-03-02T16:33:42
| 2017-11-08T15:38:12
|
R
|
UTF-8
|
R
| false
| true
| 337
|
rd
|
CleanDataMatrix.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/general_norm_utils.R
\name{CleanDataMatrix}
\alias{CleanDataMatrix}
\title{Clean the data matrix}
\usage{
CleanDataMatrix(ndata)
}
\arguments{
\item{ndata}{Input the data to be cleaned}
}
\description{
Function used in higher functinos to clean data matrix
}
|
b514b325e9583fe6e427c64121be39000ba70adc
|
2fa3917a2d7f6df6111c8e122f5fc2283622d7cf
|
/Source_code.r
|
8a0cdf0defbfa92a9346a97372d6e97c7fad9f87
|
[] |
no_license
|
kumarYashaswi/gs_quantify
|
5740aa28af07e270c11e25820803e56f0cb785b3
|
a5fe9b80e6f2593b8c7f1b18cb23105f4cf22d73
|
refs/heads/master
| 2021-09-01T06:40:45.385212
| 2017-12-25T12:11:17
| 2017-12-25T12:11:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,975
|
r
|
Source_code.r
|
setwd("H:/GS")
install.packages('randomForest')
library(randomForest)
train=read.csv("H:/GS/gcTrianingSet.csv")
test=read.csv("H:/GS/gcPredictionFile.csv")
View(train)
summary(train)
train$a = train$initialUsedMemory+train$initialFreeMemory
#total memory(initial used memory + initial free memory) before running a particular query)
train$b = train$finalUsedMemory+train$finalFreeMemory
#total memory(final used memory + final free memory) after running a particular query
summary(train$a)
df <-subset(train,train$gcRun=="TRUE",drop=TRUE)
#segregating that part of the dataset that has gcRun==TRUE
View(df)
mean(train$initialUsedMemory)
mean(df$initialUsedMemory)
df$c=((df$finalUsedMemory-df$initialUsedMemory)+(df$gcInitialMemory-df$gcFinalMemory) )
#a new variable that defines the amount of memory each query takes for those queries which gives TRUE for garbage collector
summary(df$c)
df1 <-subset(train,train$gcRun=="FALSE",drop=TRUE)
# A subset of those queries which give FALSE for garbage collector
View(df1)
df1$c=(-df1$initialUsedMemory+df1$finalUsedMemory)
#a new variable that defines the amount of memory each query takes for those queries which gives FALSE for garbage collector
summary(df1$c)
for(j in 1:2730)
{
if(train$gcRun[j]=="TRUE")
train$c[j]=(train$finalUsedMemory[j]-train$initialUsedMemory[j]+train$gcInitialMemory[j]-train$gcFinalMemory[j] )
else
train$c[j]=(train$finalUsedMemory[j]-train$initialUsedMemory[j] )
}
# a new variable that defines the amount of memory each query takes for all the training cases
summary(train$c)
summary(df$initialUsedMemory+df$c)
thres=min(df$initialUsedMemory+df$c)
# Threshold for classification of garbage collector ( thres is the minimum of the sum of initially used memory and the memory of each query )
View(test)
for(j in 1:1625)
{
tok <- subset(train,train$querytoken==test$querytoken[j],drop=TRUE)
test$c[j]=tok$c[1]
# In the subset of token( for each iteration tok contains only 1 query value and is used to determine best optimal value of query memory for a particular query ), only the initial values of each token is taken as best optimal query memory (since the tokens are initially used sequentially in the beggining, there would be less errors as we move down the dataset our unexplained error increases)
}
write.csv(df,"df.csv")
df2=read.csv("H:/GS/df.csv")
model1 <- randomForest(finalUsedMemory~cpuTimeTaken+c+initialUsedMemory,data=df,nodesize=18,ntree=55)
#random forest model applied for regression and final used memory as dependent variable with the suitable parameters and independent variables taken
for(j in 1:1624)
{
if((test1$initialUsedMemory[j]+test1$c[j])>thres)
{test1$gcRun[j]="TRUE"
#test1$initialUsedMemory[j+1]=predict(model1,newdata=data.matrix(test1[j,1:3]))
a=4.247186+2.774299
#test1$initialFreeMemory[j]= (a-test1$initialUsedMemory[j])
# test1$initialFreeMemory[j+1]=predict(model1,newdata=data.frame(test1[j,]))
test1$initialUsedMemory[j+1]=predict(model1,newdata=data.frame(test1[j,]))
#test1$initialUsedMemory[j+1]= (a-test1$initialFreeMemory[j+1])
}
else
{
test1$initialUsedMemory[j+1]=test1$initialUsedMemory[j]+test1$c[j]
test1$gcRun[j]="FALSE"
}
}
#whole for loop consist of sequentially transversing through testing data set and filling initialUsedMemory and gcRun using random forest model as regressor for initialUsedMemory and thres value as classifier
table(test1$gcRun)
#to measure precision and recall of our results
a=4.247186+2.774299
#assumption that our total memory of the container is initial Used value o+ intial Free Value of the testing dataset before any query was run
test1$initialFreeMemory= (a-test1$initialUsedMemory)
#determining Initial free memory before each query was run.
write.csv(test1,"ans.csv")
|
eb51b1ea097fec4c10d83db29c5d31f5f80aa3ad
|
d5bc0d93d48f79d9bc8055ee77d46b0dddd4f909
|
/RetiredVersions/FromSanghoon/backup/CSEA.RunCalculationsV2.4_scde.R
|
fd397b6cd3e9722e9dd8330a3d2307dd9838f85c
|
[] |
no_license
|
wangxlab/uniConSig
|
5bb1269291517553972f33b2f574b96df2023e2c
|
a46f5d8c219c98ac34f8c47b02dcb89276d7d6b9
|
refs/heads/master
| 2022-09-08T06:11:26.156033
| 2022-09-02T18:37:25
| 2022-09-02T18:37:25
| 142,683,021
| 1
| 2
| null | 2018-08-07T15:19:43
| 2018-07-28T14:18:57
|
R
|
UTF-8
|
R
| false
| false
| 4,033
|
r
|
CSEA.RunCalculationsV2.4_scde.R
|
#################################################################################
##required for all calculations
setwd("/zfs2/xiaosongwang/sal170/18_uniConSig_CSEA2/7-1_CSEAV2.3_newNorm_HSC_scSeq_UMI_nonUMI")
source("CSEA.modulesV2.4.R")
gmtfile="../ConceptDb/ConceptDb20190624.rmCa.gmt"
readCountGCTFile<-"../../19_SymSim/4_Dataset2ObservedCounts_test/GSE68981_SCSeq_FeatureCount_HumanGeneSymb_7744g177smp.gct"
clsFile<-"../scDataset/scData_quiescent77sVsActive100s_tabdelimit.cls"
targetListFile<-"../scDataset/LimmaOutput_newNorm_scData_ObsTPMlog2_QuiescVsActive_Down500g.txt"
output.dCSEA<-"dCSEAv2.3_Output_HallmarkC2CP_newNorm_scData_TrueTPMlog2_ByLimmaDw500g.txt"
output.wCSEAUp<-"wCSEAv2.4_UpOut_HallmarkC2CP_scde_scData_TrueTPMlog2_7447g_Q77s_A100s.txt"
output.wCSEADw<-"wCSEAv2.4_DownOut_HallmarkC2CP_scde_scData_TrueTPMlog2_7447g_Q77s_A100s.txt"
###############################################################################
##required for all calculations
compare.list=c(read_gmt("../PathwayDb/h.all.v6.2.symbols.gmt",min=10),read_gmt("../PathwayDb/c2.cp.v6.2.symbols.gmt",min=10))
feature.list=read_gmt(gmtfile,min=10)
feature.preCalfile=paste(gsub(".gmt","",gmtfile),".Ochiai.min10.preCal.gmt",sep="")
preCalmatrix<- as.matrix(unlist(readLines(feature.preCalfile)[-1]),col=1)
#######################################################################
###Perform dCSEA for scRNAseq
#######################################################################
####CalUniConSig for Cancer Gene
target.data<-read.table(targetListFile,stringsAsFactors=T,header=T,sep="\t",quote="") #15119 7
target.list<-target.data$Symbol
uniConSig=cal.uniConSig(target.list=target.list,feature.list=feature.list,preCalmatrix,minsize=10,weight.cut=0.05,power=1,root=1,ECNpenalty=0.5,method="Ochiai")
CSEA.result<-CSEA2(setNames(as.numeric(uniConSig$uniConSig), uniConSig$subjectID),compare.list,p.cut=0.05)
#write.table(CSEA.result,output.dCSEA,col.names=NA,row.names=T,sep="\t",quote=F)
#save.image("dCSEAv2.3_newNorm_scData_TrueTPMlog2_byLimmaDw500g.RData")
## PathwayAssociation
PathwayAssociationOut <- pathwayAssociation(topPathway=CSEA.result$Compare.List[1:20],compare.list,feature.list,preCalmatrix,minsize=10)
#write.table(PathwayAssociationOut ,output.up.assoc,col.names=NA,row.names=T,sep="\t",quote=F)
#######################################################################
###Perform wCSEA for scRNAseq
#######################################################################
compare.list=c(read_gmt("../PathwayDb/h.all.v6.2.symbols.gmt",min=5),read_gmt("../PathwayDb/c2.cp.v6.2.symbols.gmt",min=5))
scdeOut<-scdeDEG(readCountGCT=readCountGCTFile,clsFile=clsFile, min.lib.size=100,
min.reads=1,min.detected=1,SignPzscore=0.9) # input should be feature count data # this takes about 4 minutes.
#write.table(scdeOut,"scdeOut_HSCscSEQ_TrueReadCount_7447g_Quies77s_Active100s.txt", col.names=NA,quote=F,sep="\t")
weight=scdeOut$Signed.P.Zscore
names(weight)=rownames(scdeOut)
ks.result=run.weightedKS(weight,signed=T,feature.list,minsize=10,correct.overfit=FALSE,correct.outlier=TRUE) # correct.overfit=FALSE is default
uniConSig.result=cal.uniConSig.ks(up.ks=ks.result[[1]],down.ks=ks.result[[2]],preCalmatrix,feature.list,outfile,p.cut=0.01,q.cut=0.25,NES.cut=0,power=1,root=1,ECNpenalty=0.5,correct.overfit=FALSE) #correct.overfit=FALSE is default
up.CSEA.result<-CSEA2(target.score=setNames(as.numeric(uniConSig.result$up.uniConSig), uniConSig.result$subjectID),compare.list,p.cut=0.05,minsize=5)
write.table(up.CSEA.result, output.wCSEAUp,col.names=NA,row.names=T,sep="\t",quote=F)
down.CSEA.result<-CSEA2(target.score=setNames(as.numeric(uniConSig.result$down.uniConSig), uniConSig.result$subjectID),compare.list,p.cut=0.05,minsize=5)
write.table(down.CSEA.result,output.wCSEADw,col.names=NA,row.names=T,sep="\t",quote=F)
save.image("wCSEAv2.4_BySCDE_TrueTPMlog2.RData")
#load("wCSEAv2.3_BT20_FusionPositive9s_vs_Vector3s_TPMlog2.RData")
|
7e0927570410998cf8e974c1886a75d6b07575c3
|
cabe564fcd104530331e8025caddc6e449cb0024
|
/visualisation_PIMH_accept.R
|
32bc694792f57928f83a889f32c5a22e418e4fc9
|
[] |
no_license
|
EllaKaye/PMCMC
|
8c32dd53322679856571926d0ae902ab1cb6f0a0
|
8001bf859631b28607968c5107e47df636f94f4f
|
refs/heads/master
| 2016-08-12T23:59:06.333181
| 2015-11-19T23:21:45
| 2015-11-19T23:21:45
| 46,049,705
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,515
|
r
|
visualisation_PIMH_accept.R
|
library(ggplot2)
library(Rcpp)
source("data_generation.R")
source("PIMH_fast.R")
sourceCpp("SMC_fast.cpp")
# set up N and T
num_particles <- c(10, 50, 100, 200, 500, 1000, 2000)
np <- length(num_particles)
Time <- c(10, 25, 50, 100)
create_plot = function(sv, sw, Time, num_particles, iters){
res <- numeric(0)
for (t in Time) {
set.seed(1)
data <- generate_model2(t, sv = sv, sw = sw)
Y <- data$Y
for (n in num_particles) {
s <- PIMH_fast(n, Y, sv, sw, iters=iters)
res <- c(res, s$accept)
cat("n =", n, ", t =", t, "\n")
}
}
df <- data.frame(N = num_particles, T = as.factor(rep(Time, each = np)), accept = res)
return(df)
}
# case 1, sv^2 = 10, sw^2 = 10
df1 = create_plot(sqrt(10), sqrt(10), Time, num_particles, iters=10000)
# case 2, sv^2 = 10, sw^2 = 1
df2 = create_plot(sqrt(10), sqrt(1), Time, num_particles, iters=10000)
# save the results
save(df1, df2, file="data/accept_PIMH.RData")
# join the two data frames and add variance column for facetting
df <- rbind(cbind(df1, variance = "(a)"),
cbind(df2, variance = "(b)"))
# MAKE ANY CHANGES TO PLOT APPEARANCE AS YOU SEE FIT.
p.all <- ggplot(df, aes(N, accept)) +
geom_line(aes(group=T, colour=T)) +
geom_point(aes(group=T, colour=T, shape=T)) +
facet_grid(.~variance) +
ylim(0, 1) + ylab("Acceptance rate") +
xlab("Number of particles") +
theme_bw() +
scale_color_brewer(palette="Spectral")
pdf("fig/accept_pimh.pdf", width = 8, height = 3.5)
p.all
dev.off()
|
4e525877c7e489050f92f3476e231a7b34361303
|
7aecf9b42ada4437f3c77f66d10a69b187f9f8a2
|
/man/rad_decoding_haplotypes.Rd
|
ad497a32f3fa4c59235181234b27939863bce66a
|
[] |
no_license
|
Maschette/radiator
|
d7001f1117c09304aa703e79221b9fd3fe6916df
|
a323f6b6e15d66e92921db77549e9b69deda7d97
|
refs/heads/master
| 2020-04-17T02:20:02.883433
| 2019-01-17T05:39:27
| 2019-01-17T05:40:55
| 166,129,313
| 0
| 0
| null | 2019-01-16T23:49:41
| 2019-01-16T23:49:41
| null |
UTF-8
|
R
| false
| true
| 380
|
rd
|
rad_decoding_haplotypes.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/radiator_imputations_module.R
\name{rad_decoding_haplotypes}
\alias{rad_decoding_haplotypes}
\title{rad_decoding_haplotypes}
\usage{
rad_decoding_haplotypes(data = NULL,
parallel.core = parallel::detectCores() - 1)
}
\description{
separate snp group merged with rad_encoding_snp
}
\keyword{internal}
|
45c518f30119e6da7a6d174668e82d10c6daf94c
|
5937b9ed1bd24578c5ee43e431eeda91d2c6b83d
|
/src/functions.R
|
1e367da7abecdc696276002a146ad088da61ea5d
|
[
"BSD-2-Clause"
] |
permissive
|
orionzhou/epi
|
44c2a517b92ace321eda804e6fd616a92a4cd2fa
|
d6990f435074f108c3bdf39c17e9f59aaf0922a3
|
refs/heads/master
| 2023-03-07T05:17:21.004116
| 2021-02-21T09:05:18
| 2021-02-21T09:05:18
| 336,761,975
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 889
|
r
|
functions.R
|
#{{{ load & read
require(devtools)
require(GenomicFeatures)
load_all('~/git/rmaize')
require(progress)
require(ape)
require(ggtree)
require(ggforce)
require(Rtsne)
require(ggpubr)
require(lubridate)
options(dplyr.summarise.inform = F)
dirg = '~/data/genome'
dirp = "~/projects/epi"
dird = file.path(dirp, 'data')
dirr = '~/projects/stress/nf/raw'
dirf = file.path(dird, '95_figures', 'plots')
gcfg = read_genome_conf(genome='Osativa')
cols100 = colorRampPalette(rev(brewer.pal(n = 6, name = "RdYlBu")))(100)
cols100v = viridis_pal(direction=-1,option='magma')(100)
colbright <- function(col) {x = col2rgb(col); as.integer(.2126*x[1] + .7152*x[2] + .0722*x[3])}
cols36 = c(pal_ucscgb()(18)[8], pal_igv()(18), pal_ucscgb()(18)[c(1:7,9:18)])
brights36 = tibble(col=cols36) %>% mutate(bright=map_int(col,colbright)) %>%
mutate(b = ifelse(bright<128, 'white','black')) %>% pull(b)
#}}}
|
d74c2599bc8939c9aea6db123871eff99952712c
|
83d93f6ff2117031ba77d8ad3aaa78e099657ef6
|
/man/gbasicdialog.Rd
|
5570305f8b390454bb42b54b3efe4fe8a8ed8175
|
[] |
no_license
|
cran/gWidgets2
|
64733a0c4aced80a9722c82fcf7b5e2115940a63
|
831a9e6ac72496da26bbfd7da701b0ead544dcc1
|
refs/heads/master
| 2022-02-15T20:12:02.313167
| 2022-01-10T20:12:41
| 2022-01-10T20:12:41
| 17,696,220
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,263
|
rd
|
gbasicdialog.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dialogs.R
\name{gbasicdialog}
\alias{gbasicdialog}
\alias{.gbasicdialog}
\alias{visible.GBasicDialog}
\alias{dispose.GBasicDialog}
\title{Constructor for modal dialog that can contain an arbitrary widget}
\usage{
gbasicdialog(
title = "Dialog",
parent = NULL,
do.buttons = TRUE,
handler = NULL,
action = NULL,
...,
toolkit = guiToolkit()
)
.gbasicdialog(
toolkit,
title = "Dialog",
parent = NULL,
do.buttons = TRUE,
handler = NULL,
action = NULL,
...
)
\method{visible}{GBasicDialog}(obj, ...)
\method{dispose}{GBasicDialog}(obj, ...)
}
\arguments{
\item{title}{title for window}
\item{parent}{parent to display by}
\item{do.buttons}{FALSE to suppress buttons when no parent}
\item{handler}{handler called when \code{Ok} button invoked}
\item{action}{passed to handler for OK button}
\item{...}{ignored}
\item{toolkit}{toolkit}
\item{obj}{dialog object}
}
\value{
A \code{GBasicDialog} instance with a visible method
logical indicating which button was pushed (or TRUE if no buttons present)
}
\description{
The basic dialog is basically a modal window. To use there is a 3
step process: 1) Create a container by calling this constructor,
say \code{dlg}; 2) use \code{dlg} as a container for your
subsequent GUI; 3) set the dialog to be modal by calling
\code{visible(dlg)}. (One can't call \code{visible(dlg)
<- TRUE}.)
We overrided the basic use of \code{visible} for the
\code{gbasicdialog} container to have it become visible and modal
after this call. The better suited call \code{visible(dlg) <-
TRUE} does not work as wanted, for we want to capture the return
value.
dispose method for a basic dialog
}
\examples{
\dontrun{
## a modal dialog for editing a data frme
fix_df <- function(DF, ...) {
dfname <- deparse(substitute(DF))
w <- gbasicdialog(..., handler=function(h,...) {
assign(dfname, df[,], .GlobalEnv)
})
g <- ggroup(cont=w, horizontal=FALSE)
glabel("Edit a data frame", cont=g)
df <- gdf(DF, cont=g, expand=TRUE)
size(w) <- c(400, 400)
out <- visible(w)
}
m <- mtcars[1:3, 1:4]
fix_df(m)
}
}
\seealso{
\code{\link{gmessage}}, \code{\link{gconfirm}},
\code{\link{gbasicdialog}}, \code{\link{galert}}
}
|
09834011263d1340cd36ac84f08df24a78cabcaa
|
31bffc353b627342a401fc7738c4788a86dff53d
|
/old/Create_populate_yearRaster_stackDirectories.R
|
6b4dd8a6a053f60455fc7c4e0ce9801d07d85657
|
[] |
no_license
|
cboisvenue/RCodeSK
|
93e1c60430a73235e1ae496cbec0b403934032b2
|
7a6ea80cb432a054bb72fe3333820f0ad1fa4788
|
refs/heads/master
| 2021-11-28T10:23:21.137462
| 2021-11-25T18:17:09
| 2021-11-25T18:17:09
| 42,564,043
| 2
| 1
| null | 2015-11-24T21:03:14
| 2015-09-16T04:14:12
|
R
|
UTF-8
|
R
| false
| false
| 4,207
|
r
|
Create_populate_yearRaster_stackDirectories.R
|
#----------------------------------------------
# Creation of Directories by year with all rasters required as predictor variables for biomass spreading
# This script:
# 1-Creates year rasters for clipped study area (i.e.Crop30_PaFMA.tif)
# 2-Creates by-year directory structure (should only need to be done once)
# 3-Imports and renames clipped landsat images into the by-year directory structure
# Bsmiley
# May 14, 2015
#-----------------------------------------------------------
require(sp)
require(rgdal)
require(raster)
require(rgeos)
require(parallel)
#File Locations-----------------------------------------------------------------------------------
# NOTE(review): `=` is used for top-level assignment on the next lines while
# the rest of the script uses `<-`; harmless, but inconsistent.
timeINvariant = "H:/saskatchewan/spatialGrowth/timeInvariantRasters/"
# Years covered by the Landsat time series (29 years, 1984-2012).
years <- (1984:2012)
# One output directory path per year, e.g. .../timeVariantRasters/1984/
timeVARIANT = paste("H:/saskatchewan/spatialGrowth/timeVariantRasters/", years[1:29],"/",sep="")
#-----Create and export 'YEAR' raster for stack for each year and insert into newly created
# directory labelled for each year---------------------------------------------------------------------
#Add in study area raster (pixel values equal to 1)
# NOTE(review): setwd() with machine-specific absolute paths makes this
# script non-portable.
setwd("H:/Saskatchewan/bsmiley_work/Sask/Sask_area_datasets")
StudyArea <- raster("Crop30_PaFMA.tif") # raster where forestDistrict study area pixels =1
# Multiply input `x` (here: the study-area raster whose in-study pixels are 1)
# by a year value `b`, so each study-area pixel takes on that year.
create.year <- function(x, b) {
  x * b
}
# Multiply each year value by the study-area raster (in-study pixels = 1) in
# parallel, so that every in-study pixel carries its year as the pixel value.
year.ras <- mclapply(years, create.year, StudyArea)
# Stack the per-year rasters for export.
year.stack <- stack(year.ras)
# Create 'Year' (1984-2012) directories (SHOULD ONLY NEED TO DO THIS ONCE)-----------------
# seq_along() is used instead of 1:length(years) so the loop is safe even if
# `years` were empty.
for (i in seq_along(years)) {
  dir.create(paste("H:/Saskatchewan/Biomass_Growth/Time_Variant_rasters/", years[i], sep=""))
}
# End of create directories---------------------------------------------------------------
# Export each year raster into its designated year directory------------------------------
# NOTE(review): file.path() is handed one pasted path per layer here; confirm
# the filenames produced with bylayer=TRUE are the intended ones.
writeRaster(year.stack, file.path(paste(timeVARIANT, years,"/",sep="")),
            format='GTiff', bylayer=TRUE, datatype='INT2U', overwrite=TRUE)
# End of year raster/directory creation-----------------------------------------------------------------
# Add clipped LANDSAT rasters to folder structure------------------------------------------------------
# One list of clipped Landsat scene files per band (filenames ending c1..c6).
# (The original wrapped `indir` in a no-op paste(); removed.)
indir <- "H:/Saskatchewan/clipped"
b1.list <- list.files(indir, pattern="c1.tif$", full.names=FALSE)
b2.list <- list.files(indir, pattern="c2.tif$", full.names=FALSE)
b3.list <- list.files(indir, pattern="c3.tif$", full.names=FALSE)
b4.list <- list.files(indir, pattern="c4.tif$", full.names=FALSE)
b5.list <- list.files(indir, pattern="c5.tif$", full.names=FALSE)
b6.list <- list.files(indir, pattern="c6.tif$", full.names=FALSE)
# Create stacks of each band (file names are relative to indir).
setwd(indir)
b1_stack <- stack(b1.list)
b2_stack <- stack(b2.list)
b3_stack <- stack(b3.list)
b4_stack <- stack(b4.list)
b5_stack <- stack(b5.list)
b6_stack <- stack(b6.list)
# Export each band stack into the per-year directory structure.
writeRaster(b1_stack, file.path(paste(timeVARIANT,"/",sep=""), "b1"),
            format='GTiff', bylayer=TRUE, datatype='INT2U', overwrite=TRUE)
writeRaster(b2_stack, file.path(paste(timeVARIANT,"/",sep=""), "b2"),
            format='GTiff', bylayer=TRUE, datatype='INT2U', overwrite=TRUE)
writeRaster(b3_stack, file.path(paste(timeVARIANT,"/",sep=""), "b3"),
            format='GTiff', bylayer=TRUE, datatype='INT2U', overwrite=TRUE)
writeRaster(b4_stack, file.path(paste(timeVARIANT,"/",sep=""), "b4"),
            format='GTiff', bylayer=TRUE, datatype='INT2U', overwrite=TRUE)
writeRaster(b5_stack, file.path(paste(timeVARIANT,"/",sep=""), "b5"),
            format='GTiff', bylayer=TRUE, datatype='INT2U', overwrite=TRUE)
writeRaster(b6_stack, file.path(paste(timeVARIANT,"/",sep=""), "b6"),
            format='GTiff', bylayer=TRUE, datatype='INT2U', overwrite=TRUE)
# End of addition of clipped rasters to 'Year' directories----------------------------------------------
|
413d0ed4dc27332230161bd2ce0f2505b04944c0
|
a7972df8b00072ee0717bffc523cb9b6f7c96469
|
/man/ggplot_colors.Rd
|
79bbbbc5b3da9553a53b3ead32b4793d65c7ea27
|
[] |
no_license
|
suzanbaert/analysistools
|
7b897fe87b2642e515a3b2e3b0da446d84389aa3
|
10154c9213799f74a1b7ac3efb95442492b40aad
|
refs/heads/master
| 2020-03-22T07:52:34.116960
| 2018-07-12T16:13:59
| 2018-07-12T16:14:42
| 139,730,115
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 511
|
rd
|
ggplot_colors.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plots.R
\name{ggplot_colors}
\alias{ggplot_colors}
\title{Generate ggplot2 colours}
\usage{
ggplot_colors(n)
}
\arguments{
\item{n}{number of colours}
}
\description{
Generates the ggplot2 colours so they can be used in other plots. Solution taken from https://stackoverflow.com/questions/8197559/emulate-ggplot2-default-color-palette/8197703#8197703 and added to this package for easy personal use.
}
\examples{
ggplot_colors(3)
}
|
5b78d15d3929ec98312a3825e28a05f1e6d685be
|
7fe73397caa9f53c04909a027c779c569a9d49ab
|
/scripts/functions_scripts.R
|
2c20db8ef201635e5b69d4637f5a61c7881f903f
|
[
"MIT"
] |
permissive
|
NCRivera/CIS-627-Big-Data-Analytics-Capstone
|
84687475f47d00302439aa1a8c7d93164a32254a
|
d38772b90b4b3e1e6fd25afedc1b37e5504e99b6
|
refs/heads/main
| 2023-05-31T09:56:40.353713
| 2021-06-24T01:20:21
| 2021-06-24T01:20:21
| 369,005,304
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,475
|
r
|
functions_scripts.R
|
library(tidyverse)
# Fetch the Covid Act Now daily time series for one US state, or for all
# states at once.
#
# @param state Two-letter state abbreviation, or "US" (default) for the
#   all-states endpoint.
# @return A tibble of the time series with janitor-cleaned column names.
get_state_data <- function(state = "US"){
  # SECURITY: hard-coded API key committed to source control -- move this to
  # an environment variable (e.g. Sys.getenv("COVIDACTNOW_API_KEY")).
  api_key <- "82fc4cc0c0ba42608845036022e0975b"
  # "US" hits the all-states endpoint; anything else the per-state one.
  # (The original `else if (state != "US")` was redundant -- plain `else`.)
  if (state == "US") {
    api_data <- read.csv(file = str_glue("https://api.covidactnow.org/v2/states.timeseries.csv?apiKey={api_key}")) %>% tibble(.name_repair = janitor::make_clean_names)
  } else {
    api_data <- read.csv(file = str_glue("https://api.covidactnow.org/v2/state/{state}.timeseries.csv?apiKey={api_key}")) %>% tibble(.name_repair = janitor::make_clean_names)
  }
  return(api_data)
}
# Fetch the Covid Act Now daily time series at county level.
#
# @param cnty County name (matched as a substring), or NULL for all counties
#   in the given state / country.
# @param state Two-letter state abbreviation, or NULL (with cnty = NULL) for
#   every US county.
# @return A tibble of the time series with janitor-cleaned column names.
get_county_data <- function(cnty = "Miami-Dade", state = "FL"){
  # SECURITY: hard-coded API key committed to source control -- move this to
  # an environment variable.
  api_key <- "82fc4cc0c0ba42608845036022e0975b"
  # Scalar NULL checks: use short-circuiting `&&`, not vectorised `&`.
  if (is.null(cnty) && is.null(state)) {
    api_data <- read.csv(file = str_glue("https://api.covidactnow.org/v2/counties.timeseries.csv?apiKey={api_key}")) %>% tibble(.name_repair = janitor::make_clean_names)
  } else if (is.null(cnty) && !is.null(state)) {
    api_data <- read.csv(file = str_glue("https://api.covidactnow.org/v2/county/{state}.timeseries.csv?apiKey={api_key}")) %>% tibble(.name_repair = janitor::make_clean_names)
  } else if (!is.null(cnty) && !is.null(state)) {
    api_data <- read.csv(file = str_glue("https://api.covidactnow.org/v2/county/{state}.timeseries.csv?apiKey={api_key}")) %>%
      tibble(.name_repair = janitor::make_clean_names) %>%
      filter(str_detect(string = county, pattern = cnty))
  } else {
    # Originally this combination fell through and failed with a cryptic
    # "object 'api_data' not found"; fail fast with a clear message instead.
    stop("`state` must be supplied when `cnty` is given", call. = FALSE)
  }
  return(api_data)
}
|
796cdf15d1df34c209cc573fbf1a2f701a5cdab0
|
77247f728372ac1d43582a3f2b0671b86bbf742d
|
/DoubleSampling/Ex7.2/solution.R
|
cdf588226b87467cf2a9112afa5a03f4b6f280cb
|
[] |
no_license
|
szcf-weiya/SamplingTechniques
|
e48f4f59e13567ecf73792997987af4afdde0379
|
9945424c8cd6b2bc297b1f6433fc8a48263f87b0
|
refs/heads/master
| 2021-01-20T15:13:25.710328
| 2017-06-07T06:06:39
| 2017-06-07T06:06:39
| 90,738,997
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 369
|
r
|
solution.R
|
# Ratio estimation under double sampling (Ex 7.2).
# x: auxiliary variable observed on the subsample; y: study variable.
x <- c(1500, 1200, 2000, 1800, 1300, 3000, 800, 1400, 1600, 1100)
y <- c(2000, 1800, 2800, 2500, 1900, 5800, 1300, 2000, 2300, 1600)
# Sample means and the ratio estimate R-hat = ybar / xbar.
xbar <- mean(x)
ybar <- mean(y)
Rhat <- ybar/xbar
# First-phase mean of x, first-phase sample size, second-phase sample size.
xbar.hat <- 1500
nhat <- 100
n <- 10
# Sample variances and the sample covariance of (x, y).
sx2 <- var(x)
sy2 <- var(y)
syx <- cov(x, y)
# Ratio estimate of the mean of y, its estimated variance and standard error.
y.RD <- Rhat*xbar.hat
y.RD.var <- sy2/n + (1/n - 1/nhat)*(Rhat^2*sx2 - 2*Rhat*syx)
y.RD.s <- sqrt(y.RD.var)
|
e9ebbbc4e42e387288e07c7f836df7b91dddf276
|
3ba9f835e5b7b94144aa3a7bc6bf2522b63934af
|
/data/surveySample.R
|
7bca5e0d373dbfa2fdc24740d34b7c5bf6495d00
|
[] |
no_license
|
asRodelgo/shinyProductivity
|
1672cf4a7d4f5803a0dcdc7e1d867f62fc336c16
|
17148231d0b1ef21c33f9319e3881380273bbbcd
|
refs/heads/master
| 2020-04-16T13:53:01.302503
| 2016-10-26T21:16:20
| 2016-10-26T21:16:20
| 52,633,279
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,380
|
r
|
surveySample.R
|
library(foreign) # read/write Stata (.dta) files
library(readstata13) # read data in version 13 of Stata
library(survey)
library(dplyr)
# Load the Stata (v13) firm-level dataset.
data <- read.dta13("data/TFPR_and_ratios.dta")
# stratified survey for non-tfp indicators (id=~1 as this is stratified sampling with no clusters)
# At country level, strata don't play any role and the stratified sample mean is equivalent
# to weighted mean.
# Analysis parameters: country-year, broad sector, survey variable, ISIC code.
cou <- "Argentina2010"
sect <- "Manufacturing"
variable <- "n2a_d2"#"d2_l1"
isic_code <- 19
# Subset to the chosen cell and keep id, weight, stratum, and the variable of
# interest renamed to `var` so the formulas below stay generic.
dataForSampling <- data %>%
  filter(country == cou & sector_MS == sect & isic == isic_code) %>%
  select(idstd,wt,strata,var = one_of(variable))
# dataForSampling <- dataForSampling %>%
#   group_by(isic) %>%
#   mutate(meanS = mean(var,na.rm=TRUE))
# no clusters (id=~1)
myDesign <- svydesign(id=~1,data=dataForSampling,weights=~wt,strata=~strata)
# avoid errors as some strata contain only 1 PSU (primary sample unit)
options(survey.lonely.psu = "certainty")
# Survey-weighted (stratified) mean of the chosen variable...
svymean(~var,myDesign,na.rm=TRUE)
# ...compared with the plain weighted mean (equivalent at country level, per
# the note above).
weighted.mean(dataForSampling$var,dataForSampling$wt,na.rm=TRUE)
# data(api)
#
# ## one-stage cluster sample
# dclus1<-svydesign(id=~dnum, weights=~pw, data=apiclus1, fpc=~fpc)
#
# svymean(~api00, dclus1, deff=TRUE)
# svymean(~factor(stype),dclus1)
# svymean(~interaction(stype, comp.imp), dclus1)
#
|
e7ebff4b2f916bdf13920cbdd317a54a6adddc17
|
e9f162d2bc3a8f0fe0f636310f3f7d17ba55268d
|
/CPD_analysis.R
|
6de7675a0653718177b11221e0e645d0af145c80
|
[] |
no_license
|
josieparis/guppy-colour-polymorphism
|
bad711036b13e8ec197209c6b11dc161fdfa23fc
|
799d43b40d32a431055171d4f95c9d784b58778d
|
refs/heads/main
| 2023-04-07T02:49:20.884488
| 2022-07-06T16:03:05
| 2022-07-06T16:03:05
| 379,895,857
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,948
|
r
|
CPD_analysis.R
|
# new general run
# NOTE(review): rm(list=ls()) / graphics.off() wipe the user's session state;
# prefer running the script in a fresh R session instead.
rm(list=ls()) #clears all variables
objects() # clear all objects
graphics.off() #close all figures
#install.packages("changepoint")
# Attach all required packages in one pass.
lib<-as.vector(c("changepoint","data.table","devtools","rlang","tidyr","ggplot2"))
lapply(lib,library,character.only=TRUE)
## working directory
setwd("~/Dropbox/Sussex_Guppies/Analyses/pool-seq/change-point/")
### Read in the chr12 freqs
### from here:
chr12 <- data.frame(fread("~/Dropbox/Sussex_Guppies/Analyses/pool-seq/poolfstat/outputs/final/chr12_AFs_final.tsv", header=TRUE))
## Polarise the allele frequencies to the IF9 pool: wherever IF9's frequency
## is below 0.5, flip every pool's frequency (f -> 1 - f) at that site.
## remove pos
tmp <- chr12 %>% select(IF10_AF, IF6_AF, IF8_AF, IF9_AF)
## save the IF9 freqs BEFORE flipping, so all columns use the same mask:
IF9_freqs<-tmp$IF9_AF
for(i in colnames(tmp)){
  tmp[IF9_freqs < 0.5,i]<-(1-tmp[IF9_freqs < 0.5,i])
}
## add IF9 polarised back in back in:
chr12_dd <- cbind(chr12,tmp)
colnames(chr12_dd) <- c("chr", "pos", "REF", "ALT", "IF10_AF", "IF6_AF", "IF8_AF", "IF9_AF",
                        "IF10_polar", "IF6_polar", "IF8_polar", "IF9_polar")
## keep only position and the polarised columns
chr12 <- chr12_dd %>% select(pos,IF10_polar,IF9_polar,IF8_polar,IF6_polar)
## Perform CPD: extract each pool's polarised frequencies as a plain numeric
## vector for the changepoint package.
IF6_chr12 <- chr12 %>% select(IF6_polar)
IF6_chr12 <- as.numeric(IF6_chr12$IF6_polar)
IF8_chr12 <- chr12 %>% select(IF8_polar)
IF8_chr12 <- as.numeric(IF8_chr12$IF8_polar)
IF9_chr12 <- chr12 %>% select(IF9_polar)
IF9_chr12 <- as.numeric(IF9_chr12$IF9_polar)
IF10_chr12 <- chr12 %>% select(IF10_polar)
IF10_chr12 <- as.numeric(IF10_chr12$IF10_polar)
# Binary segmentation (up to Q=10 change points, SIC penalty) for a change
# in mean allele frequencies, one run per pool.
IF6mean=cpt.mean(IF6_chr12, method = "BinSeg", penalty = "SIC", Q=10)
# change in mean allele frequencies
IF8mean=cpt.mean(IF8_chr12, method = "BinSeg", penalty = "SIC", Q=10)
# change in mean allele frequencies
IF9mean=cpt.mean(IF9_chr12, method = "BinSeg", penalty = "SIC", Q=10)
# change in mean allele frequencies
IF10mean=cpt.mean(IF10_chr12, method = "BinSeg", penalty = "SIC", Q=10)
# Indices of the detected change points for each pool.
IF6mean@cpts
IF8mean@cpts
IF9mean@cpts
IF10mean@cpts
|
1e3f2d2b675e24cded2a454e0ed8b4a35475b7f4
|
4f67e136275682b7262174c9bb16cca82cba9ed0
|
/modelBuilder.R
|
529d31facd49ff8d8ab589e2dc750162a3290c99
|
[] |
no_license
|
julienfbeaulieu/nextWord
|
3c164dc927cf161e2c1cc4b71a241355a1f0da1c
|
637949f1a2e55c60eff3dd9ff07ea9e3d299ee24
|
refs/heads/master
| 2021-01-01T05:10:05.183259
| 2016-04-14T12:35:51
| 2016-04-14T12:35:51
| 56,174,154
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,554
|
r
|
modelBuilder.R
|
# NOTE(review): setwd() with a machine-specific absolute path makes this
# script non-portable; consider relative paths or here::here().
setwd("C:/Users/Julien/Documents/GitHub/nextWord/Model construct")
##input data
# Truncated n-gram frequency tables (columns Ngram, Frequency -- see the
# Eval functions below) plus the held-out 6-gram test set.
ngram.1 <- read.csv("ngram.1.trunc.csv")
ngram.2 <- read.csv("ngram.2.trunc.csv")
ngram.3 <- read.csv("ngram.3.trunc.csv")
ngram.4 <- read.csv("ngram.4.trunc.csv")
testSet.6 <- read.csv("testSet.6.csv")
##Evaluation models
# Maximum-likelihood estimate of each ngram's final word under the unigram
# model: count(word) / total unigram count. Unseen words (NA lookups) and
# other non-finite ratios are mapped to 0. Reads the global table `ngram.1`.
unigramEval <- function(ngramSet) {
  library(stringr)
  last_word <- word(ngramSet, -1)
  total_count <- sum(ngram.1$Frequency)
  word_count <- ngram.1[match(last_word, ngram.1$Ngram), "Frequency"]
  mle <- word_count / total_count
  mle[!is.finite(mle)] <- 0
  mle
}
# MLE of the last word given the single preceding word:
# count(bigram) / count(preceding unigram); non-finite ratios become 0.
# Reads the global tables `ngram.1` and `ngram.2`.
bigramEval <- function(ngramSet) {
  library(stringr)
  prev_word <- word(ngramSet, -2, -2)
  gram_2 <- word(ngramSet, -2, -1)
  prev_counts <- ngram.1[match(prev_word, ngram.1$Ngram), "Frequency"]
  gram_counts <- ngram.2[match(gram_2, ngram.2$Ngram), "Frequency"]
  mle <- gram_counts / prev_counts
  mle[!is.finite(mle)] <- 0
  mle
}
# MLE of the last word given the previous two words:
# count(trigram) / count(leading bigram); non-finite ratios become 0.
# Reads the global tables `ngram.2` and `ngram.3`.
trigramEval <- function(ngramSet) {
  library(stringr)
  prev_2 <- word(ngramSet, -3, -2)
  gram_3 <- word(ngramSet, -3, -1)
  prev_counts <- ngram.2[match(prev_2, ngram.2$Ngram), "Frequency"]
  gram_counts <- ngram.3[match(gram_3, ngram.3$Ngram), "Frequency"]
  mle <- gram_counts / prev_counts
  mle[!is.finite(mle)] <- 0
  mle
}
# MLE of the last word given the previous three words:
# count(quadrigram) / count(leading trigram). Unseen histories (NA lookups)
# and other non-finite ratios are mapped to 0. Reads the global tables
# `ngram.3` and `ngram.4`.
#
# NOTE: the original file defined this function twice, byte-for-byte
# identically; the redundant second definition has been removed.
quadrigramEval <- function(ngramSet) {
  library(stringr)
  history <- word(ngramSet,-4,-2)
  quadrigram <- word(ngramSet,-4,-1)
  history.freq <- ngram.3[match(history,ngram.3$Ngram),"Frequency"]
  quadrigram.freq <- ngram.4[match(quadrigram,ngram.4$Ngram), "Frequency"]
  MLE <- quadrigram.freq/history.freq
  MLE[!is.finite(MLE)] <- 0
  return(MLE)
}
# MLE of the last word given the previous four words:
# count(5-gram) / count(leading 4-gram); non-finite ratios become 0.
# NOTE(review): this script only loads ngram.1..ngram.4; `ngram.5` must be
# supplied elsewhere in the session or this function will error -- confirm.
pentagramEval <- function(ngramSet) {
  library(stringr)
  prev_4 <- word(ngramSet, -5, -2)
  gram_5 <- word(ngramSet, -5, -1)
  prev_counts <- ngram.4[match(prev_4, ngram.4$Ngram), "Frequency"]
  gram_counts <- ngram.5[match(gram_5, ngram.5$Ngram), "Frequency"]
  mle <- gram_counts / prev_counts
  mle[!is.finite(mle)] <- 0
  mle
}
# MLE of the last word given the previous five words:
# count(6-gram) / count(leading 5-gram); non-finite ratios become 0.
# NOTE(review): `ngram.5` and `ngram.6` are not loaded by this script; they
# must come from elsewhere in the session -- confirm before calling.
hexagramEval <- function(ngramSet) {
  library(stringr)
  prev_5 <- word(ngramSet, -6, -2)
  gram_6 <- word(ngramSet, -6, -1)
  prev_counts <- ngram.5[match(prev_5, ngram.5$Ngram), "Frequency"]
  gram_counts <- ngram.6[match(gram_6, ngram.6$Ngram), "Frequency"]
  mle <- gram_counts / prev_counts
  mle[!is.finite(mle)] <- 0
  mle
}
##Accuracy function
# Compute top-1 and top-3 accuracy of a prediction matrix.
#
# predictions: character matrix, one row per test case, columns = ranked
#              guesses (at least three).
# lastWord:    character vector of true next words, one per row.
# Returns a 1x2 matrix with columns top1Accuracy and top3Accuracy.
checkAccuracy <- function(predictions, lastWord) {
  n_cases <- nrow(predictions)
  # Number of exact hits in each of the first three ranked columns.
  hits <- vapply(1:3, function(k) sum(predictions[, k] == lastWord), numeric(1))
  top1Accuracy <- hits[1] / n_cases
  top3Accuracy <- sum(hits) / n_cases
  cbind(top1Accuracy, top3Accuracy)
}
##Prediction model
###Test set
# Draw a reproducible sample of 1000 six-grams; the first five words form
# the prediction history and the sixth is the held-out target word.
set.seed(456)
testSet.sample <- sample(testSet.6$Ngram,1000)
library(stringr)
history.6 <- word(testSet.sample,1,5)
lastWord.6 <- word(testSet.sample,-1)
###Prediction function
# Predict the next word by linear interpolation of the unigram..quadrigram
# MLE models.
#
# @param history Character string of preceding words; the component models
#   use at most the last three.
# @param lambda Numeric weights for the four models (unigram, bigram,
#   trigram, quadrigram); defaults to equal weights.
# @return Character vector of the top-3 candidate next words.
InterpolatedPred3 <- function(history, lambda = rep(1/4,4)) {
  ## Candidate list: words that follow the last history word in any bigram.
  library(stringr)
  candidateTest <- word(history,-1)
  candidateTest.regex <- paste0("^",candidateTest," ")
  candidates <- word(ngram.2[grepl(candidateTest.regex,ngram.2$Ngram),"Ngram"],-1)
  ## Pad with common words so we can always return three suggestions.
  if (length(candidates) < 3) {
    candidates <- c(candidates,"the","to","and")
  }
  ## Candidate continuations to score.
  solutions <- paste(history,candidates)
  ## Score under each n-gram model.
  mle <- data.frame(unigramEval(solutions),
                    bigramEval(solutions),
                    trigramEval(solutions),
                    quadrigramEval(solutions))
  ## Interpolate with the lambda weights, rank, and return the top three.
  ## (Removed the original's unused `top3.MLE` local.)
  mle.IM <- as.matrix(mle) %*% (lambda)
  reorder <- sort(mle.IM,decreasing = TRUE,index.return = TRUE)
  as.character(candidates[reorder$ix[1:3]])
}
##make predictions and record time
# Each InterpolatedPred3 call returns character(3), so sapply gives a
# 3 x n matrix; transpose to one row per test case.
ptm <- proc.time()
predictions3 <- t(sapply(history.6,InterpolatedPred3))
time3 <- (proc.time() - ptm)
##check accuracy
accuracy3 <- checkAccuracy(predictions3,lastWord.6)
##check space allocation (bytes) of the n-gram tables and the functions
ngram.size <- c(as.numeric(object.size(ngram.1)),
                as.numeric(object.size(ngram.2)),
                as.numeric(object.size(ngram.3)),
                as.numeric(object.size(ngram.4)))
Eval.size <- c(as.numeric(object.size(unigramEval)),
               as.numeric(object.size(bigramEval)),
               as.numeric(object.size(trigramEval)),
               as.numeric(object.size(quadrigramEval)),
               as.numeric(object.size(InterpolatedPred3)))
##write records
write.csv(predictions3,"pred3.csv")
write.csv(accuracy3, "accuracy3.csv")
write.table(as.matrix(time3),"time3.txt")
write.table(ngram.size,"ngram.size.txt")
write.table(Eval.size,"Eval.size.txt")
##Test wordclouds
###Prediction function which returns full candidate list
# Like InterpolatedPred3, but returns the FULL ranked candidate table
# (data frame with columns `candidates` and `likelihood`) rather than only
# the top three words.
InterpolatedPred4 <- function(history, lambda = rep(1/4,4)) {
  library(stringr)
  ## Candidates = words that follow the last history word in any bigram.
  prev_word <- word(history, -1)
  prev_regex <- paste0("^", prev_word, " ")
  candidates <- word(ngram.2[grepl(prev_regex, ngram.2$Ngram), "Ngram"], -1)
  ## Guarantee at least three candidates by padding with common words.
  if (length(candidates) < 3) {
    candidates <- c(candidates, "the", "to", "and")
  }
  ## Score every candidate continuation under the four n-gram models, then
  ## linearly interpolate with the weights in `lambda`.
  solutions <- paste(history, candidates)
  mle <- data.frame(unigramEval(solutions),
                    bigramEval(solutions),
                    trigramEval(solutions),
                    quadrigramEval(solutions))
  likelihood <- as.matrix(mle) %*% (lambda)
  predictions <- data.frame(candidates, likelihood)
  predictions[order(predictions$likelihood, decreasing = TRUE), ]
}
###Wordcloud function
# Render a word cloud of candidate words sized by their likelihoods.
# Likelihoods are scaled by 1000 and floored into pseudo-counts; wordcloud()
# then drops words whose pseudo-count is below 2.
makeWordCloud <- function(candidates, likelihood){
  library(wordcloud)
  pseudo_counts <- floor(likelihood * 1000)
  wordcloud(candidates, pseudo_counts, min.freq = 2)
}
|
04cb762288b23926e50c988125bef60a693a2793
|
6dfa40f0b4ca611b22562ab4b8561a4a2a6929d7
|
/man/blbcoef.Rd
|
1b3a7be2b641198a8b2e7da5b671c6e77c50d857
|
[
"MIT"
] |
permissive
|
McChickenNuggets/blblm
|
28566d8b5c0943ecf5d771fbccb6d47947bd24b3
|
3ff530a0da028624d005bf9259cf0d841b8b54ba
|
refs/heads/master
| 2023-03-22T02:01:42.557457
| 2021-03-14T10:10:49
| 2021-03-14T10:10:49
| 347,285,884
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 299
|
rd
|
blbcoef.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/blbcoef.R
\name{blbcoef}
\alias{blbcoef}
\title{compute the coefficients from fit}
\usage{
blbcoef(fit)
}
\arguments{
\item{fit}{objects}
}
\value{
list of coefficients
}
\description{
compute the coefficients from fit
}
|
a6722ed9a389cd53a5a652955b38148d159a8e0e
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/phyloclim/examples/niche.overlap.Rd.R
|
dd66b072a2b042b758e9c4ccdc3841035fe7149f
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 385
|
r
|
niche.overlap.Rd.R
|
library(phyloclim)
### Name: niche.overlap
### Title: Quantification of Niche Overlap
### Aliases: niche.overlap
### ** Examples
# load PNOs for Oxalis sect. Palmatifoliae
data(PNO)
# niche overlap on an annual precipitation gradient:
no <- niche.overlap(PNO$AnnualPrecipitation)
# upper triangle: based on Schoener's D
# lower triangle: based on Hellinger distances
print(no)
|
6c40cf3dc120cc926d97bef2fae48380f77aa674
|
7d4613a3fcb34d032ae9227566132d8fcbf12682
|
/Rscripts/simul/cherry.R
|
90c056b85786bb48d26fd01fba1492d108331a78
|
[
"BSD-2-Clause"
] |
permissive
|
junseonghwan/PhylExAnalysis
|
15f2b9d6ec4e67911fbfac5d2bd6e96034f44680
|
5bafad3fb6dc60e4144b18e85bbdba47a9a04fe7
|
refs/heads/main
| 2023-04-11T17:29:30.812185
| 2023-03-29T14:49:13
| 2023-03-29T14:49:13
| 335,060,186
| 1
| 1
| null | 2021-02-06T05:22:54
| 2021-02-01T19:26:43
|
R
|
UTF-8
|
R
| false
| false
| 6,027
|
r
|
cherry.R
|
# Download the cherry data from Zenodo and unzip under data/simulation
library(ggplot2)
library(dplyr)
library(rjson)
library(PhylExR)
library(Rcpp)
# TODO: This code relies on older version of the code -- update required.
sourceCpp(file = "PrePhylExR/src/rcpp_hello_world.cpp")
# dropout_hp: named list containing 'alpha' and 'beta'.
# bursty_hp: named list containing 'alpha' and 'beta'.
# biallelic_hp: numeric matrix with the first column the alpha and the second column the beta
# Assign each single cell to the clone-tree node that maximises the
# likelihood of its per-SNV read counts.
#
# cell_data:    long table with one row per (Cell, SNV) observation holding
#               ID, Cell, d (total reads) and a and/or b (b = d - a).
# datum2node:   two-column table mapping each SNV ID to a tree-node label.
# dropout_hp:   named list with 'alpha' and 'beta'.
# bursty_hp:    named list with 'alpha' and 'beta'.
# biallelic_hp: numeric matrix, col 1 = alpha, col 2 = beta (one row per SNV).
# Returns data.frame(Cell, Node): the maximum-likelihood node per cell.
AssignCells <- function(cell_data,
                        datum2node,
                        dropout_hp,
                        bursty_hp,
                        biallelic_hp) {
  names(datum2node) <- c("ID", "Node")
  snv_count <- length(datum2node$ID)
  # Order node labels / SNV IDs / cell names by label length then value
  # (i.e. "breadth-first" over the node naming scheme).
  nodes <- as.character(unique(datum2node$Node))
  nodes_breadth_first <- nodes[order(nchar(nodes), nodes)]
  mut_ids <- unique(as.character(cell_data$ID))
  mut_ids <- mut_ids[order(nchar(mut_ids), mut_ids)]
  cells <- unique(as.character(cell_data$Cell))
  cells <- cells[order(nchar(cells), cells)]
  cell_data$ID <- factor(cell_data$ID, levels = mut_ids)
  cell_data$Cell <- factor(cell_data$Cell, levels = cells)
  # Derive variant reads as total - a when `b` is not supplied.
  if (!("b" %in% names(cell_data))) {
    cell_data$b <- cell_data$d - cell_data$a
  }
  # Wide cell-by-SNV matrices of variant and total read counts; missing
  # (Cell, SNV) combinations become 0.
  var_reads <- reshape2::dcast(cell_data, Cell ~ ID, value.var = "b")
  var_reads[is.na(var_reads)] <- 0
  total_reads <- reshape2::dcast(cell_data, Cell ~ ID, value.var = "d")
  total_reads[is.na(total_reads)] <- 0
  # Replicate the scalar hyperparameters into one (alpha, beta) row per SNV.
  dropout_hp_mat <- matrix(c(dropout_hp$alpha, dropout_hp$beta), nrow = snv_count, ncol=2, byrow=T)
  bursty_hp_mat <- matrix(c(bursty_hp$alpha, bursty_hp$beta), nrow = snv_count, ncol=2, byrow=T)
  # Log unnormalised likelihood of each cell (rows) under each node (cols),
  # computed by the C++ routine sourced at the top of this script.
  log_unnorm_liks <- IdentifyCellMutationStatus(datum2node,
                                                nodes_breadth_first,
                                                mut_ids,
                                                as.matrix(var_reads[,-1]),
                                                as.matrix(total_reads[,-1]),
                                                as.matrix(dropout_hp_mat),
                                                as.matrix(bursty_hp_mat),
                                                as.matrix(biallelic_hp))
  # Drop cells whose likelihood row is all zeros (named "no reads" below).
  cells_with_no_reads <- (rowSums(log_unnorm_liks) == 0)
  log_unnorm_liks2 <- log_unnorm_liks[!cells_with_no_reads,]
  #dim(log_unnorm_liks2)
  # Normalise each row in log space, then sanity-check that the resulting
  # per-cell probabilities sum to 1.
  log_norms <- apply(log_unnorm_liks2, 1, logSumExp)
  cell_assign_probs <- exp(log_unnorm_liks2 - log_norms)
  err <- (rowSums(cell_assign_probs) - 1) > 1e-6
  if (sum(err) > 0) {
    print(paste("Row does not sum to 1 for ", which(err)))
  }
  # Hard-assign each cell to its most probable node.
  cell_assignment <- nodes_breadth_first[apply(cell_assign_probs, 1, which.max)]
  return(data.frame(Cell = var_reads[,1], Node = cell_assignment))
}
# Read the simulated genotypes, single-cell reads, and per-SNV
# hyperparameters.
# NOTE(review): header=T / header=F / quote=F use the reassignable
# shorthands T/F; prefer TRUE/FALSE.
dat <- read.table("data/simulation/cherry/genotype_ssm.txt", header=T, sep="\t")
sc <- read.table("data/simulation/cherry/simul_sc.txt", header=T, sep="\t")
sc_hp <- read.table("data/simulation/cherry/simul_sc_hp.txt", header=T, sep="\t")
snv_count <- dim(dat)[1]
# Plot cell mutation profile.
sc$b <- sc$d - sc$a    # b = d - a (presumably variant = total - ref reads)
cells <- unique(sc$Cell)
ids <- dat$ID
sc$Cell <- factor(sc$Cell, levels = cells)
sc$ID <- factor(sc$ID, levels = ids)
# Heatmap of b per (locus, cell) in the original (unsorted) order.
base_size <- 11
p <- ggplot(sc, aes(ID, Cell, fill = b)) + geom_tile(colour = "white")
p <- p + theme_bw() + scale_fill_gradient(low = "white", high = "red")
p <- p + ylab("Cells") + xlab("Loci")
p <- p + scale_x_discrete(expand = c(0, 0)) + scale_y_discrete(expand = c(0, 0))
p <- p + theme(legend.position = "none", axis.ticks = element_blank())
p <- p + theme(axis.title.x =element_text(size = base_size * 2))
p <- p + theme(axis.title.y =element_text(size = base_size * 2))
p <- p + theme(axis.text.x = element_blank(), axis.text.y = element_blank())
p <- p + theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank())
p
ggsave("_figures/simulation/cherry/cherry_sc.pdf", p)
# Inferred SNV clustering and SNV-to-node mapping from the inference output.
cluster_labels <- read.table("data/simulation/cherry/genotype/joint/tree0/cluster_labels.tsv", header=F, sep="\t")
names(cluster_labels) <- c("ID", "Cluster")
cluster_labels$Cluster <- as.character(cluster_labels$Cluster)
datum2node <- read.table("data/simulation/cherry/genotype/joint/tree0/datum2node.tsv", header=F, sep="\t")
# Assign cell to nodes.
dropout_hp <- list(alpha=0.01, beta=1)
bursty_hp <- list(alpha=1, beta=0.01)
#cell_assignment <- AssignCellsBursty(sc, datum2node, bursty_hp, sc_hp)
cell_assignment <- AssignCells(sc, datum2node, dropout_hp, bursty_hp, sc_hp[,2:3])
cell_assignment_truth <- read.table("data/simulation/cherry/cell2node.txt", header=F, sep="\t")
names(cell_assignment_truth) <- c("Cell", "Node")
cell_assignment$Node <- as.character(cell_assignment$Node)
# Re-level loci by inferred SNV cluster and cells by assigned node so the
# second heatmap shows the inferred block structure.
ids <- cluster_labels[order(cluster_labels$Cluster),"ID"]
cells <- cell_assignment[order(nchar(cell_assignment$Node), cell_assignment$Node),"Cell"]
sc$Cell <- factor(sc$Cell, levels = cells)
sc$ID <- factor(sc$ID, levels = ids)
p <- ggplot(sc, aes(ID, Cell, fill = b)) + geom_tile(colour = "white")
p <- p + theme_bw() + scale_fill_gradient(low = "white", high = "red")
p <- p + ylab("Cells") + xlab("Loci")
p <- p + scale_x_discrete(expand = c(0, 0)) + scale_y_discrete(expand = c(0, 0))
p <- p + theme(legend.position = "none", axis.ticks = element_blank())
p <- p + theme(axis.title.x =element_text(size = base_size * 2))
p <- p + theme(axis.title.y =element_text(size = base_size * 2))
p <- p + theme(axis.text.x = element_blank(), axis.text.y = element_blank())
p <- p + theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank())
p
#ggsave("_figures/simulation/cherry/cherry_inferred.pdf", p, width = 6, height=6, units="in")
ggsave("_figures/simulation/cherry/cherry_inferred.pdf", p)
### Save the source data for Figure 1e-f ###
head(sc)
sc_ <- left_join(sc, cell_assignment)
sc_ <- left_join(sc_, cluster_labels)
names(sc_) <- c("ID", "Cell", "a", "d", "SampleName", "b", "CellCluster", "SNVCluster")
# Column 5 (SampleName) is dropped from the exported CSV.
write.csv(x = sc_[,-c(5)], file = "data/NatComm/Figure1e-f.csv", quote = F, row.names = F)
|
6b4cc95560eba54b6c9ec8c8634c5c5824a11d59
|
c0f9288b164da1c5ebf462238ae32e782a71404e
|
/R/build_extravignette.R
|
2a628fd40407414859f0e8dee4ad54520e49b121
|
[] |
no_license
|
courtiol/testenvrmarkdown
|
bd1b6f2e1d06daa29cbdc3364c67b8a31a4db300
|
0d85219afa1b30609663e2455c96a175fba45a14
|
refs/heads/master
| 2022-11-20T19:36:04.389292
| 2020-07-21T10:37:40
| 2020-07-21T10:37:40
| 281,366,551
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,201
|
r
|
build_extravignette.R
|
#' Build optional vignette once the package is installed
#'
#' This function aims at creating vignettes after the package installation.
#' It renders the vignette in a clean environment and opens the result in
#' the browser.
#'
#' @return Invisibly, the path to the rendered vignette.
#' @export
#' @examples
#' search()
#' build_extravignette()
#'
build_extravignette <- function() {
  ## The vignette source files are stored in different places depending on whether one
  ## uses the installed version of the package or the one loaded with devtools during
  ## the development of the package. We thus try both:
  path1 <- paste0(find.package("testenvrmarkdown"), "/inst/extdata/")
  path2 <- paste0(find.package("testenvrmarkdown"), "/extdata/")
  if (dir.exists(path1)) {
    path <- path1
  } else if (dir.exists(path2)) {
    path <- path2
  } else {
    ## Fail with a clear message instead of the original's cryptic
    ## "object 'path' not found" when neither location exists.
    stop("could not locate the vignette sources for 'testenvrmarkdown'",
         call. = FALSE)
  }
  path_complete <- normalizePath(paste0(path, "vignette_test", ".Rmd"), mustWork = TRUE)
  ## Create a clean environment so the vignette cannot pick up objects from
  ## the caller's workspace:
  vignette_env <- new.env(parent = as.environment("package:stats"))
  ## Rendering the vignette
  vignette_path <- rmarkdown::render(path_complete, quiet = TRUE, envir = vignette_env)
  ## We open the vignette:
  utils::browseURL(vignette_path)
  ## We return the path:
  invisible(vignette_path)
}
#' A simple function
#'
#' Prints "hello world" and invisibly returns the printed string.
#'
#' @export
hello <- function() {
  print("hello world")
}
|
3fd47e20d724290312189591d551e43b6eb35ac1
|
23b10ba8ffd9254e4c73fc07173f5b49fbeb4422
|
/files/SourceCode.R
|
cb2fba69c2f1860b7afa77439ccbf8b5fc414f28
|
[
"CC0-1.0"
] |
permissive
|
ryan-hill/SFS_GIS_R_training
|
42078e2fd805591a2a0e6d5c90b1c39fa2a5aea8
|
25999a779b00f58f808652d5b3dbfab088263ea6
|
refs/heads/master
| 2020-03-11T11:16:58.922250
| 2018-04-12T22:31:11
| 2018-04-12T22:31:11
| 129,965,352
| 1
| 0
| null | 2018-04-17T21:09:20
| 2018-04-17T21:09:20
| null |
UTF-8
|
R
| false
| false
| 27,446
|
r
|
SourceCode.R
|
#####################################################
# Source code for AWRA GIS Conference April 2018
# R and Spatial Data
# Marc Weber, Mike McMannus, Steve Kopp
#####################################################
#######################
# SpatialData in R - sp
#######################
# Download data
# Fetch the workshop script and data bundles from GitHub; mode="wb" keeps
# the binary zip/RData payloads intact on Windows.
download.file("https://github.com/mhweber/AWRA_GIS_R_Workshop/blob/gh-pages/files/SourceCode.R?raw=true",
              "SourceCode.R",
              method="auto",
              mode="wb")
download.file("https://github.com/mhweber/AWRA_GIS_R_Workshop/blob/gh-pages/files/WorkshopData.zip?raw=true",
              "WorkshopData.zip",
              method="auto",
              mode="wb")
download.file("https://github.com/mhweber/AWRA_GIS_R_Workshop/blob/gh-pages/files/HUCs.RData?raw=true",
              "HUCs.RData",
              method="auto",
              mode="wb")
download.file("https://github.com/mhweber/gis_in_action_r_spatial/blob/gh-pages/files/NLCD2011.Rdata?raw=true",
              "NLCD2011.Rdata",
              method="auto",
              mode="wb")
# NOTE(review): machine-specific absolute paths below; adjust per system.
unzip("WorkshopData.zip", exdir = "/home/marc")
getwd()
dir()
setwd("/home/marc/GitProjects")
# Warm-up: every R object has a class and an inspectable structure.
class(iris)
str(iris)
# Exercise 1
# Explore the sp class hierarchy: Spatial is the base class.
library(sp)
getClass("Spatial")
getClass("SpatialPolygons")
library(rgdal)
data(nor2k)    # example spatial dataset shipped with rgdal
plot(nor2k,axes=TRUE)
# Exercise 2
# Build a small point dataset of Oregon cities and plot it with symbol size
# scaled by sqrt(population).
cities <- c('Ashland','Corvallis','Bend','Portland','Newport')
longitude <- c(-122.699, -123.275, -121.313, -122.670, -124.054)
latitude <- c(42.189, 44.57, 44.061, 45.523, 44.652)
population <- c(20062,50297,61362,537557,9603)
locs <- cbind(longitude, latitude)
plot(locs, cex=sqrt(population*.0002), pch=20, col='red',
     main='Population', xlim = c(-124,-120.5), ylim = c(42, 46))
text(locs, cities, pos=4)
breaks <- c(20000, 50000, 60000, 100000)
options(scipen=3)    # discourage scientific notation in the legend labels
legend("topright", legend=breaks, pch=20, pt.cex=1+breaks/20000,
       col='red', bg='gray')
# Overlay a hand-drawn polygon, line, and points on the same plot.
lon <- c(-123.5, -123.5, -122.5, -122.670, -123)
lat <- c(43, 45.5, 44, 43, 43)
x <- cbind(lon, lat)
polygon(x, border='blue')
lines(x, lwd=3, col='red')
points(x, cex=2, pch=20)
# The maps package draws basemaps from its built-in map databases.
library(maps)
map()
map.text('county','oregon')
map.axes()
title(main="Oregon State")
# A map object is a list with a names vector plus x/y coordinate vectors.
p <- map('county','oregon')
str(p)
p$names[1:10]
p$x[1:50]
# Hand-build an sp SpatialLines object from the first few coordinates:
# Line -> Lines (with an ID) -> SpatialLines.
L1 <-Line(cbind(p$x[1:8],p$y[1:8]))
Ls1 <- Lines(list(L1), ID="Baker")
SL1 <- SpatialLines(list(Ls1))
str(SL1)
plot(SL1)
library(maptools)
# Re-read the Oregon counties with polygon structure (fill=TRUE).
# (plot=F replaced with plot=FALSE: T/F are reassignable shorthands.)
counties <- map('county','oregon', plot=FALSE, col='transparent',fill=TRUE)
counties$names
#strip out just the county names from items in the names vector of counties
# vapply() pins the result to character(1) per element, unlike sapply(),
# whose return type silently depends on its input.
IDs <- vapply(strsplit(counties$names, ","), function(x) x[2], character(1))
counties_sp <- map2SpatialPolygons(counties, IDs=IDs,
    proj4string=CRS("+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs"))
summary(counties_sp)
plot(counties_sp, col="grey", axes=TRUE)
# Exercise 3
# Promote a plain CSV of stream gage sites to a SpatialPointsDataFrame.
StreamGages <- read.csv('StreamGages.csv')
class(StreamGages)
head(StreamGages)
# Assigning a coordinates formula converts the data.frame in place.
coordinates(StreamGages) <- ~LON_SITE + LAT_SITE
llCRS <- CRS("+proj=longlat +datum=NAD83")
proj4string(StreamGages) <- llCRS
summary(StreamGages)
bbox(StreamGages)
proj4string(StreamGages)
# Browse the datums / ellipsoids / projections the local PROJ build knows about.
projInfo(type='datum')
projInfo(type='ellps')
projInfo(type='proj')
proj4string(StreamGages)
plot(StreamGages, axes=TRUE, col='blue')
map('state',regions=c('oregon','washington','idaho'),fill=FALSE, add=T)
plot(StreamGages[StreamGages$STATE=='OR',],add=TRUE,col="Yellow") #plot just the Oregon sites in yellow on top of other sites
plot(StreamGages[StreamGages$STATE=='WA',],add=TRUE,col="Red")
plot(StreamGages[StreamGages$STATE=='ID',],add=TRUE,col="Green")
# Load pre-built HUC (hydrologic unit) polygons and inspect the S4 slots.
load("files/HUCs.RData")
class(HUCs)
getClass("SpatialPolygonsDataFrame")
summary(HUCs)
slotNames(HUCs) #get slots using method
str(HUCs, 2)
head(HUCs@data) #the data frame slot
HUCs@bbox #call on slot to get bbox
HUCs@polygons[[1]]
slotNames(HUCs@polygons[[1]])
HUCs@polygons[[1]]@labpt
HUCs@polygons[[1]]@Polygons[[1]]@area
# How would we code a way to extract the HUCs polygon with the smallest area?
# Look at the min_area function that is included in the HUCs.RData file
min_area
min_area(HUCs)
# We use sapply from the apply family of functions on the area slot of the Polygons slot
# Spatial overlay: tag each gage with the HUC polygon it falls in.
StreamGages <- spTransform(StreamGages, CRS(proj4string(HUCs)))
# NOTE(review): sp::over() does not document a `df` argument (likely silently
# ignored via `...`); over() on a SpatialPolygonsDataFrame already returns a
# data.frame - confirm.
gage_HUC <- over(StreamGages,HUCs, df=TRUE)
StreamGages$HUC <- gage_HUC$HUC_8[match(row.names(StreamGages),row.names(gage_HUC))]
head(StreamGages@data)
library(rgeos)
# Re-project to Oregon Lambert (EPSG:2991) so gArea returns planar units.
HUCs <- spTransform(HUCs,CRS("+init=epsg:2991"))
gArea(HUCs)
# Reading in Spatial Data
# List the vector formats the local GDAL/OGR build can read.
ogrDrivers()
download.file("ftp://ftp.gis.oregon.gov/adminbound/citylim_2017.zip","citylim_2017.zip")
unzip("citylim_2017.zip", exdir = ".")
citylims <- readOGR(".", "citylim_2017") # our first parameter is directory, in this case '.' for working directory, and no extension on file!
plot(citylims, axes=T, main='Oregon City Limits') # plot it!
# Read feature classes straight out of an Esri file geodatabase.
# NOTE(review): hard-coded "/home/marc" paths again - adjust per machine.
download.file("https://www.blm.gov/or/gis/files/web_corp/state_county_boundary.zip","/home/marc/state_county_boundary.zip")
unzip("state_county_boundary.zip", exdir = "/home/marc")
fgdb = "state_county_boundary.gdb"
# List all feature classes in a file geodatabase
fc_list = ogrListLayers(fgdb)
print(fc_list)
# Read the feature class
state_poly = readOGR(dsn=fgdb,layer="state_poly")
plot(state_poly, axes=TRUE)
cob_poly = readOGR(dsn=fgdb,layer="cob_poly")
plot(cob_poly, add=TRUE, border='red')
#######################
# SpatialData in R - sf
#######################
# sf stores features as data.frames with a list-column of geometries
# (simple features), replacing the sp/rgdal/rgeos stack.
# From CRAN:
install.packages("sf")
# From GitHub:
library(devtools)
# install_github("edzer/sfr")
library(sf)
methods(class = "sf")
# Exercise 1
library(RCurl)
library(sf)
library(ggplot2)
# EPA Wadeable Streams Assessment site info, read over HTTPS.
download <- getURL("https://www.epa.gov/sites/production/files/2014-10/wsa_siteinfo_ts_final.csv")
wsa <- read.csv(text = download)
class(wsa)
levels(wsa$ECOWSA9)
# Keep only the three Plains ecoregions (Temperate/Northern/Southern).
wsa_plains <- wsa[wsa$ECOWSA9 %in% c("TPL","NPL","SPL"),]
# Promote to sf points; crs 4269 = NAD83 geographic coordinates.
wsa_plains = st_as_sf(wsa_plains, coords = c("LON_DD", "LAT_DD"), crs = 4269,agr = "constant")
str(wsa_plains)
head(wsa_plains[,c(1,60)])
# Plotting an sf object with several attribute columns facets by attribute.
plot(wsa_plains[c(46,56)], graticule = st_crs(wsa_plains), axes=TRUE)
plot(wsa_plains[c(38,46)],graticule = st_crs(wsa_plains), axes=TRUE)
plot(wsa_plains['geometry'], main='Keeping things simple',graticule = st_crs(wsa_plains), axes=TRUE)
ggplot(wsa_plains) +
  geom_sf() +
  ggtitle("EPA WSA Sites in the Plains Ecoregions") +
  theme_bw()
# Exercise 2
library(USAboundaries)
states <- us_states()
levels(as.factor(states$state_abbr))
# Drop Alaska, Hawaii, and Puerto Rico to keep a conterminous-US extent.
states <- states[!states$state_abbr %in% c('AK','PR','HI'),]
st_crs(states)
st_crs(wsa_plains)
# They're not equal, which we verify with:
st_crs(states) == st_crs(wsa_plains)
# We'll transform the WSA sites to the same CRS as states
wsa_plains <- st_transform(wsa_plains, st_crs(states))
plot(states$geometry, axes=TRUE)
plot(wsa_plains$geometry, col='blue',add=TRUE)
# Subsetting one sf object by another performs a spatial filter;
# the second call makes the default st_intersects predicate explicit.
plains_states <- states[wsa_plains,]
plains_states <- states[wsa_plains,op = st_intersects]
iowa = states[states$state_abbr=='IA',]
# Three equivalent ways to pull out the sites that fall in Iowa.
iowa_sites <- st_intersection(wsa_plains, iowa)
sel_list <- st_intersects(wsa_plains, iowa)
# sparse = FALSE returns a dense logical matrix usable for row indexing.
sel_mat <- st_intersects(wsa_plains, iowa, sparse = FALSE)
iowa_sites <- wsa_plains[sel_mat,]
plot(plains_states$geometry, axes=T)
plot(iowa_sites, add=T, col='blue')
# st_disjoint is the complement: sites NOT in Iowa.
sel_mat <- st_disjoint(wsa_plains, iowa, sparse = FALSE)
not_iowa_sites <- wsa_plains[sel_mat,]
plot(plains_states$geometry, axes=T)
plot(not_iowa_sites, add=T, col='red')
# Exercise 3
# Trim to a few columns, then attach state attributes to each site via a
# spatial join (st_join defaults to the st_intersects predicate).
wsa_plains <- wsa_plains[c(1:4,60)]
wsa_plains <- st_join(wsa_plains, plains_states)
# verify your results
head(wsa_plains)
# Pull nitrogen water-quality samples for Iowa from the Water Quality Portal.
library(dataRetrieval)
IowaNitrogen<- readWQPdata(statecode='IA', characteristicName="Nitrogen")
head(IowaNitrogen)
names(IowaNitrogen)
# Site metadata travels along as an attribute of the returned data.frame.
siteInfo <- attr(IowaNitrogen, "siteInfo")
unique(IowaNitrogen$ResultMeasure.MeasureUnitCode)
# Summarise per monitoring location: sample count, period of record, mean.
# (Both "mg/l" spellings are kept - the raw data has a trailing-space variant.)
IowaSummary <- IowaNitrogen %>%
  dplyr::filter(ResultMeasure.MeasureUnitCode %in% c("mg/l","mg/l ")) %>%
  dplyr::group_by(MonitoringLocationIdentifier) %>%
  dplyr::summarise(count=n(),
                   start=min(ActivityStartDateTime),
                   end=max(ActivityStartDateTime),
                   mean = mean(ResultMeasureValue, na.rm = TRUE)) %>%
  dplyr::arrange(-count) %>%
  dplyr::left_join(siteInfo, by = "MonitoringLocationIdentifier")
iowa_wq = st_as_sf(IowaSummary, coords = c("dec_lon_va", "dec_lat_va"), crs = 4269,agr = "constant")
plot(st_geometry(subset(states, state_abbr == 'IA')), axes=T)
plot(st_geometry(subset(wsa_plains, STATE =='IA')), add=T, col='blue')
plot(iowa_wq, add=T, col='red')
# Project both layers to UTM zone 15N (EPSG:26915) so distances are in metres,
# then join WSA sites to WQP sites within 50 km.
wsa_iowa <- subset(wsa_plains, state_abbr=='IA')
wsa_iowa <- st_transform(wsa_iowa, crs=26915)
iowa_wq <- st_transform(iowa_wq, crs=26915)
wsa_wq = st_join(wsa_iowa, iowa_wq, st_is_within_distance, dist = 50000)
# Exercise 4
# Join water chemistry (conductivity) onto the full WSA site table.
download <- getURL("https://www.epa.gov/sites/production/files/2014-10/waterchemistry.csv")
wsa_chem <- read.csv(text = download)
# match() keeps COND aligned with wsa rows by SITE_ID.
wsa$COND <- wsa_chem$COND[match(wsa$SITE_ID, wsa_chem$SITE_ID)]
wsa = st_as_sf(wsa, coords = c("LON_DD", "LAT_DD"), crs = 4269,agr = "constant")
states <- st_transform(states, st_crs(wsa))
plot(states$geometry, axes=TRUE)
plot(wsa$geometry, add=TRUE)
# Spatially join sites to states, then average conductivity per state.
avg_cond_state <- st_join(states, wsa) %>%
  dplyr::group_by(name) %>%
  dplyr::summarize(MeanCond = mean(COND, na.rm = TRUE))
ggplot(avg_cond_state) +
  geom_sf(aes(fill = MeanCond)) +
  scale_fill_distiller("Conductivity", palette = "Greens") +
  ggtitle("Averge Conductivity (uS/cm @ 25 C) per State") +
  theme_bw()
# I/O with sf: st_read/st_write use GDAL under the hood.
st_drivers()
download.file("http://www.naturalearthdata.com/http//www.naturalearthdata.com/download/110m/cultural/ne_110m_admin_0_countries.zip", "ne_110m_admin_0_countries.zip")
unzip("ne_110m_admin_0_countries.zip", exdir = ".")
countries <- st_read("ne_110m_admin_0_countries.shp")
plot(countries$geometry) # plot it!
# Geodatabase Example - if you haven't already downloaded:
download.file("https://www.blm.gov/or/gis/files/web_corp/state_county_boundary.zip","/home/marc/state_county_boundary.zip")
unzip("state_county_boundary.zip", exdir = "/home/marc")
fgdb = "state_county_boundary.gdb"
# List all feature classes in a file geodatabase
st_layers(fgdb)
# Read the feature class
state_poly = st_read(dsn=fgdb,layer="state_poly")
state_poly$SHAPE
###########################
# SpatialData in R - Raster
###########################
library(raster)
# Build an empty 10 x 10 raster over western Oregon (lon/lat extent).
r <- raster(ncol=10, nrow = 10, xmx=-116,xmn=-126,ymn=42,ymx=46)
str(r)
r
# Fill every cell with uniform random values.
r[] <- runif(n=ncell(r))
r
plot(r)
# Cell access: single cell number vs row/column indexing.
r[5]
r[1,5]
# Raster algebra is vectorized over cells.
r2 <- r * 50
r3 <- sqrt(r * 5)
# stack() holds layer references; brick() stores one multi-layer object.
s <- stack(r, r2, r3)
s
plot(s)
b <- brick(x=c(r, r * 50, sqrt(r * 5)))
b
plot(b)
# Exercise 1
# getData() downloads GADM administrative boundaries and SRTM elevation
# tiles on demand (cached in the working directory).
US <- getData("GADM",country="USA",level=1)
# NOTE(review): `states` now shadows the sf states object created in the
# sf section above - fine when run top-to-bottom, confusing otherwise.
states <- c('California', 'Nevada', 'Utah','Montana', 'Idaho', 'Oregon', 'Washington')
PNW <- US[US$NAME_1 %in% states,]
plot(PNW, axes=TRUE)
library(ggplot2)
# fortify-based geom_polygon: the pre-geom_sf way to plot sp objects.
ggplot(PNW) + geom_polygon(data=PNW, aes(x=long,y=lat,group=group),
     fill="cadetblue", color="grey") + coord_equal()
# Pre-packaged SRTM tile for Oregon (fallback for slow downloads).
download.file("https://github.com/mhweber/AWRA_GIS_R_Workshop/blob/gh-pages/files/SRTM_OR.RData?raw=true",
              "SRTM_OR.RData",
              method="auto",
              mode="wb")
load("SRTM_OR.RData")
# Four SRTM tiles cover Oregon; mosaic them into one raster (mean in overlaps).
srtm <- getData('SRTM', lon=-116, lat=42)
plot(srtm)
plot(PNW, add=TRUE)
OR <- PNW[PNW$NAME_1 == 'Oregon',]
srtm2 <- getData('SRTM', lon=-121, lat=42)
srtm3 <- getData('SRTM', lon=-116, lat=47)
srtm4 <- getData('SRTM', lon=-121, lat=47)
srtm_all <- mosaic(srtm, srtm2, srtm3, srtm4,fun=mean)
plot(srtm_all)
plot(OR, add=TRUE)
# crop() trims to the bounding box of Oregon (not the polygon outline).
srtm_crop_OR <- crop(srtm_all, OR)
plot(srtm_crop_OR, main="Elevation (m) in Oregon")
plot(OR, add=TRUE)
# BUG FIX: the original called crop() a second time here, which only re-trims
# to the bounding box; mask() is required to set cells outside the state
# polygon to NA (matching the variable name and the Benton County code below).
srtm_mask_OR <- mask(srtm_crop_OR, OR)
# Repeat crop + mask at county level (Benton County, level-2 GADM).
US <- getData("GADM",country="USA",level=2)
Benton <- US[US$NAME_1=='Oregon' & US$NAME_2=='Benton',]
srtm_crop_Benton <- crop(srtm_crop_OR, Benton)
# mask() sets cells outside the polygon to NA (crop only trims the extent).
srtm_mask_Benton <- mask(srtm_crop_Benton, Benton)
plot(srtm_mask_Benton, main="Elevation (m) in Benton County")
plot(Benton, add=TRUE)
typeof(values(srtm_crop_OR))
# values(srtm_crop_OR) <- as.numeric(values(srtm_crop_OR))
typeof(values(srtm_crop_OR))
# Whole-raster summary statistics (cellStats ignores NA cells).
cellStats(srtm_crop_OR, stat=mean)
cellStats(srtm_crop_OR, stat=min)
cellStats(srtm_crop_OR, stat=max)
cellStats(srtm_crop_OR, stat=median)
cellStats(srtm_crop_OR, stat=range)
# Convert metres to feet in place.
values(srtm_crop_OR) <- values(srtm_crop_OR) * 3.28084
# rasterVis: lattice-based raster visualisation helpers.
library(rasterVis)
histogram(srtm_crop_OR, main="Elevation In Oregon")
densityplot(srtm_crop_OR, main="Elevation In Oregon")
p <- levelplot(srtm_crop_OR, layers=1, margin = list(FUN = median))
p + layer(sp.lines(OR, lwd=0.8, col='darkgray'))
# Derive terrain surfaces from elevation, then hillshade from slope + aspect.
Benton_terrain <- terrain(srtm_mask_Benton, opt = c("slope","aspect","tpi","roughness","flowdir"))
plot(Benton_terrain)
Benton_hillshade <- hillShade(Benton_terrain[['slope']],Benton_terrain[['aspect']])
plot(Benton_hillshade, main="Hillshade Map for Benton County")
# Exercise 2
# Landsat band math: the landsat package ships the July scene as one object
# per band; convert each to a RasterLayer and stack them (band order 1-7).
library(landsat)
data(july1,july2,july3,july4,july5,july61,july62,july7)
july1 <- raster(july1)
july2 <- raster(july2)
july3 <- raster(july3)
july4 <- raster(july4)
july5 <- raster(july5)
july61 <- raster(july61)
july62 <- raster(july62)
july7 <- raster(july7)
july <- stack(july1,july2,july3,july4,july5,july61,july62,july7)
july
plot(july)
# NDVI directly from the stack: layer 4 = NIR, layer 3 = red.
ndvi <- (july[[4]] - july[[3]]) / (july[[4]] + july[[3]])
# OR
# Normalized Difference Vegetation Index from a multi-layer object:
# (NIR - red) / (NIR + red), where layer 4 is NIR and layer 3 is red.
ndviCalc <- function(x) {
  nir <- x[[4]]
  red <- x[[3]]
  (nir - red) / (nir + red)
}
# calc() applies the band-math function across the whole stack.
ndvi <- raster::calc(x=july, fun=ndviCalc)
plot(ndvi)
# SAVI-style index computed directly from the stack.
savi <- ((july[[4]] - july[[3]]) / (july[[4]] + july[[3]]) + 0.5)*1.5
# OR
# Soil-adjusted vegetation index variant: NDVI shifted by 0.5 and
# scaled by 1.5 (layer 4 = NIR, layer 3 = red).
saviCalc <- function(x) {
  nir <- x[[4]]
  red <- x[[3]]
  ((nir - red) / (nir + red) + 0.5) * 1.5
}
# BUG FIX: the original assigned the calc() result to `ndvi` (clobbering the
# NDVI layer computed above) and then plotted the stale `savi` object.
# Store the result in `savi` so the plot shows what was just computed.
savi <- calc(x=july, fun=saviCalc)
plot(savi)
# NDMI (moisture index) directly from the stack: layer 4 = NIR, layer 5 = SWIR.
ndmi <- (july[[4]] - july[[5]]) / (july[[4]] + july[[5]])
# OR
# Normalized Difference Moisture Index: (NIR - SWIR) / (NIR + SWIR),
# where layer 4 is NIR and layer 5 is SWIR.
ndmiCalc <- function(x) {
  nir <- x[[4]]
  swir <- x[[5]]
  (nir - swir) / (nir + swir)
}
ndmi <- calc(x=july, fun=ndmiCalc)
plot(ndmi)
# Exercise 3
# Zonal statistics: mean elevation and NLCD land-cover composition for
# three Oregon counties (uses the level-2 GADM `US` object from above).
ThreeCounties <- US[US$NAME_1 == 'Oregon' & US$NAME_2 %in% c('Washington','Multnomah','Hood River'),]
srtm_crop_3counties <- crop(srtm_crop_OR, ThreeCounties)
plot(srtm_crop_3counties, main = "Elevation (m) for Washington, \n Multnomah and Hood River Counties")
plot(ThreeCounties, add=T)
# extract() with a polygon layer and fun=mean gives one mean per polygon.
county_av_el <- extract(srtm_crop_3counties , ThreeCounties, fun=mean, na.rm = T, small = T, df = T)
# Extra
ThreeCounties$ID <- 1:nrow(ThreeCounties)
# NOTE(review): these match() arguments look reversed relative to the idiom
# used elsewhere in this script (match(keys-of-target, keys-of-source));
# verify the county names actually line up with the extracted means.
county_av_el$NAME <- ThreeCounties$NAME_2[match(ThreeCounties$ID, row.names(county_av_el))]
download.file("https://github.com/mhweber/gis_in_action_r_spatial/blob/gh-pages/files/NLCD2011.Rdata?raw=true",
              "NLCD_OR_2011.Rdata",
              method="auto",
              mode="wb")
load("NLCD_OR_2011.Rdata")
# Here we pull out the raster attribute table to a data frame to use later - when we manipule the raster in the raster package,
# we lose the extra categories we'll want later
ThreeCounties <- spTransform(ThreeCounties, CRS(projection(NLCD_OR_2011)))
projection(NLCD_OR_2011)
proj4string(ThreeCounties)
# (Duplicate of the spTransform three lines above - redundant but harmless.)
ThreeCounties <- spTransform(ThreeCounties, CRS(projection(NLCD_OR_2011)))
NLCD_ThreeCounties <- crop(NLCD_OR_2011, ThreeCounties)
rat <- as.data.frame(levels(NLCD_OR_2011[[1]]))
# Aggregate so extract doesn't take quite so long - but this will take a few minutes as well...
# (modal = most frequent land-cover class within each 3x3 block of cells)
NLCD_ThreeCounties <- aggregate(NLCD_ThreeCounties, 3, fun=modal, na.rm = T)
plot(NLCD_ThreeCounties)
# extract() without fun returns a list of cell-value vectors, one per county.
e <- extract(NLCD_ThreeCounties, ThreeCounties, method = 'simple')
class(e)
length(e)
# This next section gets into fairly advance approaches in R using apply family of functions as well as melting (turning data to long form)
# and casting (putting back into wide form)
et = lapply(e,table)
library(reshape)
t <- melt(et)
t.cast <- cast(t, L1 ~ Var.1, sum)
head(t.cast)
names(t.cast)[1] <- 'ID'
nlcd <- data.frame(t.cast)
head(nlcd)
nlcd$Total <- rowSums(nlcd[,2:ncol(nlcd)])
head(nlcd)
# There are simpler cleaner ways to do but this loop applys a percent value to each category
# NOTE(review): the hard-coded indices 2:17 and 18 assume exactly 16
# land-cover classes occur across the three counties - confirm before reuse.
for (i in 2:17)
{
  nlcd[,i] = 100.0 * nlcd[,i]/nlcd[,18]
}
rat
# We'll use the raster attrubite table we pulled out earlier to reapply the full land cover category names
newNames <- as.character(rat$LAND_COVER) # LAND_COVER is a factor, we need to convert to character - understanding factors very important in R...
names(nlcd)[2:17] <- newNames[2:17]
nlcd <- nlcd[c(1:17)] # We don't need the total column anymore
nlcd
# Last, let's pull the county names back in
CountyNames <- ThreeCounties$NAME_2
nlcd$County <- CountyNames
nlcd
# Reorder the data frame
nlcd <- nlcd[c(18,2:17)]
nlcd
# Whew, that's it - is it a fair bit of code? Sure. But is it easily, quickly repeatable and reproducible now? You bet.
###########################
# SpatialData in R - Interactive Mapping
###########################
# Exercise 1
library(ggplot2)
library(plotly)
library(mapview)
library(tmap)
library(leaflet)
library(dplyr)
library(sf)
library(USAboundaries)
library(plotly)
states <- us_states()
# Conterminous US only, plus a (log-scaled) percent-water column.
states <- states %>%
  dplyr::filter(!name %in% c('Alaska','Hawaii', 'Puerto Rico')) %>%
  dplyr::mutate(perc_water = log10((awater)/(awater + aland)) *100)
# EPSG 5070 = CONUS Albers equal-area, suitable for nationwide choropleths.
states <- st_transform(states, 5070)
# plot, ggplot
g = ggplot(states) +
  geom_sf(aes(fill = perc_water)) +
  scale_fill_distiller("perc_water",palette = "Spectral", direction = 1) +
  ggtitle("Percent Water by State")
g
# NOTE - if you have an error here, it' may be because you don't have
# the development version of ggplot installed. Additionally, scales
# package was producing error for me and re-installing from CRAN
# solved the problem. See Winsont Chang's commment here:
# https://github.com/hadley/scales/issues/101
# ggplotly() turns the static ggplot into an interactive plotly widget.
ggplotly(g)
# Exercise 2
# mapview: one-line interactive leaflet maps of any spatial object.
mapview(states, zcol = 'perc_water', alpha.regions = 0.2, burst = 'name')
# An example that may be similar to what you try:
mapview(srtm_crop_OR)
mapview(states[states$name=='Oregon',]) + mapview(srtm_crop_OR) # can you figure out how to set the zoom when combining layers?
mapview(states)
# Exercise 3
# leaflet proper: explicit tile + marker layers, piped together.
m <- leaflet() %>%
  addTiles() %>% # Add default OpenStreetMap map tiles
  addMarkers(lng=-123.290698, lat=44.565578, popup="Here's where I work")
m # Print the map
# leaflet needs geographic coordinates (EPSG:4326); here the sf object is
# also converted to sp via as("Spatial") before handing it to leaflet().
state_map <- states %>%
  st_transform(crs = 4326) %>%
  as("Spatial") %>%
  leaflet() %>%
  addTiles() %>%
  addPolygons()
state_map # Print the map
# Plotting raster data with leaflet
wc <- getData("worldclim", var='prec', res=10)
# wc is a raster stack of years - pick a year, here using double-
# bracket notation to select
wc <- wc[[1]]
pal <- colorNumeric("RdYlBu", values(wc), na.color = "transparent")
leaflet() %>% addTiles() %>%
  addRasterImage(wc, colors = pal, opacity = 0.8) %>%
  addLegend(pal = pal, values = values(wc),
            title = "Precip")
# Exercise 4
# tmap: layered thematic mapping with a ggplot-like grammar.
# just fill
tm_shape(states) + tm_fill()
# borders
tm_shape(states) + tm_borders()
# borders and fill
tm_shape(states) + tm_borders() + tm_fill()
tm_shape(states) + tm_borders() + tm_fill(col='perc_water')
map_states <- tm_shape(states) + tm_borders()
map_states + tm_shape(wsa_plains) + tm_dots()
###########################
# SpatialData in R - Exploratory Spatial Data Analysis (ESDA)
###########################
library(rgdal)
library(gstat)
library(spdep)
# The shapefile needs to be in the working directory to use '.' or you need to specify the full path in first parameter to readOGR
# WSA sites for the three Plains ecoregions, this time as a shapefile.
wsa_plains <- readOGR(".","nplspltpl_bug")
class(wsa_plains)
dim(wsa_plains@data)
names(wsa_plains)
str(wsa_plains, max.level = 2)
# Proportional-symbol plot of conductivity - a first look for spatial pattern.
bubble(wsa_plains['COND'])
coordinates(wsa_plains)
# hscat: lagged scatterplots of a variable against itself over increasing
# distance bands (distance units come from the layer's projected CRS).
hscat(COND~1,wsa_plains, c(0, 10000, 50000, 250000, 750000, 1500000, 3000000))
hscat(log(COND) ~ 1,wsa_plains, c(0, 10000, 50000, 250000, 750000, 1500000, 3000000))
hscat(log(COND) ~ 1,wsa_plains, c(0, 10000, 50000, 100000, 150000, 200000, 250000))
hscat(log(COND) ~ 1,wsa_plains, c(0, 10000, 25000, 50000, 75000, 100000))
hscat(log(PTL) ~ 1,wsa_plains, c(0, 10000, 50000, 250000, 750000, 1500000, 3000000))
hscat(log(PTL) ~ 1,wsa_plains, c(0, 10000, 50000, 100000, 150000, 200000, 250000))
hscat(log(PTL) ~ 1,wsa_plains, c(0, 10000, 25000, 50000, 75000, 100000))
# Neighbour structures for the Temperate Plains (TPL) subset.
tpl <- subset(wsa_plains, ECOWSA9 == "TPL")
coords_tpl <- coordinates(tpl)
# k = 1 nearest-neighbour graph, converted to an nb object.
tpl_nb <- knn2nb(knearneigh(coords_tpl, k = 1), row.names=tpl$SITE_ID)
tpl_nb1 <- knearneigh(coords_tpl, k = 1)
#using the k=1 object to find the minimum distance at which all sites have a distance-based neighbor
tpl_dist <- unlist(nbdists(tpl_nb,coords_tpl))
summary(tpl_dist)#use max distance from summary to assign distance to create neighbors
tplnb_270km <- dnearneigh(coords_tpl, d1=0, d2=271000, row.names=tpl$SITE_ID)
summary(tplnb_270km)
plot(tpl)
plot(knn2nb(tpl_nb1), coords_tpl, add = TRUE)
title(main = "TPL K nearest neighbours, k = 1")
library(maptools)
library(rgdal)
library(spdep)
library(stringr)
library(sp)
library(reshape) # for rename function
library(tidyverse)
# N.B. Assigning short name to long path to reduce typing
# NOTE(review): UNC network path specific to the original author's machine.
shp.loc <- "//AA.AD.EPA.GOV/ORD/CIN/USERS/MAIN/L-P/mmcmanus/Net MyDocuments/AWRA GIS 2018/R and Spatial Data Workshop"
# East Fork Little Miami River HUC12 catchment polygons.
shp <- readOGR(shp.loc, "ef_lmr_huc12")
plot(shp)
dim(shp@data)
names(shp@data)
head(shp@data) # check on row name being used
# Code from Bivand book identifies classes within data frame @ data
# Shows FEATUREID variable as interger
sapply(slot(shp, "data"), class)
# Assign row names based on FEATUREID
row.names(shp@data) <- (as.character(shp@data$FEATUREID))
head(shp@data)
tail(shp@data)
# Read in StreamCat data for Ohio River Hydroregion. Check getwd()
scnlcd2011 <- read.csv("NLCD2011_Region05.csv")
names(scnlcd2011)
dim(scnlcd2011)
class(scnlcd2011)
str(scnlcd2011, max.level = 2)
head(scnlcd2011)
# Rename COMID so the StreamCat table's key matches the shapefile's FEATUREID.
scnlcd2011 <- reshape::rename(scnlcd2011, c(COMID = "FEATUREID"))
names(scnlcd2011)
row.names(scnlcd2011) <- scnlcd2011$FEATUREID
head(scnlcd2011)
# gages$AVE <- gage_flow$AVE[match(gages$SOURCE_FEA,gage_flow$SOURCE_FEA)]
# this matches the FEATUREID from the 815 polygons in shp to the FEATUREID from the df scnlcd2011
efnlcd2011 <- scnlcd2011[match(shp$FEATUREID, scnlcd2011$FEATUREID),]
dim(efnlcd2011)
head(efnlcd2011) # FEATUREID is now row name
row.names(efnlcd2011) <- efnlcd2011$FEATUREID
head(efnlcd2011)
str(efnlcd2011, max.level = 2)
summary(efnlcd2011$PctCrop2011Cat)
summary(efnlcd2011$PctDecid2011Cat)
# Log-transform the percent land-cover columns (+0.5 offset handles zeros).
efnlcd2011 <- efnlcd2011 %>%
  mutate(logCrop = log(PctCrop2011Cat + 0.50),
         logDecid = log(PctDecid2011Cat + 0.50))
names(efnlcd2011)
# BUG FIX: this line was a generic copy/paste template (objects `sp`, `df`,
# and `by` are undefined here) and errors when the script is sourced; it is
# kept for reference but disabled. The actual join is on the next line.
# sp@data = data.frame(sp@data, df[match(sp@data[,by], df[,by]),])
# Attribute-join the StreamCat columns onto the polygons' @data slot.
shp@data = data.frame(shp@data, efnlcd2011[match(shp@data[,"FEATUREID"], efnlcd2011[,"FEATUREID"]),])
head(shp@data)
class(shp)
names(shp@data)
dim(shp@data)
class(shp@data)
class(shp)
summary(shp)
head(shp@data)
summary(shp@data)
# Contiguity neighbours (queen = FALSE -> rook: shared edges required).
ctchcoords <- coordinates(shp)
class(ctchcoords)
ef.nb1 <- poly2nb(shp, queen = FALSE)
summary(ef.nb1)
class(ef.nb1)
plot(shp, border = "black")
plot(ef.nb1, ctchcoords, add = TRUE, col = "blue")
# Row-standardised spatial weights ("W"), then Moran scatterplots.
ef.nbwts.list <- nb2listw(ef.nb1, style = "W")
names(ef.nbwts.list)
moran.plot(shp$PctDecid2011Cat, listw = ef.nbwts.list, labels = shp$FEATUREID)
moran.plot(shp$PctDecid2011Cat, listw = ef.nbwts.list, labels = shp$FEATUREID)
unique(shp@data$huc12name)
huc12_ds1 <- shp@data
names(huc12_ds1)
str(huc12_ds1) # check huc12names is a factor
library(tidyverse)
# from Jeff Hollister EPA NHEERL-AED
# the indices [#] pull out the corresponding statistic from fivenum function
# library(dplyr)
# Five-number summary (min, Q1, median, Q3, max) of deciduous-forest and
# cropland percentages per HUC12, for the micromap box panels below.
huc12_ds2 <- huc12_ds1 %>%
  group_by(huc12name) %>%
  summarize(decidmin = fivenum(PctDecid2011Cat)[1],
            decidq1 = fivenum(PctDecid2011Cat)[2],
            decidmed = fivenum(PctDecid2011Cat)[3],
            decidq3 = fivenum(PctDecid2011Cat)[4],
            decidmax = fivenum(PctDecid2011Cat)[5],
            cropmin = fivenum(PctCrop2011Cat)[1],
            cropq1 = fivenum(PctCrop2011Cat)[2],
            cropmed = fivenum(PctCrop2011Cat)[3],
            cropq3 = fivenum(PctCrop2011Cat)[4],
            cropmax = fivenum(PctCrop2011Cat)[5])
# N.B. using tidyverse function defaults to creating an object that is:
# "tbl_df" "tbl" "data.frame"
class(huc12_ds2)
# from Marcus Beck in 2016-05-16 email
# devtools::install_github('USEPA/R-micromap-package-development', ref = 'development')
devtools::install_github('USEPA/micromap')
library(micromap)
huc12 <- readOGR(shp.loc, "ef_lmr_WBD_Sub")
plot(huc12)
names(huc12@data)
huc12.map.table<-create_map_table(huc12,'huc12name')#ID variable is huc12name
head(huc12.map.table)
# Linked micromap: dot legend | HUC12 labels | two boxplot panels | maps,
# ordered by median cropland, six HUC12s per perceptual group.
mmplot(stat.data = as.data.frame(huc12_ds2),
       map.data = huc12.map.table,
       panel.types = c('dot_legend', 'labels', 'box_summary', 'box_summary', 'map'),
       panel.data=list(NA,
                       'huc12name',
                       list('cropmin', 'cropq1', 'cropmed', 'cropq3', 'cropmax'),
                       list('decidmin', 'decidq1', 'decidmed', 'decidq3', 'decidmax'),
                       NA),
       ord.by = 'cropmed',
       rev.ord = TRUE,
       grouping = 6,
       median.row = FALSE,
       map.link = c('huc12name', 'ID'))
# Same micromap, saved to an object, with full per-panel styling attributes.
# NOTE(review): brewer.pal() requires RColorBrewer to be attached (it is not
# loaded explicitly above - presumably pulled in by a dependency; confirm).
mmplot_lc <- mmplot(stat.data = as.data.frame(huc12_ds2),
                    map.data = huc12.map.table,
                    panel.types = c('dot_legend', 'labels', 'box_summary', 'box_summary', 'map'),
                    panel.data=list(NA,
                                    'huc12name',
                                    list('cropmin', 'cropq1', 'cropmed', 'cropq3', 'cropmax'),
                                    list('decidmin', 'decidq1', 'decidmed', 'decidq3', 'decidmax'),
                                    NA),
                    ord.by = 'cropmed',
                    rev.ord = TRUE,
                    grouping = 6,
                    median.row = FALSE,
                    map.link = c('huc12name', 'ID'),
                    plot.height=6, plot.width=9,
                    colors=brewer.pal(6, "Spectral"),
                    panel.att=list(list(1, panel.width=.8, point.type=20, point.size=2,point.border=FALSE, xaxis.title.size=1),
                         list(2, header='WBD HUC12', panel.width=1.25, align='center', text.size=1.1),
                         list(3, header='2011 NLCD\nCropland',
                              graph.bgcolor='white',
                              xaxis.ticks=c( 0, 25, 50, 75, 100),
                              xaxis.labels=c(0, 25, 50, 75, 100),
                              xaxis.labels.size=1,
                              #xaxis.labels.angle=90,
                              xaxis.title='Percent',
                              xaxis.title.size=1,
                              graph.bar.size = .6),
                         list(4, header='2011 NLCD\nDeciduous Forest',
                              graph.bgcolor='white',
                              xaxis.ticks=c( 0, 25, 50, 75, 100),
                              xaxis.labels=c(0, 25, 50, 75, 100),
                              xaxis.labels.size=1,
                              #xaxis.labels.angle=90,
                              xaxis.title='Percent',
                              xaxis.title.size=1,
                              graph.bar.size = .6),
                         list(5, header='Micromaps',
                              inactive.border.color=gray(.7),
                              inactive.border.size=2)))
# micromap's print method writes the plot to a file at the given resolution.
print(mmplot_lc, name='mmplot_lc_v1_20180205.tiff',res=600)
library(tmap)
# Quick side-by-side choropleths of the two land-cover variables.
qtm(shp = shp, fill = c("PctDecid2011Cat", "PctCrop2011Cat"), fill.palette = c("Blues"), ncol =2)
|
5e04d0fec3fdd7d1582f0f1afecc0f68f2a74805
|
141cb000c5bd54fda357cefe87290a362aa008f2
|
/man/EM.Rd
|
f828e12a068993ac601ec09f856767b48eae02f8
|
[] |
no_license
|
2ndFloorStuff/reports
|
e632286f13ae90ca0af10721c9a80ae06a0e4fd8
|
98ca03c2e0344421cf2d9482495f8dd1448e3c0f
|
refs/heads/master
| 2020-12-14T00:21:06.820047
| 2015-07-03T20:47:06
| 2015-07-03T20:47:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,028
|
rd
|
EM.Rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/EM.R
\name{EM}
\alias{EM}
\title{Convert email to HTML email Tag}
\usage{
EM(email = "clipboard", text = NULL, new_win = TRUE,
copy2clip = interactive(), print = FALSE)
}
\arguments{
\item{email}{A character vector email copied to the clipboard. Default is to
read from the clipboard.}
\item{text}{A character vector of text to hyperlink. Default uses the
string passed to \code{email}.}
\item{new_win}{logical. If \code{TRUE} the link will open in a new window.}
\item{copy2clip}{logical. If \code{TRUE} attempts to copy the output to the
clipboard.}
\item{print}{logical. If \code{TRUE}, \code{\link[base]{cat}} prints the output
to the console. If \code{FALSE}, the tag is returned as a character vector.}
}
\value{
Returns a character vector of an HTML email tag.
}
\description{
Wrap an email to generate an HTML email tag.
}
\examples{
EM("tyler.rinker@gmail.com", print = TRUE)
}
\references{
\url{http://www.w3schools.com/tags/tag_address.asp}
}
|
11abcdc3e0df603649e0da0aab37fde2f937c30c
|
0b62d76a09352fb4d97e242614ea6ed66e1a0e72
|
/Quiz 3.R
|
e55bbabdeb2e7525bb92bbedc00d0b3acaeb9381
|
[] |
no_license
|
MikeColinTaylor/gettingAndCleaningData
|
51b10cda8e52c828a855fe9bf45ffd0cefae1e08
|
a58ad82c8da3818bc96f545905ce851812653808
|
refs/heads/master
| 2016-09-06T09:40:21.678098
| 2014-12-11T16:09:23
| 2014-12-11T16:09:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,376
|
r
|
Quiz 3.R
|
# Coursera "Getting and Cleaning Data" - Quiz 3 answers.
# NOTE(review): machine-specific working directory - adjust before running.
setwd("G:/Coursera/Getting and cleaning data")
#Q1
# ACS housing data: find the first three households matching both conditions.
q1Data <- read.csv("https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2Fss06hid.csv")
head(q1Data$ACR == 3)
head(q1Data$AGS == 6)
head(which(q1Data$ACR == 3 & q1Data$AGS == 6), 3)
#125 238 262
#Q2
# Read a JPEG in native (integer) representation and take 30%/80% quantiles.
#install.packages("jpeg")
library(jpeg)
?download.file
download.file("https://d396qusza40orc.cloudfront.net/getdata%2Fjeff.jpg", destfile = "quiz3Q2.jpg", mode = "wb")
q2File <- readJPEG("quiz3Q2.jpg", native = TRUE)
quantile(sort(q2File), c(0.3, 0.8))
# 30%       80%
#-15259150 -10575416
#Q3
# Merge GDP rankings with World Bank education data by country code.
# FIX: use TRUE/FALSE rather than T/F (T and F are ordinary, reassignable
# variables in R); behavior is identical.
options(stringsAsFactors = FALSE)
gdp <- read.csv("https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2FGDP.csv", skip = 5, header = FALSE, nrows = 190)
edu <- read.csv("https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2FEDSTATS_Country.csv")
str(gdp)
str(edu)
gdpEdu <- merge(x = gdp, y = edu, by.x = "V1", by.y = "CountryCode")
nrow(gdpEdu)
#189
#install.packages("stringr")
library(stringr)
# V5 holds GDP figures with embedded commas; strip them before numeric sort.
gdpEdu$V5 = str_trim(gdpEdu$V5)
gdpEdu$V5 = str_replace_all(gdpEdu$V5, ",", "")
gdpEdu <- gdpEdu[order(as.numeric(gdpEdu$V5)), ]
gdpEdu$V4[13]
#St. Kitts and Nevis
#Q4
# Mean GDP ranking (V2) per income group.
summary(as.factor(gdpEdu$Income.Group))
mean(gdpEdu$V2[gdpEdu$Income.Group == "High income: nonOECD"])
mean(gdpEdu$V2[gdpEdu$Income.Group == "High income: OECD"])
#32.96667, 91.91304
#Q5
# Count lower-middle-income countries among the 38 highest-ranked GDPs.
summary(as.factor(gdpEdu$Income.Group[gdpEdu$V2 <= 38]))
#5
|
7657641f952e169d4fca3f2e38ad389d872f4beb
|
21cead8061096f0bfbbfc07ef5dbd57612c756ae
|
/assets/tropFishR/algorithms/temp_elefan_ga.R
|
cfcd4579f14394c6fe0c1c8928f065546697e6f8
|
[] |
no_license
|
aenieblas/StockMonitoringTool
|
a53ac46c29658cf43cb4b610688ef109b8ca9e01
|
6b7802ef3f45d7fba5d6604f4ba02f349857e70a
|
refs/heads/master
| 2023-08-19T20:39:13.085789
| 2021-10-14T03:58:23
| 2021-10-14T03:58:23
| 351,847,443
| 0
| 0
| null | 2021-06-28T13:27:53
| 2021-03-26T16:36:54
|
R
|
UTF-8
|
R
| false
| false
| 4,297
|
r
|
temp_elefan_ga.R
|
## modify ELEFAN_GA function for plotting score function (raw GA fit needed)
## Identical to TropFishR::ELEFAN_GA except that the raw GA::ga() fit object
## is also returned (lfq$gafit) so the GA score/convergence plot can be redrawn.
##
## Arguments mirror TropFishR::ELEFAN_GA:
##   lfq            length-frequency data (class "lfq")
##   seasonalised   fit the seasonalised VBGF (adds C and ts parameters)
##   low_par/up_par named lists of lower/upper search bounds; components not
##                  supplied fall back to defaults derived from the data
##   popSize, maxiter, run, parallel, pmutation, pcrossover, elitism, seed,
##   monitor, ...   passed through to GA::ga()
##   MA, addl.sqrt  restructuring settings for lfqRestructure()
##   agemax, flagging.out  passed to lfqFitCurves()
##   plot           plot restructured data with the fitted growth curves
##   plot.score     plot the GA fitness trajectory
## Returns: the input lfq with $par, $fESP/$Rn_max, $ncohort, $agemax and
## $gafit added.
ELEFAN_GA_temp <- function (lfq, seasonalised = FALSE, low_par = NULL, up_par = NULL,
    popSize = 50, maxiter = 100, run = maxiter, parallel = FALSE,
    pmutation = 0.1, pcrossover = 0.8, elitism = base::max(1,
        round(popSize * 0.05)), MA = 5, addl.sqrt = FALSE, agemax = NULL,
    flagging.out = TRUE, seed = NULL, monitor = FALSE, plot = FALSE,
    plot.score = TRUE, ...)
{
    classes <- lfq$midLengths
    n_classes <- length(classes)
    # Seed Linf at the largest observed length class.
    Linf_est <- classes[n_classes]
    # Default search bounds; any user-supplied component in low_par/up_par wins.
    low_par_ALL <- list(Linf = Linf_est * 0.5, K = 0.01, t_anchor = 0,
        C = 0, ts = 0)
    low_Linf <- ifelse("Linf" %in% names(low_par), get("Linf",
        low_par), get("Linf", low_par_ALL))
    low_K <- ifelse("K" %in% names(low_par), get("K", low_par),
        get("K", low_par_ALL))
    low_tanc <- ifelse("t_anchor" %in% names(low_par), get("t_anchor",
        low_par), get("t_anchor", low_par_ALL))
    low_C <- ifelse("C" %in% names(low_par), get("C", low_par),
        get("C", low_par_ALL))
    low_ts <- ifelse("ts" %in% names(low_par), get("ts", low_par),
        get("ts", low_par_ALL))
    up_par_ALL <- list(Linf = Linf_est * 1.5, K = 1, t_anchor = 1,
        C = 1, ts = 1)
    up_Linf <- ifelse("Linf" %in% names(up_par), get("Linf",
        up_par), get("Linf", up_par_ALL))
    up_K <- ifelse("K" %in% names(up_par), get("K", up_par),
        get("K", up_par_ALL))
    up_tanc <- ifelse("t_anchor" %in% names(up_par), get("t_anchor",
        up_par), get("t_anchor", up_par_ALL))
    up_C <- ifelse("C" %in% names(up_par), get("C", up_par),
        get("C", up_par_ALL))
    up_ts <- ifelse("ts" %in% names(up_par), get("ts", up_par),
        get("ts", up_par_ALL))
    # Restructure length frequencies (ELEFAN scoring surface).
    lfq <- lfqRestructure(lfq, MA = MA, addl.sqrt = addl.sqrt)
    # GA fitness functions: seasonalised (5 parameters) vs standard (3).
    sofun <- function(lfq, par, agemax, flagging.out) {
        Lt <- lfqFitCurves(lfq, par = list(Linf = par[1], K = par[2],
            t_anchor = par[3], C = par[4], ts = par[5]), agemax = agemax,
            flagging.out = flagging.out)
        return(Lt$fESP)
    }
    fun <- function(lfq, par, agemax, flagging.out) {
        Lt <- lfqFitCurves(lfq, par = list(Linf = par[1], K = par[2],
            t_anchor = par[3], C = 0, ts = 0), agemax = agemax,
            flagging.out = flagging.out)
        return(Lt$fESP)
    }
    if (seasonalised) {
        min = c(low_Linf, low_K, low_tanc, low_C, low_ts)
        max = c(up_Linf, up_K, up_tanc, up_C, up_ts)
        writeLines("Genetic algorithm is running. This might take some time.")
        flush.console()
        fit <- GA::ga(type = "real-valued", fitness = sofun,
            lfq = lfq, lower = min, upper = max, agemax = agemax,
            flagging.out = flagging.out, popSize = popSize, maxiter = maxiter,
            run = run, parallel = parallel, pmutation = pmutation,
            pcrossover = pcrossover, elitism = elitism, seed = seed,
            monitor = monitor, ...)
        # Best solution found (first row if several tie).
        pars <- as.list(fit@solution[1, ])
        names(pars) <- c("Linf", "K", "t_anchor", "C", "ts")
    }
    else {
        min = c(low_Linf, low_K, low_tanc)
        max = c(up_Linf, up_K, up_tanc)
        writeLines("Genetic algorithm is running. This might take some time.")
        flush.console()
        fit <- GA::ga(type = "real-valued", fitness = fun, lfq = lfq,
            lower = min, upper = max, agemax = agemax, flagging.out = flagging.out,
            popSize = popSize, maxiter = maxiter, run = run,
            parallel = parallel, pmutation = pmutation, pcrossover = pcrossover,
            elitism = elitism, seed = seed, monitor = monitor,
            ...)
        pars <- as.list(fit@solution[1, ])
        names(pars) <- c("Linf", "K", "t_anchor")
    }
    # The whole point of this modified version: show the raw GA score plot.
    if (plot.score) {
        GA::plot(fit)
    }
    final_res <- lfqFitCurves(lfq = lfq, par = pars, flagging.out = flagging.out,
        agemax = agemax)
    # Growth performance index phi' = log10(K) + 2*log10(Linf).
    phiL <- log10(pars$K) + 2 * log10(pars$Linf)
    pars$phiL <- phiL
    lfq$ncohort <- final_res$ncohort
    lfq$agemax <- final_res$agemax
    lfq$par <- pars
    lfq$fESP <- fit@fitnessValue
    lfq$Rn_max <- fit@fitnessValue
    # Keep the raw GA object so the score plot can be reproduced later.
    lfq$gafit <- fit
    if (plot) {
        plot(lfq, Fname = "rcounts")
        # BUG FIX: was `lfq$pars`, which does not exist (the element is $par;
        # `$` partial matching cannot match a longer name), so NULL was passed
        # and the fitted curves were not drawn from the GA solution.
        Lt <- lfqFitCurves(lfq, par = lfq$par, draw = TRUE)
    }
    return(lfq)
}
|
454989c4416af09dc3149eaaad8a51985ce38a10
|
173e8e734ee2d8e3eeeac0a86ce06ab48db15a08
|
/R/plot_wbal.R
|
9f77c5cc15dd7ef59eb06868e0ca358cbbb970b5
|
[
"MIT"
] |
permissive
|
aemon-j/gotmtools
|
b0ba213f83d0f3ccbee9f34b7efc69fc0e0dc86a
|
4eb90e9e6ad960c36a78cc51e9c77b4d826a6197
|
refs/heads/main
| 2023-04-28T07:43:19.483815
| 2021-01-28T19:17:37
| 2021-01-28T19:17:37
| 220,067,540
| 5
| 5
|
MIT
| 2022-02-04T15:27:59
| 2019-11-06T18:52:40
|
R
|
UTF-8
|
R
| false
| false
| 2,050
|
r
|
plot_wbal.R
|
#' Plot water balance from NetCDF file
#'
#' Plots water balance calculated by GOTM ('Qres') and inflows from the netCDF output file.
#'
#' @param ncdf filepath; Name of the netCDF file to extract variable
#' @param title character; Title of the graph. Defaults to 'Water Balance'
#' @return a ggplot object with one line per flow term plus the GOTM balance
#' @importFrom ncdf4 nc_open
#' @importFrom ncdf4 nc_close
#' @importFrom ncdf4 ncvar_get
#' @importFrom ncdf4 ncatt_get
#' @importFrom reshape2 melt
#' @import ggplot2
#' @export
plot_wbal <- function(ncdf, title = 'Water Balance'){
  vars_short = list_vars(ncdf,long = F)
  # All inflow/outflow variables follow the 'Q_' naming convention.
  # BUG FIX: keep this as a character vector - the original wrapped it in a
  # one-element list, which made the extraction loop below overwrite flows
  # and mis-name columns whenever more than one 'Q_*' variable was present.
  flows_nam = vars_short[grep('Q_', vars_short)]
  fid = nc_open(ncdf)
  tim = ncvar_get(fid, 'time')
  tunits = ncatt_get(fid,'time')
  #Extract time and format dates from the netCDF time units string
  # (e.g. "seconds since YYYY-MM-DD ...").
  lnam = tunits$long_name
  tustr <- strsplit(tunits$units, " ")
  step = tustr[[1]][1]
  tdstr <- strsplit(unlist(tustr)[3], "-")
  tmonth <- as.integer(unlist(tdstr)[2])
  tday <- as.integer(unlist(tdstr)[3])
  tyear <- as.integer(unlist(tdstr)[1])
  origin = as.POSIXct(paste0(tyear,'-',tmonth,'-',tday), format = '%Y-%m-%d', tz = 'UTC')
  time = as.POSIXct(tim, origin = origin, tz = 'UTC')
  #Extract flows: one numeric vector per 'Q_*' variable.
  # FIX: call ncvar_get directly rather than via eval(parse(text = ...)).
  flows <- lapply(flows_nam, function(nam) ncvar_get(fid, nam))
  # FIX: bind flows column-wise; the original matrix(unlist(flows)) collapsed
  # every flow into a single column.
  flows_df <- as.data.frame(do.call(cbind, flows))
  colnames(flows_df) <- flows_nam
  #Extract GOTM water balance (surface value = last row of the Qres matrix)
  qres = ncvar_get(fid, 'Qres')
  qres = qres[nrow(qres),]
  tunits = ncatt_get(fid, 'Qres')
  nc_close(fid)
  #Combine into one data frame and melt to long form for ggplot
  df <- data.frame(DateTime = time, GOTM_calc = qres)
  df <- cbind.data.frame(df, flows_df)
  dfmlt <- reshape2::melt(df, id.vars = 'DateTime')
  colnames(dfmlt) <- c('DateTime', 'Flow', 'value')
  #Plot data
  p1 <- ggplot(dfmlt, aes(DateTime, value, colour = Flow))+
    geom_line(size = 0.8)+
    ggtitle(title)+
    xlab('')+
    geom_hline(yintercept = 0, colour = 'black')+
    ylab(tunits$units)+
    theme_bw(base_size = 18)
  p1
  return(p1)
}
|
1d6c1236fbfd1a5979287d429dbf47ccfe3e0df1
|
a3386aa4f794d2b8327e3d167f0ffd0d784ffc7e
|
/man/hash_emojis.Rd
|
406bb4ec75a2a94717e0e87e06e4cf871b95aae6
|
[] |
no_license
|
systats/lexicon
|
2ef43af84016968aff7fea63e0c332bcc734f5e6
|
f42a10e9f0c3f830d28f0e50911acbad439ec682
|
refs/heads/master
| 2021-04-28T20:26:46.470503
| 2018-02-15T04:31:44
| 2018-02-15T04:31:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 604
|
rd
|
hash_emojis.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/hash_sentiment_emojis.R
\docType{data}
\name{hash_emojis}
\alias{hash_emojis}
\title{Emoji Description Lookup Table}
\format{A data frame with 734 rows and 2 variables}
\usage{
data(hash_emojis)
}
\description{
A dataset containing ASCII byte code representation of emojis and their
accompanying description (from unicode.org).
}
\details{
\itemize{
\item x. Byte code representation of emojis
\item y. Emoji description
}
}
\references{
\url{http://www.unicode.org/emoji/charts/full-emoji-list.html}
}
\keyword{datasets}
|
35ce8a59ccefa1a5fc4ee5af1d8061651c7fd2ca
|
3304180c4bc1baecf780d590f8034cdd6b1dbe15
|
/man/plotCytoExprs.Rd
|
e0018be240da914c68286dfc009f0c1f140dba28
|
[] |
no_license
|
gfinak/cytoRSuite
|
506832f8491f5289ee3c6df4aef6fb37e71469b4
|
03ad4c8bcf216ec178e8bfecfe814d82c90b7447
|
refs/heads/master
| 2020-04-04T05:27:09.213681
| 2018-11-01T16:52:33
| 2018-11-01T16:52:33
| 155,747,008
| 0
| 0
| null | 2018-11-01T16:53:15
| 2018-11-01T16:53:15
| null |
UTF-8
|
R
| false
| true
| 875
|
rd
|
plotCytoExprs.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plotCytoExprs-methods.R
\name{plotCytoExprs}
\alias{plotCytoExprs}
\title{Plot Population Density Distribution in All Channels}
\usage{
plotCytoExprs(x, ...)
}
\arguments{
\item{x}{object of class \code{\link[flowCore:flowFrame-class]{flowFrame}},
\code{\link[flowCore:flowSet-class]{flowSet}},
\code{\link[flowWorkspace:GatingHierarchy-class]{GatingHierarchy}} or
\code{\link[flowWorkspace:GatingSet-class]{GatingSet}}.}
\item{...}{additional method-specific arguments.}
}
\description{
Plot Population Density Distribution in All Channels
}
\seealso{
\code{\link{plotCytoExprs,flowFrame-method}}
\code{\link{plotCytoExprs,flowSet-method}}
\code{\link{plotCytoExprs,GatingHierarchy-method}}
\code{\link{plotCytoExprs,GatingSet-method}}
}
\author{
Dillon Hammill (Dillon.Hammill@anu.edu.au)
}
|
67b6de8ba99ad876fa81d13972de119cb5015976
|
fbc5705f3a94f34e6ca7b9c2b9d724bf2d292a26
|
/DCamp/Intermediate R/Blending logical sum.R
|
eeb213c7c80ab108cf67dc75d30d2dcfefc517a7
|
[] |
no_license
|
shinichimatsuda/R_Training
|
1b766d9f5dfbd73490997ae70a9c25e9affdf2f2
|
df9b30f2ff0886d1b6fa0ad6f3db71e018b7c24d
|
refs/heads/master
| 2020-12-24T20:52:10.679977
| 2018-12-14T15:20:15
| 2018-12-14T15:20:15
| 58,867,484
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 350
|
r
|
Blending logical sum.R
|
# li_df is pre-loaded in your workspace
print(li_df)

# Pull out the second column (named day2) from li_df: second
second <- li_df[["day2"]]

# Flag the extreme values in second (above 25 or below 5): extremes
extremes <- second > 25 | second < 5

# Count the number of TRUEs in extremes
sum(extremes)

# Solve it with a one-liner
print(sum(extremes))
|
6cb8560a65a78216e9d7b92b408451c85a2d32e0
|
a3274d9ab469c12c8eea5fa94b3447cc74579b76
|
/tests/testthat/test_space_time_disagg_simple.R
|
9abe39af6c1f9e3fb18bba5cde0c000e4c77d99e
|
[
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-public-domain-disclaimer"
] |
permissive
|
pankajcivil/knnstdisagg
|
1ca8277a9f8d7ed8ee10e0dd9c2930aa39bce2c1
|
bf66e006855abad8c56df2f1819821690451537a
|
refs/heads/master
| 2020-05-04T14:43:03.974748
| 2019-01-03T00:31:40
| 2019-01-03T00:34:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,284
|
r
|
test_space_time_disagg_simple.R
|
# small scale check -----------------------------------
# small scale, but full disagg with current data, and for a subset of the gages
# disagg based on total flow for bluff, green river ut, and cisco, and int for
# mead
library(xts)
context("small, but full scale test of `knn_space_time_disagg()`")
# fixture provides nf_index_flow and nf_mon_flow (natural flow test data)
load(file = "nf_test_data.rda")
index_flow <- nf_index_flow
mon_flow <- nf_mon_flow
# annual flows to disaggregate: year in column 1, annual volume in column 2
lf <- cbind(2019:2021, c(9000000, 15000000, 12500000))
nsim <- 5
# 36 "YYYY-MM" labels (Jan 2019 - Dec 2021) used as expected row names of
# the disaggregated monthly data
ym <- zoo::as.yearmon("2019-01") + 0:35/12
ym <- paste(format(ym, "%Y"), format(ym, "%m"), sep = "-")
# ** check specifying index years, and make sure values match exactly
# ** check specifying 1, and no sf_sites
# temporary output folder, removed after the tests run
setup(dir.create("tmp_disagg"))
teardown(unlink("tmp_disagg", recursive = TRUE))
# Sum monthly data to annual values: each column of `x` is reshaped into a
# 12-column (months) by n-year matrix and summed row-wise, so the result has
# one row per year and one column per input column. Assumes nrow(x) is a
# multiple of 12 with calendar-ordered months.
ann_sum <- function(x)
{
  annual_cols <- lapply(
    seq_len(ncol(x)),
    function(j) {
      by_year <- matrix(x[, j], ncol = 12, byrow = TRUE)
      apply(by_year, 1, sum)
    }
  )
  do.call(cbind, annual_cols)
}
# test output ---------------------------
# Full run with nsim = 5: checks the returned knnst object, the csv files
# written to ofolder, annual-mass conservation at Lees Ferry, and that the
# Lower Basin (columns 21:29) is copied verbatim from the index years.
test_that("`knn_space_time_disagg()` output is properly created for nsim = 5", {
  expect_is(
    tmp <- knn_space_time_disagg(
      lf,
      index_flow,
      mon_flow,
      sf_sites = 1:20,
      nsim = nsim,
      ofolder = "tmp_disagg"
    ),
    "knnst"
  )
  for (i in seq_len(nsim)) {
    # one csv per simulation, 36 months x 29 sites
    f1 <- file.path("tmp_disagg", paste0("disagg_flow_", i, ".csv"))
    expect_true(file.exists(f1))
    t1 <- read.csv(f1)
    expect_identical(dim(t1), as.integer(c(36, 29)))
    # all 5 files should not be the same at the monthly level
    # (compare each sim to the next, wrapping the last back to the first)
    j <- ifelse(i == nsim, 1, i + 1)
    expect_false(
      identical(knnst_get_disagg_data(tmp, i), knnst_get_disagg_data(tmp, j)),
      info = paste(i, "compared to", j)
    )
    # but they should all sum to the same annual value for lees ferry (not LB)
    t1 <- knnst_get_disagg_data(tmp, i)
    t1 <- ann_sum(t1)
    t2 <- knnst_get_disagg_data(tmp, j)
    t2 <- ann_sum(t2)
    expect_equal(
      apply(t1[,1:20], 1, sum),
      apply(t2[,1:20], 1, sum),
      info = paste(i, "compared to", j)
    )
    # and LB should match the natural flow data exactly
    # (monthly values for the sim's three selected index years, stacked)
    lb <- rbind(
      as.matrix(
        mon_flow[as.character(tmp$disagg_sims[[i]]$index_years[1]), 21:29]
      ),
      as.matrix(
        mon_flow[as.character(tmp$disagg_sims[[i]]$index_years[2]), 21:29]
      ),
      as.matrix(
        mon_flow[as.character(tmp$disagg_sims[[i]]$index_years[3]), 21:29]
      )
    )
    dimnames(lb) <- NULL
    rownames(lb) <- ym
    expect_equal(knnst_get_disagg_data(tmp, i)[,21:29], lb)
  }
  # check index_years: saved csv is 3 years x nsim sims, no NAs, and every
  # selected year exists in the index flow record
  index_out <- as.matrix(read.csv(file.path("tmp_disagg", "index_years.csv")))
  expect_identical(dim(index_out), as.integer(c(3, nsim)))
  expect_true(!anyNA(index_out))
  expect_true(!anyNA(knnst_index_years(tmp)))
  expect_true(all(index_out %in% index_flow[,1]))
  expect_equal(dim(knnst_index_years(tmp)), c(nrow(lf), nsim))
  # sim
  expect_equal(knnst_nsim(tmp), nsim)
  # print method should return the object invisibly while producing output
  expect_equal(expect_output(print(tmp)), tmp)
})
# user-specified index years for the next test: one column per simulation,
# one row per year being disaggregated (rows of lf)
ind_yrs <- cbind(c(2000, 1906, 1936), c(1999, 1976, 2010), c(2000, 1909, 1954))
nsim <- 3
# specified index_years ---------------------------
# Same checks as the random-selection run, but with index years fixed by the
# caller via `index_years`; also verifies the monthly disaggregation pattern
# matches the chosen index year's monthly proportions.
test_that("`knn_space_time_disagg()` works for index years for nsim != 1", {
  expect_is(
    expect_message(tmp <- knn_space_time_disagg(
      lf,
      index_flow,
      mon_flow,
      sf_sites = 1:20,
      nsim = nsim,
      index_years = ind_yrs
    )),
    "knnst"
  )
  # the specified index years should be stored unchanged
  expect_equal(knnst_index_years(tmp), ind_yrs)
  expect_equal(dim(knnst_index_years(tmp)), c(nrow(lf), nsim))
  # sim
  expect_equal(knnst_nsim(tmp), nsim)
  # print
  expect_equal(expect_output(print(tmp)), tmp)
  for (i in seq_len(nsim)) {
    # all sims should not be the same at the monthly level
    # (compare each sim to the next, wrapping the last back to the first)
    j <- ifelse(i == nsim, 1, i + 1)
    expect_false(
      identical(knnst_get_disagg_data(tmp, i), knnst_get_disagg_data(tmp, j)),
      info = paste(i, "compared to", j)
    )
    # but they should all sum to the same annual value for lees ferry (not LB)
    t1 <- knnst_get_disagg_data(tmp, i)
    t1 <- ann_sum(t1)
    t2 <- knnst_get_disagg_data(tmp, j)
    t2 <- ann_sum(t2)
    expect_equal(
      apply(t1[,1:20], 1, sum),
      apply(t2[,1:20], 1, sum),
      info = paste(i, "compared to", j)
    )
    # and LB should match the natural flow data exactly
    # (monthly values of the fixed index years for this sim, stacked)
    lb <- rbind(
      as.matrix(mon_flow[as.character(ind_yrs[1, i]), 21:29]),
      as.matrix(mon_flow[as.character(ind_yrs[2, i]), 21:29]),
      as.matrix(mon_flow[as.character(ind_yrs[3, i]), 21:29])
    )
    dimnames(lb) <- NULL
    rownames(lb) <- ym
    expect_equal(knnst_get_disagg_data(tmp, i)[,21:29], lb)
  }
  # spot-check the monthly disaggregation pattern: each year's monthly values,
  # normalized by the annual total, should equal the index year's normalized
  # monthly pattern at the same site
  expect_equivalent(
    knnst_get_disagg_data(tmp, 1)[1:12, 15] /
      sum(knnst_get_disagg_data(tmp, 1)[1:12, 15]),
    as.vector(mon_flow[as.character(ind_yrs[1,1]), 15] /
      sum(mon_flow[as.character(ind_yrs[1,1]), 15]))
  )
  expect_equivalent(
    knnst_get_disagg_data(tmp, 2)[25:36, 18] /
      sum(knnst_get_disagg_data(tmp, 2)[25:36, 18]),
    as.vector(mon_flow[as.character(ind_yrs[3, 2]), 18] /
      sum(mon_flow[as.character(ind_yrs[3, 2]), 18]))
  )
  expect_equivalent(
    knnst_get_disagg_data(tmp, 3)[13:24, 1] /
      sum(knnst_get_disagg_data(tmp, 3)[13:24, 1]),
    as.vector(mon_flow[as.character(ind_yrs[2, 3]), 1] /
      sum(mon_flow[as.character(ind_yrs[2, 3]), 1]))
  )
})
|
4788f350bbad0f1ce93c24974462eb18f7f18866
|
e6b810dd97a74b96e814c61467f56818c6459ab0
|
/man/bind_iterations.Rd
|
82d8bc202c823218c8a6f2a1c5de17d4afd8f935
|
[
"MIT"
] |
permissive
|
poissonconsulting/universals
|
32800e3dc2ebb8c8c27cad5fe6d6633fd594177f
|
152629f241f50f690d51a7770a81e32e8c62815c
|
refs/heads/main
| 2022-10-20T13:49:00.645542
| 2022-10-15T12:31:36
| 2022-10-15T12:31:36
| 234,643,049
| 4
| 1
|
NOASSERTION
| 2022-06-17T21:41:06
| 2020-01-17T21:53:16
|
R
|
UTF-8
|
R
| false
| true
| 629
|
rd
|
bind_iterations.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bind-iterations.R
\name{bind_iterations}
\alias{bind_iterations}
\title{Bind Iterations}
\usage{
bind_iterations(x, x2, ...)
}
\arguments{
\item{x}{An object.}
\item{x2}{A second object.}
\item{...}{Other arguments passed to methods.}
}
\value{
The combined object.
}
\description{
Combines two MCMC objects (with the same parameters and chains) by iterations.
}
\seealso{
Other MCMC manipulations:
\code{\link{bind_chains}()},
\code{\link{collapse_chains}()},
\code{\link{estimates}()},
\code{\link{split_chains}()}
}
\concept{MCMC manipulations}
|
11a4a705663e4e0bdef1043b1f6de6bef33c638f
|
4c91d2feb0697353aab069ef2d331f35ce19bcb2
|
/R/ThreeDrug1.R
|
4d45cd1acfc01a2796f0ef86c8ffdadde7d0bda3
|
[] |
no_license
|
gzrrobert/ComStat
|
1d60a232ef9febb6a34c78945a8f8d771b4011e3
|
eb6738cf46b079c5fec43f082e831c19fda38cef
|
refs/heads/master
| 2020-04-03T11:57:11.596797
| 2018-12-06T02:00:40
| 2018-12-06T02:00:40
| 155,234,583
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 52,293
|
r
|
ThreeDrug1.R
|
#' Identifying the linear/log-linear model of each dose-response curve
#'
#' This function uses nls() to fit the Hill model of each dose-response relationship and substitutes each relationship
#' with a linear or log-linear relationship; it then designs a dose-mixture combination dataset for the study using uniform design.
#' @param location1 location of Drug 1's dataset on your hard disk
#' @param location2 location of Drug 2's dataset on your hard disk
#' @param location3 location of Drug 3's dataset on your hard disk
#' @param c0 the smallest meaningful difference to be detected
#' @param r number of replications at each dose-mixture, and usually 3<=r<=8, default=3
#' @return The fitted models and method for three drugs,
#' the number of dose-mixtures for combination,
#' total sample size,
#' fitted plots for three drugs,
#' dose mixtures for experimental design
#' @examples
#' # Try this function with the following examples:
#' #(please be patient when the method is"logloglog"):
#' ThreeDrug_identify_design_1<- ThreeDrug_identify_design(
#' location1="C:/Users/58412/Desktop/SRA/Three drugs/data2/Drug1.txt",
#' location2="C:/Users/58412/Desktop/SRA/Three drugs/data2/Drug2.txt",
#' location3="C:/Users/58412/Desktop/SRA/Three drugs/data2/Drug3.txt",
#' c0=13, r=6)
#' ThreeDrug_identify_design_2<- ThreeDrug_identify_design(
#' location1="C:/Users/58412/Desktop/SRA/Three drugs/data1/ARAC03.txt",
#' location2="C:/Users/58412/Desktop/SRA/Three drugs/data1/SAHA03.txt",
#' location3="C:/Users/58412/Desktop/SRA/Three drugs/data1/VP1603.txt",
#' c0=15, r=6)
#' @export
#Econ=100
#HILL MODEL can be transformed into a linear regression model
#mlogD-mlogIC50=log[(Y-b)/(Econ-b)]
#Denote IC50 as IC_50
#viability range 20%-80% will be used in order to fit linear & log model
#data format: 1st column should be named as 'DOSE', 2nd column should be named as 'Viability'
#All missing data will be eliminated when using this program
#Attention: All datasets should be in TXT format
#' @import BB
#' @import nleqslv
#' @import ktsolve
#' @import drc
#Econ=100
#HILL MODEL can be transformed into a linear regression model
#mlogD-mlogIC50=log[(Y-b)/(Econ-b)]
#Denote IC50 as IC_50
#viability range 20%-80% will be used in order to fit linear & log model
#data format: 1st column should be named as 'Dose', 2nd column should be named as 'Viability'
#All missing data will be eliminated when using this program
#Attention: All datasets should be in TXT format
ThreeDrug_identify_design<- function (location1, location2, location3, c0, r) {
#location1: location of Drug 1's dataset on your hard disk
#Drug1: data set of Drug 1, column 1-- dose, column 2--response
#location2: location of Drug 2's dataset on your hard disk
#Drug2: data set of Drug 2, column 1-- dose, column 2--response
#location3: tion of Drug 3's dataset on your hard disk
#Drug3: data set of Drug 3, column 1-- dose, column 2--response
# c0: the smallest meaningful difference to be detected
# r: number of replications at each dose-mixture, and usually 3<=r<=5,sometimes can be 6
#All dataset should be in txt file
#1st Drug
data<-read.table(file = location1, header=T)
#if response value is larger than 100%, convert it to 100%
for (i in 1:length(data$Viability)) {
if (data[i,"Viability"]>100) {
data[i,"Viability"]<-100
}
}
#remove missing observations and reorder the data
data<-data[complete.cases(data), ]
data<-data[order(data[,1],decreasing=FALSE),]
Dose<-data$Dose
Viability<-data$Viability
#Intercept b is the minimum viability oberserved
b<-min(Viability)
#Reformulate the Hill model into a linear model
#Now the response is:
y<-matrix(NA,length(Viability),1)
Viability.matrix<-as.matrix(Viability)
for (i in 1:length(Viability.matrix)) {
if (Viability.matrix[i,1]>b) {
y[i,1]<-log((Viability.matrix[i,1]-b)/(100-b))
} else {
break
}
}
y<-y[!is.na(y),]
y<-data.frame((y))
data.length<-dim(y)[1]
Dose<-as.matrix(Dose)
Dose<-Dose[1:data.length,]
Dose<-as.numeric(Dose)
data<-data[1:data.length,]
data.transform<-as.data.frame(cbind(Dose,y$X.y.))
#fit the Hill model
#Estimate the unknown coefficients in the HILL model
model<-nls(V2~m*log(Dose)+m*log(IC_50),start=list(m=-20, IC_50=1.1),data=data.transform,trace=F)
#the coefficients of optimized model
coef<-summary(model)$coefficients
m<-coef[1,1]
IC_50<-coef[2,1]
#Build the optimized Hill Model
#y=(((Econ-b)(Dose/IC_50)^m)/(1+(Dose/IC_50)^m))+b
#And calculate the fitted response value
Viability_hat<-matrix(NA,length(Dose),1)
Econ=100
for (i in 1:length(Dose)) {
Viability_hat[i]<-(((Econ-b)*((data[i,1]/IC_50)**m))/(1+(data[i,1]/IC_50)**m))+b
}
Viability_hat<-data.frame(Dose,Viability_hat)
#Create a dataset with residuals that can be used to fit linear or loglinear model
#Viability range from 20% to 80% will be used;
residual_data_2080_range<-data[data$Viability>=20 & data$Viability<=80,]
residual_data_2080_range_dose<-residual_data_2080_range$Dose
residual<-as.data.frame(Viability_hat$Viability_hat)-as.matrix(data$Viability)
residual<-data.frame(data$Dose,residual)
residual_data<-residual[max(residual_data_2080_range_dose)>=residual$data.Dose & residual$data.Dose>=min(residual_data_2080_range_dose),]
#Decide either Linear or Log model suffice
#1.fit the data with linear model
#Note: Only the response range from 20% to 80% in the dataset with residuals will be used to fit both linear and log model
linear<-lm(Viability_hat.Viability_hat~data.Dose,data=residual_data)
if (summary(linear)$coefficients[2,4]>0.05) {
print("Warning: the linear model may not suffice")
}
#Calculate fitted responses under linear model
linear_hat<-matrix(NA,length(residual_data$data.Dose),1)
for (i in 1:length(residual_data$data.Dose)){
linear_hat[i]<-(summary(linear)$coefficients[1,1])+(summary(linear)$coefficients[2,1])*residual_data[i,1]
}
#2.fit the data with log-linear model
log<-lm(Viability_hat.Viability_hat~log(data.Dose),data=residual_data)
if (summary(log)$coefficients[2,4]>0.05) {
print("Warning: the log model may not suffice")
}
#Calculate fitted responses under log model
log_hat<-matrix(NA,length(residual_data$data.Dose),1)
for (i in 1:length(residual_data$data.Dose)){
log_hat[i]<-(summary(log)$coefficients[1,1])+(summary(log)$coefficients[2,1])*log(residual_data[i,1])
}
#The fitted response from Hill Model
#range Viability 20%-80%
hill_hat<-Viability_hat[max(residual_data_2080_range_dose)>=residual$data.Dose & residual$data.Dose>=min(residual_data_2080_range_dose),]
hill_hat<-as.matrix(hill_hat[ ,2])
#linear v.s Hill
diff_linear_hill<-matrix(NA,length(linear_hat),1)
for (i in 1:length(linear_hat)) {
diff_linear_hill[i]<-(linear_hat[i]-hill_hat[i])**2
}
#Sum of square of Difference between linear model and Hill model
SSD_linear_Hill<-0
for (i in 1:length(diff_linear_hill)) {
SSD_linear_Hill<-diff_linear_hill[i]+SSD_linear_Hill
}
#log v.s Hill
diff_log_hill<-matrix(NA,length(log_hat),1)
for (i in 1:length(log_hat)) {
diff_log_hill[i]<-(log_hat[i]-hill_hat[i])**2
}
#Sum of square of Difference between linear model and Hill model
SSD_log_Hill<-0
for (i in 1:length(diff_log_hill)) {
SSD_log_Hill<-diff_log_hill[i]+SSD_log_Hill
}
#Compare the linear model with log model
#Can use the following ifelse() function as well.
#ifelse(SSD_log_Hill>SSD_linear_Hill, print("The Linear Model should suffice"), ifelse(SSD_log_Hill>SSD_linear_Hill, print("The Log Model should suffice"), print("Either Log model or lieanr model suffices")))
#2nd Drug
data2<-read.table(file = location2, header=T)
#if response value is larger than 100%, convert it to 100%
for (i in 1:length(data2$Viability)) {
if (data2[i,"Viability"]>100) {
data2[i,"Viability"]<-100
}
}
#remove missing values and reorder the data
data2<-data2[complete.cases(data2), ]
data2<-data2[order(data2[,1],decreasing=FALSE),]
Dose2<-data2$Dose
Viability2<-data2$Viability
#Intercept b is the minimum viability oberserved
b2<-min(Viability2)
#Reformulate the Hill model into a linear model
#The response is now:
y2<-matrix(NA,length(Viability2),1)
Viability.matrix2<-as.matrix(Viability2)
for (i in 1:length(Viability.matrix2)) {
if (Viability.matrix2[i,1]>b2) {
y2[i,1]<-log((Viability.matrix2[i,1]-b2)/(100-b2))
} else {
break
}
}
y2<-y2[!is.na(y2),]
y2<-data.frame((y2))
data.length2<-dim(y2)[1]
Dose2<-as.matrix(Dose2)
Dose2<-Dose2[1:data.length2,]
Dose2<-as.numeric(Dose2)
data2<-data2[1:data.length2,]
data.transform2<-as.data.frame(cbind(Dose2,y2$X.y2.))
#fit the Hill model
#Estimate the coefficients in the HILL model
model2<-nls(V2~m*log(Dose2)+m*log(IC_50),start=list(m=-20, IC_50=1.1),data=data.transform2,trace=F)
#the coefficients of optimized model
coef2<-summary(model2)$coefficients
m2<-coef2[1,1]
IC_50_2<-coef2[2,1]
#Build the optimized Hill Model
#y=(((Econ-b)(Dose/IC_50)^m)/(1+(Dose/IC_50)^m))+b
#And calculated the fitted response
Viability_hat2<-matrix(NA,length(Dose2),1)
Econ=100
for (i in 1:length(Dose2)) {
Viability_hat2[i]<-(((Econ-b2)*((data2[i,1]/IC_50_2)**m2))/(1+(data2[i,1]/IC_50_2)**m2))+b2
}
Viability_hat2<-data.frame(Dose2,Viability_hat2)
#Create a dataset with residuals
#To be used to fit linear|log model
#Viability range from 20% to 80% will be used;
residual_data_2080_range2<-data2[data2$Viability>=20 & data2$Viability<=80,]
residual_data_2080_range_dose2<-residual_data_2080_range2$Dose
residual2<-as.data.frame(Viability_hat2$Viability_hat)-as.matrix(data2$Viability)
residual2<-data.frame(data2$Dose,residual2)
residual_data2<-residual2[max(residual_data_2080_range_dose2)>=residual2$data2.Dose & residual2$data2.Dose>=min(residual_data_2080_range_dose2),]
#Decide either Linear or Log model suffice
#1.fit the data with linear model
#Note: Only the response range from 20% to 80% in the dataset with residuals will be used to fit both linear and log model
linear2<-lm(Viability_hat2.Viability_hat~data2.Dose,data=residual_data2)
if (summary(linear2)$coefficients[2,4]>0.05) {
print("Warning: the linear model may not suffice")
}
#Calculate fitted responses under linear model
linear_hat2<-matrix(NA,length(residual_data2$data2.Dose),1)
for (i in 1:length(residual_data2$data2.Dose)){
linear_hat2[i]<-(summary(linear2)$coefficients[1,1])+(summary(linear2)$coefficients[2,1])*residual_data2[i,1]
}
#2.fit the data with log-linear model
log2<-lm(Viability_hat2.Viability_hat~log(data2.Dose),data=residual_data2)
if (summary(log2)$coefficients[2,4]>0.05) {
print("Warning: the log model may not suffice")
}
#Calculate fitted responses under log model
log_hat2<-matrix(NA,length(residual_data2$data2.Dose),1)
for (i in 1:length(residual_data2$data2.Dose)){
log_hat2[i]<-(summary(log2)$coefficients[1,1])+(summary(log2)$coefficients[2,1])*log(residual_data2[i,1])
}
#The fitted response from Hill Model
#range Viability 20%-80%
hill_hat2<-Viability_hat2[max(residual_data_2080_range_dose2)>=residual2$data2.Dose & residual2$data2.Dose>=min(residual_data_2080_range_dose2),]
hill_hat2<-as.matrix(hill_hat2[ ,2])
#linear v.s Hill
diff_linear_hill2<-matrix(NA,length(linear_hat2),1)
for (i in 1:length(linear_hat2)) {
diff_linear_hill2[i]<-(linear_hat2[i]-hill_hat2[i])**2
}
#Sum of square of Difference between linear model and Hill model
SSD_linear_Hill2<-0
for (i in 1:length(diff_linear_hill2)) {
SSD_linear_Hill2<-diff_linear_hill2[i]+SSD_linear_Hill2
}
#log v.s Hill
diff_log_hill2<-matrix(NA,length(log_hat2),1)
for (i in 1:length(log_hat2)) {
diff_log_hill2[i]<-(log_hat2[i]-hill_hat2[i])**2
}
#Sum of square of Difference between linear model and Hill model
SSD_log_Hill2<-0
for (i in 1:length(diff_log_hill2)) {
SSD_log_Hill2<-diff_log_hill2[i]+SSD_log_Hill2
}
#Compare the linear model with log model
#Can use the following ifelse() function as well.
#ifelse(SSD_log_Hill>SSD_linear_Hill, print("The Linear Model should suffice"), ifelse(SSD_log_Hill>SSD_linear_Hill, print("The Log Model should suffice"), print("Either Log model or lieanr model suffices")))
####3rd Drug
data3<-read.table(file = location3, header=T)
#if response value is larger than 100%, convert it to 100%
for (i in 1:length(data3$Viability)) {
if (data3[i,2]>100) {
data3[i,2]<-100
}
}
#remove missing values and reorder the data
data3<-data3[complete.cases(data3), ]
data3<-data3[order(data3[,1],decreasing=FALSE),]
Dose3<-data3$Dose
Viability3<-data3$Viability
#Intercept b is the minimum viability oberserved
b3<-min(Viability3)
#Reformulate the Hill model into a linear model
#The response is now:
y3<-matrix(NA,length(Viability3),1)
Viability.matrix3<-as.matrix(Viability3)
for (i in 1:length(Viability.matrix3)) {
if (Viability.matrix3[i,1]>b3) {
y3[i,1]<-log((Viability.matrix3[i,1]-b3)/(100-b3))
} else {
break
}
}
y3<-y3[!is.na(y3),]
y3<-data.frame((y3))
data.length3<-dim(y3)[1]
Dose3<-as.matrix(Dose3)
Dose3<-Dose3[1:data.length3,]
Dose3<-as.numeric(Dose3)
data3<-data3[1:data.length3,]
data.transform3<-as.data.frame(cbind(Dose3,y3$X.y3.))
#fit the Hill model
#Estimate the coefficients in the HILL model
model3<-nls(V2~m*log(Dose3)+m*log(IC_50),start=list(m=-20, IC_50=1.1),data=data.transform3,trace=F)
#the coefficients of optimized model
coef3<-summary(model3)$coefficients
m3<-coef3[1,1]
IC_50_3<-coef3[2,1]
#Build the optimized Hill Model
#y=(((Econ-b)(Dose/IC_50)^m)/(1+(Dose/IC_50)^m))+b
#And calculated the fitted response
Viability_hat3<-matrix(NA,length(Dose3),1)
Econ=100
for (i in 1:length(Dose3)) {
Viability_hat3[i]<-(((Econ-b3)*((data3[i,1]/IC_50_3)**m3))/(1+(data3[i,1]/IC_50_3)**m3))+b3
}
Viability_hat3<-data.frame(Dose3,Viability_hat3)
#Create a dataset with residuals
#To be used to fit linear|log model
#Viability range from 20% to 80% will be used;
residual_data_2080_range3<-data3[data3[,2]>=20 & data3[,2]<=80,]
residual_data_2080_range_dose3<-residual_data_2080_range3[,1]
residual3<-as.data.frame(Viability_hat3$Viability_hat)-as.matrix(data3$Viability)
residual3<-data.frame(data3$Dose,residual3)
residual_data3<-residual3[max(residual_data_2080_range_dose3)>=residual3$data3.Dose & residual3$data3.Dose>=min(residual_data_2080_range_dose3),]
#Decide either Linear or Log model suffice
#1.fit the data with linear model
#Note: Only the response range from 20% to 80% in the dataset with residuals will be used to fit both linear and log model
linear3<-lm(Viability_hat3.Viability_hat~data3.Dose,data=residual_data3)
if (summary(linear3)$coefficients[2,4]>0.05) {
print("Warning: the linear model may not suffice")
}
#Calculate fitted responses under linear model
linear_hat3<-matrix(NA,length(residual_data3$data3.Dose),1)
for (i in 1:length(residual_data3$data3.Dose)){
linear_hat3[i]<-(summary(linear3)$coefficients[1,1])+(summary(linear3)$coefficients[2,1])*residual_data3[i,1]
}
#2.fit the data with log-linear model
log3<-lm(Viability_hat3.Viability_hat~log(data3.Dose),data=residual_data3)
if (summary(log3)$coefficients[2,4]>0.05) {
print("Warning: the log model may not suffice")
}
#Calculate fitted responses under log model
log_hat3<-matrix(NA,length(residual_data3$data3.Dose),1)
for (i in 1:length(residual_data3$data3.Dose)){
log_hat3[i]<-(summary(log3)$coefficients[1,1])+(summary(log3)$coefficients[2,1])*log(residual_data3[i,1])
}
#The fitted response from Hill Model
#range Viability 20%-80%
hill_hat3<-Viability_hat3[max(residual_data_2080_range_dose3)>=residual3$data3.Dose & residual3$data3.Dose>=min(residual_data_2080_range_dose3),]
hill_hat3<-as.matrix(hill_hat3[ ,2])
#linear v.s Hill
diff_linear_hill3<-matrix(NA,length(linear_hat3),1)
for (i in 1:length(linear_hat3)) {
diff_linear_hill3[i]<-(linear_hat3[i]-hill_hat3[i])**2
}
#Sum of square of Difference between linear model and Hill model
SSD_linear_Hill3<-0
for (i in 1:length(diff_linear_hill3)) {
SSD_linear_Hill3<-diff_linear_hill3[i]+SSD_linear_Hill3
}
#log v.s Hill
diff_log_hill3<-matrix(NA,length(log_hat3),1)
for (i in 1:length(log_hat3)) {
diff_log_hill3[i]<-(log_hat3[i]-hill_hat3[i])**2
}
#Sum of square of Difference between linear model and Hill model
SSD_log_Hill3<-0
for (i in 1:length(diff_log_hill3)) {
SSD_log_Hill3<-diff_log_hill3[i]+SSD_log_Hill3
}
#Compare the linear model with log model
#Can use the following ifelse() function as well.
#ifelse(SSD_log_Hill>SSD_linear_Hill, print("The Linear Model should suffice"), ifelse(SSD_log_Hill>SSD_linear_Hill, print("The Log Model should suffice"), print("Either Log model or lieanr model suffices")))
#Compare the linear model with log model
#identify the model for each drug-response relationship
#1st drug
if (SSD_log_Hill>SSD_linear_Hill) {
print("The Linear Model should suffice for Drug 1")
model_drug1="linear"
drug1_a1<-summary(linear)$coefficients[1,1]
drug1_b1<-summary(linear)$coefficients[2,1]
} else if (SSD_log_Hill<SSD_linear_Hill) {
print("The Log Model should suffice for Drug 1")
model_drug1="log"
drug1_a1<-summary(log)$coefficients[1,1]
drug1_b1<-summary(log)$coefficients[2,1]
} else {
print("Either Log model or lieanr model suffices for Drug 1")
model_drug1="linear"
}
#2nd drug
if (SSD_log_Hill2>SSD_linear_Hill2) {
print("The Linear Model should suffice for Drug 2")
model_drug2<-"linear"
drug2_a2<-summary(linear2)$coefficients[1,1]
drug2_b2<-summary(linear2)$coefficients[2,1]
} else if (SSD_log_Hill2<SSD_linear_Hill2) {
print("The Log Model should suffice for Drug 2")
model_drug2<-"log"
drug2_a2<-summary(log2)$coefficients[1,1]
drug2_b2<-summary(log2)$coefficients[2,1]
} else {
print("Either Log model or lieanr model suffices for Drug 2")
model_drug2<-"linear"
}
#3rd drug
if (SSD_log_Hill3>SSD_linear_Hill3) {
print("The Linear Model should suffice for Drug 3")
model_drug3<-"linear"
drug3_a3<-summary(linear3)$coefficients[1,1]
drug3_b3<-summary(linear3)$coefficients[2,1]
} else if (SSD_log_Hill3<SSD_linear_Hill3) {
print("The Log Model should suffice for Drug 3")
model_drug3<-"log"
drug3_a3<-summary(log3)$coefficients[1,1]
drug3_b3<-summary(log3)$coefficients[2,1]
} else {
print("Either Log model or lieanr model suffices for Drug 3")
model_drug3<-"linear"
}
#Create new variables to be further used in the following if-else function
Drug1<-data
Drug2<-data2
Drug3<-data3
# method: specifying the fitted models for the drug data;
#"liloglog"
#"lilili"
#"lililog"
#"logloglog"
if (model_drug1=="linear" & model_drug2=="linear" & model_drug3=="linear") {
# method="lilili"
# Drug1: y=a1+b1*x, linear response
# Drug2: y=a2+b2*x, linear response
# Drug3: y=a3+b3*x, linear response
method<-"lilili"
coef1 <- as.numeric( lm( Drug1[,2] ~ Drug1[,1] )$coeff )
coef2 <- as.numeric( lm( Drug2[,2] ~ Drug2[,1] )$coeff )
coef3 <- as.numeric( lm( Drug3[,2] ~ Drug3[,1] )$coeff )
a1<-coef1[1]
b1<-coef1[2]
a2<-coef2[1]
b2<-coef2[2]
a3<-coef3[1]
b3<-coef3[2]
print(paste0("Method should be", " ", method))
} else if (model_drug1=="linear" & model_drug2=="log" & model_drug3=="log") {
# method="liloglog"
# Drug1: y=a1+b1*x, linear response
# Drug2: y=a2+b2*log(x), log response
# Drug3: y=a3+b3*log(x), log response
method<-"liloglog"
coef1 <- as.numeric( lm( Drug1[,2] ~ Drug1[,1] )$coeff )
coef2 <- as.numeric( lm( Drug2[,2] ~ log(Drug2[,1] ))$coeff )
coef3 <- as.numeric( lm( Drug3[,2] ~ log(Drug3[,1] ))$coeff )
a1<-coef1[1]
b1<-coef1[2]
a2<-coef2[1]
b2<-coef2[2]
a3<-coef3[1]
b3<-coef3[2]
print(paste0("Method should be", " ", method))
} else if (model_drug1=="log" & model_drug2=="linear" & model_drug3=="log") {
# method="loglilog"
# Drug1: y=a1+b1*log(x), log response
# Drug2: y=a2+b2*x, linear response
# Drug3: y=a3+b3*log(x), log response
method<-"loglilog"
coef1 <- as.numeric( lm( Drug2[,2] ~ log(Drug2[,1] ))$coeff )
coef2 <- as.numeric( lm( Drug1[,2] ~ Drug1[,1] )$coeff )
coef3 <- as.numeric( lm( Drug3[,2] ~ log(Drug3[,1] ))$coeff )
a1<-coef2[1]
b1<-coef2[2]
a2<-coef1[1]
b2<-coef1[2]
a3<-coef3[1]
b3<-coef3[2]
print(paste0("Method should be", " ", method))
} else if (model_drug1=="log" & model_drug2=="log" & model_drug3=="linear") {
# method="loglogli"
# Drug1: y=a1+b1*log(x), log response
# Drug2: y=a2+b2*log(x), log response
# Drug3: y=a3+b3*x, linear response
method<-"loglogli"
coef1 <- as.numeric( lm( Drug1[,2] ~ log(Drug1[,1] ))$coeff )
coef2 <- as.numeric( lm( Drug2[,2] ~ log(Drug2[,1] ))$coeff )
coef3 <- as.numeric( lm( Drug3[,2] ~ Drug3[,1] )$coeff )
a1<-coef3[1]
b1<-coef3[2]
a2<-coef1[1]
b2<-coef1[2]
a3<-coef2[1]
b3<-coef2[2]
print(paste0("Method should be", " ", method))
} else if (model_drug1=="linear" & model_drug2=="linear" & model_drug3=="log") {
# method="lililog"
# Drug1: y=a1+b1*x, linear response
# Drug2: y=a2+b2*x, linear response
# Drug3: y=a3+b3*log(x), log response
method<-"lililog"
coef1 <- as.numeric( lm( Drug1[,2] ~ Drug1[,1] )$coeff )
coef2 <- as.numeric( lm( Drug2[,2] ~ Drug2[,1] )$coeff )
coef3 <- as.numeric( lm( Drug3[,2] ~ log(Drug3[,1] ))$coeff )
# Straight mapping: (a_i, b_i) <- intercept/slope fitted for Drug i.
a1<-coef1[1]
b1<-coef1[2]
a2<-coef2[1]
b2<-coef2[2]
a3<-coef3[1]
b3<-coef3[2]
print(paste0("Method should be", " ", method))
} else if (model_drug1=="linear" & model_drug2=="log" & model_drug3=="linear") {
# method="lilogli"
# Drug1: y=a1+b1*x, linear response
# Drug2: y=a2+b2*log(x), log response
# Drug3: y=a3+b3*x, linear response
method<-"lilogli"
coef1 <- as.numeric( lm( Drug1[,2] ~ Drug1[,1] )$coeff )
coef2 <- as.numeric( lm( Drug2[,2] ~ log(Drug2[,1] ))$coeff )
coef3 <- as.numeric( lm( Drug3[,2] ~ Drug3[,1] )$coeff )
a1<-coef1[1]
b1<-coef1[2]
# NOTE(review): Drug3's (linear) coefficients go into slot 2 and Drug2's (log)
# coefficients into slot 3, i.e. drugs 2 and 3 are relabelled, while method
# stays "lilogli". SY.design3's "lilogli" branch treats b2 as the log slope,
# which contradicts this mapping. Either the swap or the method string looks
# wrong -- TODO confirm against the intended design (was "lililog" meant?).
a2<-coef3[1]
b2<-coef3[2]
a3<-coef2[1]
b3<-coef2[2]
print(paste0("Method should be", " ", method))
} else if (model_drug1=="log" & model_drug2=="linear" & model_drug3=="linear") {
# method="loglili"
# Drug1: y=a1+b1*log(x), log response
# Drug2: y=a2+b2*x, linear response
# Drug3: y=a3+b3*x, linear response
method<-"loglili"
coef1 <- as.numeric( lm( Drug1[,2] ~ log(Drug1[,1] ))$coeff )
coef2 <- as.numeric( lm( Drug2[,2] ~ Drug2[,1] )$coeff )
coef3 <- as.numeric( lm( Drug3[,2] ~ Drug3[,1] )$coeff )
# NOTE(review): same relabelling pattern as above -- the log drug's (Drug1)
# coefficients end up in slot 3 while method is set to "loglili", whose
# SY.design3 branch treats slot 1 as the log drug. Verify this is intentional.
a1<-coef2[1]
b1<-coef2[2]
a2<-coef3[1]
b2<-coef3[2]
a3<-coef1[1]
b3<-coef1[2]
print(paste0("Method should be", " ", method))
} else if (model_drug1=="log" & model_drug2=="log" & model_drug3=="log") {
# method="logloglog"
# Drug1: y=a1+b1*log(x), log response
# Drug2: y=a2+b2*log(x), log response
# Drug3: y=a3+b3*log(x), log response
method<-"logloglog"
coef1 <- as.numeric( lm( Drug1[,2] ~ log(Drug1[,1] ))$coeff )
coef2 <- as.numeric( lm( Drug2[,2] ~ log(Drug2[,1] ))$coeff )
coef3 <- as.numeric( lm( Drug3[,2] ~ log(Drug3[,1] ))$coeff )
# Straight mapping again: (a_i, b_i) <- Drug i.
a1<-coef1[1]
b1<-coef1[2]
a2<-coef2[1]
b2<-coef2[2]
a3<-coef3[1]
b3<-coef3[2]
print(paste0("Method should be", " ", method))
}
#Create variables to be further used in SYN.design, SYN.test and SYN.analysis programs
# (no-op self-assignment; kept for compatibility with the original flow)
method<-method
#stop("Please enter the correct location of your dataset on the disk. Note: use forward slashes between folder names.")
# Visual check of the 5-parameter log-logistic (drc::LL.5) fit for each drug;
# the fitted curve is drawn in red with the raw points overlaid.
#visual check of the fitness: Drug1
par(mfrow=c(1,3))
fit.ll <- drm(Viability~Dose, data=data, fct=LL.5(), type="continuous")
plot(fit.ll,type = "none",col='red',xlab = "Dose",ylab = "Viability(%)",main = "Drug 1")
points(x=data$Dose,y=data$Viability)
#Can use ggplot() to achieve a more smooth curve on the scattered plot
#visual check of the fitness: Drug2
fit.ll2 <- drm(Viability~Dose, data=data2, fct=LL.5(), type="continuous")
plot(fit.ll2,type = "none",col='red',xlab = "Dose",ylab = "Viability(%)",main = "Drug 2")
points(x=data2$Dose,y=data2$Viability)
#visual check of the fitness: Drug3
fit.ll3 <- drm(Viability~Dose, data=data3, fct=LL.5(), type="continuous")
plot(fit.ll3,type = "none",col='red',xlab = "Dose",ylab = "Viability(%)",main = "Drug 3")
points(x=data3$Dose,y=data3$Viability)
#Try this ThreeDrugIdentify function with the following example:
#ThreeDrugIdentify<-ThreeDrugIdentify(location1 = "ARAC03.txt",location2 = "SAHA03.txt",location3 = "VP1603.txt")
# SY.design3: build a uniform (U-type) experimental design for a three-drug
# combination study.
#
# Arguments:
#   c0     - smallest meaningful difference to be detected.
#   r      - number of replications at each dose-mixture.
#   method - string of "li"/"log" codes giving the single-drug response model
#            for drugs 1-3 (e.g. "lililog"); must be one of the eight listed.
#
# Returns a list: mixture count, total experiment count, the central
# L2-discrepancy of the design, the m x 3 matrix of dose mixtures, and the
# method string (the "logloglog" branch additionally returns its grid-search
# diagnostics D0, dd2, dd3).
#
# NOTE(review): besides its arguments this function reads a1,b1,a2,b2,a3,b3,
# Drug1..Drug3 (and ktsolve()) from the enclosing/global environment -- it is
# not self-contained and must be called after the single-drug fits above.
SY.design3<-function (c0, r,method)
{
if ( method != "liloglog" & method != "loglilog" & method != "loglogli" & method != "lilili" & method != "loglili" & method != "lilogli" & method != "lililog" & method != "logloglog") {
stop( "method = unknown regression model." )
}
# function name: SY.design3(c0, r,method="lilili",UDFactor3=location)
# data: from the single-dose experiments of three drugs
# cited: UDFactor3
# output: dose mixtures for combination experiments
# parameters:
# a1 and b1: intercept and slope of drug1
# a2 and b2: intercept and slope of drug2
# a3 and b3: intercept and slope of drug3
##### b3 <= b2 <= b1 ????
# si2: variance estimated from the pooled data of the single experiments
# m: number of dose-mixtures for combination study
# r: number of replications at each dose-mixture
# c0: the smallest meaningful difference to be detected
# total sample size: m*r
# power: 80%
# significance level: 5%
# Dis: the central L2-discrepancy of m mixtures
# h1: to choose the low dose boundary from drug1: eg. 80%
# h2: to choose the high dose boundary from drug1: eg. 20%
##--- input parameters of the single dose-responses
si2 <- var(c(Drug1[, 2], Drug2[, 2], Drug3[,2]))
# No-op self-assignments: these pull a1..b3 from the enclosing scope into the
# local frame (they have no other effect).
a1 <- a1
b1 <- b1
a2 <- a2
b2 <- b2
a3 <- a3
b3 <- b3
#si2 <- 988.422
#a1 <- 4.8
#b1 <- -12.76
#a2 <- 41.52
#b2 <- -13.02
#a3 <- 54.55
#b3 <- -23.98
h1<-80
h2<-20
rho0 <- exp((a2 - a1)/b1)
rho1 <- exp((a3 - a1)/b1)
##--- calculate the sample size for uniform design
#m is the number of mixtures
#k is the number of drugs, in this two drug combination case, k=3
#3<=r<=8
#3<=m<=30
#k is automatically set to 3 since this is a three-drug combination study
#d <- c0^2/si2
#delta0: non-central parameter of the F-distribution
#delta0 <- (m * (r+1)) * d
#To obtain m, Step 1: Obtain the quantile under F-distribution
#Note: alpha (significance level)=0.05
#Substitute quantile with the following equation, otherwise an error occurs since m is the unknown parameter
#and has to be estimated
#quantile<-qf(0.95,(m-2),(m*r))
#Step 2: Calculate the non-central parameter as delta0
#Substitute delta0 with the following equation, otherwise an error occurs since m is the unknown parameter
#and has to be estimated
#Since d <- c0^2/si2 and delta0 <- (m * (r+1)) * d, then delta0 can be written into:
#delta0 <- (m * (r+1)) * (c0^2/si2)
#Step 3: Use pre-determined power (80%) to determine m
#Constructing essential elements to be used in 'ktsolve' function
r<-r
# yfunc encodes the power equation in m (solved numerically below); the
# "-0.2" term sets the target power at 80%.
yfunc <- function(x) {
y <- vector()
y[1] <- exp(-(m * (r+1)) * (c0^2/si2)/2)*pf(qf(0.95,(m-2),(m*r)),m-2,m*r)+
exp(-(m * (r+1)) * (c0^2/si2)/2)*((m * (r+1)) * (c0^2/si2)/2)*pf(((m-2)/m)*qf(0.95,(m-2),(m*r)), m, m*r)+
((exp(-(m * (r+1)) * (c0^2/si2)/2)*((m * (r+1)) * (c0^2/si2)/2)^2)/2)*pf(((m-2)/(m+2))*qf(0.95,(m-2),(m*r)), (m+2), m*r)-0.2
}
known=list(r=r,c0=c0,si2=si2)
guess=list(m=3)
#ktsolve function
solvm <- ktsolve(yfunc, known=known, guess=guess, tool=c("BB","nleqslv"), show=TRUE)
# NOTE(review): when the solved m falls outside [3, 30], m is set to NULL and
# only a message is printed -- execution CONTINUES and every later use of m
# (table indexing, matrix construction) will then fail with an obscure error.
# TODO: consider stop()-ing here instead.
if(ceiling(solvm$results$par)>30 | ceiling(solvm$results$par)<3) { m<-NULL
print("Please choose a larger r (Iteration) and rerun this program")} else
{m<-ceiling(solvm$results$par)}
##--- find the U-type matrix
# UDFactor3: hard-coded uniform-design tables for m = 5..30 (NA-padded to 31
# rows). Columns appear to come in groups holding, for each m, the design's
# discrepancy and the column permutations -- TODO verify the exact layout
# against the published U-type design tables this was transcribed from.
UDFactor3<-structure(.Data = list(c(0., 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., 16.,
17., 18., 19., 20., 21., 22., 23., 24., 25., 26., 27., 28., 29., 30.)
, c(4., 3., 1., 4., 2., NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA,
NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA)
, c(0.039476, 1., 4., 3., 2., NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA,
NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA)
, c(5., 2., 5., 1., 4., 3., NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA,
NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA)
, c(0.026331, 4., 2., 1., 5., 3., NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA,
NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA)
, c(6., 2., 4., 6., 1., 3., 5., NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA,
NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA)
, c(0.018637, 3., 6., 2., 5., 1., 4., NA, NA, NA, NA, NA, NA, NA, NA, NA, NA,
NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA)
, c(7., 5., 2., 7., 3., 6., 1., 4., NA, NA, NA, NA, NA, NA, NA, NA, NA, NA,
NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA)
, c(0.01425, 4., 2., 6., 7., 1., 5., 3., NA, NA, NA, NA, NA, NA, NA, NA, NA,
NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA)
, c(8., 3., 7., 5., 1., 8., 4., 2., 6., NA, NA, NA, NA, NA, NA, NA, NA, NA,
NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA)
, c(0.0105, 4., 7., 1., 6., 3., 8., 2., 5., NA, NA, NA, NA, NA, NA, NA, NA,
NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA)
, c(9., 6., 2., 9., 3., 5., 7., 1., 8., 4., NA, NA, NA, NA, NA, NA, NA, NA,
NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA)
, c(0.008758, 3., 8., 6., 1., 5., 9., 4., 2., 7., NA, NA, NA, NA, NA, NA, NA,
NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA)
, c(10., 5., 8., 2., 10., 3., 7., 1., 9., 6., 4., NA, NA, NA, NA, NA, NA, NA,
NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA)
, c(0.007398, 4., 8., 2., 6., 10., 1., 7., 3., 9., 5., NA, NA, NA, NA, NA, NA,
NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA)
, c(11., 7., 3., 9., 4., 10., 1., 6., 11., 5., 2., 8., NA, NA, NA, NA, NA, NA,
NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA)
, c(0.006193, 6., 3., 10., 8., 2., 5., 11., 7., 1., 9., 4., NA, NA, NA, NA,
NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA)
, c(12., 6., 10., 2., 8., 4., 12., 3., 9., 7., 1., 11., 5., NA, NA, NA, NA,
NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA)
, c(0.005332, 4., 8., 11., 1., 7., 10., 2., 6., 12., 5., 3., 9., NA, NA, NA,
NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA)
, c(13., 8., 3., 12., 6., 10., 1., 4., 13., 9., 5., 2., 11., 7., NA, NA, NA,
NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA)
, c(0.004629, 7., 11., 4., 2., 13., 8., 5., 10., 1., 12., 3., 6., 9., NA, NA,
NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA)
, c(14., 7., 12., 4., 10., 1., 13., 5., 8., 2., 14., 9., 3., 11., 6., NA, NA,
NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA)
, c(0.004064, 5., 9., 13., 1., 7., 12., 3., 8., 11., 4., 14., 2., 6., 10., NA,
NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA)
, c(15., 5., 14., 10., 3., 7., 12., 2., 9., 15., 6., 1., 13., 8., 11., 4., NA,
NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA)
, c(0.00359, 9., 5., 12., 2., 15., 7., 13., 1., 10., 4., 6., 14., 8., 3., 11.,
NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA)
, c(16., 12., 6., 4., 14., 9., 1., 16., 8., 5., 11., 2., 15., 7., 13., 3., 10.,
NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA)
, c(0.003194, 7., 12., 4., 15., 1., 10., 5., 16., 8., 13., 2., 11., 6., 3.,
14., 9., NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA)
, c(17., 6., 13., 11., 4., 16., 2., 8., 14., 10., 1., 17., 7., 5., 15., 9.,
3., 12., NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA)
, c(0.002846, 7., 13., 3., 16., 10., 5., 11., 1., 17., 9., 6., 14., 2., 15.,
4., 12., 8., NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA)
, c(18., 13., 3., 7., 18., 11., 5., 15., 9., 1., 16., 10., 4., 12., 6., 17.,
2., 14., 8., NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA)
, c(0.002558, 8., 15., 2., 11., 18., 6., 3., 13., 9., 16., 5., 12., 1., 17.,
7., 4., 14., 10., NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA)
, c(19., 12., 5., 17., 3., 15., 10., 7., 19., 1., 8., 11., 14., 4., 18., 9.,
2., 16., 6., 13., NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA)
, c(0.002313, 6., 14., 11., 2., 18., 9., 16., 4., 7., 12., 19., 1., 10., 15.,
3., 17., 8., 5., 13., NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA)
, c(20., 9., 17., 3., 15., 7., 12., 2., 20., 5., 13., 19., 6., 10., 14., 1.,
18., 8., 11., 16., 4., NA, NA, NA, NA, NA, NA, NA, NA, NA, NA)
, c(0.002109, 15., 8., 4., 19., 11., 1., 17., 12., 6., 14., 3., 20., 7., 10.,
9., 16., 2., 18., 5., 13., NA, NA, NA, NA, NA, NA, NA, NA, NA, NA)
, c(21., 6., 16., 12., 1., 20., 9., 18., 4., 11., 15., 3., 21., 8., 14., 10.,
5., 19., 2., 17., 7., 13., NA, NA, NA, NA, NA, NA, NA, NA, NA)
, c(0.001939, 9., 18., 2., 14., 5., 21., 11., 4., 16., 7., 19., 13., 1., 15.,
8., 12., 20., 6., 3., 17., 10., NA, NA, NA, NA, NA, NA, NA, NA, NA)
, c(22., 11., 4., 18., 14., 7., 21., 1., 15., 9., 17., 3., 22., 6., 13., 10.,
19., 5., 16., 2., 20., 8., 12., NA, NA, NA, NA, NA, NA, NA, NA)
, c(0.001785, 14., 8., 18., 3., 21., 6., 12., 16., 1., 9., 19., 13., 5., 22.,
10., 2., 17., 11., 4., 20., 15., 7., NA, NA, NA, NA, NA, NA, NA, NA)
, c(23., 14., 8., 21., 3., 17., 10., 6., 19., 13., 1., 23., 5., 16., 12., 9.,
20., 2., 22., 11., 4., 18., 15., 7., NA, NA, NA, NA, NA, NA, NA)
, c(0.001648, 12., 20., 4., 8., 17., 14., 2., 22., 6., 18., 10., 15., 1., 23.,
9., 13., 5., 19., 3., 21., 7., 16., 11., NA, NA, NA, NA, NA, NA, NA)
, c(24., 15., 6., 22., 4., 19., 12., 9., 24., 2., 13., 17., 7., 21., 1., 10.,
18., 16., 5., 11., 23., 3., 14., 20., 8., NA, NA, NA, NA, NA, NA)
, c(0.001527, 13., 19., 4., 8., 23., 16., 2., 11., 21., 5., 18., 6., 15., 12.,
24., 1., 9., 17., 10., 20., 3., 22., 7., 14., NA, NA, NA, NA, NA,
NA)
, c(25., 14., 6., 22., 11., 18., 3., 20., 9., 24., 5., 16., 2., 13., 25., 8.,
12., 21., 1., 17., 10., 23., 7., 15., 4., 19., NA, NA, NA, NA, NA)
, c(0.001421, 16., 5., 23., 12., 2., 21., 10., 19., 7., 14., 25., 3., 8., 18.,
11., 22., 4., 17., 15., 1., 13., 24., 6., 9., 20., NA, NA, NA, NA,
NA)
, c(26., 14., 7., 25., 4., 20., 17., 11., 1., 23., 9., 18., 13., 3., 26., 15.,
6., 22., 8., 21., 12., 2., 16., 24., 10., 5., 19., NA, NA, NA, NA)
, c(0.001321, 10., 22., 14., 3., 25., 7., 18., 16., 5., 12., 20., 1., 24., 9.,
21., 6., 17., 13., 2., 26., 8., 15., 23., 4., 19., 11., NA, NA, NA,
NA)
, c(27., 17., 8., 25., 4., 20., 10., 14., 22., 1., 12., 27., 7., 18., 3., 24.,
16., 9., 21., 5., 11., 26., 13., 2., 23., 15., 6., 19., NA, NA, NA)
, c(0.001215, 20., 5., 13., 24., 2., 16., 9., 26., 11., 22., 7., 18., 15., 3.,
23., 6., 27., 10., 14., 1., 19., 12., 21., 4., 25., 8., 17., NA, NA,
NA)
, c(28., 22., 4., 16., 11., 26., 8., 18., 1., 20., 13., 24., 6., 14., 28., 3.,
21., 10., 7., 25., 15., 19., 2., 27., 9., 17., 5., 23., 12., NA, NA)
, c(0.001157, 19., 6., 24., 12., 3., 27., 9., 17., 15., 8., 26., 2., 21., 11.,
23., 5., 20., 14., 16., 1., 28., 10., 22., 4., 13., 25., 7., 18.,
NA, NA)
, c(29., 15., 24., 3., 9., 19., 28., 7., 13., 22., 2., 16., 27., 10., 18., 5.,
26., 11., 21., 6., 20., 1., 25., 14., 12., 29., 8., 23., 4., 17.,
NA)
, c(0.001092, 21., 11., 8., 28., 4., 25., 15., 1., 18., 23., 13., 6., 17., 29.,
3., 20., 10., 9., 26., 22., 12., 2., 16., 24., 14., 5., 27., 19.,
7., NA)
, c(30., 18., 5., 24., 12., 28., 7., 20., 2., 15., 25., 13., 10., 30., 4., 21.,
8., 27., 16., 1., 22., 9., 17., 29., 14., 3., 19., 26., 6., 23., 11.)
, c(0.001026, 9., 27., 19., 4., 13., 16., 19., 6., 21., 2., 23., 14., 25., 11.,
7., 30., 17., 1., 18., 22., 8., 26., 5., 12., 24., 15., 28., 3., 10.,
20.)
)
, names = c("C1", "C2", "C3", "C4", "C5", "C6", "C7", "C8", "C9", "C10", "C11", "C12",
"C13", "C14", "C15", "C16", "C17", "C18", "C19", "C20", "C21", "C22",
"C23", "C24", "C25", "C26", "C27", "C28", "C29", "C30", "C31", "C32",
"C33", "C34", "C35", "C36", "C37", "C38", "C39", "C40", "C41", "C42",
"C43", "C44", "C45", "C46", "C47", "C48", "C49", "C50", "C51", "C52",
"C53", "C54", "C55")
, row.names = c("1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12", "13", "14",
"15", "16", "17", "18", "19", "20", "21", "22", "23", "24", "25",
"26", "27", "28", "29", "30", "31")
, class = "data.frame"
)
UD <- UDFactor3
# Look up the discrepancy and the two design columns for this m; the
# (2*m - 5)/(2*m - 6) arithmetic maps m to its column pair in the table.
Dis <- UD[1, (2 * m - 5)]
MU <- matrix(c(UD[2:(m + 1), 1], UD[2:(m + 1), (2 * m - 6)], UD[2:
(m + 1), (2 * m - 5)]), ncol = 3, byrow = F)
if ( method == "liloglog" | method == "loglilog" | method == "loglogli" | method == "lilili" | method == "loglili" | method == "lilogli" | method == "lililog") {
# the total dose is from d01 to d02
h01 <- h1 - a1
h02 <- h2 - a1
d01 <- (h1 - a1)/b1
d02 <- (h2 - a1)/b1
h001 <- h01/b1
h002 <- h02/b1
##--- find the m dose-mixtures
u1 <- (as.numeric(MU[, 1]) - 0.5)/m
u2 <- (as.numeric(MU[, 2]) - 0.5)/m
u3 <- (as.numeric(MU[, 3]) - 0.5)/m
y1 <- 1 - sqrt(1 - u1)
y2 <- (1 - y1) * u2
y03 <- u3 * (h01^3 - h02^3) + h02^3
y3 <- ((abs(y03))^(1/3) * sign(y03) - h02)/(h01 - h02)
# NOTE(review): A is given 18 data values with ncol = m, and B is filled
# byrow with 6 vectors of length m into an m-row matrix; both only line up
# as (3 x 6) %*% (6 x m) when m == 6. For any other m this either errors or
# silently scrambles the mixture weights -- TODO confirm the intended
# dimensions (ncol = 6 for A, nrow = 6 for B?).
A <- matrix(c(h002, 0, 0, 0, (h02/b2), 0, 0, 0, (h02/b3), h001, 0,
0, 0, (h01/b2), 0, 0, 0, (h01/b3)), ncol = m , byrow = F)
B <- matrix(c((1 - y1 - y2) * (1 - y3), y1 * (1 - y3), y2 * (1 - y3),
(1 - y1 - y2) * y3, y1 * y3, y2 * y3), nrow = m, byrow = T)
Z <- A%*% B
z1 <- c(Z[1, ])
z2 <- c(Z[2, ])
z3 <- c(Z[3, ])
##--- d031 <- (h1 - a2)/b2
##--- d032 <- (h2 - a2)/b2
##--- d041 <- (exp((h1 - a3)/b3))
##--- d042 <- (exp((h2 - a3)/b3))
if ( method == "lilili"){
# method="lilili"
# Drug1: y=a1+b1*x, linear response
# Drug2: y=a2+b2*x, linear response
# Drug3: y=a3+b3*x, linear response
d1 <- abs(z1)
d2 <- abs(z2 + (a1 - a2)/b2)
d3 <- abs(z3 + (a1 - a3)/b3)
method <- "lilili"
}
if ( method == "lililog"){
# method="lililog"
# Drug1: y=a1+b1*x, linear response
# Drug2: y=a2+b2*x, linear response
# Drug3: y=a3+b3*log(x), log-linear response
z01 <- exp((b1 * z1 + b2 * z2 + z3 + a1 - a3)/b3)
d1 <- abs(z1)
d2 <- abs(z2 + (a1 - a2)/b2)
d3 <- abs((z3 * z01)/(b1 * z1 + b2 * z2 + z3))
method <-"lililog"
}
if ( method == "lilogli"){
# method="lilogli"
# Drug1: y=a1+b1*x, linear response
# Drug2: y=a2+b2*log(x), log-linear response
# Drug3: y=a3+b3*x, linear response
z01 <- exp((b1 * z1 + b3 * z3 + z2 + a1 - a2)/b2)
d1 <- abs(z1)
d2 <- abs((z2 * z01)/(b1 * z1 + b3 * z3 + z2))
d3 <- abs(z3 + (a1 - a3)/b3)
method <-"lilogli"
}
if ( method == "loglili"){
# method="loglili"
# Drug1: y=a1+b1*log(x), log-linear response
# Drug2: y=a2+b2*x, linear response
# Drug3: y=a3+b3*x, linear response
z01 <- exp((b2 * z2 + b3 * z3 + z1 + a2 - a1)/b1)
d1 <- abs((z1 * z01)/(b2 * z2 + b3 * z3 + z1))
d2 <- abs(z2)
d3 <- abs(z3 + (a1 - a3)/b3)
method <-"loglili"
}
if ( method == "liloglog"){
# method="liloglog"
# Drug1: y=a1+b1*x, linear response
# Drug2: y=a2+b2*log(x), log-linear response
# Drug3: y=a3+b3*log(x), log-linear response
z02 <- exp((a2-a3)/b2)
z03 <-(b1 * z1 + z2 + z3 + a1 - a3)/b3
d1 <- abs(z1)
d2 <- abs((z2*z02*exp((z03)^(b3/b2)))/(b1 * z1 + z2 + z3))
d3 <- abs((z3*exp(z03))/(b1 * z1 + z2 + z3))
method <- "liloglog"
}
if ( method == "loglilog"){
# method="loglilog"
# Drug1: y=a1+b1*log(x), log-linear response
# Drug2: y=a2+b2*x, linear response
# Drug3: y=a3+b3*log(x), log-linear response
z02 <- exp((a1-a3)/b1)
z03 <-(b2 * z2 + z1 + z3 + a2 - a3)/b3
d1 <- abs((z1*z02*exp((z03)^(b3/b1)))/(b2 * z2 + z1 + z3))
d2 <- abs(z2)
d3 <- abs((z3*exp(z03))/(b2 * z2 + z1 + z3))
method <- "loglilog"
}
if ( method == "loglogli"){
# method="loglogli"
# Drug1: y=a1+b1*log(x), log-linear response
# Drug2: y=a2+b2*log(x), log-linear response
# Drug3: y=a3+b3*x, linear response
z02 <- exp((a1-a2)/b1)
z03 <-(b3 * z3 + z1 + z2 + a3 - a2)/b2
d1 <- abs((z1*z02*exp((z03)^(b2/b1)))/(b3 * z3 + z1 + z2))
d2 <- abs((z2*exp(z03))/(b3 * z3 + z1 + z2))
d3 <- abs(z3)
method <- "loglogli"
}
# Assemble the design: m dose mixtures, plus one (presumably a control)
# in the reported mixture count.
Dose.mixtures <- matrix(c(d1, d2, d3), ncol = 3, byrow = F)
Mixture.number <- m+1
Discrepancy <- Dis
Experiment.number <- (m+1) * r
Variables <- c("Drug1", "Drug2", "Drug3")
dimnames(Dose.mixtures) <- list(NULL, Variables)
return(list(Mixture.number, Experiment.number, Discrepancy, Dose.mixtures,method))
}
if ( method == "logloglog"){
# method="logloglog"
# Drug1: y=a1+b1*log(x), log-linear response
# Drug2: y=a2+b2*log(x), log-linear response
# Drug3: y=a3+b3*log(x), log-linear response
##--- choose the experimental domain
# the total dose is from d01 to d02
h1 <- 80
h2 <- 20
d01 <- exp((h1 - a1)/b1)
d02 <- exp((h2 - a1)/b1)
d03 <- (exp((h2 - a2)/b2))
d04 <- (exp((h2 - a3)/b3))
##--- find the m dose-mixtures
y1 <- (as.numeric(MU[, 1]) - 0.5)/m
y2 <- (as.numeric(MU[, 2]) - 0.5)/m
y3 <- (as.numeric(MU[, 3]) - 0.5)/m
z1 <- y1 * (d02 - d01) + d01
z2 <- y2 * sqrt(y3)
z3 <- (1 - y2) * sqrt(y3)
d1 <- z1 * z2 * (1 - (1 - rho1/rho0) * z3)
d2 <- rep(0, m)
d3 <- rep(0, m)
# Grid search over candidate doses of drugs 2 and 3; for each mixture j the
# pair minimising the squared deviation f4 is selected.
D2 <- seq(0.001, d03, 0.002)
D3 <- seq(0.001, d04, 0.002)
r1 <- length(D2)
r2 <- length(D3)
D4 <- c(kronecker(D2, rep(1, r2)))
D5 <- c(kronecker(rep(1, r1), D3))
D0 <- rep(0, m)
dd2 <- rep(0, m)
dd3 <- rep(0, m)
w0 <- (rho0/rho1)^(b1/b2)
w1 <- (b3 * (b2 - b1))/(b2 * (b3 - b1))
w2 <- b1/b3
#return(w1,w0,rho0,rho1)
f1 <- w0 * w1 * D4 + D5
for(j in 1:m) {
f2 <- d1[j]/rho1 + w0 * (1 - w1) * D4
f3 <- ((1 - w2) * f2 * f1 - f2^(1 + w2) + f2 * sqrt(((1 - w2) *
f1 - f2^w2)^2 + 2 * w2 * (1 - w2) * f1^2))/(w2 * (
1 - w2) * f1^2)
f4 <- (z1[j] * (1 - z2[j] - z3[j]) + (1 - rho1/rho0) * z1[
j] * z2[j] * z3[j] - (rho0/rho1)^(b1/b2 - 1) * D4 *
f3^w1)^2 + (z1[j] * z3[j] - D5 * f3^w1)^2
Ind <- sort.list(f4)[1]
d2[j] <- (D4[Ind])^(b1/b2) * exp((a1 - a2)/b2)
d3[j] <- (D5[Ind])^(b1/b3) * exp((a1 - a3)/b3)
D0[j] <- f4[Ind]
dd2[j] <- D4[Ind]
dd3[j] <- D5[Ind]
method <-"logloglog"
}
Dose.mixtures <- matrix(c(d1, d2, d3), ncol = 3, byrow = F)
Mixture.number <- m+1
Discrepancy <- Dis
Experiment.number <- (m+1) * r
Variables <- c("Drug1", "Drug2", "Drug3")
dimnames(Dose.mixtures) <- list(NULL, Variables)
return(list(Mixture.number, Experiment.number, round(Discrepancy,3), round(Dose.mixtures,3),method,
D0, dd2, dd3))
}
}
# Try in this example:
#SYN.design3 <- SY.design3(0.01, 6,method="logloglog")
# Build the combination design using the models detected above and report it
# to the user, together with instructions for recording the responses.
SYN.design3<-SY.design3(c0=c0, r=r,method)
print(paste0("The method used is"," ",SYN.design3[[5]]))
print(paste0("The number of combinations (mixtures) needed is m="," ", SYN.design3[[1]]))
print(paste0("Total sample size at least is m*r="," ", SYN.design3[[2]]))
print(paste0("The central L2-discrepancy of m mixtures is"," ", SYN.design3[[3]]))
print(paste0("Please use the following scheme of dose mixtures to measure the response, input the results into a txt file and name as 'Combination' "))
print(SYN.design3[[4]])
print("The Combination dataset should have the pattern as: column1--dose of drug1, column 2--dose of drug2, column 3--dose of drug3,column 4--response")
# Closes the enclosing function (its definition starts before this excerpt).
}
|
4fbc0b9c291dc5e6953fc9c2da3f914d3e5b9012
|
cef3b5e2588a7377281a8f627a552350059ca68b
|
/paws/man/forecastservice_create_forecast.Rd
|
1ee061b8ddbbea0386564eab4922c6d3fd5fb39e
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
sanchezvivi/paws
|
b1dc786a9229e0105f0f128d5516c46673cb1cb5
|
2f5d3f15bf991dcaa6a4870ed314eb7c4b096d05
|
refs/heads/main
| 2023-02-16T11:18:31.772786
| 2021-01-17T23:50:41
| 2021-01-17T23:50:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 3,570
|
rd
|
forecastservice_create_forecast.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/forecastservice_operations.R
\name{forecastservice_create_forecast}
\alias{forecastservice_create_forecast}
\title{Creates a forecast for each item in the TARGET_TIME_SERIES dataset that
was used to train the predictor}
\usage{
forecastservice_create_forecast(ForecastName, PredictorArn,
ForecastTypes, Tags)
}
\arguments{
\item{ForecastName}{[required] A name for the forecast.}
\item{PredictorArn}{[required] The Amazon Resource Name (ARN) of the predictor to use to generate the
forecast.}
\item{ForecastTypes}{The quantiles at which probabilistic forecasts are generated. \strong{You can
currently specify up to 5 quantiles per forecast}. Accepted values
include \verb{0.01 to 0.99} (increments of .01 only) and \code{mean}. The mean
forecast is different from the median (0.50) when the distribution is
not symmetric (for example, Beta and Negative Binomial). The default
value is \verb{\\\["0.1", "0.5", "0.9"\\\]}.}
\item{Tags}{The optional metadata that you apply to the forecast to help you
categorize and organize them. Each tag consists of a key and an optional
value, both of which you define.
The following basic restrictions apply to tags:
\itemize{
\item Maximum number of tags per resource - 50.
\item For each resource, each tag key must be unique, and each tag key can
have only one value.
\item Maximum key length - 128 Unicode characters in UTF-8.
\item Maximum value length - 256 Unicode characters in UTF-8.
\item If your tagging schema is used across multiple services and
resources, remember that other services may have restrictions on
allowed characters. Generally allowed characters are: letters,
numbers, and spaces representable in UTF-8, and the following
characters: + - = . \\_ : / @.
\item Tag keys and values are case sensitive.
\item Do not use \verb{aws:}, \verb{AWS:}, or any upper or lowercase combination
of these as a prefix for keys; such prefixes are reserved for AWS use. You cannot
edit or delete tag keys with this prefix. Values can have this
prefix. If a tag value has \code{aws} as its prefix but the key does not,
then Forecast considers it to be a user tag and will count against
the limit of 50 tags. Tags with only the key prefix of \code{aws} do not
count against your tags per resource limit.
}}
}
\description{
Creates a forecast for each item in the \code{TARGET_TIME_SERIES} dataset
that was used to train the predictor. This is known as inference. To
retrieve the forecast for a single item at low latency, use the
operation. To export the complete forecast into your Amazon Simple
Storage Service (Amazon S3) bucket, use the CreateForecastExportJob
operation.
The range of the forecast is determined by the \code{ForecastHorizon} value,
which you specify in the CreatePredictor request. When you query a
forecast, you can request a specific date range within the forecast.
To get a list of all your forecasts, use the ListForecasts operation.
The forecasts generated by Amazon Forecast are in the same time zone as
the dataset that was used to create the predictor.
For more information, see howitworks-forecast.
The \code{Status} of the forecast must be \code{ACTIVE} before you can query or
export the forecast. Use the DescribeForecast operation to get the
status.
}
\section{Request syntax}{
\preformatted{svc$create_forecast(
ForecastName = "string",
PredictorArn = "string",
ForecastTypes = list(
"string"
),
Tags = list(
list(
Key = "string",
Value = "string"
)
)
)
}
}
\keyword{internal}
|
effba8d21d3c10cb2699618729c577cc40657a08
|
0623eecf6c59db6edc381d43f5cfef02f7b6e4fd
|
/run_analysis.R
|
de33bb6025e94dd7a235bbc726ecb6b73dd8d655
|
[] |
no_license
|
j100cky/CourseraGettingAndCleaningData
|
4986c229c480277d518ca2922b920d905ec295fb
|
3c027a2968cc3838aa6995cc1e35120fd7e2ea35
|
refs/heads/master
| 2020-06-21T17:17:05.332192
| 2019-07-18T05:04:16
| 2019-07-18T05:04:16
| 197,513,418
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,254
|
r
|
run_analysis.R
|
# run_analysis.R -- Getting and Cleaning Data course project.
#
# Pipeline:
#   1. Download and unzip the UCI HAR dataset (only if not already present).
#   2. Load activity labels and feature names; keep only mean()/std() features.
#   3. Bind subjects + activity codes + selected features for the train and
#      test sets, then stack the two sets into one table.
#   4. Replace activity codes with descriptive names.
#   5. Average every retained variable per subject/activity combination and
#      write the result to "tidyData.txt".
library(dplyr)

fileURL <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
# NOTE(review): a hardcoded personal path contradicts the "transferable code"
# goal below; kept unchanged so existing behaviour is preserved, but consider
# running from the project directory and dropping setwd().
setwd("C:/Users/Dell XPS 9575 4K/Google Drive/My/Programming/DataObtainingAndCleaning/Project/")
fileName <- "data.zip"

# Download and unzip only when missing, so the script is cheap to re-run.
if (!file.exists(fileName)) {
  download.file(fileURL, destfile = fileName, method = "curl")
}
if (!file.exists("UCI HAR Dataset")) {
  unzip(fileName)
}

# Activity labels: numeric code (1..6) -> name (WALKING, WALKING_UPSTAIRS,
# WALKING_DOWNSTAIRS, SITTING, STANDING, LAYING).
labels <- read.table("UCI HAR Dataset/activity_labels.txt")
labels[, 2] <- as.character(labels[, 2])

# Names of the 561 measurement features.
features <- read.table("UCI HAR Dataset/features.txt")
features[, 2] <- as.character(features[, 2])

# Identify mean/std features up front -- the raw files are large, so keeping
# only the wanted columns immediately reduces memory use.
featuresWanted <- grep(pattern = ".*mean.*|.*std.*", features[, 2])
featuresWanted.names <- features[featuresWanted, 2]

# Tidy the feature names: "-mean"/"-std" -> "Mean"/"Std"; drop "-", "(", ")".
featuresWanted.names <- gsub(pattern = "-mean", replacement = "Mean", x = featuresWanted.names)
featuresWanted.names <- gsub(pattern = "-std", replacement = "Std", x = featuresWanted.names)
featuresWanted.names <- gsub(pattern = "[-()]", replacement = "", x = featuresWanted.names)

# Training set: measurements, activity codes and subject ids, column-bound.
# Single-bracket indexing of a data.frame with a numeric vector selects
# COLUMNS (list-style subsetting), which is why [featuresWanted] keeps just
# the wanted features.
trainSet <- read.table("UCI HAR Dataset/train/X_train.txt")[featuresWanted]
trainLabels <- read.table("UCI HAR Dataset/train/y_train.txt")
trainSubjects <- read.table("UCI HAR Dataset/train/subject_train.txt")
trainCombined <- cbind(trainSubjects, trainLabels, trainSet)

# Test set, assembled the same way.
testSet <- read.table("UCI HAR Dataset/test/X_test.txt")[featuresWanted]
testLabels <- read.table("UCI HAR Dataset/test/y_test.txt")
testSubjects <- read.table("UCI HAR Dataset/test/subject_test.txt")
testCombined <- cbind(testSubjects, testLabels, testSet)

# Stack train + test and name the columns (the raw files have no headers;
# duplicate-free names are also required for viewing the table).
allData <- rbind(trainCombined, testCombined)
colnames(allData) <- c("subjects", "activity", featuresWanted.names)
tbl_df(allData)

# Descriptive activity names instead of numeric codes; subjects as factors.
allData$activity <- factor(x = allData$activity, levels = labels[, 1], labels = labels[, 2])
allData$subjects <- as.factor(allData$subjects)
View(tbl_df(allData))

# reshape2 provides melt()/dcast(). Fix: install it only when absent -- the
# original called install.packages() unconditionally on every run, which is
# slow and fails without internet access or a configured CRAN mirror.
if (!requireNamespace("reshape2", quietly = TRUE)) {
  install.packages("reshape2")
}
library(reshape2)

# Long format: one row per (subject, activity, variable, value) ...
allData.melt <- melt(allData, id = c("subjects", "activity"))
View(tbl_df(allData.melt))

# ... then average each variable within every subject/activity combination.
allData.mean <- dcast(allData.melt, subjects + activity ~ variable, mean)
View(allData.mean)

# Export the tidy dataset.
write.table(allData.mean, "tidyData.txt", row.names = FALSE, quote = FALSE)
|
12bc66669df9c1ac04077117881ae9c5c3cc2846
|
5f935eb4f7bb4de4cfecb689e2691fb1cbbf9602
|
/Main text data and analysis/01_Abundance-based-ews-script-analysis.R
|
8cf2a45e6abeac689f538b71bd98657834bb4d8f
|
[] |
no_license
|
GauravKBaruah/ECO-EVO-EWS-DATA
|
be7f1d4b45121d3a06d5182c0d558c293f3c498c
|
1792eeaaecdd12798bb35a9486913e38de4ee76b
|
refs/heads/master
| 2020-05-25T14:41:42.716388
| 2019-06-22T09:31:17
| 2019-06-22T09:31:17
| 187,850,260
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 21,222
|
r
|
01_Abundance-based-ews-script-analysis.R
|
# This script reproduces Figure 1 and Figures S3, S4 and S5
# (abundance-based early-warning-signal analysis).
# NOTE(review): rm(list = ls()) wipes the user's workspace when sourced;
# running the script in a fresh R session is the safer convention.
rm(list=ls())
#libraries needed
# (require() only warns on a missing package; library() would stop instead)
library("earlywarnings")
require(grid)
require(gridExtra)
require(ggplot2)
# Helper functions (Composite.ews, genericEWS, ...). Hardcoded personal path:
# adjust this to wherever Functions_J_A_Ecology.R lives on your machine.
source('~/Dropbox/Zurich PhD Research/2_Chapter_2/J Animal Ecology codes/Functions_J_A_Ecology.R')
# loading the genetic variation data (simulated time series at six levels
# of genetic variation, 0.05 .. 0.5)
load("EWS.genvar0.05.RData")
load("EWS.genvar0.1.RData")
load("EWS.genvar0.2.RData")
load("EWS.genvar0.3.RData")
load("EWS.genvar0.4.RData")
load("EWS.genvar0.5.RData")
# Aliases for the loaded objects, one per genetic-variation level.
second.op<-EWS.trait.genvar.0.05
third.op<- EWS.trait.genvar.0.1
fourth.op<- EWS.trait.genvar.0.2
fifth.op<-EWS.trait.genvar.0.3
six.op<- EWS.trait.genvar.0.4
sev.op<-EWS.trait.genvar.0.5
# Accumulators for the per-replicate composite EWS results and the Kendall
# tau statistics of SD and AR(1) trends.
# NOTE(review): slots 1 and 8 (EWS.gen.1/8, Tau*_1/_8) are initialised but
# not filled by the loop that follows -- presumably used later in the file;
# verify they are not dead variables.
EWS.gen.1<-list()
EWS.gen.2<-list()
EWS.gen.3<-list()
EWS.gen.4<-list()
EWS.gen.5<-list()
EWS.gen.6<-list()
EWS.gen.7<-list()
EWS.gen.8<-list()
TauSD_1<-numeric()
TauAR_1<-numeric()
TauAR_2<-numeric()
TauSD_2<-numeric()
TauSD_3<-numeric()
TauAR_3<-numeric()
TauAR_4<-numeric()
TauSD_4<-numeric()
TauAR_5<-numeric()
TauSD_5<-numeric()
TauAR_6<-numeric()
TauSD_6<-numeric()
TauAR_7<-numeric()
TauSD_7<-numeric()
TauAR_8<-numeric()
TauSD_8<-numeric()
#reps of 100 replicates analysis
for(i in 1:100){
# calling a function genericEWS within Composite.ews() from the Functions_J_A_Ecology script. Note the genericEWS function is a modified script from the library earlywarnings
EWS.gen.2[i]<- list(Composite.ews((genericEWS(second.op[[i]]$N[500:530],winsize = 50,detrending = "gaussian",bandwidth = 50)$ar1),(genericEWS(second.op[[i]]$N[500:530],winsize = 50,detrending = "gaussian",bandwidth = 50)$sd)))
EWS.gen.3[i]<- list(Composite.ews((genericEWS(third.op[[i]]$N[500:530],winsize = 50,detrending = "gaussian",bandwidth = 50)$ar1),(genericEWS(third.op[[i]]$N[500:530],winsize = 50,detrending = "gaussian",bandwidth = 50)$sd)))
EWS.gen.4[i]<- list(Composite.ews((genericEWS(fourth.op[[i]]$N[500:530],winsize = 50,detrending = "gaussian",bandwidth = 50)$ar1),(genericEWS(fourth.op[[i]]$N[500:530],winsize = 50,detrending = "gaussian",bandwidth = 50)$sd)))
EWS.gen.5[i]<- list(Composite.ews((genericEWS(fifth.op[[i]]$N[500:530],winsize = 50,detrending = "gaussian",bandwidth = 50)$ar1),(genericEWS(fifth.op[[i]]$N[500:530],winsize = 50,detrending = "gaussian",bandwidth = 50)$sd)))
EWS.gen.6[i]<- list(Composite.ews((genericEWS(six.op[[i]]$N[500:530],winsize = 50,detrending = "gaussian",bandwidth = 50)$ar1),(genericEWS(six.op[[i]]$N[500:530],winsize = 50,detrending = "gaussian",bandwidth = 50)$sd)))
EWS.gen.7[i]<- list(Composite.ews((genericEWS(sev.op[[i]]$N[500:530],winsize = 50,detrending = "gaussian",bandwidth = 50)$ar1),(genericEWS(sev.op[[i]]$N[500:530],winsize = 50,detrending = "gaussian",bandwidth = 50)$sd)))
# EWS.gen.2[[i]]$Tau.SD gives a list of Kendall Tau values of SD with all having the same Kendall Tau value.
TauSD_2[i]<-mean(EWS.gen.2[[i]]$Tau.SD)
TauAR_2[i]<-mean(EWS.gen.2[[i]]$Tau.AR)
TauSD_3[i]<-mean(EWS.gen.3[[i]]$Tau.SD)
TauAR_3[i]<-mean(EWS.gen.3[[i]]$Tau.AR)
TauSD_4[i]<-mean(EWS.gen.4[[i]]$Tau.SD)
TauAR_4[i]<-mean(EWS.gen.4[[i]]$Tau.AR)
TauSD_5[i]<-mean(EWS.gen.5[[i]]$Tau.SD)
TauAR_5[i]<-mean(EWS.gen.5[[i]]$Tau.AR)
TauSD_6[i]<-mean(EWS.gen.6[[i]]$Tau.SD)
TauAR_6[i]<-mean(EWS.gen.6[[i]]$Tau.AR)
TauSD_7[i]<-mean(EWS.gen.7[[i]]$Tau.SD)
TauAR_7[i]<-mean(EWS.gen.7[[i]]$Tau.AR)
print(i)
}
# data frame
Gtau_AR1<-data.frame(Variation=factor(rep(c("0.05","0.1","0.2","0.3","0.4","0.5"),each=length(TauAR_2))),
Kendall_Tau=c( TauAR_2,TauAR_3,TauAR_4,TauAR_5,TauAR_6,TauAR_7),value=c( TauAR_2,TauAR_3,TauAR_4,TauAR_5,TauAR_6,TauAR_7))
Gtau_SD1<-data.frame(Variation=factor(rep(c("0.05","0.1","0.2","0.3","0.4","0.5"),each=length(TauSD_2))),
Kendall_Tau=c(TauSD_2,TauSD_3,TauSD_4,TauSD_5,TauSD_6,TauSD_7),value=c(TauSD_2,TauSD_3,TauSD_4,TauSD_5,TauSD_6,TauSD_7))
Gar1<-ggplot(Gtau_AR1,aes(Variation,Kendall_Tau))+geom_boxplot(alpha=0)+geom_jitter(alpha=0.3,width = 0.1)+ggtitle("AR1")+
theme_bw()+theme(plot.title = element_text(size = 14, face = "bold"),
text = element_text(size = 12 ),
axis.title = element_text(face="bold"),
axis.text.x=element_text(size = 11),
legend.position = "right") +
scale_fill_brewer(palette = "Accent")+
ylim(c(-1,1))+ylab("Kendall Tau")+xlab("Genetic variation")
Gsd1<-ggplot(Gtau_SD1,aes(Variation,Kendall_Tau))+geom_boxplot(alpha=0.1)+geom_jitter(alpha=0.3,width=0.1)+ggtitle("SD")+
theme_bw()+theme(plot.title = element_text(size = 14, face = "bold"),
text = element_text(size = 12 ),
axis.title = element_text(face="bold"),
axis.text.x=element_text(size = 11),
legend.position = "right") +
scale_fill_brewer(palette = "Accent")+ylim(c(-1,1))+ylab("Kendall Tau")+xlab("Genetic variation")
################################ Reproduction rate data #################
# Loading reproductive rate data: 100 replicate simulations per R0 level
# (1.1 through 1.5).
load("EWS.reproduction1.1.RData")
load("EWS.reproduction1.2.RData")
load("EWS.reproduction1.3.RData")
load("EWS.reproduction1.4.RData")
load("EWS.reproduction1.5.RData")
first.R0<-EWS.trait.reproduction_1.1
second.R0<-EWS.trait.reproduction_1.2
third.R0<- EWS.trait.reproduction_1.3
fourth.R0<- EWS.trait.reproduction_1.4
fifth.R0<-EWS.trait.reproduction_1.5
EWS.pl.1<-list()
EWS.pl.2<-list()
EWS.pl.3<-list()
EWS.pl.4<-list()
EWS.pl.5<-list()
# Re-initialize accumulators (Sk*/rr*/dr* vectors appear unused below --
# NOTE(review): likely leftovers from an earlier version).
TauSD_1<-numeric(); Sk1<-numeric();Sk2<-numeric();sk3<-numeric();sk3<-numeric();sk4<-numeric();sk5<-numeric();
TauAR_1<-numeric();rr1<-numeric();rr2<-numeric();rr3<-numeric();rr4<-numeric();rr5<-numeric();
TauAR_2<-numeric();dr1<-numeric();dr2<-numeric();dr3<-numeric();dr4<-numeric();dr5<-numeric();
TauSD_2<-numeric();
TauSD_3<-numeric()
TauAR_3<-numeric()
TauAR_4<-numeric()
TauSD_4<-numeric()
TauAR_5<-numeric()
TauSD_5<-numeric()
# Same analysis as for genetic variation above, now per reproductive rate.
for(i in 1:100){
  EWS.pl.1[i]<-list(Composite.ews( (genericEWS(first.R0[[i]]$N[500:530],winsize=50,detrending = "gaussian",bandwidth =50 )$ar1),(genericEWS(first.R0[[i]]$N[500:530],winsize=50,detrending = "gaussian",bandwidth =50 )$sd)))
  EWS.pl.2[i]<- list(Composite.ews( (genericEWS(second.R0[[i]]$N[500:530],winsize=50,detrending = "gaussian",bandwidth = 50)$ar1),(genericEWS(second.R0[[i]]$N[500:530],winsize=50,detrending = "gaussian",bandwidth =50 )$sd)))
  EWS.pl.3[i]<- list(Composite.ews( (genericEWS(third.R0[[i]]$N[500:530],winsize=50,detrending = "gaussian",bandwidth = 50)$ar1),(genericEWS(third.R0[[i]]$N[500:530],winsize=50,detrending = "gaussian",bandwidth =50 )$sd)))
  EWS.pl.4[i]<- list(Composite.ews( (genericEWS(fourth.R0[[i]]$N[500:530],winsize=50,detrending = "gaussian",bandwidth = 50)$ar1),(genericEWS(fourth.R0[[i]]$N[500:530],winsize=50,detrending = "gaussian",bandwidth =50 )$sd)))
  EWS.pl.5[i]<- list(Composite.ews( (genericEWS(fifth.R0[[i]]$N[500:530],winsize=50,detrending = "gaussian",bandwidth = 50)$ar1),(genericEWS(fifth.R0[[i]]$N[500:530],winsize=50,detrending = "gaussian",bandwidth =50 )$sd)))
  TauSD_1[i]<-mean(EWS.pl.1[[i]]$Tau.SD)
  TauAR_1[i]<-mean(EWS.pl.1[[i]]$Tau.AR)
  TauSD_2[i]<-mean(EWS.pl.2[[i]]$Tau.SD)
  TauAR_2[i]<-mean(EWS.pl.2[[i]]$Tau.AR)
  TauSD_3[i]<-mean(EWS.pl.3[[i]]$Tau.SD)
  TauAR_3[i]<-mean(EWS.pl.3[[i]]$Tau.AR)
  TauSD_4[i]<-mean(EWS.pl.4[[i]]$Tau.SD)
  TauAR_4[i]<-mean(EWS.pl.4[[i]]$Tau.AR)
  TauSD_5[i]<-mean(EWS.pl.5[[i]]$Tau.SD)
  TauAR_5[i]<-mean(EWS.pl.5[[i]]$Tau.AR)
  print(i)
}
# Tau*_1..Tau*_5 correspond to R0 = 1.1 .. 1.5 in order.
Rtau_AR1<-data.frame(Variation=factor(rep(c("1.1","1.2", "1.3","1.4","1.5"),each=length(TauAR_1))),
                     Kendall_Tau=c(TauAR_1, TauAR_2,TauAR_3,TauAR_4,TauAR_5),value=c(TauAR_1, TauAR_2,TauAR_3,TauAR_4,TauAR_5))
Rtau_SD1<-data.frame(Variation=factor(rep(c("1.1","1.2", "1.3","1.4","1.5"),each=length(TauSD_1))),
                     Kendall_Tau=c(TauSD_1, TauSD_2,TauSD_3,TauSD_4,TauSD_5),value=c(TauSD_1 ,TauSD_2,TauSD_3,TauSD_4,TauSD_5))
R.ar1<-ggplot(Rtau_AR1,aes(Variation,Kendall_Tau))+geom_boxplot(alpha=0.1)+geom_jitter(alpha=0.3,width=0.1)+ggtitle("AR1")+
  theme_bw()+theme(plot.title = element_text(size = 14, face = "bold"),
                   text = element_text(size = 12 ),
                   axis.title = element_text(face="bold"),
                   axis.text.x=element_text(size = 11),
                   legend.position = "right") +
  scale_fill_brewer(palette = "Accent")+ylim(c(-1,1))+ylab("Kendall Tau")+xlab("Reproductive Rate")
R.sd1<-ggplot(Rtau_SD1,aes(Variation,Kendall_Tau))+geom_boxplot(alpha=0.1)+geom_jitter(alpha=0.3,width=0.1)+ggtitle("SD")+
  theme_bw()+theme(plot.title = element_text(size = 14, face = "bold"),
                   text = element_text(size = 12 ),
                   axis.title = element_text(face="bold"),
                   axis.text.x=element_text(size = 11),
                   legend.position = "right") +
  scale_fill_brewer(palette = "Accent")+ylim(c(-1,1))+ylab("Kendall Tau")+xlab("Reproductive rate")
############################## Plasticity Data ##########################
# 100 replicate simulations per strength of plasticity (b = 0.05 .. 0.8).
load("EWS.plasticity.0.05.RData")
load("EWS.plasticity.0.1.RData")
load("EWS.plasticity.0.2.RData")
load("EWS.plasticity.0.3.RData")
load("EWS.plasticity.0.4.RData")
load("EWS.plasticity.0.5.RData")
load("EWS.plasticity.0.8.RData")
second.p<-EWS.trait.plasticity0.05
third.p<- EWS.trait.plasticity0.1
fourth.p<- EWS.trait.plasticity0.2
fifth.p<-EWS.trait.plasticity0.3
six.p<- EWS.trait.plasticity0.4
sev.p<-EWS.trait.plasticity0.5
e.p<-EWS.trait.plasticity0.8
# Accumulators for the plasticity analysis (re-initialized from above).
EWS.p.1<-list()
EWS.p.2<-list()
EWS.p.3<-list()
EWS.p.4<-list()
EWS.p.5<-list()
EWS.p.6<-list()
EWS.p.7<-EWS.p.8<-list()
TauSD_1<-numeric()
TauAR_1<-numeric()
TauAR_2<-numeric()
TauSD_2<-numeric()
TauSD_3<-numeric()
TauAR_3<-numeric()
TauAR_4<-numeric()
TauSD_4<-numeric()
TauAR_5<-numeric()
TauSD_5<-numeric()
TauAR_6<-numeric()
TauSD_6<-numeric()
TauAR_7<-numeric()
TauSD_7<-numeric()
# Same EWS computation as above, per plasticity strength; note Tau*_1 holds
# the b = 0.8 results (EWS.p.8), while Tau*_2..Tau*_7 hold b = 0.05..0.5.
for(i in 1:100){
  EWS.p.2[i]<- list(Composite.ews((genericEWS(second.p[[i]]$N[500:530],winsize=50,detrending = "gaussian",bandwidth =50 )$ar1),(genericEWS(second.p[[i]]$N[500:530],winsize=50,detrending = "gaussian",bandwidth =50 )$sd)))
  EWS.p.3[i]<- list(Composite.ews((genericEWS(third.p[[i]]$N[500:530],winsize=50,detrending = "gaussian",bandwidth =50 )$ar1),(genericEWS(third.p[[i]]$N[500:530],winsize=50,detrending = "gaussian",bandwidth =50 )$sd)))
  EWS.p.4[i]<- list(Composite.ews((genericEWS(fourth.p[[i]]$N[500:530],winsize=50,detrending = "gaussian",bandwidth =50 )$ar1),(genericEWS(fourth.p[[i]]$N[500:530],winsize=50,detrending = "gaussian",bandwidth =50 )$sd)))
  EWS.p.5[i]<- list(Composite.ews((genericEWS(fifth.p[[i]]$N[500:530],winsize=50,detrending = "gaussian",bandwidth =50 )$ar1),(genericEWS(fifth.p[[i]]$N[500:530],winsize=50,detrending = "gaussian",bandwidth =50 )$sd)))
  EWS.p.6[i]<- list(Composite.ews((genericEWS(six.p[[i]]$N[500:530],winsize=50,detrending = "gaussian",bandwidth =50 )$ar1),(genericEWS(six.p[[i]]$N[500:530],winsize=50,detrending = "gaussian",bandwidth =50 )$sd)))
  EWS.p.7[i]<- list(Composite.ews((genericEWS(sev.p[[i]]$N[500:530],winsize=50,detrending = "gaussian",bandwidth =50 )$ar1),(genericEWS(sev.p[[i]]$N[500:530],winsize=50,detrending = "gaussian",bandwidth =50 )$sd)))
  EWS.p.8[i]<-list(Composite.ews((genericEWS(e.p[[i]]$N[500:530],winsize=50,detrending = "gaussian",bandwidth =50 )$ar1),(genericEWS(e.p[[i]]$N[500:530],winsize=50,detrending = "gaussian",bandwidth =50 )$sd)))
  TauSD_2[i]<-mean(EWS.p.2[[i]]$Tau.SD)
  TauAR_2[i]<-mean(EWS.p.2[[i]]$Tau.AR)
  TauSD_3[i]<-mean(EWS.p.3[[i]]$Tau.SD)
  TauAR_3[i]<-mean(EWS.p.3[[i]]$Tau.AR)
  TauSD_4[i]<-mean(EWS.p.4[[i]]$Tau.SD)
  TauAR_4[i]<-mean(EWS.p.4[[i]]$Tau.AR)
  TauSD_5[i]<-mean(EWS.p.5[[i]]$Tau.SD)
  TauAR_5[i]<-mean(EWS.p.5[[i]]$Tau.AR)
  TauSD_6[i]<-mean(EWS.p.6[[i]]$Tau.SD)
  TauAR_6[i]<-mean(EWS.p.6[[i]]$Tau.AR)
  TauSD_7[i]<-mean(EWS.p.7[[i]]$Tau.SD)
  TauAR_7[i]<-mean(EWS.p.7[[i]]$Tau.AR)
  # b = 0.8 results stored in the *_1 slots:
  TauSD_1[i]<-mean(EWS.p.8[[i]]$Tau.SD)
  TauAR_1[i]<-mean(EWS.p.8[[i]]$Tau.AR)
  #
  print(i)
}
# Assemble Kendall tau results per plasticity strength.
# Mapping of accumulators to plasticity strengths (set in the loop above):
#   Tau*_2 -> b = 0.05, Tau*_3 -> b = 0.1, Tau*_5 -> b = 0.3,
#   Tau*_6 -> b = 0.4,  Tau*_7 -> b = 0.5, Tau*_1 -> b = 0.8
# (b = 0.2, i.e. Tau*_4, is deliberately omitted from this figure).
# BUG FIX: the original labels were c("0.05","0.1","0.3","0.3","0.4","0.8"),
# which duplicated "0.3" and mislabelled the b = 0.4 and b = 0.5 groups.
Ptau_AR1<-data.frame(Variation=factor(rep(c("0.05", "0.1","0.3","0.4", "0.5","0.8"),each=length(TauAR_2))),
                     Kendall_Tau=c(TauAR_2,TauAR_3,TauAR_5,TauAR_6,TauAR_7,TauAR_1),value=c(TauAR_2,TauAR_3,TauAR_5,TauAR_6,TauAR_7,TauAR_1))
Ptau_SD1<-data.frame(Variation=factor(rep(c("0.05", "0.1","0.3","0.4", "0.5","0.8"),each=length(TauSD_2))),
                     Kendall_Tau=c( TauSD_2,TauSD_3,TauSD_5, TauSD_6,TauSD_7,TauSD_1),value=c( TauSD_2,TauSD_3,TauSD_5, TauSD_6,TauSD_7,TauSD_1))
# Boxplots of Kendall tau for AR1 and SD across plasticity strengths.
Par1<-ggplot(Ptau_AR1,aes(Variation,Kendall_Tau))+geom_boxplot(alpha=0.1)+geom_jitter(alpha=0.3,width=0.1)+ggtitle("AR1")+
  theme_bw()+theme(plot.title = element_text(size = 14, face = "bold"),
                   text = element_text(size = 12 ),
                   axis.title = element_text(face="bold"),
                   axis.text.x=element_text(size = 11),
                   legend.position = "right") +
  scale_fill_brewer(palette = "Accent")+ylim(c(-1,1))+ylab("Kendall Tau")+xlab("Strength of plasticity")
Psd<-ggplot(Ptau_SD1,aes(Variation,Kendall_Tau))+geom_boxplot(alpha=0.1)+geom_jitter(alpha=0.3,width=0.1)+ggtitle("SD")+
  theme_bw()+theme(plot.title = element_text(size = 14, face = "bold"),
                   text = element_text(size = 12 ),
                   axis.title = element_text(face="bold"),
                   axis.text.x=element_text(size = 11),
                   legend.position = "right") +
  scale_fill_brewer(palette = "Accent")+ylim(c(-1,1))+ylab("Kendall Tau")+xlab("Strength of plasticity")
# Combine the six panels (SD row, then AR1 row) into one figure;
# multiplot() comes from the sourced Functions_J_A_Ecology.R script.
#pdf("03_Fig_abundanceEWS-all-factors-30datapoints.pdf", width=9, height =7)
multiplot(Psd,Gsd1,R.sd1,Par1, Gar1,R.ar1, cols=2)
#dev.off()
######### Plotting population dynamics of genetic variation #############
# One panel per genetic-variation level. Coloured lines: the 100 replicate
# trajectories during the EWS window (t = 500-530); grey lines continue each
# trajectory to t = 550. Dashed verticals mark the window start (blue) and
# end (red).
r2 = grDevices::colors()[grep('gr(a|e)y', grDevices::colors(), invert = T)]
#pdf("short_genetic-variation-population-collapse_short.pdf")
par(mfrow=c(3,2),mar=c(1,4,1,1)+0.9)
t<-seq(500,550)
# BUG FIX: the panel title said "var = 0.06", but second.op holds the
# EWS.genvar0.05.RData simulations, so it is labelled 0.05 here
# (TODO confirm against the simulation parameters).
plot(0,0,xlim = c(500,550),ylim = c(0,80),type = "n",xlab="Time",ylab = "Population size",main = "var = 0.05")
for (i in 1:100){
  lines(t[1:31],second.op[[i]]$N[500:530],col=r2[i])
  lines(t[31:50],second.op[[i]]$N[531:550],col='grey')
}
abline(v=530,col='red',lwd=2,lty=2);abline(v=500,col='steelblue',lwd=2,lty=2)
t<-seq(500,550)
plot(0,0,xlim = c(500,550),ylim = c(0,80),type = "n",xlab="Time",ylab = "Population size",main = "var = 0.1")
for (i in 1:100){
  lines(t[1:31],third.op[[i]]$N[500:530],col=r2[i])
  lines(t[31:50],third.op[[i]]$N[531:550],col='grey')
}
abline(v=530,col='red',lwd=2, lty=2);abline(v=500,col='steelblue',lwd=2,lty=2)
t<-seq(500,550)
plot(0,0,xlim = c(500,550),ylim = c(0,80),type = "n",xlab="Time",ylab = "Population size",main = "var = 0.2")
for (i in 1:100){
  lines(t[1:31],fourth.op[[i]]$N[500:530],col=r2[i])
  lines(t[31:50],fourth.op[[i]]$N[531:550],col='grey')
}
abline(v=530,col='red',lwd=2,lty=2);abline(v=500,col='steelblue',lwd=2,lty=2)
t<-seq(500,550)
plot(0,0,xlim = c(500,550),ylim = c(0,80),type = "n",xlab="Time",ylab = "Population size",main = "var = 0.3")
for (i in 1:100){
  lines(t[1:31],fifth.op[[i]]$N[500:530],col=r2[i])
  lines(t[31:50],fifth.op[[i]]$N[531:550],col='grey')
}
abline(v=530,col='red',lwd=2,lty=2);abline(v=500,col='steelblue',lwd=2,lty=2)
t<-seq(500,550)
plot(0,0,xlim = c(500,550),ylim = c(0,80),type = "n",xlab="Time",ylab = "Population size",main = "var = 0.4")
for (i in 1:100){
  lines(t[1:31],six.op[[i]]$N[500:530],col=r2[i])
  lines(t[31:50],six.op[[i]]$N[531:550],col='grey')
}
abline(v=530,col='red',lwd=2,lty=2);abline(v=500,col='steelblue',lwd=2,lty=2)
#dev.off()
############################## plotting population dynamics Reproduction plots ####################################################
# One panel per reproductive rate (R0 = 1.1 .. 1.5), same layout/colour
# conventions as the genetic-variation panels above.
#pdf("Short_Reproduction_plots_collapse_short.pdf")
par(mfrow=c(3,2),mar=c(1,4,1,1)+0.9)
t<-seq(500,550)
plot(0,0,xlim = c(500,550),ylim = c(0,80),type = "n",xlab="Time",ylab = "Population size",main = "R0 = 1.1")
for (i in 1:100){
  lines(t[1:31],first.R0[[i]]$N[500:530],col=r2[i])
  lines(t[31:50],first.R0[[i]]$N[531:550],col='grey')
}
abline(v=530,col='red',lwd=2,lty=2);abline(v=500,col='steelblue',lwd=2,lty=2)
t<-seq(500,550)
plot(0,0,xlim = c(500,550),ylim = c(0,80),type = "n",xlab="Time",ylab = "Population size",main = "R0 = 1.2")
for (i in 1:100){
  lines(t[1:31],second.R0[[i]]$N[500:530],col=r2[i])
  lines(t[31:50],second.R0[[i]]$N[531:550],col='grey')
}
abline(v=530,col='red',lwd=2,lty=2);abline(v=500,col='steelblue',lwd=2,lty=2)
t<-seq(500,550)
plot(0,0,xlim = c(500,550),ylim = c(0,80),type = "n",xlab="Time",ylab = "Population size",main = "R0 = 1.3")
for (i in 1:100){
  lines(t[1:31],third.R0[[i]]$N[500:530],col=r2[i])
  lines(t[31:50],third.R0[[i]]$N[531:550],col='grey')
}
abline(v=530,col='red',lwd=2,lty=2);abline(v=500,col='steelblue',lwd=2,lty=2)
t<-seq(500,550)
plot(0,0,xlim = c(500,550),ylim = c(0,80),type = "n",xlab="Time",ylab = "Population size",main = "R0 = 1.4")
for (i in 1:100){
  lines(t[1:31],fourth.R0[[i]]$N[500:530],col=r2[i])
  lines(t[31:50],fourth.R0[[i]]$N[531:550],col='grey')
}
abline(v=530,col='red',lwd=2,lty=2);abline(v=500,col='steelblue',lwd=2,lty=2)
t<-seq(500,550)
plot(0,0,xlim = c(500,550),ylim = c(0,80),type = "n",xlab="Time",ylab = "Population size",main = "R0 = 1.5")
for (i in 1:100){
  lines(t[1:31],fifth.R0[[i]]$N[500:530],col=r2[i])
  lines(t[31:50],fifth.R0[[i]]$N[531:550],col='grey')
}
abline(v=530,col='red',lwd=2,lty=2);abline(v=500,col='steelblue',lwd=2,lty=2)
#dev.off()
############################################## plotting population dynamics plasticity plots#############################################
# One panel per plasticity strength (b = 0.05, 0.1, 0.3, 0.4, 0.5, 0.8);
# note b = 0.2 (fourth.p) is not plotted.
#pdf("short_plasticity-population-collapse_short.pdf")
par(mfrow=c(3,2),mar=c(1,4,1,1)+0.9)
t<-seq(500,550)
plot(0,0,xlim = c(500,550),ylim = c(0,80),type = "n",xlab="Time",ylab = "Population size",main = "b = 0.05")
for (i in 1:100){
  lines(t[1:31],second.p[[i]]$N[500:530],col=r2[i])
  lines(t[31:50],second.p[[i]]$N[531:550],col='grey')
}
abline(v=530,col='red',lwd=2,lty=2);abline(v=500,col='steelblue',lwd=2,lty=2)
t<-seq(500,550)
plot(0,0,xlim = c(500,550),ylim = c(0,80),type = "n",xlab="Time",ylab = "Population size",main = "b = 0.1")
for (i in 1:100){
  lines(t[1:31],third.p[[i]]$N[500:530],col=r2[i])
  lines(t[31:50],third.p[[i]]$N[531:550],col='grey')
}
abline(v=530,col='red',lwd=2,lty=2);abline(v=500,col='steelblue',lwd=2,lty=2)
t<-seq(500,550)
plot(0,0,xlim = c(500,550),ylim = c(0,80),type = "n",xlab="Time",ylab = "Population size",main = "b = 0.3")
for (i in 1:100){
  lines(t[1:31],fifth.p[[i]]$N[500:530],col=r2[i])
  lines(t[31:50],fifth.p[[i]]$N[531:550],col='grey')
}
abline(v=530,col='red',lwd=2,lty=2);abline(v=500,col='steelblue',lwd=2,lty=2)
t<-seq(500,550)
plot(0,0,xlim = c(500,550),ylim = c(0,80),type = "n",xlab="Time",ylab = "Population size",main = "b = 0.4")
for (i in 1:100){
  lines(t[1:31],six.p[[i]]$N[500:530],col=r2[i])
  lines(t[31:50],six.p[[i]]$N[531:550],col='grey')
}
abline(v=530,col='red',lwd=2,lty=2);abline(v=500,col='steelblue',lwd=2,lty=2)
t<-seq(500,550)
plot(0,0,xlim = c(500,550),ylim = c(0,80),type = "n",xlab="Time",ylab = "Population size",main = "b = 0.5")
for (i in 1:100){
  lines(t[1:31],sev.p[[i]]$N[500:530],col=r2[i])
  lines(t[31:50],sev.p[[i]]$N[531:550],col='grey')
}
abline(v=530,col='red',lwd=2,lty=2);abline(v=500,col='steelblue',lwd=2,lty=2)
t<-seq(500,550)
plot(0,0,xlim = c(500,550),ylim = c(0,80),type = "n",xlab="Time",ylab = "Population size",main = "b = 0.8")
for (i in 1:100){
  lines(t[1:31],e.p[[i]]$N[500:530],col=r2[i])
  lines(t[31:50],e.p[[i]]$N[531:550],col='grey')
  # lines(t,fifth.op[[i]]$N,col=r3[i])
}
abline(v=530,col='red',lwd=2,lty=2);abline(v=500,col='steelblue',lwd=2,lty=2)
#dev.off()
################### population extinction figure S2 ####
# Standalone section: clears the workspace and plots 20 extinction
# trajectories for low vs high genetic variation.
rm(list=ls())
load("Extinction_genetic_variation_0.05.RData")
load("Extinction_genetic_variation_0.5.RData") #low plasticity low genvariation
ls()
first.op<- EWS.genetic.extinction.0.05
second.op<-EWS.genetic.extinction.0.5
library("earlywarnings")
source('~/Dropbox/Zurich PhD Research/2_Chapter_2/J Animal Ecology codes/Functions_J_A_Ecology.R')
# Lambda values for evolving genetic variation
par(mfrow=c(1,2))
r2 = grDevices::colors()[grep('gr(a|e)y', grDevices::colors(), invert = T)]
t<-seq(1,900)
plot(0,0,xlim = c(200,900),ylim = c(0,80),type = "n",xlab="Time",ylab = "Population size",main = "genetic variation = 0.05")
for (i in 1:20){
  lines(t[200:900],first.op[[i]]$N[200:900],col=r2[i])
}
plot(0,0,xlim = c(200,900),ylim = c(0,80),type = "n",xlab="Time",ylab = "Population size",main = "genetic variation = 0.5")
for (i in 1:20){
  lines(t[200:900],second.op[[i]]$N[200:900],col=r2[i])
}
# lines(t,fifth.op[[i]]$N,col=r3[i])
|
bdc13e44277f2c8e462a6d82e9af1bad4c3c5e96
|
ea238cddcece1bf08c93a167c4d9bda88a839fe7
|
/run_analysis.R
|
17fa9705d836299413bac4e7a042c681bebbf5b1
|
[] |
no_license
|
pswpsh/getting-and-cleaning-data
|
a303b83503412a25b374534ef59cf04b590f702c
|
775bcd2b46246b6739b7e686875adb5233cfef39
|
refs/heads/master
| 2021-01-10T04:41:34.816238
| 2015-05-24T23:19:17
| 2015-05-24T23:19:17
| 36,198,529
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,403
|
r
|
run_analysis.R
|
# This R script, called run_analysis.R, does the following:
#
# 1. Merges the training and the test sets to create one data set.
# 2. Extracts only the measurements on the mean and standard deviation.
# 3. Uses descriptive activity names to name the activities in the data set.
# 4. Appropriately labels the data set with descriptive variable names.
# 5. Creates a second, independent tidy data set with the average
#    of each variable for each activity and each subject.
library(RCurl)  # NOTE(review): only needed if the download block below is re-enabled
setwd("C:/MOOC/R/coursera/Stat_inference_proj")  # NOTE(review): machine-specific path
#if (file.info('UCI HAR Dataset')$isdir == FALSE) {
#  dataFile <- 'https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip'
#  dir.create('UCI HAR Dataset')
#  download.file(dataFile, 'UCI-HAR-dataset.zip', method='curl')
#  unzip('./UCI-HAR-dataset.zip')
#}
# 1. Merge the training and the test sets to create one data set.
#   - "Features" merges data from "X_train.txt" and "X_test.txt"
#   - "Activity" merges data from "Y_train.txt" and "Y_test.txt"
#   - "Subject" merges data from "subject_train.txt" and "subject_test.txt"
#   - Levels of "Activity" are defined in "activity_labels.txt"
#   - Names of "Features" are defined in "features.txt"
x.train <- read.table('./UCI HAR Dataset/train/X_train.txt')
x.test <- read.table('./UCI HAR Dataset/test/X_test.txt')
x.data <- rbind(x.train, x.test)
y.train <- read.table('./UCI HAR Dataset/train/y_train.txt')
y.test <- read.table('./UCI HAR Dataset/test/y_test.txt')
y.data <- rbind(y.train, y.test)
subject.train <- read.table('./UCI HAR Dataset/train/subject_train.txt')
subject.test <- read.table('./UCI HAR Dataset/test/subject_test.txt')
subject.data <- rbind(subject.train, subject.test)
names(subject.data) <- c("subject")
# label column for the activity codes
names(y.data) <- c("activity")
# feature names for the 561 measurement columns
feature.name.data <- read.table('./UCI HAR Dataset/features.txt')
names(x.data) <- feature.name.data$V2
# merge measurements, subject ids and activity codes into one data frame
allData <- cbind(x.data, subject.data, y.data)
# 2. Extract only the measurements on the mean and standard deviation for each
#    measurement (feature names containing "-mean()" or "-std()").
extractNames <- cbind(as.character(feature.name.data$V2[grep("-mean\\(\\)|-std\\(\\)", feature.name.data[, 2])]), "subject", "activity")
extractData <- subset(allData, select = extractNames)
# 3. Use descriptive activity names to name the activities in the data set.
activity.label <- read.table('./UCI HAR Dataset/activity_labels.txt')
# before factorization (numeric codes)
head(extractData$activity)
# after factorization (descriptive upper-case labels)
activity.label$V2 <- toupper(as.character(activity.label$V2))
head(extractData$activity <- activity.label[extractData$activity, 2])
# 4. Appropriately label the data set with descriptive variable names.
names(extractData) <- gsub("^t", "time-", names(extractData))
names(extractData) <- gsub("^f", "frequency-", names(extractData))
names(extractData) <- gsub("Mag", "Magnitude", names(extractData))
names(extractData) <- gsub("Acc", "Accelerometer", names(extractData))
names(extractData) <- gsub("Gyro", "Gyroscope", names(extractData))
names(extractData)
# 5. Create a second, independent tidy data set with the average
#    of each variable for each activity and each subject.
library(plyr)  # NOTE(review): attached but apparently unused -- aggregate() is from stats
tidyData <- aggregate(. ~ subject + activity, extractData, mean)
tidyData <- tidyData[order(tidyData$subject, tidyData$activity), ]
# FIX: spell out `row.names` instead of relying on partial argument matching
# ("row.name" only worked because it uniquely abbreviated "row.names").
write.table(tidyData, file = "tidydata.txt", row.names = FALSE)
library(knitr)
knit2html("codebook.Rmd")
|
a5bff14a9618fcd0e8e75595c9bc21dc23526d7e
|
54c55dae302bc6cc761c8fd63e33a886d4c09e9d
|
/src/fr/tagc/rainet/core/execution/analysis/EnrichmentAnalysis/lncRNAGroup_odds_ratio.R
|
11e20db56adc21108fd3288bb405c79190593815
|
[] |
no_license
|
diogomribeiro/RAINET
|
66f41e21da73dc4ace8184b8785f144abc70799a
|
4d2f919a4554c45f5d5b8ddc9d35bc83b4bc2925
|
refs/heads/master
| 2021-09-14T02:11:49.965465
| 2018-05-07T13:50:19
| 2018-05-07T13:50:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,005
|
r
|
lncRNAGroup_odds_ratio.R
|
# 2017 Diogo Ribeiro
# Script to plot/table results from Fisher's exact test and its odds ratio on groups of lncRNAs
library(data.table)
require(ggplot2)
require(grid)
require(gridExtra)
library(RColorBrewer)
##############################
# Basic one-way all vs all table
##############################
# Alternative input files (uncomment the one to analyse):
# inputFile = "/home/diogo/Documents/RAINET_data/TAGC/rainetDatabase/results/LncRNAGroupAnalysis/LncRNAGroupOddsRatio/real/outFile.tsv"
# inputFile = "/home/diogo/Documents/RAINET_data/TAGC/rainetDatabase/results/LncRNAGroupAnalysis/LncRNAGroupOddsRatio/real/experimental_interactions/outFile.tsv"
# inputFile = "/home/diogo/Documents/RAINET_data/TAGC/rainetDatabase/results/LncRNAGroupAnalysis/LncRNAGroupOddsRatio/real/rbp_enrichments/outFile.tsv"
# inputFile = "/home/diogo/Documents/RAINET_data/TAGC/rainetDatabase/results/LncRNAGroupAnalysis/LncRNAGroupOddsRatio/real/rbp_enrichments/outFile_complex_dataset.tsv"
# inputFile = "/home/diogo/Documents/RAINET_data/TAGC/rainetDatabase/results/LncRNAGroupAnalysis/LncRNAGroupOddsRatio/real/outFile_simple.tsv"
# inputFile = "/home/diogo/Documents/RAINET_data/TAGC/rainetDatabase/results/LncRNAGroupAnalysis/LncRNAGroupOddsRatio/real/outFile_complex_dataset.tsv"
#inputFile = "/home/diogo/Documents/RAINET_data/TAGC/rainetDatabase/results/LncRNAGroupAnalysis/LncRNAGroupOddsRatio/real/structure_comparison/outFile.tsv"
# inputFile = "/home/diogo/Documents/RAINET_data/TAGC/rainetDatabase/results/LncRNAGroupAnalysis/LncRNAGroupOddsRatio/real/structure_comparison/outFile_gencodebasic_background.tsv"
inputFile = "/home/diogo/Documents/RAINET_data/TAGC/rainetDatabase/results/LncRNAGroupAnalysis/LncRNAGroupOddsRatio/real/minProt_topEnrich/outFile_minProt_topEnrich.tsv"
# inputFile = "/home/diogo/Documents/RAINET_data/TAGC/rainetDatabase/results/RBP_analysis/GroupOddsRatio/cutoff50_background_rbp/rbp.tsv"
dataset <- fread(inputFile, stringsAsFactors = FALSE, header = TRUE, sep="\t")
# get all different categories
categories = unique(dataset$ExternalList)
# create a table (one page each) for each category
for (i in categories){
  grid.newpage()
  grid.table( dataset[dataset$ExternalList == i])
}
# Whole summary on one page
grid.newpage()
grid.table( dataset)
##############################
# Two-sided colored table (as in Mukherjee2016)
##############################
### change format of dataset: one line per external dataset and 'yes' or 'no', value is odds ratio
# inputFile = "/home/diogo/Documents/RAINET_data/TAGC/rainetDatabase/results/LncRNAGroupAnalysis/LncRNAGroupOddsRatio/real/outFile_simple.tsv_two_sided.tsv"
# inputFile = "/home/diogo/Documents/RAINET_data/TAGC/rainetDatabase/results/LncRNAGroupAnalysis/LncRNAGroupOddsRatio/real/rbp_enrichments/outFile.tsv_two_sided.tsv"
inputFile = "/home/diogo/Documents/RAINET_data/TAGC/rainetDatabase/results/LncRNAGroupAnalysis/LncRNAGroupOddsRatio/real/minProt_topEnrich/outFile_minProt_topEnrich.tsv_two_sided.tsv"
# inputFile = "/home/diogo/Documents/RAINET_data/TAGC/rainetDatabase/results/LncRNAGroupAnalysis/LncRNAGroupOddsRatio/real/outFile_complex_dataset.tsv_two_sided.tsv"
dataset <- fread(inputFile, stringsAsFactors = FALSE, header = TRUE, sep="\t")
### filter dataset to have only scaffolding candidates
# Proteome-wide
# dataset = dataset[dataset$TranscriptGroup == "2-scaffolding_candidates"]
# RBP-only
# dataset = dataset[dataset$TranscriptGroup == "2-RBP-enriched_lncRNAs"]
# # MinProt topEnrich parameter
# dataset = dataset[dataset$TranscriptGroup == "minProt5_topEnrich5"]
# # excluding Mukherjee2016 because it has infinite odds ratio
# dataset = dataset[dataset$ExternalList != "Mukherjee2016"]
# excluding Necsulea2014 because there is not significance
#dataset = dataset[dataset$ExternalList != "Necsulea2014"]
## make Inf odds ratio appear as NA, so that we can put a good enrichment color on it
# NOTE(review): comparing/assigning strings against what is presumably a numeric
# column relies on implicit coercion (numeric -> character for the comparison,
# "NA" -> NA for the assignment) -- confirm OddsRatio's type in the input file.
dataset[dataset$OddsRatio == "Inf"]$OddsRatio = "NA"
# Heatmap-style tile plot: one tile per (external dataset, in-group) cell,
# coloured by odds ratio and annotated with the overlap count.
plt1 = ggplot( dataset, aes(x = ExternalList, y = InGroup)) +
  geom_tile( aes( fill = OddsRatio), colour = "black", size = 1) +
  scale_fill_continuous( low = "white", high = "#de2d26", name = "Fisher's Exact Test \nOdds ratio", na.value = "#de2d26") +
  xlab("Orthogonal lncRNA gene dataset") +
  ylab("Scaffolding candidate") +
  geom_text( label = dataset$Overlap, size = 8) +
  theme_minimal() +
  theme(text = element_text(size=20)) +
  theme(axis.title.x=element_text(vjust=-0.6))
plt1
# print as 20.91 x 4.50 inches
# # Plot for functional RNAs vs protein dataset
# plt1 = ggplot( dataset, aes(x = TranscriptGroup, y = InGroup)) +
#   geom_tile( aes( fill = OddsRatio), colour = "black", size = 1) +
#   scale_fill_continuous( low = "white", high = "#de2d26", name = "Fisher's Exact Test \nOdds ratio", na.value = "#de2d26") +
#   xlab("Protein dataset") +
#   ylab("Functional RNA") +
#   geom_text( label = dataset$Overlap, size = 8) +
#   theme_minimal() +
#   theme(text = element_text(size=20)) +
#   theme(axis.title.x=element_text(vjust=-0.6))
# plt1
#
# # print as 13.91 x 4.50 inches
##############################
# Colored table for complex datasets
##############################
inputFile = "/home/diogo/Documents/RAINET_data/TAGC/rainetDatabase/results/LncRNAGroupAnalysis/LncRNAGroupOddsRatio/real/outFile_complex_dataset.tsv"
dataset <- fread(inputFile, stringsAsFactors = FALSE, header = TRUE, sep="\t")
plt2 = ggplot( dataset, aes(x = TranscriptGroup, y = ExternalList)) +
  geom_tile( aes( fill = OddsRatio), colour = "black", size = 1) +
  scale_fill_continuous( low = "white", high = "#de2d26", name = "Fisher's Exact Test \nOdds ratio", na.value = "#de2d26", limits = c(1.,2.)) +
  scale_x_discrete( labels = c("Corum","Wan 2015","BioPlex","Network modules","All candidates")) +
  xlab("Protein dataset") +
  ylab("Functional/Conserved lncRNAs") +
  geom_text( label = dataset$Overlap, size = 8) +
  theme_minimal() +
  theme(text = element_text(size=20)) +
  theme(axis.title.x=element_text(vjust=-0.6), axis.text.y=element_blank())
plt2
# print as 20.91 x 4.50 inches
|
9fc97cb9ddbb360848fce3de0fbba6ca02216b8a
|
3ad61e2720034ea8f5686efc2a9754063abf338d
|
/pixelArtDrawer/HourOfCodeScripts.R
|
317d05824e949b6e23c7c37953d5e147f9ae9013
|
[] |
no_license
|
origamiwolf/scripts
|
e9aa81d9b76d2bed766596b920270c56711f1139
|
97ec03d6f3ece919e165aeb48cdcbdaf61627de3
|
refs/heads/master
| 2016-09-06T15:22:51.217201
| 2015-12-01T14:58:32
| 2015-12-01T14:58:32
| 27,330,468
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 910
|
r
|
HourOfCodeScripts.R
|
# HOC scripts
# png pixel to number colouring sheet
# colour index: 0 - black, 1 - red, 2 - green, 3 - yellow, 4 - blue,
#               5 - magenta, 6 - cyan, 7 - white
# load png library and read png file
library(png)
pngFilename <- "test2.png"
# FIX: use fixed = TRUE so "." is a literal dot rather than a regex wildcard
# (e.g. "mypng.png" would otherwise have "ypng" replaced, not the extension).
csvFilename <- sub(".png", ".csv", pngFilename, fixed = TRUE)
txtFilename <- sub(".png", ".txt", pngFilename, fixed = TRUE)
pngData <- readPNG(pngFilename)  # height x width x channels array, values in [0, 1]
# extract out RGB channels
arrayRed <- pngData[,,1]
arrayGreen <- pngData[,,2]
arrayBlue <- pngData[,,3]
# combine into 3-bit RGB index (each channel rounded to 0/1 first).
# NOTE: the original pre-allocated a square matrix sized by the *total* pixel
# count (length(pngData[,,1]) squared cells) that was immediately overwritten;
# that wasteful, incorrectly-sized allocation has been removed.
indexColArray <- round(arrayRed, 0) + 2*round(arrayGreen, 0) + 4*round(arrayBlue, 0)
# write to csv
# just for fun, not used if using the javascript pixel art converter
# write.csv(indexColArray,csvFilename,row.names=F)
# write to plain file: one row of digits per pixel row, no separators
write.table(indexColArray, txtFilename, sep = "", row.names = FALSE, col.names = FALSE)
|
73aff7e210958e7b06ab2985c5aa1991c72f6f8b
|
27000599d84eec822222a7e30021cfe47607143a
|
/man/correlation_scatterplot.Rd
|
7735eaf76b8ced5db7884ab82eeb8c165177a7ed
|
[
"MIT"
] |
permissive
|
KWB-R/kwb.flusshygiene
|
eeec7b5bc7481c959c6bed7d073a146ef094a09e
|
b9fee176e392f7932673e8efb092114cfdcbd9d0
|
refs/heads/master
| 2020-04-11T03:46:17.572649
| 2019-10-20T20:26:19
| 2019-10-20T20:26:19
| 161,489,062
| 1
| 0
|
MIT
| 2019-10-20T20:23:12
| 2018-12-12T13:01:41
|
R
|
UTF-8
|
R
| false
| true
| 878
|
rd
|
correlation_scatterplot.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/correlation_variables.R
\name{correlation_scatterplot}
\alias{correlation_scatterplot}
\alias{correlation_values}
\title{Scatterplot Matrix of Variables Similar to E. coli}
\usage{
correlation_scatterplot(df, ...)
correlation_values(df, ...)
}
\arguments{
\item{df}{data.frame with data for e.coli and chosen variables in lagdays}
\item{\dots}{Arguments passed to \code{stats::cor}}
}
\value{
Plotting function. Returns a plot.
Returns correlation values.
}
\description{
Takes similarly named variables and produces a matrix of scatterplots and their
correlation coefficients with E. coli.
}
\section{Functions}{
\itemize{
\item \code{correlation_values}: Internal function
}}
\examples{
\donttest{correlation_values(data.frame(datum = rep("egal",10), e.coli = 1:10, var = 1:10), variable = "var")}
}
|
2b157bccaf96761bb40e75d8ae27dc5c36c946c8
|
b4ec5c53b10da35158c4713945885846f5273582
|
/R/f_text_bar.R
|
bce4bc0725a2c091b1f301029f65e32e0b24df8f
|
[] |
no_license
|
trinker/numform
|
25b4dda8d485dc149c9e74d0a676782afce3393a
|
171a423941b8f0743eadf145bb1f1e89f7911765
|
refs/heads/master
| 2021-10-15T15:08:59.043628
| 2021-10-08T20:17:46
| 2021-10-08T20:17:46
| 68,654,221
| 57
| 5
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,429
|
r
|
f_text_bar.R
|
#' Format Text Based Bar Plots
#'
#' Use a text symbol to create scaled horizontal bar plots of numeric vectors.
#' Note that you will have to coerce the table to a \code{data.frame} in order
#' for the output to look pretty.
#'
#' @param x A numeric vector.
#' @param symbol A symbol to use for the bars.
#' @param width The max width of the bar.
#' @param \ldots ignored.
#' @return Returns a vector of strings of concatenated symbols, each representing x% of the bar.
#' @export
#' @rdname f_text_bar
#' @examples
#' \dontrun{
#' library(dplyr)
#'
#' mtcars %>%
#'     count(cyl, gear) %>%
#'     group_by(cyl) %>%
#'     mutate(
#'         p = numform::f_pp(n/sum(n))
#'     ) %>%
#'     ungroup() %>%
#'     mutate(
#'         cyl = numform::fv_runs(cyl),
#'         ` ` = f_text_bar(n) ## Overall
#'     ) %>%
#'     as.data.frame()
#'
#' mtcars %>%
#'     count(cyl, gear) %>%
#'     group_by(cyl) %>%
#'     mutate(
#'         p = numform::f_pp(n/sum(n)),
#'         ` ` = f_text_bar(n) ## within groups
#'     ) %>%
#'     ungroup() %>%
#'     mutate(
#'         cyl = numform::fv_runs(cyl),
#'         ` ` = f_text_bar(n)
#'     ) %>%
#'     as.data.frame()
#'
#' mtcars %>%
#'     count(cyl, gear) %>%
#'     group_by(cyl) %>%
#'     mutate(
#'         p = numform::f_pp(n/sum(n)),
#'         `within` = f_text_bar(n, width = 3, symbol = '#')
#'     ) %>%
#'     ungroup() %>%
#'     mutate(
#'         cyl = numform::fv_runs(cyl),
#'         `overall` = f_text_bar(n, width = 30, symbol = '*')
#'     ) %>%
#'     as.data.frame() %>%
#'     pander::pander(split.tables = Inf, justify = alignment(.), style = 'simple')
#'
#' ## Drop the headers
#' mtcars %>%
#'     count(cyl, gear) %>%
#'     group_by(cyl) %>%
#'     mutate(
#'         p = numform::f_pp(n/sum(n)),
#'         ` ` = f_text_bar(n, symbol = '=')
#'     ) %>%
#'     ungroup() %>%
#'     mutate(
#'         cyl = numform::fv_runs(cyl),
#'         ` ` = f_text_bar(n, symbol = '#')
#'     ) %>%
#'     as.data.frame()
#' }
f_text_bar <- function(x, symbol = '_', width = 9, ...){
    stopifnot(is.numeric(x))
    # Bars are scaled relative to max(x); NA in x propagates through max()
    # and yields NA bars -- presumably callers pass complete counts.
    stri_pad_right(strrep(symbol, round(width * x/max(x), 0)), width = width)
}
#' @export
#' @include utils.R
#' @rdname f_text_bar
# NOTE(review): `functionize` is defined in utils.R (not visible here); it
# presumably wraps f_text_bar into a function-generating form -- confirm there.
ff_text_bar <- functionize(f_text_bar)
# Right-pad each string in `x` with `pad` until it is `width` characters wide.
# Strings already at least `width` characters long are returned unchanged.
# Vectorized drop-in stand-in for stringi::stri_pad_right.
stri_pad_right <- function(x, width = floor(0.9 * getOption("width")), pad = " "){
    deficit <- pmax(width - nchar(x), 0)
    paste0(x, strrep(pad, deficit))
}
|
01a864c8a9e9bf6e63c919467b8b474d54653502
|
31be997aacecee56ee5c883234b4d100a6cb7ecf
|
/Titanic.R
|
9f7dc3cd3f1d91042037205e5eb7f18825237ce4
|
[] |
no_license
|
eih2nn/sys6018-competition-titanic
|
18f0309c850227d0c4f6e48950e0c740101e1679
|
785712f0adf62a3100fd2b1a84d41648e03f8d6f
|
refs/heads/master
| 2021-06-24T09:20:59.542983
| 2017-08-23T16:52:13
| 2017-08-23T16:52:13
| 101,177,855
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,779
|
r
|
Titanic.R
|
#Titanic Project on Github
#eih2nn
install.packages("tidyverse") #Download library; (only required once)
library(tidyverse) #Load the core tidyverse packages: ggplot2, tibble, tidyr, readr, purrr, and dplyr
#Read in files:
train <- read_csv("train.csv") #Read in the comma separated value data file for training the model
test <- read_csv("test.csv") #Read in the csv data file for testing the model
#Change categorical variables in training set:
train$Survived <- factor(train$Survived) # Make Survived categorical
train$Pclass <- factor(train$Pclass) # Make Pclass categorical
train$Embarked <- factor(train$Embarked) # Make Embarked categorical
#Change categorical variables in testing set:
test$Pclass <- factor(test$Pclass) # Make Pclass categorical
test$Embarked <- factor(test$Embarked) # Make Embarked categorical
#Train logistic regression model using everything that might be helpful
#Remove names, cabins, ticket, and fare (since this last one is too similar to class)
train.lg1 <- glm(Survived ~ .-(PassengerId+Name+Ticket+Fare+Cabin), data=train, family = "binomial")
summary(train.lg1) #Summarize the values generated by glm
#Output:
#Coefficients:
# Estimate Std. Error z value Pr(>|z|)
# (Intercept) 4.62467 0.89055 5.193 2.07e-07 ***
# Pclass2 -0.04389 0.82171 -0.053 0.95740
# Pclass3 -1.62822 0.90932 -1.791 0.07336 .
# Sexmale -2.91751 0.50252 -5.806 6.41e-09 ***
# Age -0.04032 0.01474 -2.736 0.00622 **
# ... (all other variables have no significance)
#Try with just sex/gender, age, and class (the only significant predictors above)
train.lg2 <- glm(Survived~Sex+Age+Pclass,data=train, family = "binomial")
summary(train.lg2)
#Output:
#Coefficients:
# Estimate Std. Error z value Pr(>|z|)
#(Intercept) 3.777013 0.401123 9.416 < 2e-16 ***
# Sexmale -2.522781 0.207391 -12.164 < 2e-16 ***
# Age -0.036985 0.007656 -4.831 1.36e-06 ***
# Pclass2 -1.309799 0.278066 -4.710 2.47e-06 ***
# Pclass3 -2.580625 0.281442 -9.169 < 2e-16 ***
#Use the most recent model for predictions on the test set
probs <- as.vector(predict(train.lg2,newdata=test, type="response"))
preds <- rep(0, nrow(test)) # Initialize prediction vector (sized to the test set, not hard-coded)
preds[probs>0.5] <- 1 # p>0.5 -> 1
preds #Appears to have worked, as all outuput is in 1s and 0s
#Add prediction column to test set
test$Survived <- preds #Fill in values generated from prediction above
test_survived <- test[,c("PassengerId","Survived")] #Select only the necessary columns and assign to new df
write.table(test_survived, file = "eih2nn_titanic1.csv", row.names=F, sep=",") #Write out to a csv
preds1 <- read_csv("eih2nn_titanic1.csv") #Read in the comma separated value data file (just to check)
|
eefe823b995e3bf68044074c885a55adffcd67ad
|
55bdc9a36d8564216db073f19fffd931ffeaa9ae
|
/R/tests/testthat/test-spatial-join.R
|
863739e32f2efad785adaf02b19b2d8d5411cf2a
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0",
"BSD-2-Clause"
] |
permissive
|
awadhesh14/GeoSpark
|
2362d691d8e84397f9cee33692a609ee218faf9e
|
86b90fc41a342088d20429ebcd61a95b2f757903
|
refs/heads/master
| 2023-04-09T07:02:03.610169
| 2023-04-01T07:30:02
| 2023-04-01T07:30:02
| 202,829,602
| 0
| 0
|
Apache-2.0
| 2022-12-21T21:28:50
| 2019-08-17T03:20:13
|
Java
|
UTF-8
|
R
| false
| false
| 2,691
|
r
|
test-spatial-join.R
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# Test context plus a shared Spark connection reused by all tests in this file.
context("spatial join")
sc <- testthat_spark_connection()
test_that("sedona_spatial_join() works as expected with 'contain' as join type", {
  # Exercise the 'contain' join under both supported spatial partitioners.
  for (part in c("quadtree", "kdbtree")) {
    points <- read_point_rdd()
    polygons <- read_polygon_rdd()
    joined <- sedona_spatial_join(
      points, polygons,
      join_type = "contain", partitioner = part
    )
    expect_true(inherits(joined, "pair_rdd"))
    expect_equal(invoke(joined$.jobj, "count"), 1207)
  }
})
# Fixed test description: this test uses join_type = "intersect", but its name
# said 'contain', duplicating the previous test's name in reports.
test_that("sedona_spatial_join() works as expected with 'intersect' as join type", {
  for (partitioner in c("quadtree", "kdbtree")) {
    pt_rdd <- read_point_rdd()
    polygon_rdd <- read_polygon_rdd()
    pair_rdd <- sedona_spatial_join(
      pt_rdd, polygon_rdd,
      join_type = "intersect", partitioner = partitioner
    )
    expect_equal(invoke(pair_rdd$.jobj, "count"), 1207)
    expect_true(inherits(pair_rdd, "pair_rdd"))
  }
})
test_that("sedona_spatial_join_count_by_key() works as expected with 'contain' as join type", {
  # Exercise the 'contain' count-by-key join under both spatial partitioners.
  for (part in c("quadtree", "kdbtree")) {
    points <- read_point_rdd()
    polygons <- read_polygon_rdd()
    counted <- sedona_spatial_join_count_by_key(
      points, polygons,
      join_type = "contain", partitioner = part
    )
    expect_true(inherits(counted, "count_by_key_rdd"))
    expect_equal(invoke(counted$.jobj, "count"), 1207)
  }
})
# Fixed test description: this test uses join_type = "intersect", but its name
# said 'contain', duplicating the previous test's name in reports.
test_that("sedona_spatial_join_count_by_key() works as expected with 'intersect' as join type", {
  for (partitioner in c("quadtree", "kdbtree")) {
    pt_rdd <- read_point_rdd()
    polygon_rdd <- read_polygon_rdd()
    pair_rdd <- sedona_spatial_join_count_by_key(
      pt_rdd, polygon_rdd,
      join_type = "intersect", partitioner = partitioner
    )
    expect_equal(invoke(pair_rdd$.jobj, "count"), 1207)
    expect_true(inherits(pair_rdd, "count_by_key_rdd"))
  }
})
|
df80aadfa80c6737b30028d6d16469f7a8940ce1
|
df13cc3414e2e56fb879b297b0048b1c5a179077
|
/man/standard_dt.Rd
|
0e31bbf27ddb2301b1abdee3b49de1e0b7b53382
|
[
"MIT"
] |
permissive
|
BWAM/validator
|
797c2447370622c037b516c6a84a7e46717f9fa9
|
5471815c369774ed1f31679bf221d67981882f45
|
refs/heads/master
| 2022-12-07T00:17:01.179481
| 2020-08-24T20:10:01
| 2020-08-24T20:10:01
| 261,248,291
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 322
|
rd
|
standard_dt.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/standard_dt.R
\name{standard_dt}
\alias{standard_dt}
\title{Standard DT Table}
\usage{
standard_dt(x)
}
\arguments{
\item{x}{a data table}
}
\value{
A DT table with vertical and horizontal scrolling enabled.
}
\description{
Standard DT Table
}
|
2868f6ff0920268619ceeb5976fa99b3ceccc69f
|
55b18b9a577fec2d86655a303761e680d05aea3d
|
/pkpc_test.R
|
241c37ed919d4bdd7fc47f4da8f12905d4d6155c
|
[
"MIT"
] |
permissive
|
stefanfausser/skc-kpca
|
3ec66479a8b801c32f11303ee93a37ee32d33304
|
aa80cecf702332027720233200a3ac34a2797aee
|
refs/heads/master
| 2020-05-17T21:28:22.070143
| 2017-05-03T12:26:12
| 2017-05-03T12:26:12
| 38,367,061
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 21,281
|
r
|
pkpc_test.R
|
## Used solely in pkpc_experiments.R
simpleCap <- function(x)
{
  # Capitalize the first letter of every space-separated word in a single string.
  words <- strsplit(x, " ")[[1]]
  heads <- toupper(substring(words, 1, 1))
  tails <- substring(words, 2)
  paste(heads, tails, sep = "", collapse = " ")
}
# Driver for the unsupervised experiments: selects parameter sequences based on
# which evaluation mode is requested, then delegates to pkpc.test.func with a
# file suffix identifying the experiment variant.
# NOTE(review): `a_method` (3:5) is only assigned in the final else-branch; the
# call at the `basic` branch below relies on that branch having run — confirm
# callers never combine basic=TRUE with an eval* flag.
pkpc.test.unsupervised.framework <- function(param, basic = TRUE, evalWeight = FALSE, evalWeightTwoDataChunks = FALSE, evalMaxDataChunks = FALSE)
{
# Unsupervised
if(evalWeight)
{
# Weight-factor sweep: fixed chunk count, last L value, KKMEANS only.
maxDataChunksSeq <- param$maxDataChunksEval
a_L <- param$a_L[length(param$a_L)]
weightFactorSeq <- param$weightFactorEvalSeq
a_method_kpca <- 2 # KKMEANS
subfix <- "-evalWeight"
}
else if(evalWeightTwoDataChunks)
{
# Weight-factor sweep restricted to exactly two data chunks (chunk count set at call site).
a_L <- param$a_L[length(param$a_L)]
weightFactorSeq <- param$weightFactorEvalSeq
a_method_kpca <- 2 # KKMEANS
subfix <- "-evalWeightTwoDataChunks"
}
else if(evalMaxDataChunks)
{
# Sweep over the number of data chunks with a fixed weight factor.
maxDataChunksSeq <- param$maxDataChunksEvalSeq
a_L <- param$a_L[length(param$a_L)]
weightFactorSeq <- param$weightFactor
a_method_kpca <- 2 # KKMEANS
subfix <- "-evalMaxDataChunks"
}
else
{
# Default: full method grids (0:2 = KPC-A variants, 3:5 = baselines).
maxDataChunksSeq <- param$maxDataChunks
a_L <- param$a_L
weightFactorSeq <- param$weightFactor
a_method_kpca <- 0:2
a_method <- 3:5
subfix <- ""
}
# Baseline methods over the basic sample-size grid (only in the default mode).
if(!evalWeight && !evalWeightTwoDataChunks && basic)
pkpc.test.func(param$seedSamples, param$seedLabels, param$seed, param$dataset, param$datasetfunc, "-unsupervised-2fold-basic", a_method = a_method, a_M = param$MBasicSeq, K = param$K, degree = param$degree, offset = param$offset, sigma = param$sigma, normalizeKernel = param$normalizeKernel, fuzzifier = param$fuzzifier)
if(evalWeightTwoDataChunks)
{
# Two-chunk experiment plus an all-samples-from-last-chunk comparison run.
pkpc.test.func(param$seedSamples, param$seedLabels, param$seed, param$dataset, param$datasetfunc, paste("-unsupervised-2fold-kpca", subfix, sep=""), a_method = a_method_kpca, a_M = param$M, K = param$K, a_L = a_L, degree = param$degree, offset = param$offset, sigma = param$sigma, normalizeKernel = param$normalizeKernel, fuzzifier = param$fuzzifier, weightFactorSeq = weightFactorSeq, maxDataChunks = 2)
pkpc.test.func(param$seedSamples, param$seedLabels, param$seed, param$dataset, param$datasetfunc, paste("-unsupervised-2fold-all", subfix, sep=""), a_method = a_method_kpca, a_M = param$M, K = param$K, a_L = 0, allSamplesFromLast = 1, degree = param$degree, offset = param$offset, sigma = param$sigma, normalizeKernel = param$normalizeKernel, fuzzifier = param$fuzzifier, weightFactorSeq = weightFactorSeq)
}
else if(!basic)
pkpc.test.func(param$seedSamples, param$seedLabels, param$seed, param$dataset, param$datasetfunc, paste("-unsupervised-2fold-kpca", subfix, sep=""), a_method = a_method_kpca, a_M = param$M, K = param$K, a_L = a_L, degree = param$degree, offset = param$offset, sigma = param$sigma, normalizeKernel = param$normalizeKernel, fuzzifier = param$fuzzifier, weightFactorSeq = weightFactorSeq, maxDataChunks = maxDataChunksSeq)
}
# Driver for the semi-supervised experiments: either a labelled/unlabelled
# factor sweep (KKMEANS only) or the full method grid, delegated to
# pkpc.test.func with an identifying file suffix.
pkpc.test.semisupervised.framework <- function(param, evalLabelledUnlabelledFactor = FALSE)
{
# Semi-Supervised
if(evalLabelledUnlabelledFactor)
{
# Sweep over label fractions and labelled/unlabelled weighting factors.
labelFactorSeq <- param$labelFactorEvalSeq
labelledUnlabelledFactorSeq <- param$labelledUnlabelledFactorEvalSeq
a_method <- 5 # KKMEANS
subfix <- "-evalLabelledUnlabelledFactor"
}
else
{
# Default: all baseline methods (3:5) with the standard parameter sequences.
labelFactorSeq <- param$labelFactorSeq
labelledUnlabelledFactorSeq <- param$labelledUnlabelledFactor
a_method <- 3:5
subfix <- ""
}
pkpc.test.func(param$seedSamples, param$seedLabels, param$seed, param$dataset, param$datasetfunc, paste("-unsupervised-2fold-skc", subfix, sep=""), a_method = a_method, a_M = param$M_semisupervised, K = param$K, labelFactorSeq = labelFactorSeq, labelledUnlabelledFactorSeq = labelledUnlabelledFactorSeq, degree = param$degree, offset = param$offset, sigma = param$sigma, normalizeKernel = param$normalizeKernel, fuzzifier = param$fuzzifier)
}
# Common experiment harness: loads a dataset via `datasetfunc`, selects a
# kernel (Gaussian if sigma > 0, else polynomial if degree > 0), writes the
# run parameters to a file, prepares sample/label matrices, and launches
# pkpc.main.test. Side effects: creates "labelsTmp/", writes a parameter file,
# and pkpc.main.test writes the results file named "results-<dataset><suffix>_<date>".
pkpc.test.func <- function(seedSamples, seedLabels, seed, datasetname, datasetfunc, filesuffix, a_method, a_M, a_L = 0, labelFactorSeq = 0, K = 2, degree = 1, offset = 0, sigma = 0, scaleFeatures = 1, normalizeKernel = 0, removeOutliers = TRUE, removeFeatures = TRUE, fuzzifier = 1.25, labelledUnlabelledFactorSeq = 1, R = 350, kFoldCrossValidation = 2, normalizeAssignmentsKFCM = 0, getHardClusteringsKFCM = 0, allSamplesFromLast = 0, maxDataChunksSeq = 0, weightFactorSeq = 0.5, epsilon = 0.000001, T = 100, hardAssignmentsApproxPseudoCentresRNG = 1, softInitialAssignmentsApproxPseudoCentresRNG = 0, excludeSamplesFromAPCInGreedySelection = 0)
{
# Load the dataset; `datasetfunc` is expected to return N (sample count),
# x (feature matrix) and labels.
ret <- datasetfunc(removeOutliers = removeOutliers, removeFeatures = removeFeatures)
N <- ret$N
x <- ret$x
labels <- ret$labels
if(sigma > 0)
{
# Gaussian Kernel
kernelfunc <- kernel.gauss
kernelparam <- list(sigma=sigma)
}
else if(degree > 0)
{
# Polynomial Kernel (for degree = 1: Linear Kernel)
kernelfunc <- kernel.poly
kernelparam <- list(offset=offset, degree=degree, normalizeKernel=normalizeKernel)
}
# NOTE(review): if both sigma <= 0 and degree <= 0 no kernel is set and the
# later call will fail on an undefined `kernelfunc` — confirm callers always
# pass a positive sigma or degree.
a_mu <- c(1)
a_K <- K
dirname <- "labelsTmp/"
if(!dir.exists(dirname))
{
dir.create(dirname)
}
savefile <- paste(dirname, datasetname, sep="")
pkpca_params <- list(verbose=0, maxRepeats=10, maxRowsKmatOut=1000, allSamplesFromLast=allSamplesFromLast, maxSamplesValidate=2000, getHardClusteringsKFCM=getHardClusteringsKFCM, normalizeAssignmentsKFCM=normalizeAssignmentsKFCM, limitSamplesByHardAssignments=0, localMinimum=0, hardAssignmentsApproxPseudoCentresRNG=hardAssignmentsApproxPseudoCentresRNG, softInitialAssignmentsApproxPseudoCentresRNG=softInitialAssignmentsApproxPseudoCentresRNG, excludeSamplesFromAPCInGreedySelection=excludeSamplesFromAPCInGreedySelection)
a_pkpcparam <- rbind(pkpca_params, NULL)
fileToSave <- paste("results-", datasetname, filesuffix, "_", date(), sep="")
# write parameter file
fileToSaveParameters <- paste("parameters-", datasetname, filesuffix, "_", date(), sep="")
parameterList <- list(kFoldCrossValidation=kFoldCrossValidation, scaleFeatures=scaleFeatures, removeFeatures=removeFeatures, removeOutliers=removeOutliers, normalizeKernel=normalizeKernel, degree=degree, offset=offset, sigma=sigma, fuzzifier=fuzzifier, T=T, epsilon=epsilon, normalizeAssignmentsKFCM=normalizeAssignmentsKFCM, getHardClusteringsKFCM=getHardClusteringsKFCM, hardAssignmentsApproxPseudoCentresRNG=hardAssignmentsApproxPseudoCentresRNG, softInitialAssignmentsApproxPseudoCentresRNG=softInitialAssignmentsApproxPseudoCentresRNG, excludeSamplesFromAPCInGreedySelection=excludeSamplesFromAPCInGreedySelection)
write.table(parameterList, fileToSaveParameters)
# Seeded sample matrix for the R experiment repetitions.
samplesMat <- saveOrRestoreSampleMat(seedSamples, R, N)
## only performed once
createAndSaveRandomZeroLabels(seedLabels, savefile, labels, labelFactorSeq, R)
pkpc.main.test(seed, fileToSave, savefile, a_pkpcparam, a_mu, a_M, a_K, a_L, a_method, x, kernelfunc, kernelparam, labelledUnlabelledFactorSeq, labelFactorSeq, samplesMat, kFoldCrossValidation, T, N, labels, fuzzifier, epsilon, scaleFeatures, maxDataChunksSeq, weightFactorSeq)
}
pkpc.plot.get.ylim <- function(vals, nVals = 5, nDigits = 2, spaceTopFactor = 0.1)
{
  # Axis limits for plotting `vals`: the data range plus headroom above the
  # maximum (a `spaceTopFactor` fraction of the range) for legends/labels.
  lo <- min(vals)
  hi <- max(vals) + (max(vals) - lo) * spaceTopFactor
  ylim <- c(lo, hi)
  # nVals evenly spaced, rounded tick labels, ordered from the top down.
  step <- round((max(ylim) - min(ylim)) / (nVals - 1), digits = nDigits)
  top <- round(max(ylim), digits = nDigits)
  bottom <- top - (nVals - 1) * step
  list(yLabelSeq = seq(top, bottom, -step), ylim = ylim)
}
# Reads one results file per dataset (matched by name + date), collects the
# ARI and DBI columns, optionally shifts them per dataset, and writes two EPS
# plots ("results-semisupervised-eval-ARI.eps" / "-DBI.eps") with one curve
# per dataset over the labelled/unlabelled factor (gamma).
pkpc.semisupervised.evaluation <- function(datasetname, date, legendLocationARI = "topleft", legendLocationDBI = "topright", shiftDBI=NULL, shiftARI=NULL, spaceTopFactor=0.35)
{
if(length(date) != length(datasetname))
{
cat("Length of datasetname and date must match\n")
return(NULL)
}
iSeq <- 1:length(date)
valsARI <- NULL
valsDBI <- NULL
legendSeq <- NULL
# Gather one row of ARI/DBI values per dataset from its results file.
for(i in iSeq)
{
file <- list.files(pattern = paste("results-", datasetname[i], ".+", date[i], sep=""))
if(!file.exists(file))
{
cat("File ", file, " does not exist\n")
return(NULL)
}
res <- read.table(file)
xVal <- res$labelledUnlabelledFactor
xLabel <- "gamma"
mainLabel <- "SKC(KKM)"
valsARICurr <- res$predAdjustedRandPseudoCentresMean
valsDBICurr <- res$errDaviesBouldinOrgRmsePseudoCentresMean
# Optional per-dataset offsets so curves can be separated visually.
if(!is.null(shiftARI))
valsARICurr <- valsARICurr + shiftARI[i]
if(!is.null(shiftDBI))
valsDBICurr <- valsDBICurr + shiftDBI[i]
valsARI <- rbind(valsARI, valsARICurr)
valsDBI <- rbind(valsDBI, valsDBICurr)
legendSeq <- c(legendSeq, sapply(datasetname[i],simpleCap))
}
# Fixed per-curve plot styling (supports up to five datasets).
lwdSeq <- c(2,2,2,2,2)
pchSeq <- c(21,23,24,25,22)
colSeq <- c('grey', 'grey', 'black', 'black','black')
ltySeq <- c("solid","longdash","solid","dotted",'longdash')
# One pass per metric: j == 1 plots ARI, j == 2 plots DBI.
for(j in 1:2)
{
if(j == 1)
{
# ARI
ylabel <- "ARI"
vals <- valsARI
legendLocation <- legendLocationARI
}
else
{
# DBI
ylabel <- "DBI"
vals <- valsDBI
legendLocation <- legendLocationDBI
}
xLabelSeq <- xVal
ret <- pkpc.plot.get.ylim(vals, spaceTopFactor = spaceTopFactor)
ylim <- ret$ylim
yLabelSeq <- ret$yLabelSeq
setEPS()
postscript(paste("results-semisupervised-eval-", ylabel, ".eps", sep=""))
for(i in iSeq)
{
if(i == 1)
{
# margins: top, left, bottom, right. Default: 5,4,4,2 + 0.1
par(mar=c(5, 4 + 0.5, 4, 2))
plot(xLabelSeq, vals[1,], type='o',ylim=ylim, lwd=lwdSeq[i], lty=ltySeq[i], col=colSeq[i], main=mainLabel, xlab=xLabel, ylab=ylabel, pch=pchSeq[i], yaxt="n", xaxt="n", cex.lab=2.0, cex.axis=2.0, cex.main=2.0, cex.sub=1.7, cex=2.0)
axis(1, at=xLabelSeq, labels=xLabelSeq, cex.axis=2.0)
axis(2, at=yLabelSeq, labels=yLabelSeq, cex.axis=2.0)
}
else
lines(xLabelSeq, vals[i,], type='o', lwd=lwdSeq[i], lty=ltySeq[i], col=colSeq[i], pch=pchSeq[i], cex=2.0)
}
legend(legendLocation, lwd=lwdSeq[iSeq], pch=pchSeq[iSeq], col=colSeq[iSeq], lty=ltySeq[iSeq], legend=legendSeq[iSeq], cex=1.5, ncol=2)
dev.off()
}
}
# Reads a results file for one dataset and writes an EPS dual-axis plot of
# ARI (left axis) and SSE (right axis) against the number of data chunks.
pkpc.test.evaluation.maxDataChunks <- function(datasetname, date, legendLocation = "top")
{
file <- list.files(pattern = paste("results-", datasetname, ".+", date, sep=""))
if(!file.exists(file))
{
cat("File ", file, " does not exist\n")
return(NULL)
}
ret <- read.table(file)
valsARI <- ret$predAdjustedRandPseudoCentresMean
valsSSE <- ret$errPseudoCentreQuantMean
xLabelSeq <- ret$maxDataChunks
# Separate y-scales for the two metrics (ARI left, SSE right).
ret <- pkpc.plot.get.ylim(valsARI, nVals = 4, nDigits = 3)
yLabelSeq <- ret$yLabelSeq
ylim <- ret$ylim
ret2 <- pkpc.plot.get.ylim(valsSSE, nVals = 4, nDigits = 0)
yLabelSeq2 <- ret2$yLabelSeq
ylim2 <- ret2$ylim
setEPS()
postscript(paste("results-", datasetname, "-maxChunks.eps", sep=""))
# margins: top, left, bottom, right. Default: 5,4,4,2 + 0.1
par(mar=c(5, 4.5, 4, 4) + 0.1)
plot(xLabelSeq, valsARI, type='o', ylim=ylim, lwd=2, lty = "solid", col='grey', main=paste(sapply(datasetname,simpleCap), ", KPC-A(KKM)", sep=""), xlab="number of data chunks", ylab="ARI", pch=21, yaxt="n", xaxt="n", cex.lab=2.0, cex.axis=2.0, cex.main=2.0, cex.sub=1.7, cex=2.0)
axis(1, at=xLabelSeq, labels=xLabelSeq, cex.axis=2.0)
axis(2, at=yLabelSeq, labels=yLabelSeq, cex.axis=2.0)
# Overlay the SSE curve on the same plot region with its own right-hand axis.
par(new=TRUE)
plot(xLabelSeq, valsSSE, type='o', ylim=ylim2, lwd=2, lty = "solid", col='black', xlab="", ylab="", pch=23, yaxt="n", xaxt="n", cex.lab=2.0, cex.axis=2.0, cex.main=2.0, cex.sub=1.7, cex=2.0)
mtext("SSE",side=4,line=3,cex=2)
axis(4, at=yLabelSeq2, labels=yLabelSeq2, cex.axis=2.0)
legend(legendLocation, lwd=c(2,2), pch=c(21,23), col=c("gray","black"), lty=c("solid","solid"), legend=c("ARI","SSE"), cex=1.5)
dev.off()
}
# Reads the results file of each dataset, extracts the mean best distances for
# method 2 (KPC-A(KKM)) grouped by L, and writes a grouped barplot to
# "results-dists.eps". `namelen` truncates dataset names for the x-axis.
pkpc.test.evaluation.dist <- function(datasetname, date, namelen = 5)
{
iSeq <- 1:length(date)
vals <- NULL
for(i in iSeq)
{
file <- list.files(pattern = paste("results-", datasetname[i], ".+", date[i],sep=""))
if(!file.exists(file))
{
cat("File ", file, " does not exist\n")
return(NULL)
}
ret <- read.table(file)
# L values are taken from the first dataset and assumed identical across files.
if(i == 1)
Lseq <- unique(ret$L)
# Reorder rows so results are grouped by L.
ind <- NULL
for(l in Lseq)
ind <- c(ind, which(ret$L == l))
ret <- ret[ind,]
ind2 <- ret$method == 2
vals <- cbind(vals, ret[ind2,]$bestDistsMean)
}
ylim <- c(0,0.02)
setEPS()
postscript(paste("results-dists.eps", sep=""))
# margins: top, left, bottom, right. Default: 5,4,4,2 + 0.1
par(mar=c(5, 4.5, 4, 4) + 0.1)
barplot(vals, names.arg=substr(datasetname,1,namelen), ylab="error", col=c("white","gray","black"), beside=TRUE, ylim=ylim, cex.lab=2.0, cex.axis=2.0, cex.main=2.0, cex.sub=1.7, cex=1.9, main="KPC-A(KKM)")
legend("topleft", legend = Lseq, fill = c("white","gray","black"), cex=1.5)
dev.off()
}
# Builds a LaTeX results table (printed via cat) from one results file per
# date entry. For each row it maps the numeric method code to a display name,
# records baseline-method index values (methods 3-5 at M == baselineSamples),
# and runs Welch-style t-tests (myttest, defined elsewhere) of each later row
# against its baseline; significantly better values are emphasized with
# \bfseries. External indexes: NMI, ARI; internal: DBI, SSE.
pkpc.test.evaluation <- function(datasetname, date, semisupervised = FALSE, baselineSamples = 0)
{
nDigitsExternal <- 3
nDigitsInternal <- 3
nDigitsInternalQuant <- 0
nDigitsInternalQuantSd <- 1
str <- NULL
if((length(date) > 3 && semisupervised) || (length(date) > 2 && !semisupervised))
{
printf("Wrong length of date (%i)\n", length(date))
return(NULL)
}
if(semisupervised && baselineSamples <= 0)
{
printf("Must set baselineSamples when semisupervised\n")
return(NULL)
}
iSeq <- 1:length(date)
index1Str <- "DBI"
index2Str <- "SSE"
# Table header: second column is the label fraction (semi-supervised) or L.
if(semisupervised)
str <- paste("Method \t\t & La. & NMI & ARI & ", index1Str, " & ", index2Str, "\\\\\n", sep="")
else
str <- paste("Method \t\t & L & NMI & ARI & ", index1Str, " & ", index2Str, "\\\\\n", sep="")
# four indexes and three methods
indexesBaseline <- matrix(NA, 3, 4)
indexesBaselineSd <- matrix(NA, 3, 4)
hasIndexesBaseline <- rep(FALSE, 3)
indexesBaselineR <- rep(0,3)
for(i in iSeq)
{
file <- list.files(pattern = paste("results-", datasetname, ".+", date[i],sep=""))
if(!file.exists(file))
{
cat("File ", file, " does not exist\n")
return(NULL)
}
res <- read.table(file)
for(r in 1:nrow(res))
{
# In the first file only rows at the baseline sample size are used.
if(i == 1 && baselineSamples > 0)
{
if(res[r,]$M != baselineSamples)
next # for
}
# Map the numeric method code to its display name.
if(res[r,]$method == 0)
method <- "KPC-A(RNG)"
else if(res[r,]$method == 1)
method <- "KPC-A(KFCM)"
else if(res[r,]$method == 2)
method <- "KPC-A(KKM)"
if(res[r,]$method == 3)
method <- "RNG"
else if(res[r,]$method == 4)
method <- "KFCM"
else if(res[r,]$method == 5)
{
if(i == 3)
method <- "SS-KKM"
else
method <- "KKM"
}
doTtest <- FALSE
haveBaseline <- FALSE
if(res[r,]$method >= 3 && res[r,]$method <= 5)
{
# Baseline methods (3-5): append the sample count; test against an
# earlier baseline of the same family if one was already recorded.
method <- paste(method, "(\\num{", res[r,]$M ,"})", sep="")
k <- "NA"
baseLineMethod <- res[r,]$method - 2
if(hasIndexesBaseline[baseLineMethod])
{
doTtest <- TRUE
baseLineM <- baseLineMethod
}
else
haveBaseline <- TRUE
}
else
{
# KPC-A methods (0-2): compared against the matching baseline method.
k <- paste(res[r,]$L, sep="")
doTtest <- TRUE
baseLineM <- res[r,]$method + 1
}
nDigitsInternal2 <- nDigitsInternal
nDigitsInternal2Sd <- nDigitsInternal - 1
extIndex1 <- res[r,]$predNMIPseudoCentresMean
extIndex1Sd <- res[r,]$predNMIPseudoCentresStd
extIndex2 <- res[r,]$predAdjustedRandPseudoCentresMean
extIndex2Sd <- res[r,]$predAdjustedRandPseudoCentresStd
index1 <- res[r,]$errDaviesBouldinOrgRmsePseudoCentresMean
index1Sd <- res[r,]$errDaviesBouldinOrgRmsePseudoCentresStd
index2 <- res[r,]$errPseudoCentreQuantMean
index2Sd <- res[r,]$errPseudoCentreQuantStd
nDigitsInternal2 <- nDigitsInternalQuant
nDigitsInternal2Sd <- nDigitsInternalQuantSd
strSignif1 <- rep("",4)
strSignif2 <- rep("",4)
if(doTtest)
{
# Significance: p < 0.05 AND better than baseline (higher NMI/ARI,
# lower DBI/SSE).
signif <- rep(FALSE, 4)
R1 <- indexesBaselineR[baseLineM]
R2 <- res[r,]$nSuccesses
ret <- myttest(c(extIndex1,indexesBaseline[baseLineM,1]),c(extIndex1Sd,indexesBaselineSd[baseLineM,1]),c(R1,R2))
if(ret$p.value < 0.05 && extIndex1 > indexesBaseline[baseLineM,1])
signif[1] <- TRUE
ret <- myttest(c(extIndex2,indexesBaseline[baseLineM,2]),c(extIndex2Sd,indexesBaselineSd[baseLineM,2]),c(R1,R2))
if(ret$p.value < 0.05 && extIndex2 > indexesBaseline[baseLineM,2])
signif[2] <- TRUE
ret <- myttest(c(index1,indexesBaseline[baseLineM,3]),c(index1Sd,indexesBaselineSd[baseLineM,3]),c(R1,R2))
if(ret$p.value < 0.05 && index1 < indexesBaseline[baseLineM,3])
signif[3] <- TRUE
ret <- myttest(c(index2,indexesBaseline[baseLineM,4]),c(index2Sd,indexesBaselineSd[baseLineM,4]),c(R1,R2))
if(ret$p.value < 0.05 && index2 < indexesBaseline[baseLineM,4])
signif[4] <- TRUE
for(j in 1:4)
{
if(signif[j])
{
strSignif1[j] <- "\\bfseries "
strSignif2[j] <- ""
}
}
}
if(haveBaseline)
{
# First occurrence of a baseline method: remember its values for later tests.
if(!hasIndexesBaseline[baseLineMethod])
{
indexesBaseline[baseLineMethod,1] <- extIndex1
indexesBaseline[baseLineMethod,2] <- extIndex2
indexesBaseline[baseLineMethod,3] <- index1
indexesBaseline[baseLineMethod,4] <- index2
indexesBaselineSd[baseLineMethod,1] <- extIndex1Sd
indexesBaselineSd[baseLineMethod,2] <- extIndex2Sd
indexesBaselineSd[baseLineMethod,3] <- index1Sd
indexesBaselineSd[baseLineMethod,4] <- index2Sd
indexesBaselineR[baseLineMethod] <- res[r,]$nSuccesses
hasIndexesBaseline[baseLineMethod] <- TRUE
}
}
if(semisupervised)
str2 <- res[r,]$labelFactor
else
str2 <- k
# Append one LaTeX table row: mean \pm std for each of the four indexes.
str <- paste(str, method, " \t & ", str2,
" & ", strSignif1[1], round(extIndex1, digits=nDigitsExternal), " \\pm ", round(extIndex1Sd, nDigitsExternal), strSignif2[1],
" & ", strSignif1[2], round(extIndex2, digits=nDigitsExternal), " \\pm ", round(extIndex2Sd, nDigitsExternal), strSignif2[2],
" & ", strSignif1[3], round(index1, digits=nDigitsInternal), " \\pm ", round(index1Sd, nDigitsInternal), strSignif2[3],
" & ", strSignif1[4], round(index2, digits=nDigitsInternal2), " \\pm ", round(index2Sd, nDigitsInternal2), strSignif2[4],
"\\\\ \n", sep="")
} # for r
} # for i
cat("Latex table entries:\n", str,"\n", sep="")
}
|
b4b818142f7befdd300a25e9ce4d622e20d04281
|
6bfce066d969cd7d910fd759d9e5aef3a3ef0769
|
/R/plots.R
|
0cf4e6a1b9c9ce37f30fb0ede8f7cbb8a7c5c1db
|
[] |
no_license
|
JohnMBrandt/text-classification
|
110c1855325e10bb488faf79cfd41ec2bb2c6e1b
|
a24cdcd81164c01d74bd32ef242cf04713c2670c
|
refs/heads/master
| 2020-04-06T13:04:36.007860
| 2019-01-21T19:12:17
| 2019-01-21T19:12:17
| 157,482,882
| 10
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 452
|
r
|
plots.R
|
# Plot smoothed training vs. validation top-3 accuracy (as percentages) by
# epoch, read from a training log CSV with columns `epoch`, `top_3_accuracy`
# and `val_top_3_accuracy`.
log <- read.csv("log.csv")
library(ggplot2)
ggplot(data = log, aes(x= epoch, y = top_3_accuracy*100))+
geom_smooth(se=F, aes(color = "Train"))+
geom_smooth(aes(y=val_top_3_accuracy*100, color = "Validation"), se = F)+
theme_bw()+
ylab("Top 3 accuracy")+
xlab("Epoch")+
theme(panel.grid.minor = element_blank(),
panel.grid.major.x = element_blank())+
theme(legend.title = element_blank(),
legend.position=c(0.1,0.93))
|
802e5cef5a5879d2c271317f433ecdef797ab305
|
e3994abdf34a95e73ca1395afdef24fba0bb99ee
|
/Session1.R
|
1a94797203cacc8d9bba2f799f39759c449f1cda
|
[] |
no_license
|
vinitg91/Introduction-to-R
|
c0ac0591a5975c4e0432e339eb26875702169d80
|
f9302c047f585c8dc9382636a474a447fbd41ddf
|
refs/heads/master
| 2021-07-19T18:26:30.374066
| 2017-10-27T13:56:56
| 2017-10-27T13:56:56
| 108,552,579
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,093
|
r
|
Session1.R
|
# Tutorial script (Session 1): basic R data objects, vector creation,
# indexing/subsetting, and data frames.
#--------------------------------------------------------------------------------------------
#Data Objects
weight = c(60,72,57,95,90,72)
height = c(1.75,1.8,1.65,1.9,1.74,1.91)
mean(weight)
sd(height)
quantile(weight)
quantile(weight,0.75)
quantile(weight,probs = c(0.2,0.75))
length(height)
range(weight)
t.test(weight,mu = 80)
bmi = (weight)/(height^2)
bmi
plot(height,weight)
hist(weight)
plot(height,weight, pch=7,col="Blue", main="Height vs Weight")
colors()
weight = c(weight,86)
height = c(height,NA)
height
mean(height,na.rm = TRUE)
gender = c("M","F","M","M","F","M","F")
names(gender)
names(gender)=c("Bob","Susan","Jim","Mary","Jane","Tim","Nicole")
plot(factor(gender),weight) #Important here is to treat Gender as factor as its categorical
#--------------------------------------------------------------------------------------------
#Creating Vectors
x= seq(1,15,2) #seq(Start, End, StepSize)
x
x =seq(1,15,length.out = 4)
x
rep("A",5)
rep(c("A","B"),5)
rep(c("A","B"),c(5,7))
c(rep("A",5),rep("B",7))
weight[weight>70]
#--------------------------------------------------------------------------------------------
#Indexing and Subsetting
weight[-3] # negative index drops the third element
height[height>1.7 & is.na(height)==F]
#--------------------------------------------------------------------------------------------
#Data Frame
ghw=data.frame(gender, height, weight)
ghw
str(ghw)
summary(ghw)
ghw[1,]
ghw[c(3,5),]
#--------------------------------------------------------------------------------------------
#Viewing and Editing (interactive only — these open an editor/viewer window)
edit(ghw) #Very Useful
fix(ghw)
View(ghw)
#--------------------------------------------------------------------------------------------
#Indexing data frames
ghw$height
s1=ghw[ghw$gender=="F",]
s1
edit(s1)
# NOTE(review): 'whiteside' is the dataset from the MASS package — requires
# library(MASS) (or MASS being attached) to be available; confirm.
View(whiteside)
plot(whiteside$Insul,whiteside$Gas)
#--------------------------------------------------------------------------------------------
#data frame
str(whiteside)
str(ghw)
summary(ghw)
dim(ghw)
row.names(whiteside)
row.names(ghw)
rownames(ghw)
|
d20c05c485643af5c54e748029eba2eca550335b
|
c04b1eb347dadf93d2a2d4f13a0954c3c49b0b23
|
/test fin draft-tbc.R
|
18d3325818accef991b5e33a70e53f4155fec5f6
|
[] |
no_license
|
nitijsingh/Assignment-1
|
f5c6ebdbb115677c8f2a31f2bc44294af55b638a
|
3dd9b5b8ab43b8a8ee509e16373b41de6df4e8ca
|
refs/heads/master
| 2021-01-10T06:23:05.205818
| 2015-12-11T15:15:04
| 2015-12-11T15:15:04
| 43,159,505
| 0
| 2
| null | 2015-11-20T11:48:27
| 2015-09-25T16:21:45
|
HTML
|
UTF-8
|
R
| false
| false
| 33,157
|
r
|
test fin draft-tbc.R
|
title: What effect does ICT investment and non-ICY investments have on economic growth in Developed, Emerging and Developing Countris
author: "By Alessia D'Alessandro and Nitij Singh
Hertie School of Governance
Instructor: Christopher Gandrud
Fall Semester, 2015
11.12.2015"
date: "11 December 2015"
output: pdf_document
bibliography: Biblio.bib
highlight: tango
geometry: margin=3cm
theme: united
toc: yes
toc_depth: 3
# Abstract
This paper aims at analyzing the validity of the relationship between Information and Communication Technology (ICT) and economic development, expressed in terms of GDP growth. The study provides a cross-country view on the issue upon assessing the impact of ICT on economic growth for 54 countries from the developed, emerging and developing world. Despite various panel regressions from other studies showing a positive relationship between ICT and GDP growth, our results show that even though the impact of ICT capital stock per capita is significant in the developed and emerging economies, it cannot be used to make generalizations for the developing countries.
# Table of Contents
1. Introduction .................................................. 3
2. Literature Review ............................................. 4
3. Data and Datasets ............................................. 5
4. Definition of key concepts and variables ...................... 6
5. Outlier detection and missing values .......................... 8
6. Descriptive Statistics ........................................ 9
7. Model ......................................................... 10
8. Empirical Results ............................................. 10
9. Visual inspections of the Data ................................ 11
10. Limitations of the study ..................................... 15
11. Concluding Remarks ........................................... 16
12. Policy Recommendations ....................................... 17
13. Bibliography ................................................. 18
# Introduction
The information revolution and the extraordinary increase in the spreading of knowledge have given rise to a new era of knowledge and information, affecting directly economic, social, cultural and political activities of all regions of the world (Ogunsola 2005).
For years, Information and Communication Technologies (ICT) has been identified as a key driver for improving living standards. These higher living standards often correlate with productivity growth (Timmer et al. 2010)and thus economic growth.
Given the higher productivity effect, researchers and policy makers have been highlighting the incredible benefits brought about by Information Technology, integrating modern economies and inevitably affecting economic and industrial growth. Overall, incredibly high expectations have been set on the advancements in ICT, as being a tool for developing and emerging economies to leap frog traditional methods of increased productivity resulting in ICT related positive spillovers (Steinmueller, 2010) and reducing poverty upon increasing productivity and determining economic growth (Group 2012).
However, it is known from time immemorial that everything in life is like the two sides of a coin, with a positive and negative aspect. As a matter of fact, only people who have access to ICT will benefit from it whereas those who do not, will not. Especially for those countries in which ICT investments are scarce, the risk of being marginalized or bypassed becomes even higher. Many scholars and researchers nowadays have been focusing on the topic of digital divide and increased gross inequalities between nations determined by globalized investments in ICT. Aside from its reliance on technology, ICT also requires an absorptive capacity in terms of labor and technical skills, to fully benefit from the investments in ICT infrastructure (Jorgenson and Stiroh 1995).
Moreover, given the limited number of studies on the effective benefits brought about by ICT in developing and emerging markets, it is hard to make generalizations about the actual impact that the mere investment in ICT infrastructure is causing (Peña-López et al., 2007).
The largest effect of increased availability of ICT technology is probably the facility with which members of societies have access to information. In modern times, in a world of increasing gross inequalities between nations, contemporary discourses have been trying to identify forces that could improve countries' socio-economic conditions and reduce inequalities between developed and developing countries. However, current debates are rather critical towards the actual effect of ICT and are centered on the topic of the digital divide, and on how the globalization of markets has rather harmed emerging and developing economies.
# Literature Review
It comes with no doubts that the investment in ICT has had a significant impact on many countries worldwide, affecting the way people learn, work and exchange information. Ever since the advent of computers, government policy advisors and international developing agencies have pointed to the opportunities that technologies open to innovation (Avgerou, 2009).
For years ICT was also identified as one of the key tools to foster economic development, with a majority of studies and panel regressions confirming the positive relationship between ICT capital and GDP growth (Cardona, Kretschmer, and Strobel 2013). "This evolving global communication fabric is intelligent, adaptive and highly innovative and its impact can be felt at both the micro and macro economic levels" (World Economic Forum, 2009).
Several growth accounting studies revealed a significant contribution of ICT on economic performance especially after the year 1990 in developed economies. This effect can be related to the productivity miracle occurring as soon as quality adjusted and the costs for ICT tools started falling. Closer examination on the contribution of ICT to output and productivity growth was initiated by (Oliner and Sichel 2000) and (Jorgenson and Stiroh 1995).Besides the follow-up studies by Jorgenson and Stiroh (2000), and Oliner and Sichel (2001, 2002) for the U.S. economy, the notable studies on individual countries include (Oulton 2002) for the United Kingdom,(Jalava and Pohjola 2001) for Finland, (Van der WIEL and others 2001) and (Khan, Santos, and others 2002) for Canada,(Gordon 2002) for Germany??? and (Cette, Mairesse, and Kocoglu 2000)for France. The significant studies for a group of countries include (Colecchia and Schreyer 2002), Colecchia and Schreyer (2001), Ark et al (2002), and Daveri (2002) for most EU economies; and Jorgenson (2003) for the G7 economies.
United Nations ICT Taskforce has identified ICT as a key tool to enable economic growth in developing countries, offering these the unique opportunity to leapfrog certain stages of development by the use of technologies that bypass the traditional stages of progress to the information society (Force, U.I.T., 2003). However, as the listing shows, most of the studies are centered on developed economies, with a scarcity of studies on the emerging and developing countries. This stresses the questionable validity of the effective impact that ICT has on economic development. As a matter of fact, more recently the link between ICT and development has been articulated in the alarming terms of the "digital divide" (Avgerou, 2009).
Many researchers and scholars have argued that globalization has instead determined the possible widening of the gap between the rich and the poor nations, and caused the emergence of the concept of "digital slavery" (Ogunsola, 2005). Moreover, lacking absorptive capacities such as appropriate levels of human capital or insufficient funding for conducting research and development are all valid factors to consider when studying the effect of ICT in these different country classifications.
# Data and Datasets
We analyze the impact of ICT on GDP growth as well as the effect of Non ICT on GDP growth, (i.e. manufacturing and infrastructure). The primary data source for the study was complied from Conference Board Total Economy Database (Growth). Total Economy Database (TED), is an open source database used for the economic and business knowledge collected by the organization ??? The Conference Board???. As secondary dataset we used the World Bank Development Indicator (WDI), especially for the inclusion of observations for the control variables, namely ???Export percentage of GDP??? and ???Population annual percentage growth???. The TED dataset contains annual data for GDP, ICT and non ICT Capital Service and labor services for 123 countries with a timeframe ranging from 1990 to 2013. Due to missing variables in output and capital input, the time series for this study is limited to the years 1995 to 2010. Moreover, the number of countries has been reduced to 56 (19 Developed, 19 Emerging and 18 Developing Countries, randomly selected) with a total of 864 observations.
# Definition of key concepts and variables
Developed Countries:
While there is no one, set definition we classify developed countries as those with a relatively high level of economic growth and security. Specifically we look at the GDP per capita levels and GNI. When above $ 23000 in 2013, we consider a country developed.
Developing Countries:
We define developing countries as those lacking in terms of its economy, infrastructure and industrial base. We associate low standard of livings and we group under this category all those countries with a GDP/GNI ratio lower than $6500 in 2013.
Emerging Markets
Also for emerging markets, there has not been a commonly accepted definition, however we define them as those nations experiencing rapid growth, industrialization and socio economic development. There are 3 aspects underlying the definition of emerging markets: 1) the absolute level of economic development, indicated by the average GDP per capita; 2) the relative pace of economic development, indicated by the GDP growth rate; 3) the system of market governance and the extent and stability of free market systems (Arnold, D. & Quelch, A., 1998) We only focus on point 2, studying the effect that ICT investments have on annual GDP percentage growth.
GDP
Defined as an aggregate measure of production. It is the sum of the gross values added of all resident institutional units engaged in production (plus any taxes and minus any subsidies on products not included in the value of their outputs). The sum of the final uses of goods and services (all uses except intermediate consumption) is measured in purchasers??? prices less the value of imports of goods and services, or the sum of primary incomes distributed by resident producer units. Based on the levels of GDP, we have classified countries accordingly
i)Developed Countries with a GDP per capita in terms of Purchasing Power Parity higher than $23.000 with adjusted value of year 1995 to 2013 US dollars. ii)Emerging Countries with a GDP per capita in terms of Purchasing Power Parity ranging between $23.000 and $6.500 with adjusted value of year 1995 to 2013 US dollars. iii)Developing Countries with a GDP per capita in terms of Purchasing Power Parity lower than $6.5000 with adjusted value of year 1995 to 2013 US dollars.
GDP Growth
The sum of the final uses of goods and services are measured in Purchasing Power Parity (PPP) expressed in 2013 U.S dollars.
Economic Growth
An increased capacity of an economy to produce goods and services in one period of time compared to a prior time period. It is measured in terms of Gross Domestic Product. Comparison of levels of economic growth between countries is based on levels of GDP per capita. Economic growth is usually associated with technological changes and can thus best reflect the impact of ICT. Past economic growth is key to the material well being of people today.
ICT
The acronym ICT stands for Information Communication Technology. We define ICT as the acquisition of equipment and computer software that provide access to information through telecommunication. For the purpose of this study we will only look at 2 communication technologies, the Internet and cell phones as we assume these to be key drivers in the boosting economic growth.
ICT Capital Services Growth
Defined as the change in the flow of productive services provided by ICT assets. We focus on three types of ICT assets namely computer hardware and equipment, telecommunication equipment, and computer software and services. The underlying capital stock series are calculated from the investment data using the perpetual inventory method. The aggregation of the growth in capital services over the different asset types is calculated using the user cost approach.
Non ICT Capital Service Growth
Refers to the change in the flow of productive services provided by non-ICT assets. Three types of non-ICT assets are included transport equipment; plant, machinery, and other non-ICT equipment; and construction, building and other structures. The underlying capital stock series are calculated from the investment data using the perpetual inventory method. The aggregation of the growth in capital services over the different asset types is calculated using the user cost approach.
Export percentage of GDP
The export of goods and services representing the value of all goods and other market services provided to the rest of the world. They include the value of merchandise, freight, insurance, transport, travel, royalties, license fees, and other services, such as communication, construction, financial, information, business, personal, and government services. They exclude compensation of employees and investment income (formerly called factor services) and transfer payments (WDI). In our study this variable was included as a control variable as it can be used as a good first approximation of wellbeing of a country for international and temporal comparison. However, we have to keep in mind that this measure excludes several crucial elements of general wellbeing such as environment conservation, safety, and population literacy rates.
Population Growth Annual Percentage
Annual population growth rate for year t is the exponential rate of growth of midyear population from year t-1 to t, expressed as a percentage. Population is based on the de facto definition of population, which counts all residents regardless of legal status or citizenship???except for refugees not permanently settled in the country of asylum, who are generally considered part of the population of the country of origin (WDI). This variable was only included as a control variable and is central for improving statistical robustness. Higher population growth can be reflected in higher levels of active population contributing to the production process and thus higher levels of inputs revealed in economic growth.
# Outlier Detection and Missing Values
During the visual inspection of the primary data source we identified many breaks in the data series and missing values. Several countries had to be dropped from the cross- country data source. This reduced the number of countries from 123 to 56 and ultimately to 54 countries; countries that had zero investment in ICT were also plunged from the data set. One interesting observation arising upon cleaning the data was that the missing values related to ICT investment were higher in developing countries as compared to developed and emerging countries. This supports the theory of Pe??a-L??pez et al, claiming lacking information for the assessment of the effect of ICT on GDP growth in developing economies.
# Descriptive Statistics
Table 1, 2 and 3 reports the descriptive statistics of the all three categories of the countries. In descriptive tables, we have included some other variables example such as labor input, ICT capital and Non capital in order to comprehend the primary data. With regard to the variable GDP growth, we observed that developing countries registered the highest levels of GDP growth i.e 4.8 points, as compared to developed countries, which have GDP growth of only 2.307. At this point, it is important to keep in mind that higher levels of GDP do not necessarily correspond with higher levels of development. If we take the case of India for instance, they have much higher levels of GDP than New Zealand or Belgium, but few would suggest that the latter are economically less developed than the former. Moreover, the lower levels of GDP growth in the developed world could be explained by the rampant economic recession (2008) affecting financial markets.
If we observe the mean contribution of ICT in GDP growth among all the three categories we cannot derive any concluding remarks and therefore required a further analysis of the preliminary data. In further analysis we tried to understand the driving force behind the GDP growth upon including control variables namely ???Export percentage of GDP??? and ???Population Percentage Growth???. Upon including the aforementioned variables and conducting lagged OLS regression, we were surprised by the results showing that the variable ???Export percentage of GDP??? is highest in emerging countries (we would have expected the highest level for developing countries if we take the previous results into consideration since openness to trade should be correlated with GDP growth rates) so that we can claim that emerging economies, even if not facing the highest GDP growth, are more export oriented markets and open to trade.
# Model
In order to understand the impact of ICT, a regression output can reveal the difference in effects among different economies. As our primary data source was a time series data on GDP growth, ICT capital service growth and Non-ICT capital service growth. In the time series data we obtained multiple variables captured over time in a given country. Through these variables we wanted to understand the effect of ICT and Non-ICT on GDP growth therefore we defined these as independent and dependent variables respectively. We used a dynamic econometric model with lagged independent variables.
In a lagged econometric model the dependent variable does not react fully to a change in independent variable(s) during the period in which the change occurs therefore the model captures the relationship at time t and lagged relationship at time t-1. Similarly in our model captures the lagged relation between GDP growth and ICT and Non-ICT contribution.
The most common issues with lagged econometric model are multicollinearity and heterogeneity. In order to minimize the effect of multicollinearity, the independent variables used in our model were not correlated, as one variable cannot linearly predict the other. The additional variable such as??? Exports percent in total GDP??? controls heterogeneity as export coefficients clearly positive and significant to GDP growth.
# Empirical Results Developed Countries
Once the data was arranged the dependent variable (GDP growth) was regressed with the independent variables (ICT and Non-ICT capital growth)(Table 4). Initially when no control variables were applied in the model, ICT capital growth and Non ICT capital growth both had significant impact on GDP growth. In the next step when control variables were included in the model, ICT and Non-ICT capital growth still remained significant yet the coefficient of ICT reduced. This reduction in the coefficient of ICT capital growth explains the variation in GDP growth caused by the control variables.
However as the regression table highlights, ICT capital growth does play a significant role in levels of GDP growth in developed countries. On the contrary the control variables ???Export Percentage of GDP??? and ???Population Percentage Growth??? have no significant impact on GDP growth in developed economies.
# Empirical Results Emerging Countries
Emerging countries showed similar results to developed economies, with ICT and Non-ICT capital service having a significant impact on GDP growth. Even when control variables were applied, the dependent variable showed significant results. In emerging countries, population growth was more significant when compared to developed countries; therefore from the table we can state that one unit increase in population growth causes 0.387 GDP growth. Overall we had similar results for developed and emerging countries where ICT capital growth played impact in economic development of the economies.
# Empirical Results Developing Countries
The results among developing countries were slightly reversed from the results we have seen above. In developing countries, ICT capital growth had no significant impact on GDP growth whereas non-ICT capital growth did have a significant one. Non-ICT capital growth had positive impact on GDP (i.e one unit increase in Non-ICT capital caused 0.641 percentage GDP growth}. Among the control variables, ???Population % Growth??? was highly significant yet had a negative relationship; As the output from the table displays, it is possible to claim that one unit increase in ???Population % Growth ???decreases GDP growth by 0.845 percentage points.
# Visual Inspection of the Data
In order to comprehend the data further we created some visual graphics for all the three categories of countries by using R-Plot. In the diagram below we observe that there is limited variation in developed countries when ICT capital growth is observed over the time period from 1995-2010. With the help of the plot below we were able to detect some developing countries, which had phenomenal ICT growth, such as Sri Lanka. The categories 1, 2 and 3 in the plot represent developed, developing and emerging countries respectively.
As in the paper we wanted to understand the difference in impact of ICT capital and Non ICT capital therefore it was highly essential to segregate the information on that basis. In order to understand the difference in impact we created a bar chart using googleVis. The bar chart explains the contribution of ICT and Non ICT capital for the year 2009 and GDP growth for the year 2010. We can observe that Non-ICT capital is higher in developing countries; the reason could be that these economies majorly depend on manufacturing or labor intense jobs. In developed the difference between ICT capital growth and Non ICT capital is almost negligible.
In order to understand the growth of ICT capital services among individual categories of the countries; we used ggplot methodology.
# Developed Countries
The diagram below shows the trend of ICT capital growth among selected developing countries. Overall we observe a downward trend among developing countries leaving out certain outliers such as Denmark and Italy which experienced increase in ICT capital growth in the year 2010.
# Emerging Countries
In the case of emerging countries we observe a mix trend with regards to ICT capital growth. Certain countries have observed a steady increase in ICT capital like Costa Rica and Slovenia. Some countries have shown a stable trend Mexico, Poland and Portugal. Other countries showed irregularities in their trends with sudden rise and fall, maybe recession during the time period of 2007-2009 had some impact on the ICT capital. In many discussions it has been pointed out that Brazil, South Africa, Turkey and Mexico are the emerging economies and if we observe the diagram below we find that ICT growth is positive among all these countries. Therefore ICT capital has some significance in emerging economies.
# Developing Countries
The empirical results derived above show that ICT capital growth was insignificant in developing countries. Visual analysis of the ICT capital growth augments our results because the graph shows linear trends among all the countries leaving some outliers like Sri Lanka and Nigeria.
# Google Vis
Below, our study includes an interactive visualization of the impact of ICT in all countries (Developed, Emerging and Developing) based on figures from 2010. Visualization can help the reader to better contextualize the quantitative results provided by our study. In the maps below, stronger effects are correlated with darker colors.
# Limitations of the study
One of the key limitations that our study presents is the lack of available data explaining the impact of ICT in developing and emerging markets (as claimed in the literature review). The limited number of observations and of years covered that our analysis presents might create some general measurement issues, impeding us to make any generalizations, undermining the external validity of our test. The selected sample of countries being analyzed does not allow for proper randomization and thus we might be encountering some sample selection bias effect, with the sample obtained being not representative of the population being analyzed. Moreover, the limitation of our study on the mere effect of ICT contribution and non-ICT contribution to explain economic growth does not fully constitute an answer to what is attracting ICT investments. Other factors such as social development indicators (government effectiveness, political stability, unemployment rate) educational indicators (enrolment rates in primary, secondary and tertiary education), additional economic indicators (current account balance, real interest rates, trade balance, total investment, government revenues etc.), demographic indicators and technological indicators, should have been taken into account to make the sample of the study more representative.
# Concluding remarks
Despite having ICT often referred to as a catalyst for innovation and modernization, lowering transaction costs, blurring boundaries and spreading information that will make societies better off, our findings demonstrate its empirical limitations. The conclusion of this study is that certain steps need to be undertaken in order to access the full benefits that ICT can determine. Since the early 1990, international institutions have been pushing developing nations to deregulate and heavily invest in ICT infrastructure as a strategy for accelerating socio economic development (Ngwenyama & Morawczynski, 2010). However, after more than a decade of continuous investments, some countries still have not achieved the desired outcomes. Our findings demonstrate that simply mastering technology is not enough for determining economic development in the absence of complementary factors. When presenting the results for the levels of GDP, ICT Contribution and Non ICT Contribution, it became evident that especially in those countries experiencing the highest levels of GDP Growth (surprisingly the developing countries and not the emerging economies) the role played by ICT Contribution was really marginal, and lower than the contribution of Non ICT factors meaning that it is not possible to claim a positive effect of ICT infrastructure levels on GDP growth. The results of this paper call for more empirical research to assess the performance and impact of ICT in developing and emerging countries and argues that policy makers need to cultivate other conditions such as human labor capacity and technical skill levels to enable emerging and developing economies to fully benefit from ICT investments. ??? If developing countries are to seize the opportunities of technological innovation (. . . ) they will have to harness those innovation and the knowledge that comes with them??? (UNCTAD, 2007).
# Policy recommendations
As the study has shown, it is difficult to assess the impact of ICT on GDP growth. However we believe that higher investments in ICT infrastructure can still help developing and emerging countries to reach higher economic performance.
In view of this, in this section, we turn our attention to the policy tools necessary to promote ICT investments in order to benefit the most from ICT potential to contribute to economic growth.
In our view, policy tools required to stimulate the deployment of ICT infrastructure ranges from the formulation of national ICT plans and government intervention to correction of some of the market failures making investments in these countries less attractive.
More specifically we believe that the following broad policy areas should be considered:
??? Investment in promotion of adoption programs: More specifically, the implementation of supply oriented policies to promote the adoption of IC and IT by certain social groups and firms that may not be naturally inclined to adopt the technology. This could be done via increasing digital literacy, providing economic incentives and subsides and the development of E-Government, facilitating the interaction between citizens and governments or businesses and governance.
??? Provide explicit incentives for ICT: such as tax incentives and include some evaluation programs to identify the impact of ICT on productivity and other factors over the years (historical approach). The question to be answered is whether the lesson from ICT can be applied to future types of technical change, in other words, what lessons form past waves of technical change can be applied to ICT. This can only be achieved upon collecting results over longer time periods.
??? Invest in technological skills, if governments want to enable a significant part of the population to enjoy the benefit related to ICT.
??? Adoption of competition policy: as competition might be stimulating the ICT supply, explicitly or implicitly.
??? Removal of any potential supply obstacles: lowering economic barriers to entry for international investors
??? ICT in the public sector: ICT has the great potential to also generate large productivity growth in the public sector, such as with the computerization of hospitals, schools, police etc. In view of this, it can be extremely valuable for policymakers to make the public sector work more effectively and efficiently.
# Bibliography
Cardona, Melisande, Tobias Kretschmer, and Thomas Strobel. 2013. ???ICT and Productivity:
Colecchia, Alessandra, and Paul Schreyer. 2002. ???ICT Investment and Economic Growth in the 1990s: Is the United States a Unique Case?: A Comparative Study of Nine OECD Countries.??? Review of Economic Dynamics 5 (2). Elsevier: 408???42.
Gordon, Robert J. 2002. "New Economy—An Assessment from a German Viewpoint." RWI-Studie im Auftrag des Bundesministeriums für Wirtschaft und Forschung, erscheint demnächst.
Group, World Bank. 2012. World Development Indicators 2012. World Bank Publications.(Growth, Groningen. ???Development Centre and the Conference Board (2004), Total Economy Database.???
Jalava, Jukka, and Matti Pohjola. 2001. ???Economic Growth in the New Economy.??? UNU/WIDER Discussion Paper, no. 2001/5.
Jorgenson, Dale W, and Kevin Stiroh. 1995. ???Computers and Growth.??? Economics of Innovation and New Technology 3 (3-4). Taylor & Francis: 295???316.
Khan, Hashmat, Marjorie Santos, and others. 2002. Contribution of ICT Use to Output and Labour- Productivity Growth in Canada. Bank of Canada.
Ogunsola, LA. 2005. ???Information and Communication Technologies and the Effects of Globalization: Twenty- First Century ???Digital Slavery??? for Developing Countries???Myth or Reality.??? Electronic Journal of Academic and Special Librarianship 6 (1-2): 1???10.
Oliner, Stephen D, and Daniel E Sichel. 2000. ???The Resurgence of Growth in the Late 1990s: Is Information Technology the Story???? FEDS Working Paper.
Oulton, Nicholas. 2002. ???ICT and Productivity Growth in the United Kingdom.??? Oxford Review of Economic Policy 18 (3). Oxford Univ Press: 363???79.
Pe??a-L??pez, Ismael, and others. 2007. ???Information Economy Report 2007-2008: Science and Technology for Development: The New Paradigm of ICT.??? UNCTAD.
Timmer, Marcel P, Robert Inklaar, Mary O???Mahony, and Bart Van Ark. 2010. Economic Growth in Europe: A Comparative Industry Perspective. Cambridge University Press.
Van der WIEL, Henry, and others. 2001. Does ICT Boost Dutch Productivity Growth? CPB Netherlands Bureau for Economic Policy Analysis.
|
88a751042ab521b06363536dc98f4aaccea715c6
|
638fea8f54bee228ba447a346181a37eaee5efed
|
/Zadanie5.R
|
07469b08b101f7f2ee31ade8fda6aa2b005a901f
|
[] |
no_license
|
Mb1sh01/EVM
|
71bf2c3e5465cad98a9f2c883936f7e4af9760b7
|
b288b6a0bff2220152fa3f2ad83b2d01ef609065
|
refs/heads/main
| 2023-04-18T14:09:54.503745
| 2021-05-12T12:14:49
| 2021-05-12T12:14:49
| 302,025,468
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 593
|
r
|
Zadanie5.R
|
# Exercise 5: scatter plot of y vs x with marginal boxplots, written to a PNG.
png("plot-ex05.png", width=600)

# Reproducible simulated data: x ~ F(3, 112), y is a noisy linear function of x.
set.seed(4)
x <- rf(n = 300, df1 = 3, df2 = 112)
e <- rnorm(300, 0, 4)
y <- 5 - 3*x + e

# The medians drive the dashed reference lines on the scatter plot.
med_x <- median(x)
print(med_x)
med_y <- median(y)
print(med_y)

# 3x3 layout: panel 1 = vertical boxplot of y (left column),
# panel 2 = scatter plot (upper right), panel 3 = empty spacer,
# panel 4 = horizontal boxplot of x (bottom row).
layout(matrix(c(1, 2, 2, 1, 2, 2, 3, 4, 4), nrow = 3, byrow = TRUE))
boxplot(y, pch = 0, cex = 1, range = 1.5, col = "green")
plot(x, y, abline(v = med_x, h = med_y, lty = 2), pch = 21, cex = 1, col = "green")
plot(x, y, xlab = "", ylab = "", axes = FALSE, col = "white")
boxplot(x, pch = 21, cex = 1, horizontal = TRUE, range = 1.5, col = "green")
dev.off()
|
40efb9569279aca994ad0a4d488b3385914422b6
|
e925c1c38bb5b43a4fe48c99470c282b314b3998
|
/R/cdc_rankedpairs.R
|
e99336e76149489f2ae3fd3ffddbc0dd6b8dd63d
|
[] |
no_license
|
cran/votesys
|
c8e729705e77e6f44651774085efe79e690c8c3c
|
1adcaf345de8ad73abc670ee473d910fdfa24134
|
refs/heads/master
| 2021-04-06T20:52:15.752148
| 2018-04-20T08:56:40
| 2018-04-20T08:56:40
| 125,397,960
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,116
|
r
|
cdc_rankedpairs.R
|
#' Ranked Pairs Method
#'
#' It is also called Tideman method. See details.
#'
#' The method first summarizes the result of pairwise comparison,
#' the order used is the order of winning votes from large to small.
#' So if pairwise comparison has ties (that is, the number of voters
#' who prefer a than b is equal to the number of voters who prefer
#' b than a, the method will fail, and the winner will be NULL).
#'
#' The second step is called tally.
#' If a wins b with 100 votes, b wins c with 80 votes, then
#' we put a-b-100 ahead of b-c-80. Suppose a wins b with 100 votes,
#' a wins c with 100 votes, then we have a tie; so we have to check
#' the relation between b and c. If b wins c, then we put a-c-100
#' ahead of a-b-100. Suppose a wins b with 100 votes, d wins b with
#' 100 votes, then again we have a tie and have to check the a-d
#' relation. If d wins a, then we put d-b-100 ahead of a-b-100. Suppose
#' a wins b with 100 votes, e wins f with 100 votes, then the ties cannot
#' be solved, so the winner will be NULL.
#'
#' The third step, after the above mentioned tally, is called lock-in.
#' As the relations have been sorted according to their strength
#' from large to small in the tally step, we now add them one
#' by one. The rule is: if a relation is contradictory with those
#' already locked in relations, this relation will be discarded.
#'
#' For example, suppose we have already add relation a > b and
#' b > c, then the two relations are locked in. As a result, we should
#' not add b > a. Also, as a > b and b > c indicate a > c, so we should
#' not add c > a. After this process, we will finally find the winner who
#' defeats all others.
#'
#' @param x it accepts the following types of input:
#' 1st, it can be an object of class \code{vote}.
#' 2nd, it can be a user-given Condorcet matrix,
#' 3rd, it can be a result of another Condorcet method,
#' which is of class \code{condorcet}.
#' @param allow_dup whether ballots with duplicated score values
#' are taken into account. Default is TRUE.
#' @param min_valid default is 1. If the number of valid entries of
#' a ballot is less than this value, it will not be used.
#'
#' @return a \code{condorcet} object, which is essentially
#' a list.
#' \itemize{
#' \item (1) \code{call} the function call.
#' \item (2) \code{method} the counting method.
#' \item (3) \code{candidate} candidate names.
#' \item (4) \code{candidate_num} number of candidate.
#' \item (5) \code{ballot_num} number of ballots in x. When
#' x is not a \code{vote} object, it may be NULL.
#' \item (6) \code{valid_ballot_num} the number of ballots that are
#' actually used to compute the result. When
#' x is not a \code{vote} object, it may be NULL.
#' \item (7) \code{winner} the winner, may be NULL.
#' \item (8) \code{input_object} the class of x.
#' \item (9) \code{cdc} the Condorcet matrix which is actually used.
#' \item (10) \code{dif} the score difference matrix. When
#' x is not a \code{vote} object, it may be NULL.
#' \item (11) \code{binary} win and loss recorded with 1 (win),
#' 0 (equal) and -1 (loss).
#' \item (12) \code{summary_m} times of win (1), equal (0)
#' and loss (-1).
#' \item (13) \code{other_info} a list of 3 elements. The 1st
#' is the reason of failure. If winner exists, it will be blank. The 2nd
#' is the tally result (it may contain unsolved ties).
#' The 3rd is the lock-in result; if the method fails,
#' it will be NULL.
#' }
#'
#' @references
#' \itemize{
#' \item Tideman, T. 1987. Independence of clones as a
#' criterion for voting rules. Social Choice and Welfare, 4(3), 185-206.
#' }
#'
#' @export
#' @examples
#' raw <- rbind(c('m', 'n', 'c', 'k'), c('n', 'c', 'k', 'm'),
#' c('c', 'k', 'n', 'm'), c('k', 'c', 'n', 'm'))
#' raw <- list2ballot(m = raw, n = c(42, 26, 15, 17))
#' vote <- create_vote(raw, xtype = 2, candidate = c('m', 'n', 'c', 'k'))
#' y <- cdc_rankedpairs(vote)
cdc_rankedpairs <-
function(x, allow_dup = TRUE, min_valid = 1) {
    # Ranked Pairs (Tideman) Condorcet method.  See the roxygen block above
    # for the three stages: pairwise tally, sorting of the victories by
    # strength, and lock-in of non-contradictory relations.
    method <- "rankedpairs"

    # ---- input validation ----
    if (min_valid < 1)
        stop("Minimum number of min_valid is 1.")  # fixed typo: was "Minimux"
    if (!class(x)[1] %in% c("vote", "matrix", "condorcet"))
        stop("x must be a vote, condorcet or matrix object.")
    stopifnot(allow_dup %in% c(TRUE, FALSE))

    # Build (or extract) the Condorcet pairwise matrix plus bookkeeping info.
    # fInd_cdc_mAtrIx() is a package-internal helper.
    CORE_M <- fInd_cdc_mAtrIx(x, dup_ok = allow_dup, available = min_valid)
    message("EXTRACTING INFO")
    class1 <- CORE_M$input_object
    candidate <- CORE_M$candidate
    candidate_num <- CORE_M$candidate_num
    ballot_num <- CORE_M$ballot_num
    valid_ballot_num <- CORE_M$valid_ballot_num
    cdc_matrix <- CORE_M$cdc
    dif_matrix <- CORE_M$dif
    binary_m <- CORE_M$binary
    message("SELECTING")
    summary_m <- sUmmAry_101(x = binary_m, rname = candidate)

    # result_ID tracks how far the method gets:
    #   0 = pairwise comparison had an exact tie (method fails),
    #   1 = pairwise OK, but the tally sorting had unsolved ties,
    #   2 = tally fully sorted; lock-in ran and a winner was found.
    result_ID <- 0  # decide the type of result

    message("------PAIRWISE")
    # Enumerate each unordered candidate pair once (i > j) and record winner,
    # loser and their vote counts.  On ij >= ji the row candidate is recorded
    # as the winner; an exact tie (ij == ji) is flagged for failure below.
    win_side <- c()
    lose_side <- c()
    win_num <- c()
    lose_num <- c()
    pair_have_tie <- 0
    for (i in 1:nrow(cdc_matrix)) {
        for (j in 1:ncol(cdc_matrix)) {
            if (i > j) {
                ij <- cdc_matrix[i, j]
                ji <- cdc_matrix[j, i]
                if (ij >= ji) {
                    win_side <- append(win_side, candidate[i])
                    lose_side <- append(lose_side, candidate[j])
                    win_num <- append(win_num, ij)
                    lose_num <- append(lose_num, ji)
                    if (ij == ji)
                        pair_have_tie <- 1
                } else if (ij < ji) {
                    win_side <- append(win_side, candidate[j])
                    lose_side <- append(lose_side, candidate[i])
                    win_num <- append(win_num, ji)
                    lose_num <- append(lose_num, ij)
                }
            }
        }
    }
    tally <- data.frame(win_side, lose_side, win_num, lose_num, stringsAsFactors = FALSE)
    if (pair_have_tie == 0)
        result_ID <- 1

    # if pairwise has no tie, then result_ID=1, then go ahead
    if (result_ID == 1) {
        message("------SORTING")
        # Sort victories by winning votes (descending), then losing votes
        # (ascending): stronger victories come first.
        tally_o <- order((-1) * tally[, 3], tally[, 4])
        tally <- tally[tally_o, ]
        nr_tally <- nrow(tally)
        only_num_df <- tally[, c(3, 4)]
        if (nr_tally == nrow(unique(only_num_df))) {
            # do NOT use uniqueN
            # All (win, lose) count pairs are distinct: no sorting ties.
            result_ID <- 2
        } else {
            # Some victories have identical strength; try to break the ties
            # using the pairwise win/loss structure (RP_TIE_SOLVE is a
            # package-internal helper; its first element reports success).
            BI_ZERO_ONE <- binary_m
            BI_ZERO_ONE[BI_ZERO_ONE == -1] <- 0
            re_tally <- RP_TIE_SOLVE(x = tally, zeroone = BI_ZERO_ONE)
            if (re_tally[[1]] == TRUE) result_ID <- 2
            tally <- re_tally[[2]]
        }
    }

    # if sort has no tie, then result_ID=2, then go ahead
    if (result_ID == 2) {
        message("------LOCKING IN")
        # Lock in relations from strongest to weakest, discarding any that
        # contradict those already locked in (lock_winner is internal).
        name_m <- as.matrix(tally[, c(1, 2)])
        the_source <- lock_winner(name_m, CAND = candidate)
        winner <- the_source[[2]]
        LOCK_IN <- the_source[[1]]
    }

    message("COLLECTING RESULT")
    # Assemble the "condorcet" result object; winner/other_info are filled
    # in below according to how far the method got.
    over <- list(call = match.call(), method = method, candidate = candidate, candidate_num = candidate_num, ballot_num = ballot_num,
        valid_ballot_num = valid_ballot_num, winner = NULL, input_object = class1, cdc = cdc_matrix,
        dif = dif_matrix, binary = binary_m, summary_m = summary_m, other_info = NULL)
    if (result_ID == 0) {
        over$other_info <- list(failure = "Pairwise comparison has ties, i. e., the number of people who prefer i than j is equal to the number of people who prefer j than i.",
            tally = tally, lock_in = NULL)
    }
    if (result_ID == 1) {
        over$other_info <- list(failure = "There are unsolved ties when sorting the tally.", tally = tally, lock_in = NULL)
    }
    if (result_ID == 2) {
        over$winner <- winner
        over$other_info <- list(failure = "", tally = tally, lock_in = LOCK_IN)
    }
    class(over) <- "condorcet"
    message("DONE")
    return(over)
}
|
b21afa3510e5717286dec159755cfb8534ee6158
|
fc32885f3760cb060b9d39ddf88cf4df34fb7f4c
|
/R/backtester.R
|
174bcf8df044f6aec831e633eaa533d3d2d1e2ef
|
[] |
no_license
|
vitociciretti/ermes
|
1c74b61d648e3aed30c6befc766b066ea9ae5b79
|
dd254ac0a1173c71c06c51399afa6633c615ca47
|
refs/heads/main
| 2023-05-09T07:52:04.913448
| 2021-06-05T13:39:49
| 2021-06-05T13:39:49
| 345,759,254
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,220
|
r
|
backtester.R
|
#' Backtest a financial instrument
#'
#' Runs the entry signals produced by \code{entry_fun} over merged bid/ask
#' OHLC data, simulates each trade (market/limit/stop entries, optional
#' take-profit / stop-loss levels, optional user-supplied exit logic) and
#' accumulates the per-trade results.
#'
#' @param ask an xts object of OHLC ask prices
#' @param bid an xts object of OHLC bid prices
#' @param entry_fun a function that takes in bid/ask prices and returns a data.frame/data.table with mandatory columns: date, OrderSize, and optional columns: EntryType = c('Market', 'Limit', 'Stop'), StopLoss, TakeProfit
#' @param entry_args named list of arguments that accompany the entry function
#' @param exit_fun a function that takes in the trade's bid/ask data window (with the entry columns attached) and returns exit rows with (at least) ExitTime, ExitPrice and ExitAmount columns; if NULL, exits are driven by TakeProfit/StopLoss via exitTP_SL()
#' @param exit_args named list of arguments that accompany the exit function
#' @param CloseTradeOnOppositeSignal should the backtester close the current trade when an entry order in the opposite direction appears
#' @param AddToPosition if FALSE, entry signals that arrive while a trade is still open are skipped (no pyramiding)
#' @param TradeTimeLimit maximum holding period per trade; having a time limit increases performance
#' @return a list with two elements: \code{results}, a data.table with one row per (partial) trade exit including a \code{Returns} column, and \code{data}, the merged bid/ask data with the trade columns joined on
#' @examples
backtest <- function(ask, bid,
                     entry_fun, entry_args=list(),
                     exit_fun=NULL, exit_args=list(),
                     CloseTradeOnOppositeSignal=TRUE,
                     AddToPosition=FALSE,
                     TradeTimeLimit=lubridate::weeks(4)) {
  # Load bid/ask historical data (project helper; ask/bid columns are
  # suffixed .a / .b in the merged table).
  dat <- mergeAskBid(ask, bid)
  # Create entry orders, takes in a list of arguments for the function (entry_args).
  # The first formal of entry_fun receives a copy of the merged data; the rest
  # come from entry_args.  Note: rlang::exec('entry_fun', ...) looks the name
  # up in the calling environment, which resolves to the entry_fun argument.
  arg_names <- names(formals(entry_fun))
  args <- c(list(copy(dat)), entry_args)
  names(args)[1] <- arg_names[1]
  user_dat <- rlang::exec('entry_fun', !!!args)
  # Columns that are in user_dat, but not dat (keep date column though)
  user_cols <- c('date', names(user_dat)[!names(user_dat) %in% names(dat)])
  dat <- merge(dat, user_dat[, c(user_cols), with=FALSE], by='date', all.x=TRUE)
  # Rows with a non-zero, non-NA OrderSize are the entry signals.
  entry <- dat[OrderSize!=0 & !is.na(OrderSize)]
  # Return columns for prev/next order directions/sizes
  entry[, prev_order:=shift(OrderSize)]
  entry[, next_order:=shift(OrderSize, type='lead')]
  # Estimate the timeframe of our backtesting data in minutes (project helper)
  mins <- getTimeFrameMins(ask)
  # Entry time will be the close of the current candle
  if(!'EntryTime' %in% names(entry)) {
    entry[, EntryTime:=date + lubridate::minutes(mins)]
  }
  # Side: +1 long, -1 short (sign of the order size)
  entry[, Side:=sign(OrderSize)]
  # If EntryPrice isn't supplied by the entry_fun, create it:
  # buy at the ask close, sell at the bid close.
  if(!'EntryPrice' %in% names(entry)) {
    entry[, EntryPrice:=ifelse(Side>0, Close.a, Close.b)]
  }
  # If no entry type, all is 'Market'
  if(!'EntryType' %in% names(entry)) {
    entry[, EntryType:='Market']
  }
  entry[, Order_ID:=1:nrow(entry)]
  # Entry loop ------------------
  # Loop through all the market entry points and get result for each trade
  results <- data.table()
  prevEndDate <- first(entry)$date
  for(iter in 1:nrow(entry)) {
    this.entry <- entry[iter,]
    # Skip signals that arrive while the previous trade is still open,
    # unless pyramiding (AddToPosition) is allowed.
    if(AddToPosition==FALSE) {
      if(this.entry$date < prevEndDate) {
        # skipping order: previous trade still open
        next
      }
    }
    # Window of data the trade can live in: either until the next opposite
    # signal, or until TradeTimeLimit expires.
    # NOTE(review): if no later opposite signal exists, first(...) of an empty
    # selection yields NA and the date filter below returns zero rows, which
    # then trips the "not granular enough" warning -- confirm intended.
    if(CloseTradeOnOppositeSignal) {
      nextOppSignal <- first(entry[date > this.entry$date &
                                     sign(OrderSize) != sign(this.entry$OrderSize), EntryTime])
      this.dat <- dat[date>=this.entry$date & date < nextOppSignal]
    } else {
      this.dat <- dat[date>=this.entry$date & date < (this.entry$date + TradeTimeLimit)]
    }
    # Stamp the trade's static attributes onto its data window.
    this.dat[, Side:=first(this.entry$Side)]
    this.dat[, OrderSize:=this.entry$OrderSize]
    this.dat[date==this.entry$EntryTime, EntryTime:=this.entry$EntryTime]
    this.dat[, EntryPrice:=this.entry$EntryPrice]
    this.dat[, Order_ID:=this.entry$Order_ID]
    # If TP / SL is present, copy it onto the window; otherwise use +/-Inf
    # sentinels (signed by Side) so the levels can never be hit.
    if('TakeProfit' %in% names(this.dat)) {
      this.dat[, TakeProfit:=this.entry$TakeProfit]
    } else {
      this.dat[, TakeProfit:=Inf * this.entry$Side]
    }
    if('StopLoss' %in% names(this.dat)) {
      this.dat[, StopLoss:=this.entry$StopLoss]
    } else {
      this.dat[, StopLoss:= -Inf * this.entry$Side]
    }
    # Limit/Stop Orders -----------------
    # If the entry type is limit or stop, cut out the data before that price is hit.
    # Long limit fills when the ask low trades through the limit price; short
    # limit fills when the bid high does.
    if(this.entry$EntryType=='Limit') {
      if(this.entry$Side == 1) {
        trade_entry_time <- first(this.dat[ Low.a <= this.entry$EntryPrice])$date
      } else if(this.entry$Side == -1) {
        trade_entry_time <- first(this.dat[ High.b >= this.entry$EntryPrice])$date
      }
      if(length(trade_entry_time) == 0) {
        next  # order never filled inside its window
      }
      this.dat <- this.dat[date >= trade_entry_time]
    }
    # Stop orders fill on the opposite side of the market vs limit orders.
    if(this.entry$EntryType=='Stop') {
      if(this.entry$Side == 1) {
        trade_entry_time <- first(this.dat[ High.a >= this.entry$EntryPrice])$date
      } else if(this.entry$Side == -1) {
        trade_entry_time <- first(this.dat[ Low.b <= this.entry$EntryPrice])$date
      }
      if(length(trade_entry_time) == 0) {
        next
      }
      this.dat <- this.dat[date >= trade_entry_time]
    }
    if(nrow(this.dat) <= 2){
      warning(paste0('historical data is not granular enough: ', this.entry$date))
      next
    }
    # Trade Exit -------------------
    if(!is.null(exit_fun)) {
      # Same calling convention as entry_fun: first formal gets a copy of
      # the trade's data window, the rest come from exit_args.
      arg_names <- names(formals(exit_fun))
      args <- c(list(copy(this.dat)), exit_args)
      names(args)[1] <- arg_names[1]
      exit <- rlang::exec('exit_fun', !!!args)
    } else {
      # If no exit function just use the TP/SL values
      # and if they're not there, just the last date
      exit <- exitTP_SL(this.dat)
    }
    prevEndDate <- last(exit)$ExitTime
    # Duplicate our entry info for every exit line (for partial closes)
    for(exit_rows in 1:nrow(exit)){
      if(exit_rows == 1) next
      this.entry <- rbind(this.entry, this.entry[1])
    }
    thistrade <- cbind(this.entry, exit[, !names(exit) %in% names(this.entry), with=FALSE])
    # P&L per exit line: direction * price move * size closed.
    thistrade[, Returns := Side * (ExitPrice - EntryPrice) * abs(ExitAmount)]
    results <- rbind(results, thistrade, fill=TRUE)
    print(paste0('Trade ', thistrade$Order_ID, ': ', round(sum(thistrade$Returns, na.rm=TRUE), 1), ' | Total: ',
                 last(round(cumsum(results[!is.na(Returns)]$Returns), 1))))
  }
  # Join the trade columns back onto the full dataset for inspection/plotting.
  dat <- merge(dat,
               results[, c('date', names(results)[!names(results) %in% names(dat)]), with=FALSE],
               all=TRUE)
  return(list(results=results, data=dat))
}
|
9bc3c79dba5253979b7537138edb87d33921eb35
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/sfsmisc/examples/errbar.Rd.R
|
9a1d0f01c0f55519e2a311a03a067558b764768d
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 232
|
r
|
errbar.Rd.R
|
# Example script for sfsmisc::errbar(): scatter plot with vertical error bars.
library(sfsmisc)
### Name: errbar
### Title: Scatter Plot with Error Bars
### Aliases: errbar
### Keywords: hplot

### ** Examples

# Ten random points, each with its own half-width of roughly one unit.
y <- rnorm(10)
d <- 1 + .1*rnorm(10)
errbar(1:10, y, y + d, y - d, main="Error Bars example")
|
2fbe1856cc34504094b471c89b9c0057a947153d
|
1546947d8721a49e061e6985bc0ba44ece326c66
|
/modelFit.R
|
e59bb9f3070a4b52f76c32b8558473893eb8cde0
|
[] |
no_license
|
apjvarun/crime
|
386d3832df7aff1ed133160e7ea0a2d73c662617
|
3f58b98a0ae31ee186be0de655f517dfd4d428d3
|
refs/heads/master
| 2021-01-19T08:29:10.681379
| 2017-04-11T12:00:23
| 2017-04-11T12:00:23
| 87,635,307
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,042
|
r
|
modelFit.R
|
# modelFit.R -- OLS model fitting and variable selection on the 2008 crime data.
#
# Pipeline:
#   1. Fit a full linear model and keep regressors significant at alpha = 5%.
#   2. Refit on the reduced set and inspect diagnostic plots.
#   3. Multicollinearity analysis: VIF pruning and variance decomposition.
#   4. Variable selection: AIC-based forward (step), F-test backward
#      (rms::fastbw) and a manual F-test forward-selection loop.

rm(list=ls())  # NOTE: clears the whole workspace; intended for interactive use
getwd() # where am I?

crimeData_pre <- as.data.frame(read.csv("CrimeData08.csv"))
crimeData_pre <- crimeData_pre[, -26]  # drop column 26 -- assumed unusable, TODO confirm
# After the drop, column 99 is the response and columns 1-98 the regressors.
crime_Y <- as.numeric(crimeData_pre[,99])
crime_X <- as.data.frame(crimeData_pre[,1:98])
#summary(crime_X)
#dataset<-as.data.frame(Response = crime_Y, Reg = crime_X)

# Full model with all 98 regressors.
model <- lm(crime_Y ~ . , data = crime_X)
table <- summary(model)
table2 <- table$coefficients  # column 4 holds the p-values
cat("Number of significant regressors SIGlevel 10%: ",length(which(table2[,4]<=0.1)) )
cat("Number of significant regressors SIGlevel 5%: ",length(which(table2[,4]<=0.05)) )
cat("Retain regressors significant for alpha = 5%. Remove all other regressors.")

# NOTE: strict '<' here vs '<=' in the counts above; only matters for
# p-values exactly equal to 0.05.
index_Sig<-as.numeric(which(table2[,4]<0.05))
index_Sig
# If the intercept is significant, then '1' is an element in index_Sig.
# Index_Sig helps in removing non-significant variables. Therefore, remove "1" from index_Sig,
# as it doesn't give any meaningful information.
if(index_Sig[1] == 1)
{
  index_Sig <- index_Sig[-1]
}
# Shift by one so indices refer to columns of crime_X (coefficient row 1 is
# the intercept, row k+1 is regressor k).
index_Sig <-index_Sig - 1
index_Sig

# Construct filtered dataset after retaining only significant regressors.
crime_Xnew <- crime_X[,index_Sig]
crime_Ynew <- crime_Y

# Save the filtered data on disk
save_table <- cbind(crime_Ynew, crime_Xnew)
write.csv(save_table, "filtered_CrimeData.csv", row.names = FALSE)

# Further analysis with the filtered data
modelFit <- lm(crime_Ynew ~ . , data = crime_Xnew)
summary(modelFit)

# Diagnostic plots for the reduced model (residuals vs fitted, Q-Q, etc.).
# Execute the next statement and press enter to see plots back to back.
plot(modelFit)

## Residual Analysis
# BUG FIX: the original plotted model$fitted.values (full model) against
# modelFit$residuals (reduced model); both must come from the same fit.
plot(modelFit$fitted.values, modelFit$residuals)

## **********************************************************************************************
## Multicollinearity Analysis
### VIF: Variable selection
library(usdm)
# Iteratively drop the regressor with the largest VIF until all VIFs <= 10.
df = crime_Xnew
flag = 1
while(flag)
{
  vif_table <-vif(df)
  if(max(vif_table[,2])>10)
  {
    flag = 1
    df<-df[,- which.max(vif_table[,2])]
  }
  else
    flag = 0
}
vif_table
crime_Xnew_vif <- df
model_vif<- lm(crime_Ynew ~ .,data=crime_Xnew_vif )
#summary(model_vif)

# Compare fit quality across the three models.
"Rsquare adjusted"
rbind(c("actual model", "reduced model", "after VIF"), c(summary(model)$adj.r.squared, summary(modelFit)$adj.r.squared, summary(model_vif)$adj.r.squared) )
"Rsquare"
rbind(c("actual model", "reduced model", "after VIF"), c(summary(model)$r.squared, summary(modelFit)$r.squared, summary(model_vif)$r.squared) )

############################### Variance Decomposition method for variable selection*****************************
# Eigen-decomposition of X'X; VDmatrix[k, j] is the proportion of the variance
# of coefficient j attributable to eigenvalue k.
X_prod = t(as.matrix(crime_Xnew))%*%as.matrix(crime_Xnew)
evals = eigen(X_prod)$values
evecs = eigen(X_prod)$vectors
VDmatrix <- matrix(, nrow = length(evals), ncol = length(evals))
for( j in 1:length(evals)) #regressors
{
  sumCol_VD <-0
  for (pappu in 1:length(evals))
  {
    sumCol_VD<- sumCol_VD + (1/evals[pappu])*evecs[pappu,j]^2
  }
  for (k in 1:length(evals)) #eigenvalues
  {
    VDmatrix[k,j] <- (1/evals[k])*evecs[k,j]^2/sumCol_VD
  }
} #Verify that colSums of VDmatrix is 1.
colSums(VDmatrix)
rowSums(VDmatrix)

# construct condition index (largest singular value over each singular value)
root_evals <- sqrt(evals)
condition_index<- max(root_evals)/root_evals

# Analysis of variance decomposition matrix
rounded_VDmatrix <- round(as.data.frame(VDmatrix),2)
#Conclusions: (x20,x18); (x24,x25)

###******************************************************************************************
## Variable Selection : forward, backward, stepwise
null=lm(crime_Ynew~1, data = crime_Xnew)
null
full=lm(crime_Ynew~ ., data = crime_Xnew)
full
#this is only for AIC based forward selection
step(null, scope=list(lower=null, upper=full), direction="forward")

# rms library for backward selection
library(rms)
xnam <- paste("x", 1:25, sep="")
fmla <- as.formula(paste("crime_Ynew ~ ", paste(colnames(crime_Xnew), collapse=" + ")))
#Automated F-test-based backward selection using rms::fastbw()
ols.full<- ols(fmla, data = crime_Xnew)
fastbw(ols.full, rule = "p", sls = 0.1)

#Manual F-test-based forward selection
lm.full <- lm(crime_Ynew ~ ., data = crime_Xnew)
lm.null <- lm(crime_Ynew ~ 1, data = crime_Xnew)
lm.base<-lm.null
reglist<- colnames(crime_Xnew)
flag_fwsel<- 1
while(flag_fwsel)
{
  # BUG FIX: the original built the formula from "crimeY_new", a variable
  # that does not exist (the response is crime_Ynew), which made add1() fail
  # -- this was the "some error" the original author noted.
  tempfmla <- as.formula(paste("crime_Ynew ~ ", paste(reglist, collapse=" + ")))
  temp<-add1(lm.base, tempfmla , test = "F")
  cat("display F-scores")
  temp$`F value`
  # Row 1 of add1()'s table is <none> (NA F value), so it is excluded.
  if(max(temp$`F value`[2:length(temp$`F value`)]) > 10) #F0 can be chosen accordingly.
  {
    fmla_updt<-as.formula(paste("~ . + ", rownames(temp)[which.max(temp$`F value`)]))
    cat("maximum F value found",max(temp$`F value`[2:length(temp$`F value`)]))
    fmla_updt #print
    #update for the next step
    lm.base <- update(lm.base, fmla_updt)
    #update remaining variables
    reglist <- reglist[-which(reglist== rownames(temp)[which.max(temp$`F value`)] )]
  }
  else
    flag_fwsel <- 0
}

## Miscellaneous
|
b4c11fa35a4b1d9c32a2dfba3abf3f8eb30f1e8f
|
eab25b055a7bbad6f8e16790641226821175d712
|
/plot2.R
|
9acd86a9577bd233443ef584bcd42b4b0d46d1e8
|
[] |
no_license
|
PasiAhopelto/eda-course-project-two
|
dbd09acdb7f1de4398839f9b4c111ee0991a761d
|
b2ede6eeea40653d340a461acadfb3fc88bb64fb
|
refs/heads/master
| 2016-09-08T01:13:55.917428
| 2015-01-17T18:20:10
| 2015-01-17T18:20:10
| 29,312,330
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 921
|
r
|
plot2.R
|
# "Have total emissions from PM2.5 decreased in the Baltimore City, Maryland
# (fips == "24510") from 1999 to 2008? Use the base plotting system to make
# a plot answering this question."
NEI <- readRDS("summarySCC_PM25.rds")

# Keep only Baltimore City rows with a known year; we need Emissions and year.
baltimore <- NEI[!is.na(NEI$year) & NEI$fips == '24510', c("Emissions", "year")]

# Total PM2.5 emissions per year; tapply over the year factor returns a named
# numeric vector whose names are the (sorted) year labels.
totals <- tapply(as.numeric(baltimore$Emissions), factor(baltimore$year), sum)
years <- as.numeric(names(totals))

png(filename = 'plot2.png')
plot(x = years, y = totals, type = "b",
     ylab = 'PM2.5 Emissions (Tons)', xlab = 'Year',
     main = "Baltimore City Total Emissions")
dev.off()
|
823b49b75b1537dacedf8c1765bce147f6406ed2
|
f985cc300f40954b09b9ffdc7a66d1ce3d0937f3
|
/man/getLineages.Rd
|
4617e500573c970d82d887a2faff3490a5a761b5
|
[] |
no_license
|
laduplessis/beastio
|
361b51b89d052bd6479b942a1c114bd1232efa98
|
ff276c2981a12555a98c6dd29a5837e58cc36104
|
refs/heads/master
| 2021-12-15T01:36:57.556253
| 2021-12-08T19:37:48
| 2021-12-08T19:37:48
| 163,682,001
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 317
|
rd
|
getLineages.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tree_utils.R
\name{getLineages}
\alias{getLineages}
\title{Internal function to get the number of lineages during an interval}
\usage{
getLineages(types)
}
\description{
Internal function to get the number of lineages during an interval
}
|
e4031562cbb9699e26c8c269c5f14c0cf6f2c44b
|
8e1d875a152340fa451cdac955d2d2ddd0959de5
|
/spearman.R
|
a40f890df4424b317cca08cbcca9f170715fd23b
|
[] |
no_license
|
chnops/TempleTexas
|
38a1341c5aa607285270a6cb4c0ded5f3452230c
|
bce42ad703db180dddb3dfe28c826fe8378e39e0
|
refs/heads/master
| 2021-01-13T02:15:00.326280
| 2014-08-13T20:09:14
| 2014-08-13T20:09:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,795
|
r
|
spearman.R
|
#
# Step 6: Calculate pairwise Spearman correlations
#
# For every treatment grouping, compute Spearman's rho between each unique
# pair of taxon (order) count columns within each treatment level, and write
# one CSV of correlations per grouping.
# # # # # # # # # # # # # # # # # # # # # # # # # # # # #

# 'normalized_order_counts.csv' is generated by 'normalize.R'
normalizedData <- read.csv('data/normalized_order_counts.csv', row.names=1, stringsAsFactors=F)
# +1 accounts for the 'rep' grouping column that is prepended below.
colCounts <- dim(normalizedData)[2] + 1

# for each of the treatment combinations that was found significant, and one that wasn't (for contrast)
trtGroups <- c("Plants.Water", "Plants", "Season")
for(i in 1:length(trtGroups)) {
	# "Plants.Water" splits into two design columns; single names pass through.
	curTrtGroup <- strsplit(trtGroups[i], ".", fixed=T)
	curTrtGroup <- curTrtGroup[[1]]
	trtGroupData <- normalizedData
	if (length(curTrtGroup) > 1) {
		# Combined grouping: paste the factor levels row-wise, e.g. "exotic wet".
		rep <- apply(trtGroupData[,curTrtGroup], MARGIN=1, FUN=paste, collapse=" ")
		fileName <- paste(curTrtGroup, collapse="_")
	}
	else {
		rep <- trtGroupData[,curTrtGroup]
		fileName <- curTrtGroup
	}
	# Move the new 'rep' column to the front of the data frame.
	trtGroupData$rep <- rep
	trtGroupData <- trtGroupData[,c(colCounts, 1:(colCounts - 1))]
	trts<-unique(trtGroupData$rep)
	# Each treatment combination will have a 'results' matrix
	# 'Treatment' is the treatment combination, i.e. exotic not irrigated
	# 'Species1' and 'Species2' will make up every possible unique pair of orders
	# 'Coeff' is the spearman correlation coefficient describing the strength of co-occurrence between the two taxa
	# 'pValue' is the statistical significance of 'Coeff'
	# 'fdr' is the false discovery rate, a p-value adjustment required for multiple test correction
	# NOTE(review): growing these matrices with rbind() inside the loops is
	# O(n^2); preallocating would be faster for large inputs.
	results<-matrix(nrow=0,ncol=8)
	colnames(results) <- c("Treatment", "Species1", "Species2", "Coeff", "pValue", "Species1Count", "Species2Count", "fdr")
	for(a in 1:length(trts)){
		temp<-subset(trtGroupData, rep==trts[a])
		tempResults<-matrix(nrow=0,ncol=8)
		colnames(tempResults) <- c("Treatment", "Species1", "Species2", "Coeff", "pValue", "Species1Count", "Species2Count", "fdr")
		# Taxon counts are assumed to start at column 8 (columns 1-7 being the
		# design/metadata columns) -- TODO confirm against normalize.R output.
		for(b in 8:(dim(temp)[2]-1)){
			for(c in (b+1):(dim(temp)[2])){
				# this is a place holder; real FDR values are filled in after the pair loop
				fdr <- 1
				# 'ab' = abundance
				species1.ab<-sum(temp[,b])
				species2.ab<-sum(temp[,c])
				# Only test pairs where both taxa have total abundance > 1;
				# otherwise record rho = 0 with p = 1.
				if(species1.ab >1 & species2.ab >1){
					# NOTE(review): cor.test() has no 'na.action' argument, so
					# na.action=na.rm is absorbed by '...' and has no effect --
					# confirm whether NA handling was intended here.
					test<-cor.test(temp[,b],temp[,c],method="spearman",na.action=na.rm, exact=F)
					rho<-test$estimate
					p.value<-test$p.value
				}
				else {
					rho<-0
					p.value<-1
				}
				new.row<-c(trts[a],names(temp)[b],names(temp)[c],rho,p.value,species1.ab,species2.ab,fdr)
				tempResults<-rbind(tempResults,new.row)
			}
		}
		# Multiple-testing adjustment within each treatment level.
		tempResults[,"fdr"] <- p.adjust(tempResults[,"pValue"], method="fdr")
		results <- rbind(results, tempResults)
	}
	rownames(results) <- c(1:dim(results)[1])
	results <- data.frame(results)
	# results are for one treatment combination
	# save results to disk
	# NOTE(review): sep="_" also inserts an underscore after "data/", producing
	# paths like "data/_Plants_spearman.csv" -- confirm the name is intended.
	fileName <- paste("data/", fileName, "spearman.csv", sep="_")
	write.csv(results, file=fileName)
}
q(save="no")
|
5160e3030d4074c4eae2094bc637f02bb43d8595
|
88c26fe8230d49acce746986850846a59483a061
|
/man/costatis.randtest.Rd
|
f5629d9304038ceda7abc8de1a2dc71a390632fe
|
[] |
no_license
|
SrivastavaLab/ade4
|
d39d7ba900fea09d53bb8d723216bc91740e8048
|
c8dc5e518eb242512d5f6ae19e4a72420440bb6f
|
refs/heads/master
| 2021-01-11T12:18:28.665150
| 2016-12-13T11:52:33
| 2016-12-13T11:52:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,175
|
rd
|
costatis.randtest.Rd
|
\name{costatis.randtest}
\alias{costatis.randtest}
\title{Monte-Carlo test on a Costatis analysis (in C).}
\description{
Performs a Monte-Carlo test on a Costatis analysis.
}
\usage{
costatis.randtest(KTX, KTY, nrepet = 999)
}
\arguments{
\item{KTX}{an object of class ktab}
\item{KTY}{an object of class ktab}
\item{nrepet}{the number of permutations}
}
\value{
a list of the class \code{randtest}
}
\references{
Thioulouse J. (2011). Simultaneous analysis of a sequence of paired ecological tables: a comparison of several methods. \emph{Annals of Applied Statistics}, \bold{5}, 2300-2325.
}
\author{Jean Thioulouse \email{Jean.Thioulouse@univ-lyon1.fr}}
\examples{
data(meau)
wit1 <- withinpca(meau$env, meau$design$season, scan = FALSE, scal = "total")
pcaspe <- dudi.pca(meau$spe, scale = FALSE, scan = FALSE, nf = 2)
wit2 <- wca(pcaspe, meau$design$season, scan = FALSE, nf = 2)
kta1 <- ktab.within(wit1, colnames = rep(c("S1","S2","S3","S4","S5","S6"), 4))
kta2 <- ktab.within(wit2, colnames = rep(c("S1","S2","S3","S4","S5","S6"), 4))
costatis1 <- costatis(kta1, kta2, scan = FALSE)
costatis.randtest(kta1, kta2)
}
\keyword{multivariate}
\keyword{nonparametric}
|
72621883aac7f0ee9c0a4d821280410ffcfe02e1
|
217a471ec71f6a4d9db0a4a34b453b242aed053c
|
/man/build_series.Rd
|
892bbd8cbab4ac48b158f3c006c23b3b51103570
|
[] |
no_license
|
mcmventura/fcdata2qc
|
0fd5c2668cdf8e59805b7ce5e7922f36e920c17d
|
2ac2d1a63b8f1c47ea22a45a9840ba046aa57738
|
refs/heads/master
| 2020-04-23T16:33:44.888237
| 2019-05-23T09:31:52
| 2019-05-23T09:31:52
| 171,302,176
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 467
|
rd
|
build_series.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/build-series.R
\name{build_series}
\alias{build_series}
\title{Builds Multi-year Series for the Station Variables.}
\usage{
build_series(station)
}
\arguments{
\item{station}{character string giving the WIGOS-compatible station
identifier.}
}
\description{
For each of the variables of a given station, rbinds the annual data frames in
the C3S-QC format to make a series of several years.
}
|
c4f8ac1a8568e4e703abdbc351fd2ec4408964bb
|
2bceb1f0cbe12f0a4761e7bb43ccab7c4a24719e
|
/scripts/ClimateNA.R
|
c09a1964e587cc72c45fc0541f438424baa0e6df
|
[] |
no_license
|
meganbontrager/climate-data-retrieval
|
d263d304d9066c75bb75edf2cbbfad9575403613
|
64985a3dee8d2752a7077a747e9238961cab194c
|
refs/heads/master
| 2021-09-23T16:04:05.126543
| 2021-09-21T19:03:12
| 2021-09-21T19:03:12
| 157,419,245
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,420
|
r
|
ClimateNA.R
|
# Gather climate data from ClimateNA
# Created by Megan Bontrager, 14 Nov 2018
#
# Workflow: (1) write a ClimateNA-formatted input CSV of sample locations,
# (2) run the ClimateNA desktop program manually (step 2 below), then
# (3) reshape its wide monthly output into one tall row per site/year/month.

# Libraries ---------------------------------------------------------------
library(tidyverse)
library(cowplot) # for plot aesthetics
library(magrittr)

# 1. Prep input data ------------------------------------------------------
locs = read_csv("sample_locations.csv")

# ClimateNA requires an input csv containing the columns ID1, ID2, lat, long, and (optionally) el.
# ID1 and ID2 can be anything you want
locs_climna = locs %>%
  mutate(ID2 = NA) %>%
  select(ID1 = id, ID2, lat = latitude, long = longitude, el = elev_m)

write.csv(locs_climna, "inputs/climatena_input.csv", row.names = FALSE)

# 2. Plug data into ClimateNA ---------------------------------------------
# Open the ClimateNA program. Select "time series" in the dropdown menu under multiple locations. Select "monthly variables". Specify the input file we created above, and specify an output file.
# Generate monthly data for the time window of interest. Save these files to the ClimateNA directory.

# 3. Reformat ClimateNA data ----------------------------------------------
# Data is formatted with one row per site/year, one column per variable/month
all_data = read_csv("raw_outputs/climatena_input_1901-2013AMT.csv") %>%
  select(-ID2) %>%
  # Make data tall (one row per variable/site/month/year)
  gather(variable, value, -Year, -ID1, -Latitude, -Longitude, -Elevation) %>%
  # Split month from variable name; the positional -2 is tidyr's 'sep'
  # argument, and negative values count from the right, so the last two
  # characters become the 'month' column (e.g. "PPT01" -> "PPT", "01").
  separate(variable, -2, into = c("variable", "month"), convert = TRUE) %>%
  # Spread again so that there is one row per site/year/month, one column per variable type
  spread(variable, value) %>%
  # Make a numeric climate date column of the form YYYYMM
  mutate(Year = as.numeric(Year),
         month = as.numeric(month),
         month_pad = str_pad(month, 2, pad = "0"),
         clim_date = as.numeric(paste0(Year, month_pad))) %>%
  # Drop unnecessary columns
  select(-month_pad) %>%
  # Rename to standard variable names
  rename(id = ID1, longitude = Longitude, latitude = Latitude, elev_m = Elevation, clim_year = Year, clim_month = month, ppt_mm = PPT, tmin = Tmin, tave = Tave, tmax = Tmax)

# Rearrange columns (%<>% is magrittr's assign-pipe: all_data <- all_data %>% ...)
all_data %<>%
  select(-latitude,-longitude, -elev_m) %>%
  select(id, clim_year, clim_month, clim_date, everything())

# Now save this data
write.csv(all_data, "data_tall/climatena_climate_tall.csv", row.names = FALSE)
|
d682aa0573f4db56b013c818026491d1fc75a4ab
|
4fb62127144bfefb9c835a424335de6f06de6bae
|
/man/predict.ptnMix.Rd
|
bd6e08b382fc76f2d7b4e48d06010b7307e309fc
|
[] |
no_license
|
ammeir2/selectiveTweedy
|
daee55c17a5f77e8f265c1f98657f2e120c16cdf
|
11eb6e5d280830090d9b0653ebd7afe58b34c1b5
|
refs/heads/master
| 2020-09-14T04:51:52.813309
| 2018-04-11T01:55:53
| 2018-04-11T01:55:53
| 94,467,816
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 520
|
rd
|
predict.ptnMix.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/paretoTruncNormMix.R
\name{predict.ptnMix}
\alias{predict.ptnMix}
\title{Applies Tweedy Correction based on a Pareto/Truncated Normal Mixture}
\usage{
\method{predict}{ptnMix}(object, ...)
}
\arguments{
\item{object}{an object of class \code{ptnMix}, obtained from fitting a
\code{\link{paretoTruncNormMix}} model}
}
\description{
Computes the Tweedy correction for a truncated sample based on a
\code{\link{paretoTruncNormMix}} model fit.
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.