blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
d4c88e99464ec729ffc147e47e70736c19878e77
|
7ba5a802f9d02cd075c78e2c1c302c34bdfab48f
|
/man/AutoScore_testing_Survival.Rd
|
ffa4239ca1d83bd745b85525175c53ad5c3de2fd
|
[] |
no_license
|
nliulab/AutoScore
|
8b2e23be56d9f75504341df6c2f004d31d985ac4
|
6eeb658441a47560b841545862ce667b8861e6a7
|
refs/heads/master
| 2023-07-19T23:11:57.036461
| 2023-07-16T02:28:40
| 2023-07-16T02:28:40
| 201,586,484
| 25
| 6
| null | 2019-08-31T01:26:44
| 2019-08-10T05:51:07
| null |
UTF-8
|
R
| false
| true
| 2,291
|
rd
|
AutoScore_testing_Survival.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/AutoScore_Survival.R
\name{AutoScore_testing_Survival}
\alias{AutoScore_testing_Survival}
\title{AutoScore STEP(v) for survival outcomes: Evaluate the final score with
ROC analysis (AutoScore Module 6)}
\usage{
AutoScore_testing_Survival(
test_set,
final_variables,
cut_vec,
scoring_table,
threshold = "best",
with_label = TRUE,
time_point = c(1, 3, 7, 14, 30, 60, 90)
)
}
\arguments{
\item{test_set}{A processed \code{data.frame} that contains data for testing purpose. This \code{data.frame} should have same format as
\code{train_set} (same variable names and outcomes)}
\item{final_variables}{A vector containing the list of selected variables, selected from Step(ii) \code{\link{AutoScore_parsimony}}. Run \code{vignette("Guide_book", package = "AutoScore")} to see the guidebook or vignette.}
\item{cut_vec}{Generated from STEP(iii)
\code{AutoScore_weighting_Survival()}. Please follow the guidebook}
\item{scoring_table}{The final scoring table after fine-tuning, generated from STEP(iv) \code{\link{AutoScore_fine_tuning}}. Please follow the guidebook}
\item{threshold}{Score threshold for the ROC analysis to generate sensitivity, specificity, etc. If set to "best", the optimal threshold will be calculated (Default:"best").}
\item{with_label}{Set to TRUE if there are labels (`label_time` and `label_status`) in the test_set and
performance will be evaluated accordingly (Default: TRUE).}
\item{time_point}{The time points to be evaluated using time-dependent AUC(t).}
}
\value{
A data frame with predicted score and the outcome for downstream visualization.
}
\description{
AutoScore STEP(v) for survival outcomes: Evaluate the final score with
ROC analysis (AutoScore Module 6)
}
\examples{
## Please see the guidebook or vignettes
}
\references{
\itemize{
\item{Xie F, Ning Y, Yuan H, et al. AutoScore-Survival: Developing
interpretable machine learning-based time-to-event scores with right-censored
survival data. J Biomed Inform. 2022;125:103959. doi:10.1016/j.jbi.2021.103959}
}
}
\seealso{
\code{\link{AutoScore_rank_Survival}},
\code{\link{AutoScore_parsimony_Survival}},
\code{\link{AutoScore_weighting_Survival}},
\code{\link{AutoScore_fine_tuning_Survival}}.
}
|
4314019ddf01a4df85214df9f939566c7923eeba
|
2491ce3e1bd5762df5d129b7f4826c66723780df
|
/man/fun.plot.q.Rd
|
8792f5c7ce29b799dea24ec0f36d040c4386c0d7
|
[] |
no_license
|
cran/GLDreg
|
60e3cf2d6890604d98aad9cd09080bdc7758cb25
|
4d6ad22ceada267cf3a46ef8b4cab5b9006ae022
|
refs/heads/master
| 2022-06-04T22:22:08.379317
| 2022-05-13T06:30:09
| 2022-05-13T06:30:09
| 26,482,634
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,822
|
rd
|
fun.plot.q.Rd
|
\name{fun.plot.q}
\alias{fun.plot.q}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
2-D Plot for Quantile Regression lines
}
\description{
This function plots quantile regression lines from \code{\link{GLD.lm}} and
one of \code{\link{fun.gld.slope.vary.int.fixed}},
\code{\link{fun.gld.slope.fixed.int.vary}},
\code{\link{fun.gld.slope.fixed.int.vary.emp}},
\code{\link{fun.gld.all.vary.emp}}, \code{\link{fun.gld.all.vary}},
\code{\link{fun.gld.slope.vary.int.fixed.emp}}, \code{\link{GLD.quantreg}}.
}
\usage{
fun.plot.q(x, y, fit, quant.info, ...)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{x}{
A numerical vector of explanatory variable
}
\item{y}{
A numerical vector of response variable
}
\item{fit}{
An object from \code{\link{GLD.lm}}
}
\item{quant.info}{
An object from one of \code{\link{fun.gld.slope.vary.int.fixed}},
\code{\link{fun.gld.slope.fixed.int.vary}},
\code{\link{fun.gld.slope.fixed.int.vary.emp}},
\code{\link{fun.gld.all.vary.emp}}, \code{\link{fun.gld.all.vary}},
\code{\link{fun.gld.slope.vary.int.fixed.emp}}, \code{\link{GLD.quantreg}}
}
\item{\dots}{
Additional arguments to be passed to plot function, such as axis labels and
title of the graph
}
}
\details{
This is intended to plot only two variables, for quantile regression involving
more than one explanatory variable, consider plotting the actual values versus
fitted values by fitting a secondary GLD quantile model between actual and
fitted values.
}
\value{
A graph showing quantile regression lines
}
\references{
Su (2015) "Flexible Parametric Quantile Regression Model" Statistics &
Computing May 2015, Volume 25, Issue 3, pp 635-650
}
\author{
Steve Su
}
\examples{
## Dummy example
## Create dataset
set.seed(10)
x<-rnorm(200,3,2)
y<-3*x+rnorm(200)
dat<-data.frame(y,x)
## Fit FKML GLD regression with 3 simulations
fit<-GLD.lm.full(y~x,data=dat,fun=fun.RMFMKL.ml.m,param="fkml",n.simu=3)
## Find median regression, use empirical method
med.fit<-GLD.quantreg(0.5,fit,slope="fixed",emp=TRUE)
fun.plot.q(x=x,y=y,fit=fit[[1]],med.fit, xlab="x",ylab="y")
\dontrun{
## Plot result of quantile regression
## Extract the Engel dataset
library(quantreg)
data(engel)
## Fit GLD Regression along with simulations
engel.fit.all<-GLD.lm.full(foodexp~income,data=engel,
param="fmkl",fun=fun.RMFMKL.ml.m)
## Fit quantile regression from 0.1 to 0.9, with equal spacings between
## quantiles
result<-GLD.quantreg(seq(0.1,.9,length=9),engel.fit.all,intercept="fixed")
## Plot the quantile regression lines
fun.plot.q(x=engel$income,y=engel$foodexp,fit=engel.fit.all[[1]],result,
xlab="income",ylab="Food Expense")
}
}
\keyword{hplot}
|
09852d258ac94d28b966f25735579247d5f2697f
|
b2f61fde194bfcb362b2266da124138efd27d867
|
/code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Database/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/query50_axquery_1344n/query50_axquery_1344n.R
|
801b1c0b87e14151bb9da747f896ca858f520b30
|
[] |
no_license
|
arey0pushpa/dcnf-autarky
|
e95fddba85c035e8b229f5fe9ac540b692a4d5c0
|
a6c9a52236af11d7f7e165a4b25b32c538da1c98
|
refs/heads/master
| 2021-06-09T00:56:32.937250
| 2021-02-19T15:15:23
| 2021-02-19T15:15:23
| 136,440,042
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 70
|
r
|
query50_axquery_1344n.R
|
e650ec0669de0fba3ea4838216825026 query50_axquery_1344n.qdimacs 119 173
|
c54b93ecdee06b18eee40bb54b6180c602b3247e
|
2f4dd001614dfe59c54c38cade59706c9ce5a770
|
/Statslearningcourse/shiny_plotly/movies/global.R
|
b5b9ae5e763a3f44dd7f8337e4e9a28900f8fd6d
|
[] |
no_license
|
amycook/cheatsheet
|
6c45e258a863627408999b2c752ae819afe1b288
|
7bf77543f2fb91b9ade1262e2fbfda4c95330fb9
|
refs/heads/master
| 2020-03-19T11:38:42.573890
| 2018-12-10T12:58:54
| 2018-12-10T12:58:54
| 136,466,004
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 222
|
r
|
global.R
|
# Global environment: objects defined here are available to both the ui and
# server scripts of the Shiny app.
library(plotly)
library(shiny)
# NOTE(review): plotly API credentials are hard-coded and committed to version
# control -- move them to an environment variable or untracked config file and
# revoke this key. `plotly(username=, key=)` is the legacy (pre-4.0) client.
py <- plotly(username="rAPI", key="yu680v5eii", base_url="https://plot.ly")
# Loads the graph-widget helper used by ui.R/server.R.
source("plotlyGraphWidget.R")
|
671c740b2fcef81cb35fc8ed1fb798d92f613a65
|
13f0b3f37544339d5821b2a416a9b31a53f674b1
|
/man/plot_gaze.Rd
|
9419af4018fbeaf1982d4627ef6bcb704f65619f
|
[
"MIT"
] |
permissive
|
hejtmy/eyer
|
1f8a90fd7a8af0a4c4c73790633589dc624edda2
|
0b49566c76ab659184d62e1cdd658b45b0d33247
|
refs/heads/master
| 2020-04-24T11:17:25.414641
| 2019-09-17T22:44:52
| 2019-09-17T22:44:52
| 171,920,561
| 0
| 0
|
MIT
| 2019-09-10T21:54:40
| 2019-02-21T18:08:07
|
R
|
UTF-8
|
R
| false
| true
| 287
|
rd
|
plot_gaze.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/eyer-visualising.R
\name{plot_gaze}
\alias{plot_gaze}
\title{Plots gaze data}
\usage{
plot_gaze(obj, ...)
}
\arguments{
\item{obj}{object to plot the data with}
\item{...}{Additional arguments passed on to the underlying plotting function}
}
\description{
Plots gaze data
}
|
bfd561061c219a8d3b969a5ce6d72648be1be1b5
|
034dda406f1ce494c1c64ec47a05e7f4d780d561
|
/plot2.R
|
8e3335450218ed08afc922c3b716e627e37c498f
|
[] |
no_license
|
WenWu5/ExData_Plotting1
|
7b55d963423b563f5bf50fb4f825a114352e8eb6
|
77721be223d190465d4af7bf150cf8d7aa3f5ae2
|
refs/heads/master
| 2021-01-18T12:20:36.421761
| 2016-06-05T23:01:14
| 2016-06-05T23:01:14
| 60,447,315
| 0
| 0
| null | 2016-06-05T06:53:27
| 2016-06-05T06:53:26
| null |
UTF-8
|
R
| false
| false
| 1,109
|
r
|
plot2.R
|
# plot2.R -- line chart of Global Active Power over 2007-02-01..2007-02-02,
# saved as a 480x480 PNG (Exploratory Data Analysis course, plot 2).
# NOTE(review): hard-coded absolute setwd() path ties this script to one
# machine; prefer running it from the project directory.
setwd("C:/Users/Wen/WorkingDirectory/Coursera/ExploratoryDataAnalysis/Week1")
zippedfileName <- "./data/household_power_consumption.zip"
unzippedFileName <- "./data/household_power_consumption.txt"
downloadPath <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
### Download the raw data only if the unzipped file is not already present ###
if ( !file.exists(unzippedFileName)) {
download.file(downloadPath, destfile = zippedfileName)
### Unzip into ./data ###
unzip(zippedfileName, exdir = "./data")
}
# Read the full dataset; "?" marks missing values in this file.
# NOTE(review): style -- prefer stringsAsFactors = FALSE over the reassignable
# abbreviation F.
ds <- read.csv(unzippedFileName, header = TRUE, sep = ";", na.strings = "?", stringsAsFactors = F)
# Parse the dd/mm/yyyy dates and keep only the two days of interest.
ds$Date <- as.Date(ds$Date, format = "%d/%m/%Y")
ds <- subset(ds, subset = (Date >= "2007-02-01" & Date <= "2007-02-02"))
### Combine Date and Time into a single POSIXct timestamp ###
ds$DateTime <- as.POSIXct(paste(as.Date(ds$Date), ds$Time))
### Plot and save as PNG ###
png("plot2.png", width = 480, height = 480)
plot(ds$Global_active_power ~ ds$DateTime, type = "l", ylab = "Global Active Power (kilowatts)", xlab = "")
dev.off()
# NOTE(review): this second, identical plot() call redraws the figure on the
# default (screen) device after the PNG device is closed -- confirm it is
# intentional and not a leftover duplicate.
plot(ds$Global_active_power ~ ds$DateTime, type = "l", ylab = "Global Active Power (kilowatts)", xlab = "")
|
33416aa7cfd963b4c654245bd2bc55c49d54fd65
|
c5a0fa9847f431cab5aa90849078d7db42779528
|
/Gap Stat - different scaling methods - discard.R
|
05c1cf86f5d9903cff9eba9ade5f7a0a23786e4e
|
[] |
no_license
|
jckailun/MA-Thesis
|
666f4d0e6b91a3ead1e207983e26a1f934ad027f
|
af01fda722da5c9967932790eb73c161cf0e9233
|
refs/heads/master
| 2023-07-02T09:51:23.233311
| 2020-09-23T15:27:58
| 2020-09-23T15:27:58
| 298,013,263
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,474
|
r
|
Gap Stat - different scaling methods - discard.R
|
##### Gap Statistic - trial for different scaling methods #####
# Prepares the NLSY79 (d1.c) and NLSY97 (d2.c) cohorts for gap-statistic
# experiments: keeps the ASVAB subtests plus background variables, recodes
# race into dummies, drops incomplete rows, inspects the rate of zeros in the
# grade-transition variables acgrd_1..acgrd_13, filters on the chosen grade
# variable, and builds the final numeric matrices X1.c / X2.c with the grade
# columns removed.

d1.c = read.csv("NLSY79.csv")
d2.c = read.csv("NLSY97.csv")

## ---- NLSY79 cohort ----
vars1 = c("ASVAB_gs", "ASVAB_ar", "ASVAB_wk", "ASVAB_pc", "ASVAB_no",
          "ASVAB_cs", "ASVAB_mk", "ASVAB_mc", "ASVAB_ei",
          "med", paste('acgrd', 1:13, sep = '_'),
          'male', 'income', 'nuclear', 'urban', 'race')
vars1 = match(vars1, names(d1.c))
d1.c = d1.c[, vars1]
# Race dummies: 1 = black, 2 = hispanic, 3 = others (omitted base category,
# so only black/hisp are appended).
black1 = ifelse(d1.c$race == 1, 1, 0)
hisp1 = ifelse(d1.c$race == 2, 1, 0)
others1 = ifelse(d1.c$race == 3, 1, 0)
d1.c = as.data.frame(cbind(d1.c, black1, hisp1))
d1.c = d1.c[, -28]  # drop the raw `race` column (position 28 after cbind)
# BUGFIX: rows with any NA were previously collected in a loop and removed
# with `d1.c[-rowstodelete1, ]`; with zero matching rows that expression
# breaks on `-NULL`. complete.cases() handles both cases correctly.
d1.c = d1.c[complete.cases(d1.c), ]

## ---- NLSY97 cohort ----
vars2 = c("ASVAB_GS", "ASVAB_AR", "ASVAB_WK", "ASVAB_PC", "ASVAB_NO",
          "ASVAB_CS", "ASVAB_MK", "ASVAB_MC", "ASVAB_EI",
          "med", paste('acgrd', 1:13, sep = '_'),
          'sex', 'income', 'nuclear', 'urban', 'race')
vars2 = match(vars2, names(d2.c))
d2.c = d2.c[, vars2]
# Collapse race code 4 into 3 ("others") and recode sex to a 0/1 dummy.
d2.c$race = ifelse(d2.c$race == 4, 3, d2.c$race)
d2.c$sex = ifelse(d2.c$sex == 1, 1, 0)
black2 = ifelse(d2.c$race == 1, 1, 0)
hisp2 = ifelse(d2.c$race == 2, 1, 0)
others2 = ifelse(d2.c$race == 3, 1, 0)
d2.c = as.data.frame(cbind(d2.c, black2, hisp2))
d2.c = d2.c[, -28]  # drop the raw `race` column
d2.c = d2.c[complete.cases(d2.c), ]

# Next, let's compute the rates of having 0s' in ACGRDs (columns 11:23);
# margin* is the incremental change in the zero count between grades.
zeros1 = vapply(11:23, function(j) sum(d1.c[, j] == 0), numeric(1))
rate1 = zeros1/nrow(d1.c); margin1 = diff(zeros1)/nrow(d1.c)
# perhaps we shall choose asgrd_9 as the optimal grade transition variable.
zeros2 = vapply(11:23, function(j) sum(d2.c[, j] == 0), numeric(1))
rate2 = zeros2/nrow(d2.c); margin2 = diff(zeros2)/nrow(d2.c)
# perhaps we shall choose asgrd_9 as the optimal grade transition variable.

# Drop observations whose chosen grade-transition variable is zero.
# NOTE(review): cohort 1 filters on acgrd_9 but cohort 2 on acgrd_10 --
# confirm the asymmetry is intentional.
# BUGFIX: as above, the index-collection loops broke when nothing matched;
# logical subsetting is safe in the empty case (no NAs remain at this point).
d1.c = d1.c[d1.c$acgrd_9 != 0, ]
d2.c = d2.c[d2.c$acgrd_10 != 0, ]
# Also drop cohort-1 rows with med == 0.
d1.c = d1.c[d1.c$med != 0, ]

n1.c = nrow(d1.c)
n2.c = nrow(d2.c)
# Final matrices with the 13 grade-transition columns removed.
X1.c = as.matrix(d1.c); X1.c = X1.c[,-(11:23)]
X2.c = as.matrix(d2.c); X2.c = X2.c[,-(11:23)]
|
a5e13cbae72bc7277679557908011d2966e81cd2
|
4041b67f1b305d7be511524e11f2c0325fdd60ba
|
/FichiersAtelierNouveauxOutils2_2019/III.2. Mesurer le texte/III.2. Mesurer le texte.R
|
64212ec557cd8610cbe95d3f6588f0c61d77a37a
|
[] |
no_license
|
nicolasperreaux/notredame
|
3b97b4200774f4c566e44733ccb3aa43dc788cd1
|
1f04fd5e4604f053fc5011519185de895d8ee508
|
refs/heads/master
| 2020-08-14T23:00:17.674388
| 2019-11-24T14:01:04
| 2019-11-24T14:01:04
| 215,241,032
| 1
| 0
| null | null | null | null |
ISO-8859-1
|
R
| false
| false
| 1,849
|
r
|
III.2. Mesurer le texte.R
|
######################################################################################################
# Script written for the workshop "From parchment to data mining. New tools
# for the creation, formalisation and analysis of medieval corpora",
# Paris, 28-30 October 2019
# Paul Bertrand, Étienne Cuvelier, Sébastien de Valeriola, Nicolas Perreaux, Nicolas Ruffini-Ronzani
#
######################################################################################################
# III.2. Measuring the text
######################################################################################################
rm(list=ls())
setwd("D:\\DigitalHumanities\\Evenements\\2019.10.28_Atelier Nouveaux Outils 2\\Livrables\\III.2. Mesurer le texte")
library(tm)
# Loads the pre-built corpus object `corpusMiniND` from the previous session.
load("../II.3. Outils disponibles et formats dentrée/fichiersRData/corpusMiniND.RData")
# 01. Term-document matrix ----
# Creation
matriceTDminiND = TermDocumentMatrix(corpusMiniND)
inspect(matriceTDminiND)
# Rebuild the matrix with punctuation stripped before counting terms.
matriceTDminiND = TermDocumentMatrix(corpusMiniND,control = list(removePunctuation = TRUE))
inspect(matriceTDminiND)
# Drop very sparse terms (sparsity threshold 0.80).
matriceTDminiND_nettoyee = removeSparseTerms(matriceTDminiND, 0.80)
inspect(matriceTDminiND_nettoyee)
findFreqTerms(matriceTDminiND_nettoyee, 15)
# Terms whose occurrence correlates with "episcopus" at >= 0.9.
findAssocs(matriceTDminiND_nettoyee, "episcopus",0.9)
# 02. tf-idf index ----
matriceTDminiNDtfidf = TermDocumentMatrix(corpusMiniND,control = list(removePunctuation = TRUE,
weighting = weightTfIdf))
inspect(matriceTDminiNDtfidf)
findFreqTerms(matriceTDminiNDtfidf, 0.10)
# 03. Damerau-Levenshtein distance ----
# NOTE(review): stringdist() defaults to method = "osa" (restricted
# Damerau-Levenshtein); pass method = "dl" if the unrestricted distance is
# intended.
library(stringdist)
stringdist("abbatia","episcopus")
stringdist("episcopus","dictus")
stringdist("dictus","abbatia")
vecteurMots = c("abbatia","episcopus","dictus")
stringdistmatrix(vecteurMots)
|
c0c344ef6afe1ca2c271d92444c05a9a2828d9ed
|
217d0e2f2d7c842c0b43a193edde2d78a7cbe038
|
/Naive Bayes for loaning.R
|
e15e06ed1eeb83076b8532e919f7bd29eb04d4c1
|
[] |
no_license
|
cory1219/Naive-Bayes-for-loaning
|
5e0bbd52d0918d1d9c1929a2e013783a0ec5ad2c
|
c2a1651568c172c0087575b5f98d2b7a0548c39d
|
refs/heads/main
| 2023-02-08T16:24:03.031067
| 2021-01-04T10:01:33
| 2021-01-04T10:01:33
| 326,639,355
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,809
|
r
|
Naive Bayes for loaning.R
|
# Naive Bayes classifier for loan approval, preceded by a "1-rule"
# per-attribute error-rate screen.
library(tidyverse)
train = read_csv("loan-train.csv")
test = read_csv("loan-test.csv")

# Discretise `age` into labelled bins; rows with NA age fall through to "NA".
# BUGFIX: the original conditions used `age > 40` and `age > 60`, which sent
# ages exactly 40 and 60 into the "NA" bucket; the boundaries are now
# inclusive so every age lands in the bin its label advertises. The
# duplicated train/test blocks are factored into one helper.
bin_age <- function(df) {
  df %>%
    mutate(age = case_when(
      age <= 25 ~ "<=25",
      age > 25 & age <= 39 ~ "25-39",
      age >= 40 & age <= 59 ~ "40-59",
      age >= 60 ~ "60+",
      TRUE ~ "NA"
    ))
}
train <- bin_age(train)
test <- bin_age(test)

##Numeric data is not suitable for Naive Bayes
##1. time complexity: calculating the density value under normal distribution consumes time
##2. overfitting: training every numeric value might lead to an overfitting model.
##-> turn numeric data into categorical variables

# library for naive Bayes
library(e1071)

# 1-rule screen: fit a single-attribute naive Bayes model per column and
# record each attribute's training error rate on its own.
table_1rule <- data.frame(matrix(ncol = 2, nrow = 0))
colnames(table_1rule) <- c("attribute", "total_errorRate")
for (i in 2:15) {
  train_subdata <- train %>% select(1, i)
  nb2 <- naiveBayes(as.factor(loan) ~ ., data=train_subdata, laplace=1)
  rules <- predict(nb2, train_subdata, type="class")
  errorRate = nrow(train_subdata %>% filter(loan!=rules))/nrow(train_subdata)
  table_1rule[nrow(table_1rule) + 1,] <- list(colnames(train[,i]), errorRate)
  rm(errorRate, i, rules, train_subdata, nb2)
}
table_1rule

# Full naive Bayes model over all attributes (Laplace smoothing = 1).
nb <- naiveBayes(as.factor(loan) ~ ., data=train, laplace=1)
# predict "loan" attributes for test dataset
predictions = predict(nb, test, type="class")
# confusion matrix and test-set error rate
confMat <- table(true=test$loan,prediction=predictions)
confMat
errorRate = nrow(test %>% filter(loan!=predictions))/nrow(test)
errorRate
|
e94d5a7eba0e34102d1ee5cb8069624c22e23904
|
bfec3027143dcdeb600fd022d95bdb712ab3ed4a
|
/plot1.R
|
c4584771b6789cd272d8db2616440033ee5fdb17
|
[] |
no_license
|
cmblnd/ExData_Plotting1
|
251547bcc4f956b153c08ed11051d87d0d68a96b
|
741d31c861056790a56fdc07ef16738d7c41e843
|
refs/heads/master
| 2020-12-31T05:40:08.674315
| 2014-09-07T15:27:45
| 2014-09-07T15:27:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 381
|
r
|
plot1.R
|
# plot1.R -- histogram of Global Active Power for 2007-02-01 and 2007-02-02.
# Reads only the rows for the two target days by grepping on the date prefix
# instead of loading the entire file into memory at once.
con <- file("household_power_consumption.txt", "r")
colname <- strsplit(readLines(con, 1), ";")  # header line -> column names
# BUGFIX: the original pattern "^[1/2]/2/2007" used a character class that
# also matches a leading "/"; "^[12]/2/2007" matches exactly the dates
# 1/2/2007 and 2/2/2007 (d/m/yyyy).
hhpc <- read.table(text = grep("^[12]/2/2007", readLines(con), value = TRUE),
                   sep = ";", na.strings = "?")
close(con)
colnames(hhpc) <- colname[[1]]
with(hhpc, hist(Global_active_power, col = "red", main = "Global Active Power",
                xlab = "Global Active Power (kilowatts)"))
# Copy the screen plot to a PNG file, then close the PNG device.
dev.copy(png, file = "plot1.png")
dev.off()
|
103c482164c502f61dc338f06b4a42ee4e2fdeb1
|
a0e4276f3f4d002263655d00f67d0878cacf6d3c
|
/lib/PKfit/R/iv.route.MD.R
|
054f5eebc40b90c21bc6daca839ac837b6db9bdd
|
[] |
no_license
|
shanmdphd/mypkfit
|
a4dff0b1181d89acce8ba7838dffde081a825ace
|
508012aea85b47f77976a7d6cd8dfe55f090332c
|
refs/heads/master
| 2021-01-01T16:43:18.814912
| 2017-07-21T03:20:37
| 2017-07-21T03:20:37
| 97,901,499
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,077
|
r
|
iv.route.MD.R
|
iv.route.MD <- function(PKindex)
{
  # Interactive sub-menu for multiple-dose IV routes.
  # Offers the four IV models (bolus/infusion crossed with first-order /
  # Michaelis-Menten elimination) plus two navigation entries, and dispatches
  # to the matching routine with MD = TRUE (multiple dosing).
  OutputFilez() ### reset all output file names
  cat("***************************************\n")
  cat(" MM elim: Michaelis-Menten elimination \n")
  cat("***************************************\n\n")
  cat("\n")
  menu.items <- c("IV-Bolus & 1st-ordered elim .. (MD101)",
                  "IV-Bolus & MM elim ........... (MD102)",
                  "IV-Infusion & 1st-ordered elim (MD103)",
                  "IV-Infusion & MM elim ........ (MD104)",
                  "Go Back One Upper Level",
                  "Go Back to Top Menu")
  choice <- menu(menu.items, title = "<< IV Route - multiple-dosed >>")
  # Dispatch on the 1-based selection; menu() returns 0 on cancel, in which
  # case switch() matches nothing and the function quietly returns NULL --
  # the same outcome as the original if/else chain.
  switch(choice,
         { cat("\n\n"); fbolus1(PKindex, MD=TRUE) },       # MD101
         { cat("\n\n"); fbolus.mm.MD(PKindex, MD=TRUE) },  # MD102
         { cat("\n\n"); finfu1(PKindex, MD=TRUE) },        # MD103
         { cat("\n\n"); finfu.mm.MD(PKindex, MD=TRUE) },   # MD104
         { cat("\n\n"); one.list.MD(PKindex) },            # up one level
         { cat("\n\n"); run() })                           # back to top menu
}
|
1762070e36c3d8e7aac95f00cabb655b6f180ab0
|
0cf6713a7be091ef76a7704f23ce290c508e5876
|
/1minite/readdata.R
|
d12b1d170d83be7bfab93d5ea7942fcf1982bddf
|
[] |
no_license
|
mura5726/crypto
|
268c291e68f19656b1a71e01799f2b7dbd53aab7
|
f8f09eb1b6c511cbe3c02e5da4fe62922460beb6
|
refs/heads/master
| 2021-09-01T07:26:37.009304
| 2017-12-25T17:16:23
| 2017-12-25T17:16:23
| 115,352,728
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 280
|
r
|
readdata.R
|
# readdata.R -- concatenate every per-minute CSV in the 1minite folder into a
# single data frame D, then plot time against `lst`.
# NOTE(review): hard-coded absolute setwd() path ties this script to one
# machine; consider a relative path or command-line argument.
setwd("C:/Users/mura5/Documents/Crypto/1minite")
files <- list.files()
# BUGFIX: the original read the first file, then looped `for (i in 2:n)`;
# with a single file present, 2:n evaluated to c(2, 1) and the script failed
# on the missing second file. Reading everything with lapply() and binding
# once also avoids growing D via rbind() inside a loop.
D <- do.call(rbind, lapply(files, read.csv))
# NOTE(review): `D$lst` -- confirm this column name (possibly "last" price?).
plot(D$time, D$lst)
|
db45f1aa370432d1ac0bc1dab392f3fe96d5d2f9
|
376c342e177096aca1224910e25420ffa1a7cec7
|
/titanic/scripts/submission2 womenInFirstClass.R
|
87cd529de585f56923daa94fa76c997c9e5b4a2e
|
[] |
no_license
|
danielsfawcett/thirteam
|
14af574cb743fe59f1275b3c5a2730d2ef1c94ff
|
d58728435222581cace4a7d1469023398f53ea67
|
refs/heads/master
| 2021-01-10T12:51:51.690428
| 2015-12-03T13:22:03
| 2015-12-03T13:22:03
| 46,505,324
| 0
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 630
|
r
|
submission2 womenInFirstClass.R
|
# submission2 -- Titanic Kaggle entry: predict that only women travelling in
# first class survive.
train <- read.csv("train.csv", stringsAsFactors=FALSE) #load the training data
test <- read.csv("test.csv", stringsAsFactors=FALSE) #load the test data
# show the relationship between gender and survival based on the different classes
prop.table(table(Class = train$Pclass, Sex = train$Sex, Survived = train$Survived), 1)
# Baseline: everyone dies. nrow(test) replaces the hard-coded 418 so the
# script also works on a differently sized test file.
test$Survived <- rep(0, nrow(test))
test$Survived[test$Sex == "female" & test$Pclass == "1"] <- 1 #predict that all women in 1st class survived
# BUGFIX: the submission data frame was built but never assigned, so the
# following write.csv(submit, ...) failed with "object 'submit' not found".
submit <- data.frame(PassengerId = test$PassengerId, Survived = test$Survived)
write.csv(submit, file = "submissions/submission2.csv", row.names = FALSE)
|
51ce1c0cfcc7972938d67e2a88b51b19ce675fe0
|
36b1b3297513fe2323593fbaf9b16a5f5197b71a
|
/man/bs_vars_global.Rd
|
73d81cd658867a248e6f0fe5e810708739a7b120
|
[] |
no_license
|
cran/fresh
|
471c468d0bc554b0bf8278797aa508dc2448ec84
|
3efcf4f118ff186b3b4b078756b6cdb27226f726
|
refs/heads/master
| 2020-12-21T23:31:48.064331
| 2020-05-29T13:40:02
| 2020-05-29T13:40:02
| 236,600,320
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,669
|
rd
|
bs_vars_global.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/vars-bootstrap.R
\name{bs_vars_global}
\alias{bs_vars_global}
\title{Bootstrap global variables}
\usage{
bs_vars_global(
body_bg = NULL,
text_color = NULL,
link_color = NULL,
link_hover_color = NULL,
line_height_base = NULL,
grid_columns = NULL,
grid_gutter_width = NULL,
border_radius_base = NULL
)
}
\arguments{
\item{body_bg}{Background color for the body.}
\item{text_color}{Global text color on body.}
\item{link_color}{Global textual link color.}
\item{link_hover_color}{Link hover color.}
\item{line_height_base}{Unit-less `line-height` for use in components like buttons.}
\item{grid_columns}{Number of columns in the grid, e.g. in \code{\link[shiny:fluidPage]{shiny::fluidRow(shiny::column(...))}}.}
\item{grid_gutter_width}{Padding between columns. Gets divided in half for the left and right.}
\item{border_radius_base}{Base border radius (rounds the corners of elements).}
}
\value{
a \code{list} that can be used in \code{\link{create_theme}}.
}
\description{
Those variables can be used to customize
Bootstrap and Bootswatch themes.
}
\examples{
# change background color
bs_vars_global(
body_bg = "#FAFAFA"
)
if (interactive()) {
library(shiny)
ui <- fluidPage(
use_theme(
create_theme(
theme = "default",
bs_vars_global(
body_bg = "#F5A9E1",
text_color = "#FFF",
grid_columns = 16
),
output_file = NULL
)
),
tags$h1("My custom app!"),
tags$h3("With plenty of columns!"),
fluidRow(
column(
width = 1, "Column 1"
),
column(
width = 1, "Column 2"
),
column(
width = 1, "Column 3"
),
column(
width = 1, "Column 4"
),
column(
width = 1, "Column 5"
),
column(
width = 1, "Column 6"
),
column(
width = 1, "Column 7"
),
column(
width = 1, "Column 8"
),
column(
width = 1, "Column 9"
),
column(
width = 1, "Column 10"
),
column(
width = 1, "Column 11"
),
column(
width = 1, "Column 12"
),
column(
width = 1, "Column 13"
),
column(
width = 1, "Column 14"
),
column(
width = 1, "Column 15"
),
column(
width = 1, "Column 16"
)
)
)
server <- function(input, output, session) {
}
shinyApp(ui, server)
}
}
|
37aa63518809be4ffaff02212e5e898fb7613c3d
|
61bcde0f794745bbc9bdd2f0ba2056dc0ed786df
|
/files/Task 2 - R. DataFrame - 18.11.2020.R
|
7e3df0f53c18ccd9cfc09eff1016dcc5ec740859
|
[] |
no_license
|
bavn/ttax
|
55ad16d1ddcd1e4ae8604ade1b1e9eadd7daba6a
|
f04c8834581e5549237a398ca622c4f35d48045b
|
refs/heads/main
| 2023-01-20T14:33:23.880583
| 2020-11-28T15:32:32
| 2020-11-28T15:32:32
| 316,758,445
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 357
|
r
|
Task 2 - R. DataFrame - 18.11.2020.R
|
# Dataframe of Employee Data
# BUGFIX: Age and Length_of_Service were quoted, producing character columns;
# they are numeric quantities and are now stored as numbers so arithmetic,
# sorting and summaries behave correctly.
Name <- c("Amber", "Thomas", "Chris", "Sylvia", "Mike")
Age <- c(21, 25, 40, 52, 56)
Gender <- c("Female", "Male", "Male", "Female", "Male")
Role <- c("Sales", "Sales", "IT", "HR", "Sales")
Length_of_Service <- c(7, 9, 15, 11, 20)
frame <- data.frame(Name, Age, Gender, Role, Length_of_Service)
print(frame)
|
37d099b595dabbbd958d2915a9e28092b7a819d1
|
c0709f304abf991f21ad27afa10fd7f2f917ec32
|
/inst/app/server.R
|
d9edf21ffd8221aad0067733fde635efe33fd194
|
[
"BSD-3-Clause"
] |
permissive
|
msimmond/aip-analysis
|
dc8870fee30c5df498000af2821e23d6c80ceb50
|
7c1a7805eb3381c72d8dd1eaefaff0bb556aed2c
|
refs/heads/master
| 2020-12-28T22:33:54.618898
| 2015-09-30T22:12:25
| 2015-09-30T22:12:25
| 43,081,281
| 0
| 1
| null | 2015-09-24T17:13:49
| 2015-09-24T17:13:49
| null |
UTF-8
|
R
| false
| false
| 45,096
|
r
|
server.R
|
# this sources the file that checks whether all the packages are installed. It
# might be a good idea to edit this source file (located in the app working
# directory, "AIP/inst/app") so that it checks the required version is installed
# too. Required versions can be found in the AIP package DESCRIPTION file
source('pkg_check.R')
library('shiny')
# This package has bindings for some cool twitter bootstrap UI stuff that shiny
# doesn't include. Includes the modals, collapse panels, and tool tips.
library('shinyBS')
# for displaying R code - pull from github until the correct version is on CRAN
library('shinyAce')
# for reading in excel files, uncomment once on CRAN. For now it is on github
# only. It is a package that has no Java dependencies (only c++), so once
# binaries are on CRAN, anyone can install it (no JRE required!) and the app can
# have functionality to read in excel files.
#library(readxl)
library('agricolae') # for sample datasets and LSD.Test()
library('car') # for leveneTest()
library('Rmisc') # for summarySE()
library('ggplot2') # for ggplot(), etc.
# for loading dynamic reports. I don't use rmarkdown because that requires that
# pandoc be installed which is a whole different ballgame. knitr doesn't require
# dependencies like that
library('knitr')
shinyServer( function(input, output, session) {
#############################################################################
# Load Data Tab
#############################################################################
  GetLoadCall <- reactive({
    # Returns one of three things:
    #  1. a call: the unevaluated read.csv() call for an uploaded file
    #  2. a name: the bare name of the selected agricolae sample data set
    #  3. NULL: nothing to load yet
    # If no file has been uploaded, or the upload has length zero (i.e.
    # corrupt or a weird filetype), and the user hasn't ticked "use sample
    # data", there is nothing to load.
    if ((is.null(input$data_file) || length(input$data_file) == 0) &&
        !input$use_sample_data) {
      return(NULL)
    } else {
      # A data file has been uploaded and the user doesn't want sample data.
      if (length(input$data_file) > 0 && !input$use_sample_data) {
        # uncomment once readxl gets on CRAN
        # readcall <- switch(file_ext(input$data_file$name),
        #                    'csv'='read.csv',
        #                    'xls'='read_excel',
        #                    'xlsx'='read_excel')
        # The reader is kept as a character string naming the function so
        # that (a) it can later be switched on the uploaded file's type once
        # readxl is enabled, and (b) the resulting call is easy to deparse()
        # into the example-code panel of the app.
        readcall <- 'read.csv'
        load.call <- call(readcall, file=input$data_file$datapath)
      } else {
        # No usable upload but sample data requested: return the name of the
        # chosen data set. Evaluating that name later (after data() has
        # loaded it into the workspace) yields the data frame itself.
        load.call <- as.name(input$sample_data_buttons)
      }
    }
    return(load.call)
  })
  LoadData <- reactive({
    # Returns a data.frame with the appropriate data set or NULL.
    # TODO : This should only run if GetLoadCall returns a name.
    # TODO : This should probably be in the GetLoadCall function.
    # The following loads the selected agricolae data set into this
    # reactive's local environment, regardless of whether the user ticked
    # "use sample data". The loaded object's name is the same as
    # input$sample_data_buttons.
    eval(call('data', input$sample_data_buttons, package='agricolae',
              envir=environment()))
    # GetLoadCall() is either NULL, a read.csv call, or the name of the
    # sample data set. Evaluating it here covers all three cases:
    #  - NULL          -> data is NULL
    #  - read.csv call -> data is the user's uploaded file
    #  - a name        -> data is the sample set loaded by data() above
    data <- eval(GetLoadCall(), envir=environment())
    return(data)
  })
GetSimpleLoadExpr <- reactive({
# Returns a character containing the code an R user would use to load the
# data.
# if they aren't using sample data, deparse the call to get the character
# represntation of the call, otherwise, construct it from scratch
if (!input$use_sample_data) {
return(deparse(GetLoadCall()))
} else {
l <- "# load the agricolae package for the sample data"
l <- paste0(l, "\nlibrary('agricolae')")
l <- paste0(l, "\ndata('",input$sample_data_buttons, "')")
l <- paste0(l, '\nmy.data <- ', input$sample_data_buttons)
}
})
  ReadCode <- reactive({
    # Returns the load code to display in the editor, or NULL when neither
    # an upload nor sample data is available.
    if (length(input$data_file) == 0 && !input$use_sample_data) {
      return(NULL)
    }
    if (!input$use_sample_data) {
      # The deparsed call contains the server-side temp path of the upload;
      # swap it for the user's original file name. The perl lookbehind /
      # lookahead pair matches exactly the text between `file = "` and `")`
      # without consuming those anchors.
      filestr <- gsub('(?<=file \\= ").*(?="\\))',
                      input$data_file$name, perl=TRUE,
                      GetSimpleLoadExpr())
      # Show the result assigned to `my.data`, matching the rest of the
      # generated example code.
      filestr <- paste0('my.data <- ', filestr)
    } else {
      # Sample-data snippet already assigns to my.data; use it as-is.
      filestr <- GetSimpleLoadExpr()
    }
    return(filestr)
  })
observe({
# Updates the load code editor.
updateAceEditor(session, 'code_used_read', value=ReadCode(), readOnly=TRUE)
})
output$data_table <- renderDataTable({LoadData()})
#############################################################################
# Analysis Tab
#############################################################################
#---------------------------------------------------------------------------#
# Functions used to run the analyses.
#---------------------------------------------------------------------------#
ConvertData <- reactive({
# convert variables to their respective types
raw.data <- LoadData()
col.names <- names(raw.data)
for(i in 1:ncol(raw.data)){
# each input ID was apended "_recode" so for each column of the raw.data take
# that value and recode it to the correct value based on the input id.
var_type <- input[[paste0(col.names[i], '_recode')]]
# since the input value will be either numeric or factor, you can paste
# "as." in front of it to create the function call we'll use. then convert
# that variable type and return the raw.data with the converted variables. Call
# it raw.data so it doensn't get mixed up with LoadData()
raw.data[,i] <- eval(call(paste0('as.', var_type), raw.data[,i]))
}
return(raw.data)
})
  ComputeExponent <- reactive({
    # Returns the exponent numeric to be used in the power transformation.
    # Method: aggregate the dependent variable by treatment group, regress
    # log10(group variance) on log10(group mean), and take
    # power = 1 - slope / 2 from the fitted line. (This is the classic
    # mean-variance / Taylor-power-law approach to choosing a
    # variance-stabilizing exponent — presumably; confirm against the
    # original analysis protocol.)
    # Single-factor designs aggregate by one treatment, the rest by two.
    if (input$exp.design %in% c('LR', 'CRD1', 'RCBD1')) {
      form = paste(input$dependent.variable, '~',
                   input$independent.variable.one)
    } else {
      form = paste(input$dependent.variable, '~',
                   input$independent.variable.one, '+',
                   input$independent.variable.two)
    }
    # Per-group log10 mean and log10 variance of the dependent variable.
    mean.data <- aggregate(as.formula(form), data = ConvertData(),
                           function(x) c(logmean = log10(mean(x)),
                                         logvar = log10(var(x))))
    # aggregate() packs the two statistics into a matrix column named after
    # the dependent variable; unpack it into a data frame for lm().
    power.fit <- lm(logvar ~ logmean,
                    data = as.data.frame(mean.data[[input$dependent.variable]]))
    # coefficients[2, 1] is the slope estimate of the logvar ~ logmean fit.
    power <- 1 - summary(power.fit)$coefficients[2, 1] / 2
    return(power)
  })
TransformedDepVarColName <- function() {
# Returns the transformed dependent variable name.
dep.var <- input$dependent.variable
choices = c('None' = dep.var,
'Power' = paste0(dep.var, '.pow'),
'Logarithmic' = paste0(dep.var, '.log10'),
'Square Root' = paste0(dep.var, '.sqrt'))
return(choices[[input$transformation]])
}
AddTransformationColumns <- reactive({
# Returns the converted data frame with three new columns for the three
# transformations.
data <- ConvertData()
dep.var <- input$dependent.variable
trans.dep.var <- TransformedDepVarColName()
dep.var.col <- data[[dep.var]]
if (input$transformation == 'Power') {
data[[trans.dep.var]] <- dep.var.col^ComputeExponent()
} else if (input$transformation == 'Logarithmic') {
data[[trans.dep.var]] <- log10(dep.var.col)
} else if (input$transformation == 'Square Root') {
data[[trans.dep.var]] <- sqrt(dep.var.col)
}
return(data)
})
  GenerateFormula <- reactive({
    # Builds the model-formula string for the selected experimental design:
    #   LR / CRD1 : y ~ A
    #   CRD2      : y ~ A + B + A:B
    #   RCBD1     : y ~ A + block
    #   RCBD2     : y ~ A + B + A:B + block
    #   SPCRD     : y ~ A + B + A:B + Error(A:rep)
    #   SPRCBD    : y ~ A + B + A:B + block + Error(A:block)
    # where y is the (possibly transformed) dependent variable, A and B the
    # treatments, and the Error() term defines the split-plot error stratum.
    left.side <- paste(TransformedDepVarColName(), '~')
    if (input$exp.design %in% c('LR', 'CRD1')) {
      right.side <- input$independent.variable.one
    } else if (input$exp.design == 'CRD2') {
      right.side <- paste0(input$independent.variable.one, ' + ',
                           input$independent.variable.two, ' + ',
                           input$independent.variable.one, ':',
                           input$independent.variable.two)
    } else if (input$exp.design == 'RCBD1') {
      right.side <- paste0(input$independent.variable.one, ' + ',
                           input$independent.variable.blk)
    } else if (input$exp.design == 'RCBD2') {
      right.side <- paste0(input$independent.variable.one, ' + ',
                           input$independent.variable.two, ' + ',
                           input$independent.variable.one, ':',
                           input$independent.variable.two, ' + ',
                           input$independent.variable.blk)
    } else if (input$exp.design == 'SPCRD') {
      right.side <- paste0(input$independent.variable.one, ' + ',
                           input$independent.variable.two, ' + ',
                           input$independent.variable.one, ':',
                           input$independent.variable.two, ' + Error(',
                           input$independent.variable.one, ':',
                           input$independent.variable.blk, ')')
    } else if (input$exp.design == 'SPRCBD') {
      right.side <- paste0(input$independent.variable.one, ' + ',
                           input$independent.variable.two, ' + ',
                           input$independent.variable.one, ':',
                           input$independent.variable.two, ' + ',
                           input$independent.variable.blk, ' + Error(',
                           input$independent.variable.one, ':',
                           input$independent.variable.blk, ')')
    }
    form <- paste(left.side, right.side)
    return(form)
  })
GenerateFormulaWithoutError <- reactive({
f <- GenerateFormula()
if (input$exp.design %in% c('SPCRD', 'SPRCBD')) {
f <- gsub(' \\+ Error\\(.*:.*)', '', f)
}
return(f)
})
GetFitCall <- reactive({
# Returns the call used to run the analysis.
# The following line forces this reactive expression to take a dependency on
# the "Run Analysis" button. Thus this will run anytime it is clicked.
input$run_analysis
# isolate prevents this reactive expression from depending on any of the
# variables inside the isolated expression.
isolate({
fit <- call('aov',
formula = as.formula(GenerateFormula()),
data = as.name('my.data'))
# santizes the call (from global.R)
fit <- strip.args(fit)
})
return(fit)
})
EvalFit <- reactive({
# Returns the fit model.
# Run every time the "Run Analysis" button is pressed.
input$run_analysis
isolate({
my.data <- AddTransformationColumns()
model.fit <- eval(GetFitCall())
})
return(model.fit)
})
GetFitExpr <- reactive({
# Returns the char of the expression used to evaluate the fit.
# Run every time the "Run Analysis" button is pressed.
input$run_analysis
isolate({
x <- deparse(GetFitCall(), width.cutoff=500L)
})
return(x)
})
ModelFitWithoutError <- reactive({
# Returns the model fit from formulas with the Error() term removed.
input$run_analysis
isolate(exp.design <- input$exp.design)
if (exp.design %in% c('SPCRD', 'SPRCBD')) {
my.data <- AddTransformationColumns()
model.fit <- aov(formula = as.formula(GenerateFormulaWithoutError()),
data = my.data)
} else {
model.fit <- EvalFit()
}
return(model.fit)
})
GenerateIndividualFormulas <- reactive({
# Returns single variate formulas.
dep.var <- TransformedDepVarColName()
if (input$exp.design %in% c('LR', 'CRD1', 'RCBD1')) {
f <- paste0(dep.var, ' ~ ',
input$independent.variable.one)
l <- list()
l[[f]] <- as.formula(f)
return(l)
} else {
f1 <- paste0(dep.var, ' ~ ',
input$independent.variable.one)
f2 <- paste0(dep.var, ' ~ ',
input$independent.variable.two)
l <- list()
l[[f1]] <- as.formula(f1)
l[[f2]] <- as.formula(f2)
return(l)
}
})
GenerateTukeyFormula <- reactive({
dep.var <- TransformedDepVarColName()
return(paste0(GenerateFormulaWithoutError(),
' + ', dep.var, '.pred.sq'))
})
GenerateAnalysisCode <- reactive({
# Returns the R code a user would type to run the analysis.
if (input$run_analysis==0) {
return(NULL)
} else {
# code for converting columns to factors
factor.idx <- which(sapply(ConvertData(), is.factor))
if (length(factor.idx) > 0) {
factor.names <- names(ConvertData()[factor.idx])
code <- paste0('my.data$', factor.names, ' <- as.factor(my.data$',
factor.names, ')', collapse='\n')
code <- paste0('# convert categorical variables to factors\n', code)
}
# code for the transformation
dep.var <- input$dependent.variable
if (input$transformation == 'Power') {
code <- paste0(code, '\n\n# transform the dependent variable\n')
if (input$exp.design %in% c('LR', 'CRD1', 'RCBD1')) {
code <- paste0(code, 'mean.data <- aggregate(', dep.var, ' ~ ',
input$independent.variable.one)
} else {
code <- paste0(code, 'mean.data <- aggregate(', dep.var, ' ~ ',
input$independent.variable.one, ' + ',
input$independent.variable.two)
}
code <- paste0(code,
', data = my.data, function(x) ',
'c(logmean=log10(mean(x)), logvar=log10(var(x))))\n',
'power.fit <- lm(logvar ~ logmean, ',
'data = as.data.frame(mean.data$', dep.var, '))\n',
'power <- 1 - summary(power.fit)$coefficients[2, 1] / 2\n',
'my.data$', dep.var, '.pow <- my.data$', dep.var,
'^power')
} else if (input$transformation == 'Logarithmic') {
code <- paste0(code, '\n\n# transform the dependent variable\nmy.data$',
input$dependent.variable, '.log10 <- log10(my.data$',
input$dependent.variable, ')')
} else if (input$transformation == 'Square Root') {
code <- paste0(code, '\n\n# transform the dependent variable\nmy.data$',
input$dependent.variable, '.sqrt <- sqrt(my.data$',
input$dependent.variable, ')')
}
# code for the model fit and summary
code <- paste0(code, '\n\n# fit the model\n')
code <- paste0(code, 'model.fit <- ', GetFitExpr())
code <- paste0(code, '\n\n# print summary table\nsummary(model.fit)')
# code for the assumptions tests
if (!input$exp.design %in% c('SPCRD', 'SPRCBD')) {
code <- paste0(code,
'\n\n# assumptions tests\nshapiro.test(residuals(model.fit))')
}
if (input$exp.design != 'LR') {
formulas <- GenerateIndividualFormulas()
levene.calls <- paste0('leveneTest(', formulas, ', data = my.data)',
collapse = '\n')
code <- paste0(code, "\n\n# Levene's Test\nlibrary('car')\n", levene.calls)
}
trans.dep.var <- TransformedDepVarColName()
if (!input$exp.design %in% c('LR', 'CRD1', 'CRD2')) {
# TODO : I'm not sure this is the correct thing to do for split plot
# Tukey tests.
if (input$exp.design %in% c('SPCRD', 'SPRCBD')) {
fit.name <- 'model.fit.no.error'
fit.line <- paste0(fit.name, ' <- aov(',
GenerateFormulaWithoutError(), ', data = my.data)\n')
} else {
fit.name <- 'model.fit'
fit.line <- ''
}
code <- paste0(code, "\n\n# Tukey's Test for Nonadditivity\n", fit.line,
"my.data$", trans.dep.var,
".pred.sq <- predict(", fit.name, ")^2\n",
"tukey.one.df.fit <- lm(formula = ",
GenerateTukeyFormula(),
", data = my.data)\nanova(tukey.one.df.fit)")
}
return(code)
}
})
observe({
# Updates the analysis code in the editor.
input$run_analysis
updateAceEditor(session, 'code_used_model',
value=isolate(GenerateAnalysisCode()), readOnly=TRUE)
})
# TODO : It could be useful to break this up into each plot and utilize this
# code for actual evaluation later on to deduplicate the code.
MakePlotAnalysisCode <- reactive({
if (input$exp.design %in% c('SPCRD', 'SPRCBD')) {
code <- paste0("# Residuals vs. Fitted\nplot(model.fit.no.error, which = 1)")
code <- paste0(code, "\n\n# Kernel Density Plot",
"\nplot(density(residuals(model.fit.no.error)))")
} else {
code <- paste0("# Residuals vs. Fitted\nplot(model.fit, which = 1)")
code <- paste0(code, "\n\n# Kernel Density Plot\nplot(density(residuals(model.fit)))")
}
if (input$exp.design == 'LR') {
code <- paste0(code, "\n\n# Best Fit Line\nplot(formula = ",
GenerateFormula(), ", data = my.data)\nabline(model.fit)")
} else {
dep.var <- TransformedDepVarColName()
ind.var.one <- input$independent.variable.one
ind.var.two <- input$independent.variable.two
f1 <- paste0(dep.var, ' ~ ', ind.var.one)
main <- paste0("Effect of ", ind.var.one, " on ",
dep.var)
code <- paste0(code, "\n\n# Effects Box Plots\n",
"boxplot(", f1, ", data = my.data, main = '", main,
"', xlab = '", ind.var.one,
"', ylab = '", dep.var, "')")
if (!input$exp.design %in% c('LR', 'CRD1', 'RCBD1')) {
f2 <- paste0(dep.var, ' ~ ', ind.var.two)
main <- paste0("Effect of ", ind.var.two, " on ",
dep.var)
code <- paste0(code, "\nboxplot(", f2, ", data = my.data, main = '",
main, "', xlab = '", ind.var.two,
"', ylab = '", dep.var, "')")
}
}
if (!input$exp.design %in% c('LR', 'CRD1', 'RCBD1')) {
code <- paste0(code, "\n# Interaction Plots\n",
"interaction.plot(my.data$", ind.var.one, ", my.data$",
ind.var.two, ", my.data$", dep.var, ", xlab = '",
ind.var.one, "', trace.label = '", ind.var.two, "', ylab = '",
dep.var, "')")
code <- paste0(code, "\ninteraction.plot(my.data$", ind.var.two,
", my.data$", ind.var.one, ", my.data$", dep.var, ", xlab = '",
ind.var.two, "', trace.label = '", ind.var.one,
"', ylab = '", dep.var, "')")
}
return(code)
})
#---------------------------------------------------------------------------#
# UI elements for the analysis tab side panel.
#---------------------------------------------------------------------------#
output$select.design <- renderUI({
# Renders a dropdown for selecting the type of experimental design.
if(is.null(LoadData())){
h4('Please upload or select data first.')
} else {
choices <- c('Two Continous Variables' = 'LR',
'Completely Randomized Design (CRD) with One Treatment' = 'CRD1',
'Completely Randomized Design (CRD) with Two Treatments' = 'CRD2',
'Randomized Complete Block Design (RCBD) with One Treatment' = 'RCBD1',
'Randomized Complete Block Design (RCBD) with Two Treatments' = 'RCBD2',
'Split-Plot Completely Randomized Design' = 'SPCRD',
'Split-Plot Randomized Complete Block Design' = 'SPRCBD')
# TODO : Add in the two designs with random effects.
selectInput('exp.design',
'Select Your Experimental Design',
choices = choices,
selected = NULL)
# TODO : When this is selected it should clear all of the analysis related
# input variables so nothing lingers from previous analyses, e.g.
# independent.variable.two.
}
})
output$select.dependent <- renderUI({
# Renders a dropdown for selecting the dependent variable from the loaded
# data.
if (is.null(input$exp.design)) {
h4('Please select an experimental design first.')
input$dependent.variable = NULL
} else {
selectInput('dependent.variable',
'Select a dependent variable:',
choices = names(LoadData()),
selected = NULL)
}
})
output$select.independent <- renderUI({
# Renders a number of dropdowns for selecting in independent variables.
# TODO : This should check that the variable type panel has been run,
# otherwise `ConvertData()` will fail.
if (is.null(input$dependent.variable)) {
return(NULL)
} else {
all.col.names <- names(ConvertData())
choices = all.col.names[!(all.col.names %in% input$dependent.variable)]
if (input$exp.design == 'LR') {
selectInput('independent.variable.one',
'Select a single continous independent variable:',
choices = choices,
selected = NULL)
} else if (input$exp.design == 'CRD1') {
selectInput('independent.variable.one',
'Select a single independent factor variable:',
choices = choices,
selected = NULL)
} else if (input$exp.design == 'CRD2') {
input1 <- selectInput('independent.variable.one',
'Select the first independent factor variable:',
choices = choices,
selected = NULL)
input2 <- selectInput('independent.variable.two',
'Select the second independent factor variable:',
choices = choices,
selected = NULL)
return(list(input1, input2))
} else if (input$exp.design == 'RCBD1') {
input1 <- selectInput('independent.variable.one',
'Select the first independent factor variable:',
choices = choices,
selected = NULL)
input2 <- selectInput('independent.variable.blk',
'Select the blocking factor variable:',
choices = choices,
selected = NULL)
return(list(input1, input2))
} else if (input$exp.design == 'RCBD2') {
input1 <- selectInput('independent.variable.one',
'Select the first independent factor variable:',
choices = choices,
selected = NULL)
input2 <- selectInput('independent.variable.two',
'Select the second independent factor variable:',
choices = choices,
selected = NULL)
input3 <- selectInput('independent.variable.blk',
'Select the blocking factor variable:',
choices = choices,
selected = NULL)
return(list(input1, input2, input3))
} else if (input$exp.design == 'SPCRD') {
input1 <- selectInput('independent.variable.one',
'Select the main plot treatment:',
choices = choices,
selected = NULL)
input2 <- selectInput('independent.variable.two',
'Select the sub plot treatment:',
choices = choices,
selected = NULL)
input3 <- selectInput('independent.variable.blk',
'Select the repetition:',
choices = choices,
selected = NULL)
return(list(input1, input2, input3))
} else if (input$exp.design == 'SPRCBD') {
input1 <- selectInput('independent.variable.one',
'Select the main plot treatment:',
choices = choices,
selected = NULL)
input2 <- selectInput('independent.variable.two',
'Select the sub plot treatment:',
choices = choices,
selected = NULL)
input3 <- selectInput('independent.variable.blk',
'Select the blocking factor variable:',
choices = choices,
selected = NULL)
return(list(input1, input2, input3))
}
}
})
output$var_types_select <- renderUI({
# Renders a series of radio buttons for selecting a type for each varible:
# numeric or factor.
raw.data <- LoadData()
raw.data.col.names <- names(raw.data)
if (is.null(raw.data)) {
return(NULL)
}
class.recode <- c('character'='factor',
'factor'='factor',
'logical'='factor',
'numeric'='numeric',
'integer'='numeric')
btns <- list()
# a loop to create radio buttons for each variable in the data
for(i in 1:ncol(raw.data)){
clss <- class(raw.data[,i])
clss <- class.recode[clss]
btns[[i]] <- radioButtons(inputId=paste0(raw.data.col.names[i], '_recode'),
label=raw.data.col.names[i],
choices=c('Numeric'='numeric', 'Grouping'='factor'),
selected=clss,
inline=TRUE)
}
return(btns)
})
#---------------------------------------------------------------------------#
# UI elements for the analysis tab main panel.
#---------------------------------------------------------------------------#
output$formula <- renderText({
if(is.null(input$run_analysis) || input$run_analysis == 0) {
return(NULL)
} else {
GenerateFormula()
}
})
output$fit.summary.text <- renderPrint({
input$run_analysis
isolate({fit.summary <- summary(EvalFit())})
return(fit.summary)
})
output$fit.summary <- renderUI({
if(is.null(input$run_analysis) || input$run_analysis == 0) {
return(NULL)
} else {
list(h2('Model Fit Summary'),
if (input$exp.design != 'LR') { h3('ANOVA Table') } else{ NULL },
verbatimTextOutput('fit.summary.text'))
}
})
output$exponent <- renderUI({
# Renders a paragraph tag with the computed exponent value.
if (input$transformation == 'Power') {
header <- h2('Exponent from Power Transformation')
text <- p(as.character(ComputeExponent()))
return(list(header, text))
} else {
return(NULL)
}
})
output$shapiro.wilk.results.text <- renderPrint({
input$run_analysis
# NOTE : We don't do the Shapiro-Wilk test for the split plot designs
# because it isn't straight forward to implement.
if (!input$exp.design %in% c('SPCRD', 'SPRCBD')) {
isolate({fit <- EvalFit()})
return(shapiro.test(residuals(fit)))
} else {
return(cat(paste0("Shapiro-Wilk Normality Test is not performed because ",
"it is not straightforward for split-plot designs.")))
}
})
output$shapiro.wilk.results <- renderUI({
if(is.null(input$run_analysis) || input$run_analysis == 0) {
return(NULL)
} else {
list(h2('Shapiro-Wilk Normality Test Results'),
verbatimTextOutput('shapiro.wilk.results.text'))
}
})
output$levene.results.text <- renderPrint({
input$run_analysis
isolate({
formulas <- GenerateIndividualFormulas()
my.data <- AddTransformationColumns()
})
return(lapply(formulas, leveneTest, data = my.data))
})
output$levene.results <- renderUI({
if(is.null(input$run_analysis) || input$run_analysis == 0) {
return(NULL)
} else {
if (input$exp.design != 'LR') {
list(h2("Levene's Test for Homogeneity of Variance"),
verbatimTextOutput('levene.results.text'))
} else {
return(NULL)
}
}
})
output$tukey.results.text <- renderPrint({
input$run_analysis
isolate({
# TODO : Check to make sure this is what I'm supposed to use for the split
# plot results.
fit <- ModelFitWithoutError()
my.data <- AddTransformationColumns()
dep.var <- TransformedDepVarColName()
my.data[[paste0(dep.var, '.pred.sq')]] <- predict(fit)^2
f <- GenerateTukeyFormula()
tukey.one.df.fit <- lm(formula = as.formula(f), data = my.data)
})
return(anova(tukey.one.df.fit))
})
output$tukey.results <- renderUI({
if(is.null(input$run_analysis) || input$run_analysis == 0) {
return(NULL)
} else {
if (!input$exp.design %in% c('LR', 'CRD1', 'CRD2')) {
isolate({
dep.var <- TransformedDepVarColName()
pred.var <- paste0(dep.var, '.pred.sq')
})
list(h2("Tukey's Test for Nonadditivity"),
p(strong(paste0("Attention: Refer only to the '", pred.var, "' ",
"row in this table, ignore all other rows."))),
verbatimTextOutput('tukey.results.text'))
} else {
return(NULL)
}
}
})
output$plot.residuals.vs.fitted <- renderPlot({
input$run_analysis
model.fit <- ModelFitWithoutError()
plot(model.fit, which = 1)
})
output$residuals.vs.fitted.plot <- renderUI({
if (is.null(input$run_analysis) || input$run_analysis == 0) {
return(NULL)
} else {
list(h2('Residuals vs Fitted'),
plotOutput('plot.residuals.vs.fitted'))
}
})
output$plot.kernel.density <- renderPlot({
input$run_analysis
model.fit <- ModelFitWithoutError()
plot(density(residuals(model.fit)))
})
output$kernel.density.plot <- renderUI({
if (is.null(input$run_analysis) || input$run_analysis == 0) {
return(NULL)
} else {
list(h2('Kernel Density of the Residuals'),
plotOutput('plot.kernel.density'))
}
})
output$plot.best.fit <- renderPlot({
input$run_analysis
f <- paste0(input$dependent.variable, ' ~ ',
input$independent.variable.one)
my.data <- AddTransformationColumns()
plot(formula = as.formula(f), data = my.data)
model.fit <- ModelFitWithoutError()
abline(model.fit)
})
output$best.fit.plot <- renderUI({
if (is.null(input$run_analysis) || input$run_analysis == 0) {
return(NULL)
} else {
if (input$exp.design == 'LR') {
list(h2('Best Fit'),
plotOutput('plot.best.fit'))
} else {
return(NULL)
}
}
})
output$plot.boxplot.one <- renderPlot({
input$run_analysis
dep.var <- TransformedDepVarColName()
if (input$exp.design != 'LR') {
my.data <- AddTransformationColumns()
f1 <- paste0(dep.var, ' ~ ',
input$independent.variable.one)
boxplot(as.formula(f1), data = my.data,
main = paste0("Effect of ", input$independent.variable.one,
" on ", dep.var),
xlab = input$independent.variable.one,
ylab = dep.var)
}
})
output$plot.boxplot.two <- renderPlot({
input$run_analysis
dep.var <- TransformedDepVarColName()
if (!input$exp.design %in% c('LR', 'CRD1', 'RCBD1')) {
my.data <- AddTransformationColumns()
f2 <- paste0(dep.var, ' ~ ',
input$independent.variable.two)
boxplot(as.formula(f2), data = my.data,
main = paste0("Effect of ", input$independent.variable.two,
" on ", dep.var),
xlab = input$independent.variable.two,
ylab = dep.var)
}
})
output$boxplot.plot <- renderUI({
input$run_analysis
if (is.null(input$run_analysis) || input$run_analysis == 0) {
return(NULL)
} else {
if (input$exp.design != 'LR') {
if (!input$exp.design %in% c('CRD1', 'RCBD1')) {
elements <- list(h2('Effects Box Plots'),
plotOutput('plot.boxplot.one'),
plotOutput('plot.boxplot.two'))
} else {
elements <- list(h2('Effects Box Plots'),
plotOutput('plot.boxplot.one'))
}
return(elements)
} else {
return(NULL)
}
}
})
output$plot.interaction.one <- renderPlot({
input$run_analysis
isolate({
if (!input$exp.design %in% c('LR', 'CRD1', 'RCBD1')) {
dep.var <- TransformedDepVarColName()
ind.var.one <- input$independent.variable.one
ind.var.two <- input$independent.variable.two
my.data <- AddTransformationColumns()
interaction.plot(my.data[[ind.var.one]], my.data[[ind.var.two]],
my.data[[dep.var]], xlab = ind.var.one, trace.label =
ind.var.two, ylab = dep.var)
}
})
})
output$plot.interaction.two <- renderPlot({
input$run_analysis
isolate({
if (!input$exp.design %in% c('LR', 'CRD1', 'RCBD1')) {
dep.var <- TransformedDepVarColName()
ind.var.one <- input$independent.variable.one
ind.var.two <- input$independent.variable.two
my.data <- AddTransformationColumns()
interaction.plot(my.data[[ind.var.two]], my.data[[ind.var.one]],
my.data[[dep.var]], xlab = ind.var.two, trace.label =
ind.var.one, ylab = dep.var)
}
})
})
output$interaction.plot <- renderUI({
input$run_analysis
if (is.null(input$run_analysis) || input$run_analysis == 0) {
return(NULL)
} else {
if (!input$exp.design %in% c('LR', 'CRD1', 'RCBD1')) {
return(list(h2('Interaction Plots'),
plotOutput('plot.interaction.one'),
plotOutput('plot.interaction.two')))
} else {
return(NULL)
}
}
})
#############################################################################
# Post hoc tab
#############################################################################
  MakePostHocPlot <- function(data, fit, dep.var, ind.var) {
    # Bar plot of treatment means with standard-error bars and LSD group
    # letters from agricolae's LSD.test().
    #
    # Args:
    #   data:    data frame holding the (transformed) analysis data.
    #   fit:     the fitted aov model to run LSD.test() on.
    #   dep.var: character name of the dependent variable column.
    #   ind.var: character vector of one or two treatment factor names.
    #
    # Returns: a ggplot object.
    lsd.results <- LSD.test(fit, ind.var)
    # Per-group N, mean, sd, se, ci (Rmisc::summarySE).
    summary.stats <- summarySE(data = data, dep.var, groupvars = ind.var)
    if (length(ind.var) == 2) {
      # Two factors: build a combined "A:B" treatment label per row to match
      # the `trt` labels LSD.test() produces for interactions.
      summary.stats$trt <- apply(summary.stats[ , ind.var], 1, paste,
                                 collapse = ":")
      x.label = paste(ind.var, collapse = ":")
    } else {
      summary.stats$trt <- summary.stats[[ind.var]]
      x.label = ind.var
    }
    # Join the summary stats with the LSD grouping letters on the treatment
    # label; column `M` below (the group letter) comes from lsd.results.
    merged.table <- merge(summary.stats, lsd.results$groups, by = "trt")
    # ymax pads headroom above the tallest bar for the group-letter labels.
    ggplot(merged.table, aes(x = trt, y = means, ymin = 0,
                             ymax = 1.35 * max(means))) +
      geom_bar(stat = "identity", fill = "gray50", colour = "black", width = 0.7) +
      geom_errorbar(aes(ymax = means + se, ymin = means - se), width = 0.0,
                    size = 0.5, color = "black") +
      geom_text(aes(label = M, y = means + se / 1.8, vjust = -2.5)) +
      labs(x = x.label, y = dep.var) +
      theme_bw() +
      theme(panel.grid.major.x = element_blank(),
            panel.grid.major.y = element_line(colour = "grey80"),
            plot.title = element_text(size = rel(1.5),
                                      face = "bold", vjust = 1.5),
            axis.title = element_text(face = "bold"),
            axis.title.y = element_text(vjust= 1.8),
            axis.title.x = element_text(vjust= -0.5),
            panel.border = element_rect(colour = "black"),
            text = element_text(size = 20))
  }
# TODO : Clean up this insane nested if statement! Sorry...
output$lsd.results <- renderUI({
input$run_post_hoc_analysis
if (is.null(input$run_post_hoc_analysis) || input$run_post_hoc_analysis == 0) {
return(NULL)
} else {
isolate({
exp.design <- input$exp.design
dep.var <- TransformedDepVarColName()
ind.var.one <- input$independent.variable.one
if (exp.design %in% c('CRD1', 'RCBD1')) {
ind.var.two <- NULL
} else {
ind.var.two <- input$independent.variable.two
}
ind.vars <- c(ind.var.one, ind.var.two)
my.data <- AddTransformationColumns()
fit <- EvalFit()
})
alpha <- 0.05
if (exp.design == 'LR') {
return(p('Post hoc tests are not run for simple linear regression.'))
} else if (exp.design %in% c('CRD1', 'RCBD1')) {
# NOTE : The "[1]" gets the first p-value so this relies on the order of
# the variables in the RCBD to always have the treatment first. It is a
# pain in the ass to detect the order and then extract. Why does R make
# this so difficult?
p.value <- summary(fit)[[1]]$'Pr(>F)'[1]
if (p.value < alpha) {
output$lsd.results.text <- renderPrint({
LSD.test(fit, ind.vars, console = TRUE)
})
output$lsd.bar.plot <- renderPlot({
MakePostHocPlot(my.data, fit, dep.var, ind.vars)
})
return(list(p(paste0(ind.vars, ' is significant (alpha=0.05).')),
verbatimTextOutput('lsd.results.text'),
plotOutput('lsd.bar.plot')))
} else {
return(p(paste0(ind.vars, ' is not significant.')))
}
} else if (exp.design %in% c('CRD2', 'RCBD2')) {
var.one.p.value <- summary(fit)[[1]]$'Pr(>F)'[1]
var.two.p.value <- summary(fit)[[1]]$'Pr(>F)'[2]
if (exp.design == 'CRD2') {
idx <- 3
} else {
idx <- 4
}
interaction.p.value <- summary(fit)[[1]]$'Pr(>F)'[idx]
if (interaction.p.value < alpha) {
text <- paste0("The interaction, ", paste(ind.vars, collapse = ":"),
", is significant (alpha = 0.05).")
if (var.one.p.value < alpha && var.two.p.value < alpha){
lsd.vars <- ind.vars
text <- paste0(text, " Both factors are significant.")
} else if (var.one.p.value < alpha) {
text <- paste0(text, " Only ", ind.var.one, " is significant.")
lsd.vars <- ind.var.one
} else if (var.two.p.value < alpha) {
text <- paste0(text, " Only ", ind.var.two, " is significant.")
lsd.vars <- ind.var.two
} else {
text <- paste0(text, " Neither factor is significant.")
return(p(text))
}
output$lsd.results.text <- renderPrint({
LSD.test(fit, lsd.vars, console = TRUE)
})
output$lsd.bar.plot <- renderPlot({
MakePostHocPlot(my.data, fit, dep.var, lsd.vars)
})
return(list(p(text),
verbatimTextOutput('lsd.results.text'),
plotOutput('lsd.bar.plot')))
} else {
# TODO : Implement what happens here.
return(p(paste0('The interaction is not significant and the post ',
'hoc analyses for this scenario are not ',
'implemented.')))
}
} else if (exp.design %in% c('SPCRD', 'SPRCBD')) {
# NOTE : This always seems to be [2] for both formulas.
interaction.p.value <- summary(fit)$'Error: Within'[[1]]$'Pr(>F)'[2]
if (interaction.p.value < alpha) {
stuff <- list()
for (ivars in list(ind.vars, rev(ind.vars))) {
f <- paste0(dep.var, ' ~ ', ivars[2])
if (exp.design == 'SPRCBD') {
f <- paste0(f, ' + ', input$independent.variable.blk)
}
for (level in levels(my.data[[ivars[1]]])) {
sub.data <- my.data[my.data[[ivars[1]]] == level, ]
sub.model.fit <- aov(as.formula(f), sub.data)
sub.p.value <- summary(sub.model.fit)[[1]][["Pr(>F)"]][1]
output.name <- paste0('lsd.results.text.', ivars[1], '.', level)
stuff[[paste0(output.name, '.heading')]] <-
h4(paste0('Subset of ', ivars[1], ':', level))
if (sub.p.value < alpha) {
stuff[[output.name]] <- pre(
paste(capture.output(LSD.test(sub.model.fit, ivars[2], console
= TRUE)), collapse = "\n"))
# TODO : These plots work except that the plots created in the
# first pass of this loop (for ivars...) are overwritten by
# those from the second pass. This also happened when I was
# using renderPrint/verbatimTextOutput for the text output and I
# couldn't debug it. It seems like the output variable is being
# overwritten or that the reactiveness of the functions does
# something weird.
#output[[paste0(output.name, '.plot')]] <- renderPlot({
#MakePostHocPlot(sub.data, sub.model.fit, dep.var, ivars[2])
#})
#stuff[[paste0(output.name, '.plot')]] <-
#plotOutput(paste0(output.name, '.plot'))
} else {
stuff[[output.name]] <- pre(paste0(ivars[2],
' effect not significant, thus no LSD is performed.\n'))
}
}
}
#TODO : Compare between subplot levels across main plot levels
return(stuff)
} else { # interaction is not significant
isolate({fit.without <- ModelFitWithoutError()})
text <- paste0("The interaction, ", paste(ind.vars, collapse = ":"),
", is not significant (alpha = 0.05).")
main.plot.p.value <- summary(fit)[[1]][[1]]$'Pr(>F)'[1]
sub.plot.p.value <- summary(fit)$'Error: Within'[[1]]$'Pr(>F)'[1]
if (main.plot.p.value < alpha) {
text <- paste0(text, " ", ind.var.one, " is significant.")
output$lsd.results.text.one <- renderPrint({
LSD.test(fit.without, ind.var.one, console = TRUE)
})
output$lsd.bar.plot.one <- renderPlot({
MakePostHocPlot(my.data, fit.without, dep.var, ind.var.one)
})
} else if (sub.plot.p.value < alpha) {
text <- paste0(text, " ", ind.var.two, " is significant.")
output$lsd.results.text.two <- renderPrint({
LSD.test(fit.without, ind.var.two, console = TRUE)
})
output$lsd.bar.plot.two <- renderPlot({
MakePostHocPlot(my.data, fit.without, dep.var, ind.var.two)
})
} else {
text <- paste0(text,
" Neither the main plot or sub plot is significant.")
return(p(text))
}
return(list(p(text),
verbatimTextOutput('lsd.results.text.one'),
plotOutput('lsd.bar.plot.one'),
verbatimTextOutput('lsd.results.text.two'),
plotOutput('lsd.bar.plot.two')))
}
}
}
})
#############################################################################
# Report tab
#############################################################################
# Builds the downloadable analysis report: fills the R Markdown template with
# the data-loading, analysis, and plotting code generated elsewhere in the
# app, knits it to HTML, and hands the resulting file to the browser.
output$download_report <- downloadHandler(
  filename = function() {
    input$file.name
  },
  content = function(file) {
    # Read the report skeleton and substitute the generated code chunks.
    template <- paste(readLines('report-template.Rmd'), collapse = '\n')
    filled.template <- gsub('replace_with_data_code', ReadCode(), template)
    filled.template <- gsub('replace_with_analysis_code',
                            GenerateAnalysisCode(), filled.template)
    filled.template <- gsub('replace_with_analysis_plot_code',
                            MakePlotAnalysisCode(), filled.template)
    # Write the filled template once. (Bug fix: the previous
    # file.copy(filled.template, 'report.Rmd') call was wrong -- file.copy()
    # expects file paths, not a string of file contents -- and the
    # normalizePath() result was never used.)
    writeLines(filled.template, 'report.Rmd')
    out <- knit2html('report.Rmd', output = input$file.name)
    file.copy(out, file)
  }
)
})
|
db0d4e1de7748bdc02f2c76a396535fc732774f7
|
5dd990f03c615ba8900ce9cb0bf0bc111e4e503b
|
/man/flat_spots.Rd
|
3b122788abe6c652db2a6180c7dead9e9d2e9f96
|
[] |
no_license
|
Sprinterzzj/feasts
|
2c4ce92de7c3bc32def3b5da84af6a9d21d26a1d
|
64c0f66af547c0ae42b1d8a03990de1387a1bc91
|
refs/heads/master
| 2020-05-26T09:03:31.104268
| 2019-05-17T00:45:00
| 2019-05-17T00:45:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 353
|
rd
|
flat_spots.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/features.R
\name{flat_spots}
\alias{flat_spots}
\title{Number of flat spots}
\usage{
flat_spots(x)
}
\arguments{
\item{x}{a vector}
\item{...}{Unused.}
}
\value{
A numeric value.
}
\description{
Number of flat spots in a time series
}
\author{
Earo Wang and Rob J Hyndman
}
|
f374358b8340585b7aa8669a780a15123f97cf3d
|
24cc593a9e7b7c16ff3ccf2215a448137bc20f9c
|
/man/EMtetrahedron.Rd
|
95ff68ead81dcff81207590d232ca04739959b8a
|
[] |
no_license
|
cran/colourvision
|
5c87425b154c7f3a77f9a54730eae538e8275f5b
|
f3d2de226b9378228217e51c85073259c53dcbf7
|
refs/heads/master
| 2021-11-26T03:29:37.382230
| 2021-08-01T22:10:11
| 2021-08-01T22:10:11
| 73,381,079
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,995
|
rd
|
EMtetrahedron.Rd
|
\name{EMtetrahedron}
\alias{EMtetrahedron}
\title{
Endler and Mielke (2005) tetrahedron colour space}
\description{
Plots Endler and Mielke (2005) tetrahedron colour space for tetrachromatic animals.}
\usage{
EMtetrahedron(x, y, z, s.col = "red", f.col = "black",
vnames = c("u","s","m","l"), type = "p",
radius = 0.01, add = F, xlab = "",
ylab = "", zlab = "", box = F, axes = F,
ylim = c(-0.75, 0.75), xlim = c(-0.75, 0.75),
zlim = c(-0.75, 0.75), aspect = T, vectors=FALSE, ...)
}
\arguments{
\item{x}{x coordinate of points to be plotted}
\item{y}{y coordinate of points to be plotted}
\item{z}{z coordinate of points to be plotted}
\item{s.col}{Colour to be used for plotted items. See plot3d in the rgl package.}
\item{f.col}{Colour of tetrahedron lines. See plot3d in the rgl package.}
\item{vnames}{Vector names.}
\item{type}{see plot3d function in the rgl package.}
\item{radius}{see plot3d function in the rgl package.}
\item{add}{see plot3d function in the rgl package.}
\item{xlab}{see plot3d function in the rgl package.}
\item{ylab}{see plot3d function in the rgl package.}
\item{zlab}{see plot3d function in the rgl package.}
\item{box}{see plot3d function in the rgl package.}
\item{axes}{see plot3d function in the rgl package.}
\item{ylim}{see plot3d function in the rgl package.}
\item{xlim}{see plot3d function in the rgl package.}
\item{zlim}{see plot3d function in the rgl package.}
\item{aspect}{see plot3d function in the rgl package.}
\item{vectors}{Whether vectors representing direction of photoreceptor outputs should be plotted.}
\item{...}{Other arguments passed to function plot3d in the rgl package.}
}
\references{
Endler, J. A., and P. Mielke. 2005. Comparing entire colour patterns as birds see them. Biol J Linn Soc 86:405-431.
}
\author{
Felipe M. Gawryszewski \email{f.gawry@gmail.com}
}
\seealso{
\code{\link{EMtriangle}}, \code{\link{EMmodel}}
}
|
765e9b8037111247827f9d3627e6fa84c0a853e1
|
fa8a814d3f6f049b03a557c04479ca344b047047
|
/Code_Raiho_et_al_2015/Deer_Model_Experiments_JAGS.R
|
de8c372d134b58b4fb45a6829b55a3cc1875de87
|
[] |
no_license
|
araiho/Deer
|
e97233944df201b3d931e9c0933ab61273253bba
|
9c0dae47cce0122aeb8388c4d0e82fdde425a061
|
refs/heads/master
| 2016-09-10T22:14:15.924267
| 2015-10-29T17:03:45
| 2015-10-29T17:03:45
| 29,789,837
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 14,409
|
r
|
Deer_Model_Experiments_JAGS.R
|
# JAGS state-space model for deer population dynamics across multiple parks,
# with forward simulation of four management experiments:
#   N      - baseline trajectory (no management)
#   N.3yr  - contraceptive with an average ~3-year effectiveness
#   N.1yr  - contraceptive with an average 1-year effectiveness
#   N.ster - sterilization (permanent)
#   N.cull - culling (treated does removed from the population)
# Stage indexing used throughout: 1 = fawn, 2 = adult female,
# 3 = adult male, 4 = treated female.
var N[4,T,Park], N.3yr[4,T,Park], N.1yr[4,T,Park], N.ster[4,T,Park], N.cull[4,T,Park], mu[4,T,Park], mu.3yr[4,T,Park], mu.1yr[4,T,Park], mu.ster[4,T,Park], mu.cull[4,T,Park]
model{
#####
##### Priors #####
#####
# Survival probabilities get beta priors; the beta shape parameters are
# themselves given gamma hyperpriors moment-matched from supplied means/sds.
a ~ dgamma(mu.a^2/sd.a^2, mu.a/sd.a^2)
b ~ dgamma(mu.b^2/sd.b^2, mu.b/sd.b^2)
mean.adult.female<- a/(a+b)
a1 ~ dgamma(mu.a1^2/sd.a1^2, mu.a1/sd.a1^2)
b1 ~ dgamma(mu.b1^2/sd.b1^2, mu.b1/sd.b1^2)
mean.fawn <- a1/(a1+b1)
a2 ~ dgamma(mu.a2^2/sd.a2^2, mu.a2/sd.a2^2)
b2 ~ dgamma(mu.b2^2/sd.b2^2, mu.b2/sd.b2^2)
mean.adult.male <- a2/(a2+b2)
W<-65
# Allometric prior on maximum per-capita recruitment r (body mass W = 65).
r ~ dnorm(2*3.09*W^-.33,1/((.1304)^2))
beta ~ dunif(0,100)
ratio ~ dbeta(312,312) # sex ratio # females:males
# Process-error standard deviation / precision on the lognormal scale.
sigma.p ~ dunif(0,2)
tau.p <- 1/(sigma.p)^2
#####
##### Initial Conditions #####
#####
# Per-park survival draws and year-1 abundances; the three baseline stages
# are split by a Dirichlet proportion of an observed park-level count.
for(p in 1:Park){
s.adult.female[p] ~ dbeta(a,b)T(0,.99)
s.fawn[p] ~ dbeta(a1,b1)T(0,.99)
s.adult.male[p] ~ dbeta(a2,b2)T(0,.99)
prop[1:3,p] ~ ddirch(N.init[1:3,1,p]+1)
number.park[p] ~ dnorm(N.obs.init[p,5],1/(N.obs.init[p,8])^2)
N[1:3,1,p] <- prop[1:3,p]*number.park[p]*area[p]
# All experiment trajectories start from the same year-1 state.
sumNpark[1,p]<-sum(N[1:3,1,p])
sumNpark.3yr[1,p]<-sum(N[1:3,1,p])
sumNpark.1yr[1,p]<-sum(N[1:3,1,p])
sumNpark.ster[1,p]<-sum(N[1:3,1,p])
sumNpark.cull[1,p]<-sum(N[1:3,1,p])
denNpark[1,p]<-sumNpark[1,p]/area[p]
denNpark.3yr[1,p]<-sumNpark[1,p]/area[p]
denNpark.1yr[1,p]<-sumNpark[1,p]/area[p]
denNpark.ster[1,p]<-sumNpark[1,p]/area[p]
denNpark.cull[1,p]<-sumNpark[1,p]/area[p]
N.cull[1:3,1,p] <- N[1:3,1,p]
N.ster[1:3,1,p] <- N[1:3,1,p]
N.1yr[1:3,1,p] <- N[1:3,1,p]
N.3yr[1:3,1,p] <- N[1:3,1,p]
}
sumN[1,1] <-sum(sumNpark[1,1:Park])
sumN.3yr[1,1] <-sum(sumNpark[1,1:Park])
sumN.1yr[1,1] <-sum(sumNpark[1,1:Park])
sumN.ster[1,1] <-sum(sumNpark[1,1:Park])
sumN.cull[1,1] <-sum(sumNpark[1,1:Park])
sumNstage[1,1] <- sum(N[1,1,1:Park])
sumNstage[2,1] <- sum(N[2,1,1:Park])
sumNstage[3,1] <- sum(N[3,1,1:Park])
# Year-1 projection matrix per park: density-dependent fecundity (Ricker-like,
# capped at 5) in M[1,2]; survival/sex-ratio terms on the remaining cells.
for(p in 1:Park){
f.adult[1,p] <- min(5,exp(r-(r/beta)*denNpark[1,p]))
M[1,2,p,1]<-s.adult.female[p]*f.adult[1,p]
M[2,1,p,1]<-s.fawn[p]*ratio
M[2,2,p,1]<-s.adult.female[p]
M[3,1,p,1]<-s.fawn[p]*(1-ratio)
M[3,3,p,1]<-s.adult.male[p]
}
#####
##### Process Model #####
#####
# N.obsno1s columns used here: [,3] = park index, [,4] = year index.
# NOTE(review): `1:length(N.obsno1s[,1])-40` parses as `(1:n)-40`, i.e. a
# sequence starting at -39, not `1:(n-40)` -- confirm against the original
# analysis whether the parenthesised form was intended.
for (i in 1:length(N.obsno1s[,1])-40){
f.adult[N.obsno1s[i,4],N.obsno1s[i,3]] <- min(5,exp(r-(r/beta)*denNpark[N.obsno1s[i,4]-1,N.obsno1s[i,3]]))
M[1,2,N.obsno1s[i,3],N.obsno1s[i,4]]<-s.adult.female[N.obsno1s[i,3]]*f.adult[N.obsno1s[i,4],N.obsno1s[i,3]]
M[2,1,N.obsno1s[i,3],N.obsno1s[i,4]]<-s.fawn[N.obsno1s[i,3]]*ratio
M[2,2,N.obsno1s[i,3],N.obsno1s[i,4]]<-s.adult.female[N.obsno1s[i,3]]
M[3,1,N.obsno1s[i,3],N.obsno1s[i,4]]<-s.fawn[N.obsno1s[i,3]]*(1-ratio)
M[3,3,N.obsno1s[i,3],N.obsno1s[i,4]]<-s.adult.male[N.obsno1s[i,3]]
# Deterministic projection, then lognormal process error around it.
mu[,N.obsno1s[i,4],N.obsno1s[i,3]] <- M[,,N.obsno1s[i,3],N.obsno1s[i,4]]%*%N[,N.obsno1s[i,4]-1,N.obsno1s[i,3]]
N[1,N.obsno1s[i,4],N.obsno1s[i,3]] ~ dlnorm(log(max(1,mu[1,N.obsno1s[i,4],N.obsno1s[i,3]])),tau.p)
N[2,N.obsno1s[i,4],N.obsno1s[i,3]] ~ dlnorm(log(max(1,mu[2,N.obsno1s[i,4],N.obsno1s[i,3]])),tau.p)
N[3,N.obsno1s[i,4],N.obsno1s[i,3]] ~ dlnorm(log(max(1,mu[3,N.obsno1s[i,4],N.obsno1s[i,3]])),tau.p)
sumNpark[N.obsno1s[i,4],N.obsno1s[i,3]] <- sum(N[1:4,N.obsno1s[i,4],N.obsno1s[i,3]])
sumNpark.3yr[N.obsno1s[i,4],N.obsno1s[i,3]] <- sum(N[1:4,N.obsno1s[i,4],N.obsno1s[i,3]])
sumNpark.1yr[N.obsno1s[i,4],N.obsno1s[i,3]] <- sum(N[1:4,N.obsno1s[i,4],N.obsno1s[i,3]])
sumNpark.ster[N.obsno1s[i,4],N.obsno1s[i,3]] <- sum(N[1:4,N.obsno1s[i,4],N.obsno1s[i,3]])
sumNpark.cull[N.obsno1s[i,4],N.obsno1s[i,3]] <- sum(N[1:4,N.obsno1s[i,4],N.obsno1s[i,3]])
denNpark[N.obsno1s[i,4],N.obsno1s[i,3]] <- sumNpark[N.obsno1s[i,4],N.obsno1s[i,3]]/area[N.obsno1s[i,3]]
denNpark.3yr[N.obsno1s[i,4],N.obsno1s[i,3]] <- sumNpark[N.obsno1s[i,4],N.obsno1s[i,3]]/area[N.obsno1s[i,3]]
denNpark.1yr[N.obsno1s[i,4],N.obsno1s[i,3]] <- sumNpark[N.obsno1s[i,4],N.obsno1s[i,3]]/area[N.obsno1s[i,3]]
denNpark.ster[N.obsno1s[i,4],N.obsno1s[i,3]] <- sumNpark[N.obsno1s[i,4],N.obsno1s[i,3]]/area[N.obsno1s[i,3]]
denNpark.cull[N.obsno1s[i,4],N.obsno1s[i,3]] <- sumNpark[N.obsno1s[i,4],N.obsno1s[i,3]]/area[N.obsno1s[i,3]]
# During the observation period all experiment trajectories track baseline.
N.3yr[1:3,N.obsno1s[i,4],N.obsno1s[i,3]] <- N[1:3,N.obsno1s[i,4],N.obsno1s[i,3]]
N.1yr[1:3,N.obsno1s[i,4],N.obsno1s[i,3]] <- N[1:3,N.obsno1s[i,4],N.obsno1s[i,3]]
N.ster[1:3,N.obsno1s[i,4],N.obsno1s[i,3]] <- N[1:3,N.obsno1s[i,4],N.obsno1s[i,3]]
N.cull[1:3,N.obsno1s[i,4],N.obsno1s[i,3]] <- N[1:3,N.obsno1s[i,4],N.obsno1s[i,3]]
}
#####
##### Forecasting and Model Experiments #####
#####
# `effect` is the residual contraceptive effectiveness one year after
# treatment; `treated.does` is supplied as data.
treated.fawns <- 0
effect <- 1-(1/3)*exp((-1/3)*1)
# NOTE(review): `length(N.obsno1s[,1])-39:length(N.obsno1s[,1])` parses as
# `n - (39:n)`, not `(n-39):n` -- confirm the intended forecast index range.
for (i in length(N.obsno1s[,1])-39:length(N.obsno1s[,1])){
#####
##### Average of 3 Year Effectiveness #####
#####
# Stage 4 = treated females; treated does leak back to stage 2 at rate
# (1-effect), and stage 4 retains treated plus re-treated untreated does.
f.adult.3yr[N.obsno1s[i,4],N.obsno1s[i,3]] <- min(5,exp(r-(r/beta)*denNpark.3yr[N.obsno1s[i,4]-1,N.obsno1s[i,3]]))
M.3yr[1,2,N.obsno1s[i,3],N.obsno1s[i,4]] <- s.adult.female[N.obsno1s[i,3]]*f.adult.3yr[N.obsno1s[i,4],N.obsno1s[i,3]]
M.3yr[2,1,N.obsno1s[i,3],N.obsno1s[i,4]] <- s.fawn[N.obsno1s[i,3]]*ratio*(1-treated.fawns)
M.3yr[2,2,N.obsno1s[i,3],N.obsno1s[i,4]] <- s.adult.female[N.obsno1s[i,3]]*(1-treated.does)
M.3yr[3,1,N.obsno1s[i,3],N.obsno1s[i,4]] <- s.fawn[N.obsno1s[i,3]]*(1-ratio)
M.3yr[3,3,N.obsno1s[i,3],N.obsno1s[i,4]] <- s.adult.male[N.obsno1s[i,3]]
M.3yr[4,1,N.obsno1s[i,3],N.obsno1s[i,4]] <- s.fawn[N.obsno1s[i,3]]*ratio*treated.fawns
M.3yr[4,2,N.obsno1s[i,3],N.obsno1s[i,4]] <- s.adult.female[N.obsno1s[i,3]]*treated.does
M.3yr[2,4,N.obsno1s[i,3],N.obsno1s[i,4]] <- s.adult.female[N.obsno1s[i,3]]*(1-effect)*(1-treated.does)
M.3yr[4,4,N.obsno1s[i,3],N.obsno1s[i,4]] <- s.adult.female[N.obsno1s[i,3]]*(treated.does)+s.adult.female[N.obsno1s[i,3]]*(1-treated.does)*effect
mu.3yr[,N.obsno1s[i,4],N.obsno1s[i,3]] <- M.3yr[,,N.obsno1s[i,3],N.obsno1s[i,4]]%*%N.3yr[,N.obsno1s[i,4]-1,N.obsno1s[i,3]]
N.3yr[1,N.obsno1s[i,4],N.obsno1s[i,3]] ~ dlnorm(log(max(1,mu.3yr[1,N.obsno1s[i,4],N.obsno1s[i,3]])),tau.p)
N.3yr[2,N.obsno1s[i,4],N.obsno1s[i,3]] ~ dlnorm(log(max(1,mu.3yr[2,N.obsno1s[i,4],N.obsno1s[i,3]])),tau.p)
N.3yr[3,N.obsno1s[i,4],N.obsno1s[i,3]] ~ dlnorm(log(max(1,mu.3yr[3,N.obsno1s[i,4],N.obsno1s[i,3]])),tau.p)
N.3yr[4,N.obsno1s[i,4],N.obsno1s[i,3]] ~ dlnorm(log(max(1,mu.3yr[4,N.obsno1s[i,4],N.obsno1s[i,3]])),tau.p)
sumNpark.3yr[N.obsno1s[i,4],N.obsno1s[i,3]] <- sum(N.3yr[1:4,N.obsno1s[i,4],N.obsno1s[i,3]])
denNpark.3yr[N.obsno1s[i,4],N.obsno1s[i,3]] <- sumNpark.3yr[N.obsno1s[i,4],N.obsno1s[i,3]]/area[N.obsno1s[i,3]]
#####
##### Average of 1 Year Effectiveness #####
#####
# As above, but treatment wears off fully after one year (no `effect` term).
f.adult.1yr[N.obsno1s[i,4],N.obsno1s[i,3]] <- min(5,exp(r-(r/beta)*denNpark.1yr[N.obsno1s[i,4]-1,N.obsno1s[i,3]]))
M.1yr[1,2,N.obsno1s[i,3],N.obsno1s[i,4]] <- s.adult.female[N.obsno1s[i,3]]*f.adult.1yr[N.obsno1s[i,4],N.obsno1s[i,3]]
M.1yr[2,1,N.obsno1s[i,3],N.obsno1s[i,4]] <- s.fawn[N.obsno1s[i,3]]*ratio*(1-treated.fawns)
M.1yr[2,2,N.obsno1s[i,3],N.obsno1s[i,4]] <- s.adult.female[N.obsno1s[i,3]]*(1-treated.does)
M.1yr[3,1,N.obsno1s[i,3],N.obsno1s[i,4]] <- s.fawn[N.obsno1s[i,3]]*(1-ratio)
M.1yr[3,3,N.obsno1s[i,3],N.obsno1s[i,4]] <- s.adult.male[N.obsno1s[i,3]]
M.1yr[2,4,N.obsno1s[i,3],N.obsno1s[i,4]] <- s.adult.female[N.obsno1s[i,3]]*(1-treated.does)
M.1yr[4,1,N.obsno1s[i,3],N.obsno1s[i,4]] <- s.fawn[N.obsno1s[i,3]]*ratio*treated.fawns
M.1yr[4,2,N.obsno1s[i,3],N.obsno1s[i,4]] <- s.adult.female[N.obsno1s[i,3]]*treated.does
M.1yr[4,4,N.obsno1s[i,3],N.obsno1s[i,4]] <- s.adult.female[N.obsno1s[i,3]]*treated.does
mu.1yr[,N.obsno1s[i,4],N.obsno1s[i,3]] <- M.1yr[,,N.obsno1s[i,3],N.obsno1s[i,4]]%*%N.1yr[,N.obsno1s[i,4]-1,N.obsno1s[i,3]]
N.1yr[1,N.obsno1s[i,4],N.obsno1s[i,3]] ~ dlnorm(log(max(1,mu.1yr[1,N.obsno1s[i,4],N.obsno1s[i,3]])),tau.p)
N.1yr[2,N.obsno1s[i,4],N.obsno1s[i,3]] ~ dlnorm(log(max(1,mu.1yr[2,N.obsno1s[i,4],N.obsno1s[i,3]])),tau.p)
N.1yr[3,N.obsno1s[i,4],N.obsno1s[i,3]] ~ dlnorm(log(max(1,mu.1yr[3,N.obsno1s[i,4],N.obsno1s[i,3]])),tau.p)
N.1yr[4,N.obsno1s[i,4],N.obsno1s[i,3]] ~ dlnorm(log(max(1,mu.1yr[4,N.obsno1s[i,4],N.obsno1s[i,3]])),tau.p)
sumNpark.1yr[N.obsno1s[i,4],N.obsno1s[i,3]] <- sum(N.1yr[1:4,N.obsno1s[i,4],N.obsno1s[i,3]])
denNpark.1yr[N.obsno1s[i,4],N.obsno1s[i,3]] <- sumNpark.1yr[N.obsno1s[i,4],N.obsno1s[i,3]]/area[N.obsno1s[i,3]]
#####
##### Sterilization #####
#####
# Sterilized does never return to stage 2 (M.ster[4,4] is survival only)
# and do not reproduce (fecundity scaled by 1-treated.does).
f.adult.ster[N.obsno1s[i,4],N.obsno1s[i,3]] <- min(5,exp(r-(r/beta)*denNpark.ster[N.obsno1s[i,4]-1,N.obsno1s[i,3]]))
M.ster[1,2,N.obsno1s[i,3],N.obsno1s[i,4]] <- s.adult.female[N.obsno1s[i,3]]*f.adult.ster[N.obsno1s[i,4],N.obsno1s[i,3]]*(1-treated.does)
M.ster[2,1,N.obsno1s[i,3],N.obsno1s[i,4]] <- s.fawn[N.obsno1s[i,3]]*ratio*(1-treated.fawns)
M.ster[2,2,N.obsno1s[i,3],N.obsno1s[i,4]] <- s.adult.female[N.obsno1s[i,3]]*(1-treated.does)
M.ster[3,1,N.obsno1s[i,3],N.obsno1s[i,4]] <- s.fawn[N.obsno1s[i,3]]*(1-ratio)
M.ster[3,3,N.obsno1s[i,3],N.obsno1s[i,4]] <- s.adult.male[N.obsno1s[i,3]]
M.ster[4,1,N.obsno1s[i,3],N.obsno1s[i,4]] <- s.fawn[N.obsno1s[i,3]]*ratio*treated.fawns
M.ster[4,2,N.obsno1s[i,3],N.obsno1s[i,4]] <- s.adult.female[N.obsno1s[i,3]]*treated.does
M.ster[4,4,N.obsno1s[i,3],N.obsno1s[i,4]] <- s.adult.female[N.obsno1s[i,3]]
mu.ster[,N.obsno1s[i,4],N.obsno1s[i,3]] <- M.ster[,,N.obsno1s[i,3],N.obsno1s[i,4]]%*%N.ster[,N.obsno1s[i,4]-1,N.obsno1s[i,3]]
N.ster[1,N.obsno1s[i,4],N.obsno1s[i,3]] ~ dlnorm(log(max(1,mu.ster[1,N.obsno1s[i,4],N.obsno1s[i,3]])),tau.p)
N.ster[2,N.obsno1s[i,4],N.obsno1s[i,3]] ~ dlnorm(log(max(1,mu.ster[2,N.obsno1s[i,4],N.obsno1s[i,3]])),tau.p)
N.ster[3,N.obsno1s[i,4],N.obsno1s[i,3]] ~ dlnorm(log(max(1,mu.ster[3,N.obsno1s[i,4],N.obsno1s[i,3]])),tau.p)
N.ster[4,N.obsno1s[i,4],N.obsno1s[i,3]] ~ dlnorm(log(max(1,mu.ster[4,N.obsno1s[i,4],N.obsno1s[i,3]])),tau.p)
sumNpark.ster[N.obsno1s[i,4],N.obsno1s[i,3]] <- sum(N.ster[1:4,N.obsno1s[i,4],N.obsno1s[i,3]])
denNpark.ster[N.obsno1s[i,4],N.obsno1s[i,3]] <- sumNpark.ster[N.obsno1s[i,4],N.obsno1s[i,3]]/area[N.obsno1s[i,3]]
#####
##### Culling #####
#####
# Treated does are removed: no stage-4 rows, totals use stages 1:3 only.
f.adult.cull[N.obsno1s[i,4],N.obsno1s[i,3]] <- min(5,exp(r-(r/beta)*denNpark.cull[N.obsno1s[i,4]-1,N.obsno1s[i,3]]))
M.cull[1,2,N.obsno1s[i,3],N.obsno1s[i,4]] <- s.adult.female[N.obsno1s[i,3]]*f.adult.cull[N.obsno1s[i,4],N.obsno1s[i,3]]*(1-treated.does)
M.cull[2,1,N.obsno1s[i,3],N.obsno1s[i,4]] <- s.fawn[N.obsno1s[i,3]]*ratio*(1-treated.fawns)
M.cull[2,2,N.obsno1s[i,3],N.obsno1s[i,4]] <- s.adult.female[N.obsno1s[i,3]]*(1-treated.does)
M.cull[3,1,N.obsno1s[i,3],N.obsno1s[i,4]] <- s.fawn[N.obsno1s[i,3]]*(1-ratio)
M.cull[3,3,N.obsno1s[i,3],N.obsno1s[i,4]] <- s.adult.male[N.obsno1s[i,3]]
mu.cull[,N.obsno1s[i,4],N.obsno1s[i,3]] <- M.cull[,,N.obsno1s[i,3],N.obsno1s[i,4]]%*%N.cull[,N.obsno1s[i,4]-1,N.obsno1s[i,3]]
N.cull[1,N.obsno1s[i,4],N.obsno1s[i,3]] ~ dlnorm(log(max(1,mu.cull[1,N.obsno1s[i,4],N.obsno1s[i,3]])),tau.p)
N.cull[2,N.obsno1s[i,4],N.obsno1s[i,3]] ~ dlnorm(log(max(1,mu.cull[2,N.obsno1s[i,4],N.obsno1s[i,3]])),tau.p)
N.cull[3,N.obsno1s[i,4],N.obsno1s[i,3]] ~ dlnorm(log(max(1,mu.cull[3,N.obsno1s[i,4],N.obsno1s[i,3]])),tau.p)
sumNpark.cull[N.obsno1s[i,4],N.obsno1s[i,3]] <- sum(N.cull[1:3,N.obsno1s[i,4],N.obsno1s[i,3]])
denNpark.cull[N.obsno1s[i,4],N.obsno1s[i,3]] <- sumNpark.cull[N.obsno1s[i,4],N.obsno1s[i,3]]/area[N.obsno1s[i,3]]
}
# Across-park mean densities per year for each experiment.
for(i in 2:T){
sumN.3yr[1,i] <-mean(denNpark.3yr[i,])
sumN.1yr[1,i] <-mean(denNpark.1yr[i,])
sumN.ster[1,i] <-mean(denNpark.ster[i,])
sumN.cull[1,i] <-mean(denNpark.cull[i,])
}
#####
##### Data Model #####
#####
# Density observations (N.obs[,5]) are normal around latent density with
# observation sd N.obs[,8]; N.obs.new are replicated data for the PPC below.
for (i in 1:length(N.obs[,1])){
density[N.obs[i,4],N.obs[i,3]] <- sumNpark[N.obs[i,4],N.obs[i,3]]/area[N.obs[i,3]]
N.obs[i,5] ~ dnorm(density[N.obs[i,4],N.obs[i,3]],1/(N.obs[i,8])^2)T(0,500)
N.obs.new[i] ~ dnorm(density[N.obs[i,4],N.obs[i,3]],1/(N.obs[i,8])^2)T(0,500)
c[1,N.obs[i,4],N.obs[i,3]] <- N[1,N.obs[i,4],N.obs[i,3]]/sum(N[,N.obs[i,4],N.obs[i,3]])
c[2,N.obs[i,4],N.obs[i,3]] <- N[2,N.obs[i,4],N.obs[i,3]]/sum(N[,N.obs[i,4],N.obs[i,3]])
c[3,N.obs[i,4],N.obs[i,3]] <- N[3,N.obs[i,4],N.obs[i,3]]/sum(N[,N.obs[i,4],N.obs[i,3]])
}
# Stage-composition counts follow a multinomial on the latent proportions.
for(i in 1:length(y.alpha[,1])){
y.alpha[i,4:6] ~ dmulti(c[1:3,y.alpha[i,2],y.alpha[i,1]],y.alpha[i,8])
}
#####
###### Posterior Predictive Checks #####
######
# Bayesian p-value from squared discrepancies of observed vs replicated data.
for(i in 1:length(N.obs[,1])){
sq[i] <- (N.obs[i,5] - sumNpark[N.obs[i,4],N.obs[i,3]] / area[N.obs[i,3]])^2
sq.new[i] <- (N.obs.new[i] - sumNpark[N.obs[i,4],N.obs[i,3]] / area[N.obs[i,3]])^2
}
fit <- sum(sq[])
fit.new <- sum(sq.new[])
pvalue.fit <- step(fit.new-fit)
# Derived density-independent projection matrices (density = 0) used to
# compute asymptotic growth rates for each management scenario.
f.adult.lambda <- min(5,exp(r-(r/beta)*0))/2
M.lambda.cull[1,2] <- mean.adult.female*f.adult.lambda*(1-treated.does)
M.lambda.cull[2,1] <- mean.fawn*ratio
M.lambda.cull[2,2] <- mean.adult.female*(1-treated.does)
M.lambda.ster[1,2] <- mean.adult.female*f.adult.lambda*(1-treated.does)
M.lambda.ster[2,1] <- mean.fawn*ratio
M.lambda.ster[2,2] <- mean.adult.female*(1-treated.does)
M.lambda.ster[3,2] <- mean.adult.female*treated.does
M.lambda.ster[3,3] <- mean.adult.female
M.lambda.1yr[1,2] <- mean.adult.female*f.adult.lambda
M.lambda.1yr[2,1] <- mean.fawn*ratio
M.lambda.1yr[2,2] <- mean.adult.female*(1-treated.does)
M.lambda.1yr[2,3] <- mean.adult.female*(1-treated.does)
M.lambda.1yr[3,2] <- mean.adult.female*treated.does
M.lambda.1yr[3,3] <- mean.adult.female*treated.does
M.lambda.3yr[1,2] <- mean.adult.female*f.adult.lambda
M.lambda.3yr[2,1] <- mean.fawn*ratio
M.lambda.3yr[2,2] <- mean.adult.female*(1-treated.does)
M.lambda.3yr[2,3] <- mean.adult.female*(1-treated.does)*(1-effect)
M.lambda.3yr[3,2] <- mean.adult.female*treated.does
M.lambda.3yr[3,3] <- mean.adult.female*treated.does+mean.adult.female*(1-treated.does)*effect
}
|
80ffc1e6362dc27297fb3b2c0803c5b9dc673fd4
|
2c611b88d56740148087f6f7e76205ff11b1db20
|
/cachematrix_test.R
|
025bfb3a3b5bbce55c74891def8f5878b52cc120
|
[] |
no_license
|
paulcautereels/ProgrammingAssignment2
|
11f4fb0b646b7d8dfb58ad15c9c7d3b788418f02
|
35576b6538c344a8112b640b85a6d3c18e24a406
|
refs/heads/master
| 2021-01-15T10:35:49.963213
| 2014-11-21T18:16:52
| 2014-11-21T18:16:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 481
|
r
|
cachematrix_test.R
|
source("cachematrix.R")

# Two ordinary test matrices (both invertible).
mat.2x2 <- matrix(c(1, 2, 3, 4), nrow = 2, ncol = 2)
mat.3x3 <- matrix(c(2, 3, 2, 3, 2, 2, 2, 2, 3), nrow = 3, ncol = 3)

# Wrap each matrix in its caching counterpart.
cached.2x2 <- makeCacheMatrix(mat.2x2)
cached.3x3 <- makeCacheMatrix(mat.3x3)

# Invert each cached matrix twice, alternating between them:
# the first pass computes and stores each inverse,
# the second pass returns the cached results.
for (cached in list(cached.2x2, cached.3x3, cached.2x2, cached.3x3)) {
  print(cacheSolve(cached))
}
|
719e8317d456eb2773797b13daba6fa9f9f8698a
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/PlayerRatings/examples/hist.rating.Rd.R
|
cc5a0620642e2579c9d74f1c2533ebbec829177a
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 378
|
r
|
hist.rating.Rd.R
|
# Example script for PlayerRatings::hist.rating (extracted from package docs).
library(PlayerRatings)
### Name: hist.rating
### Title: Histogram Plotting for a Rating Object
### Aliases: hist.rating
### Keywords: hplot

### ** Examples

# Fit Stephenson ratings to the AFL odds data, then plot a histogram of
# the final ratings over the given rating range.
afl <- aflodds[,c(2,3,4,7)]
robj <- steph(afl)
hist(robj, xlim = c(1900,2500), density=TRUE)

# Refit keeping the full rating history, then plot histograms per period.
afl <- aflodds[,c(2,3,4,7)]
robj <- steph(afl, history=TRUE)
hist(robj, history=TRUE, xlim = c(1900,2500), density=TRUE)
|
a73bb3dae5a7c4bb19f154440b11893e30a1a5af
|
8706b2ca975bd6c9885a1fef05fa0985870f1573
|
/R/CACEcov.R
|
4d9d0dd628e924227b576ad7deba22636fb3f8c7
|
[] |
no_license
|
kosukeimai/experiment
|
b453551758798c1155110ad62dd96dffe343f009
|
277bff9e30b9af86a163cd24ee27aa94a73f4389
|
refs/heads/master
| 2022-05-01T03:38:52.934007
| 2022-04-08T12:22:48
| 2022-04-08T12:22:48
| 90,753,473
| 13
| 9
| null | 2018-08-30T01:09:57
| 2017-05-09T14:13:13
|
C
|
UTF-8
|
R
| false
| false
| 2,292
|
r
|
CACEcov.R
|
###
### Calculate the CACE with covariates and optional clustering using
### two-stage least squares
###
## Arguments (looked up unevaluated in `data` via match.call):
##   Y      - outcome vector
##   D      - treatment received (endogenous regressor)
##   Z      - treatment assigned (the instrument)
##   X      - one-sided formula of covariates
##   grp    - optional cluster identifier for cluster-robust variance
##   robust - heteroskedasticity-robust (sandwich) variance if TRUE
##   fast   - vectorised (memory-hungry) variance computation if TRUE
## Returns list(est = CACE point estimate, var = its estimated variance).
CACEcov <- function(Y, D, Z, X, grp = NULL, data = parent.frame(),
                    robust = FALSE, fast = TRUE){
  call <- match.call()
  Y <- matrix(eval(call$Y, data), ncol = 1)
  N <- nrow(Y)
  D <- matrix(eval(call$D, data), ncol = 1)
  X <- model.matrix(X, data = data)
  # Instrument matrix: covariates plus the assignment indicator.
  Z <- cbind(X, matrix(eval(call$Z, data), nrow = N))
  # Regressor matrix: covariates plus the endogenous treatment received.
  X <- cbind(X, D)
  grp <- eval(call$grp, data)
  if (!is.null(grp)) {
    # Sort everything by cluster so cluster blocks are contiguous below.
    sgrp <- sort(grp, index.return = TRUE)
    grp <- grp[sgrp$ix]
    X <- X[sgrp$ix,]
    Z <- Z[sgrp$ix,]
    D <- D[sgrp$ix,]
    Y <- Y[sgrp$ix,]
  }
  # First stage: project D onto the instruments.
  dhat <- fitted(tmp <- lm(D ~ -1 + Z))
  # Second stage: replace D by its fitted values; the coefficient on dhat
  # (last element of beta) is the CACE.
  beta <- coef(lm(Y ~ -1 + X[,-ncol(X)] + dhat))
  ZZinv <- (vcov(tmp)/(summary(tmp)$sigma^2))  # recovers (Z'Z)^{-1}
  XZ <- t(X) %*% Z
  XPzXinv <- solve(XZ %*% ZZinv %*% t(XZ))
  epsilon <- c(Y - X %*% beta)  # 2SLS residuals, using observed D
  est <- beta[length(beta)]
  if (is.null(grp)) {
    if (robust) {
      # Heteroskedasticity-robust sandwich variance.
      if (fast)
        ZOmegaZ <- t(Z) %*% diag(epsilon^2) %*% Z
      else {
        # Memory-light alternative: accumulate one observation at a time.
        ZOmegaZ <- matrix(0, ncol = ncol(Z), nrow = ncol(Z))
        for (i in 1:nrow(Z))
          ZOmegaZ <- ZOmegaZ + crossprod(matrix(Z[i,], nrow = 1)) * epsilon[i]^2
      }
      var <- XPzXinv %*% XZ %*% ZZinv
      var <- var %*% ZOmegaZ %*% t(var)
    } else {
      # Homoskedastic 2SLS variance.
      sig2 <- c(crossprod(epsilon))/ N
      var <- sig2 * XPzXinv
    }
  } else {
    # Cluster-robust variance: sum Z'ee'Z within each cluster block.
    n.grp <- length(unique(grp))
    if (fast) {
      # Build the full block-diagonal N x N Omega in one pass.
      Omega <- matrix(0, ncol = N, nrow = N)
      counter <- 1
      for (i in 1:n.grp) {
        n.grp.obs <- sum(grp == unique(grp)[i])
        Omega[counter:(counter+n.grp.obs-1),counter:(counter+n.grp.obs-1)] <-
          epsilon[grp == unique(grp)[i]] %*% t(epsilon[grp == unique(grp)[i]])
        counter <- counter + n.grp.obs
      }
      ZOmegaZ <- t(Z) %*% Omega %*% Z
    } else {
      # Memory-light alternative: accumulate cluster by cluster.
      ZOmegaZ <- matrix(0, ncol = ncol(Z), nrow = ncol(Z))
      for (i in 1:n.grp) {
        ZOmegaZ <- ZOmegaZ + t(Z[grp == unique(grp)[i],]) %*%
          (epsilon[grp == unique(grp)[i]] %*% t(epsilon[grp == unique(grp)[i]])) %*%
          Z[grp == unique(grp)[i],]
      }
    }
    var <- XPzXinv %*% XZ %*% ZZinv
    var <- var %*% ZOmegaZ %*% t(var)
  }
  names(est) <- "CACE"
  # Return the CACE estimate and the variance of its coefficient
  # (bottom-right element of the full variance matrix).
  return(list(est = est, var = var[nrow(var),ncol(var)]))
}
|
427fe1b9cd8f8b3af1be040496061178bc8d5ca0
|
a3a95b468a64492be0d6067a14b350b4d281a123
|
/man/EQ5Dmap_table5.df.Rd
|
b07153e89c02234d3bcaa894884c031440b79753
|
[] |
no_license
|
sheejamk/valueEQ5D
|
25555943195782d2b1f1ee6edbb16afe6a9edfb7
|
24a8cba859996fd8c61b38924c4362702c0dc1a8
|
refs/heads/master
| 2022-10-19T23:07:01.287073
| 2022-10-03T20:20:57
| 2022-10-03T20:20:57
| 197,168,794
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 398
|
rd
|
EQ5Dmap_table5.df.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{EQ5Dmap_table5.df}
\alias{EQ5Dmap_table5.df}
\title{EQ-5D-5L mapping 5L to 3L values}
\format{
A 31250 by 21 dataframe
}
\source{
Hernandez Alava et al (2017) <doi.org/10.1016/j.jval.2016.11.006>
}
\usage{
EQ5Dmap_table5.df
}
\description{
EQ-5D-5L mapping 5L to 3L values
}
\keyword{datasets}
|
d22228b2afac1a4141b1aaceba40ceee67fdfce0
|
41615de6999b278c76ff3a60db4af0d4dd7c7861
|
/ecozoneBuffer.R
|
1db09b3dc75b9440c6fdef0096f6a8ead0b0f379
|
[] |
no_license
|
dcyr/Montmorency-Hereford
|
b33b744e2b9017d4c085bbb546ae9061eb2c1257
|
003298b165dee1775753f821c95ad548835f6a49
|
refs/heads/master
| 2022-07-17T18:11:25.144495
| 2022-06-22T15:08:02
| 2022-06-22T15:08:02
| 206,645,965
| 0
| 0
| null | null | null | null |
WINDOWS-1250
|
R
| false
| false
| 2,523
|
r
|
ecozoneBuffer.R
|
################################################################################
################################################################################
### Some code to produce an area extent within which we fetch NFI plots for
### study area calibration
### Dominic Cyr
#############
rm(list = ls())
home <- path.expand("~")
home <- gsub("/Documents", "", home) # necessary on my Windows machine
setwd(paste(home, "Sync/Travail/ECCC/Landis-II/Montmorency-Hereford", sep ="/"))
# Work in a dated subdirectory so each run's outputs are kept separate.
wwd <- paste(getwd(), Sys.Date(), sep = "/")
dir.create(wwd)
setwd(wwd)
rm(wwd)
require(raster)
require(rgeos)
require(dplyr)
# Bug fix: spTransform() and writeOGR() below come from rgdal, which was
# never loaded, so the shapefile export at the end of the loop errored.
require(rgdal)
areas <- "ForMont" #c("Hereford", "Maskinonge")
bufferMeters <- 200000
### input path (LANDIS)
inputPathGIS <- paste(home, "Sync/Travail/ECCC/GIS", sep = "/")
inputPathLandis <- "../inputsLandis"
source("../scripts/gdal_polygonizeR.R")
ecoregions <- get(load(paste(inputPathGIS, "CanEcologicalFramework/ecoregions.RData", sep = "/")))
for (a in areas) {
  # Load the study-area landtypes raster and reproject the ecoregions to it.
  landtypes <- raster(paste0(inputPathLandis, "/landtypes_", a, ".tif"))
  eco <- spTransform(ecoregions, CRSobj = crs(landtypes))
  ## Extend the raster so the buffer fits, then buffer the study area.
  r <- extend(landtypes, extent(landtypes)+(bufferMeters*2))
  b <- r
  b <- buffer(b, width = bufferMeters,
              doEdge=T)
  # create polygon from buffer
  ############################################################################
  ## extract ecoregions that intersect study area
  ## rasterize polygons (long...)
  ecoC <- crop(eco, landtypes)
  ecoL <- rasterize(ecoC, landtypes)
  ecoL[is.na(landtypes)] <- NA
  # Identify the ecoregion codes actually present inside the study area.
  RAT <- levels(ecoL)[[1]]
  vals <- unique(RAT[match(values(ecoL), RAT$ID), "ECOREGION"])
  vals <- vals[!is.na(vals)]
  ## Rasterize the ecoregions over the buffered extent.
  ecoC <- crop(eco, b)
  ecoB <- rasterize(ecoC, b)
  ecoB[is.na(b)] <- NA
  ## remove ecoregions that do not intersect the study area from the raster
  index <- which(levels(ecoB)[[1]][,"ECOREGION"] %in% vals)
  ecoB[!(ecoB %in% index)] <- NA
  # plot(ecoB)
  # plot(is.na(ecoB))
  # Polygonize the retained buffer ecoregions and attach region names.
  lvls <- levels(ecoB)[[1]]
  bPoly <- gdal_polygonizeR(ecoB)
  bPoly <- merge(bPoly, lvls[,c("ID", "REGION_NAM", "REGION_NOM")],
                 by.x = "DN", by.y = "ID")
  # Save both as an .RData object and as an ESRI Shapefile.
  save(bPoly, file = paste0("ecoregion_Buffer", bufferMeters/1000, "km_", a, ".RData"))
  writeOGR(bPoly, ".", paste0("ecoregion_Buffer", bufferMeters/1000, "km_", a),
           driver="ESRI Shapefile", overwrite_layer = T)
}
|
46e2e99c72dfd23a7d0f43ff8adbe5aa3a3a9fd9
|
870a947827505133bc5c4ea99d7374da9296ad5e
|
/SimpleXgbScript.R
|
0fcac4c1646667ac360e50bb8758d9c7902b6f43
|
[] |
no_license
|
Frin63/Kaggle-San-Francisco-Crime-Classification
|
fb4c8578eefba361e6579289d287347cfefb930f
|
9c34abf01111ef97d72744e0b19e3a075e6b83e3
|
refs/heads/master
| 2021-01-15T22:47:31.033360
| 2017-08-10T11:49:28
| 2017-08-10T11:49:28
| 99,917,051
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 767
|
r
|
SimpleXgbScript.R
|
# Minimal xgboost walk-through on the bundled mushroom (agaricus) data:
# fit a 2-round boosted tree classifier, predict, and compute the error rate.
library(xgboost)
data("agaricus.train", package = 'xgboost')
data("agaricus.test", package = 'xgboost')
train <- agaricus.train
test <- agaricus.test
# Inspect the inputs: a sparse feature matrix plus a 0/1 label vector.
str(train)
dim(train$data)
dim(test$data)
class(train$data)[1]
class(train$label)
# Fit directly on the sparse matrix.
bstSpars <- xgboost(data = train$data, label = train$label, max.depth = 2, eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic")
# alternative: wrap the data in an xgb.DMatrix first
dtrain <- xgb.DMatrix(data = train$data, label=train$label)
bstDmatrix <- xgboost(data = dtrain, max.depth = 2, eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic", verbose = 2)
# predict probabilities on the test set, then threshold at 0.5
pred <- predict(bstDmatrix, test$data)
head(pred)
prediction <- as.numeric(pred > 0.5)
head(prediction)
# Measuring model performance: misclassification rate on the test labels
err <- mean(prediction != test$label)
1d04ef94f32a5ec64f4f4780f20bdaa1151681e9
|
554eed183608577ff1116d5e4f16d40b1143fbae
|
/Plot3.R
|
b55ea721f6140af4058d08741f1c2f4947b93ef0
|
[] |
no_license
|
Oli2/Exploratory-Data-Analysis
|
54e34eff38c204fcc36384e904afc540ee883dba
|
725484505f73488ff990b774965f74f1796f5641
|
refs/heads/master
| 2016-08-12T15:29:17.180457
| 2016-02-06T11:07:11
| 2016-02-06T11:07:11
| 51,164,456
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,342
|
r
|
Plot3.R
|
# Plot 3: energy sub-metering for 2007-02-01 and 2007-02-02, saved as PNG.
#first add the part which will download and unzip the file in the working
# directory
library(dplyr)
library(lubridate)
url <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
#creating the temp file which will be used to download and unzip the url
temp <- tempfile()
download.file(url, temp)
file <- unzip(temp)
unlink(temp)
df <-read.csv("./household_power_consumption.txt", sep = ";", stringsAsFactors = FALSE)
df1<- tbl_df(df)
# Combine date and time into a single timestamp column, then drop Time.
# NOTE(review): df$Time is used here rather than df1$Time; both hold the
# same values since df1 is just a tbl_df wrapper around df.
df1$Date<- paste(df1$Date, df$Time, sep = ' ')
df1<- select(df1, -Time)
df1$Date <- dmy_hms(df1$Date)
# Keep only the two target days (the string bounds are coerced to dates).
df2 <- filter(df1, Date >= "2007-02-01" & Date < "2007-02-03")
df2$Global_active_power <- as.numeric(df2$Global_active_power)
df2$Sub_metering_1 <- as.numeric(df2$Sub_metering_1)
df2$Sub_metering_2 <- as.numeric(df2$Sub_metering_2)
df2$Sub_metering_3 <- as.numeric(df2$Sub_metering_3)
# Draw the three sub-metering series on one plot and write it to Plot3.png.
png(filename = "Plot3.png", height = 480, width = 480, units = "px", pointsize = 12)
plot(df2$Date, df2$Sub_metering_1, type = "l", xlab = '', ylab = "Enegry sub metering")
points(df2$Date, df2$Sub_metering_2, type = "l", col="red")
points(df2$Date, df2$Sub_metering_3, type = "l", col="blue")
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),cex = 0.5,lty = c(1,1,1), lwd = c(2.5,2.5,2.5), col = c("black", "red", "blue"))
dev.off()
|
0204a4f6ca253d0fbb124044570c8316cdd48400
|
a495b873b0c82b31b5a75ca3547c9febf3af5ddc
|
/array/DNAm/analysis/methodsDevelopment/compareNormalisation.r
|
8efff36fe009faf50a85948bf15a4f3db1540481
|
[
"Artistic-2.0"
] |
permissive
|
ejh243/BrainFANS
|
192beb02d8aecb7b6c0dc0e59c6d6cf679dd9c0e
|
5d1d6113b90ec85f2743b32a3cc9a428bd797440
|
refs/heads/master
| 2023-06-21T17:42:13.002737
| 2023-06-20T12:35:38
| 2023-06-20T12:35:38
| 186,389,634
| 3
| 2
|
Artistic-2.0
| 2022-02-08T21:45:47
| 2019-05-13T09:35:32
|
R
|
UTF-8
|
R
| false
| false
| 4,339
|
r
|
compareNormalisation.r
|
##---------------------------------------------------------------------#
##
## Title: Comparison of normalisation strategies
##
## Purpose of script: compare effects of normalisation across all samples and within cell type.
##
## Author: Eilis Hannon
##
## Date Created: 2020
##
##---------------------------------------------------------------------#

#----------------------------------------------------------------------#
# NOTES
#----------------------------------------------------------------------#
# uses wateRmelon metrics to compare normalised datasets
#----------------------------------------------------------------------#
# DEFINE PARAMETERS
#----------------------------------------------------------------------#
args <- commandArgs(trailingOnly = TRUE)
dataDir <- args[1]

gdsFile <- file.path(dataDir, "2_gds/raw.gds")
normgdsFile <- sub("\\.gds", "Norm.gds", gdsFile)
resPath <- file.path(dataDir, "4_analysis/methodsDevelopment/")

#----------------------------------------------------------------------#
# LOAD PACKAGES
#----------------------------------------------------------------------#
library(bigmelon)
library(RColorBrewer)  # brewer.pal() is used below but was never loaded explicitly

#----------------------------------------------------------------------#
# IMPORT DATA
#----------------------------------------------------------------------#
setwd(dataDir)
gfile <- openfn.gds(normgdsFile, readonly = FALSE)
normbetas <- index.gdsn(gfile, "normbeta")[,]
celltypeNormbeta <- index.gdsn(gfile, "celltypenormbeta")[,]
rawbetas <- betas(gfile)[,]
# FIX: the probe annotation must be read while the gds file is still open;
# the original called fData(gfile) after closefn.gds(gfile).
probeAnno <- fData(gfile)
closefn.gds(gfile)

## remove NAs
rawbetas <- na.omit(rawbetas)
normbetas <- na.omit(normbetas)
celltypeNormbeta <- na.omit(celltypeNormbeta)

# FIX: the original referenced an undefined `qcOutFolder`; `resPath` is the
# output folder defined above and is used consistently below.
# NOTE(review): confirm this matches the pipeline's folder layout.
QCmetrics <- read.csv(paste0(resPath, "QCMetricsPostCellTypeClustering.csv"), stringsAsFactors = FALSE)
QCmetrics <- QCmetrics[match(colnames(rawbetas), QCmetrics$Basename),]

cellTypes <- sort(unique(QCmetrics$Cell.type))
cellCols <- brewer.pal(n = length(cellTypes), name = "Paired")

#----------------------------------------------------------------------#
# CALCULATE NORMALISATION METRICS
#----------------------------------------------------------------------#
# Rows = dataset (raw / normalised together / normalised per cell type);
# columns = wateRmelon performance metrics.
matOut <- matrix(data = NA, nrow = 3, ncol = 3)
rownames(matOut) <- c("raw", "normTog", "normSep")
colnames(matOut) <- c("iDMR", "genki", "seabi")

# genki needs the rs (SNP) probes; they may have been filtered out upstream.
if(length(grep("rs", rownames(rawbetas))) > 0){
  matOut[1,2] <- mean(genki(rawbetas))
}
if(length(grep("rs", rownames(normbetas))) > 0){
  matOut[2,2] <- mean(genki(normbetas))
}
if(length(grep("rs", rownames(celltypeNormbeta))) > 0){
  matOut[3,2] <- mean(genki(celltypeNormbeta))
}

## filter to common probes
probes <- intersect(intersect(rownames(rawbetas), rownames(normbetas)), rownames(celltypeNormbeta))
rawbetas <- rawbetas[probes,]
normbetas <- normbetas[probes,]
celltypeNormbeta <- celltypeNormbeta[probes,]

probeAnno <- probeAnno[probes,]
x.probes <- probeAnno$chr == "chrX"

idmr <- intersect(iDMR(), rownames(rawbetas))
matOut[1,1] <- dmrse_row(rawbetas, idmr)
matOut[2,1] <- dmrse_row(normbetas, idmr)
matOut[3,1] <- dmrse_row(celltypeNormbeta, idmr)

matOut[1,3] <- seabi(rawbetas, sex = QCmetrics$Sex, X = x.probes)
matOut[2,3] <- seabi(normbetas, sex = QCmetrics$Sex, X = x.probes)
matOut[3,3] <- seabi(celltypeNormbeta, sex = QCmetrics$Sex, X = x.probes)

write.csv(matOut, paste0(resPath, "CompareNormalisationStrategies.csv"))

pheno <- QCmetrics  # kept from the original script; not referenced again below

#----------------------------------------------------------------------#
# CALCULATE SAMPLE LEVEL NORMALISATION METRIC
#----------------------------------------------------------------------#
qualDat.tog <- qual(rawbetas, normbetas)
qualDat.sep <- qual(rawbetas, celltypeNormbeta)

## look at normalisation effects
pdf(paste0(resPath, "CompareNormalisationStrategies.pdf"), height = 6, width = 10)
par(mfrow = c(1,2))
boxplot(qualDat.tog$rmsd ~ QCmetrics$Cell.type, ylab = "root mean square error", main = "Normalised together", xlab = "Cell type", col = cellCols)
boxplot(qualDat.sep$rmsd ~ QCmetrics$Cell.type, ylab = "root mean square error", main = "Normalised separately", xlab = "Cell type", col = cellCols)
## look at distribution of beta values
densityPlot(normbetas, sampGroups = QCmetrics$Cell.type, pal = cellCols)
densityPlot(celltypeNormbeta, sampGroups = QCmetrics$Cell.type, pal = cellCols)
dev.off()
|
8f56ef78078f2dcef93818f4262890f70e2b6ac7
|
85e7b5740b9d931f040cbb61d9c338b4d27cab1a
|
/MLR_R.R
|
c7a5553424c30af82493971260392eea0be9522d
|
[] |
no_license
|
keyur-gohel/StartUp-Profit-Prediction-
|
c6150fd3c9b3df284c8c1d3ed607c7227364ca55
|
a541d991a0f6fd469ee83a766b7fd422ea25dcf6
|
refs/heads/master
| 2023-07-17T15:29:23.745014
| 2021-08-17T09:50:38
| 2021-08-17T09:50:38
| 397,197,372
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,410
|
r
|
MLR_R.R
|
# Multiple Linear Regression: predict startup Profit from spending and State.

# ---- Data preprocessing ----
dataset <- read.csv('50_Startups.csv')

# Encode the categorical State column as a factor with numeric labels.
dataset$State <- factor(dataset$State,
                        levels = c('New York', 'California', 'Florida'),
                        labels = c(1, 2, 3))

# ---- Train/test split (80/20, reproducible) ----
library(caTools)
set.seed(123)
split <- sample.split(dataset$Profit, SplitRatio = 0.8)
training_set <- subset(dataset, split == TRUE)
test_set <- subset(dataset, split == FALSE)

# ---- Fit the full model on the training set ----
regressor <- lm(formula = Profit ~ .,
                data = training_set)

# ---- Predict on the held-out set ----
y_pred <- predict(regressor, newdata = test_set)

# ---- Backward elimination: refit on the full data, dropping the least
# significant predictor at each step and inspecting the summary ----
regressor_be <- lm(formula = Profit ~ R.D.Spend + Administration + Marketing.Spend + State,
                   data = dataset)
summary(regressor_be)

regressor_be <- lm(formula = Profit ~ R.D.Spend + Administration + Marketing.Spend,
                   data = dataset)
summary(regressor_be)

regressor_be <- lm(formula = Profit ~ R.D.Spend + Marketing.Spend,
                   data = dataset)
summary(regressor_be)

regressor_be <- lm(formula = Profit ~ R.D.Spend,
                   data = dataset)
summary(regressor_be)
|
ea00acb0ac2f2775c2bc34c06036514dea610e86
|
81065561fcf172679aa3c04c727721050d32724f
|
/proc/treat-alerts-from-bestfscorers.R
|
1eb7bf2046298d7b34529a2b62eeed2a5ca44b04
|
[
"MIT"
] |
permissive
|
cdc08x/automated-flight-diversion-detection
|
018f9c838c43a565210d92d92e5bbd45529c55bc
|
4c6f2f208e4f54492905f6f550c0c37b4635d360
|
refs/heads/master
| 2023-07-14T21:52:45.734583
| 2021-08-18T14:32:23
| 2021-08-18T14:32:23
| 200,355,999
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,962
|
r
|
treat-alerts-from-bestfscorers.R
|
# Evaluate diversion alerts raised by the best F-scoring models: how much
# warning each alert gives relative to (a) the actual landing time and
# (b) the expected landing time (first event + planned flight time).

# Format a signed duration in seconds as "[-]DD:HH:MM:SS".
# Replaces eight copy-pasted sprintf() blocks in the original script.
format_dhms <- function(seconds) {
  sprintf("%s%02d:%02d:%02d:%02d",
          ifelse(seconds < 0, "-", ""),    # sign
          abs(seconds) %/% 86400,          # days
          abs(seconds) %% 86400 %/% 3600,  # hours
          abs(seconds) %% 3600 %/% 60,     # minutes
          abs(seconds) %% 60 %/% 1)        # seconds
}

data <- read.csv2(
  file = "../etc/alerts-of-bestfscorers-on-noks.csv",
  sep = ";")

# EU flights are identified by a purely numeric filename (track id),
# US flights by the flightId column.
data$filenameNoExtension <- gsub('.xml', '', data$filename)
data$flightArea <- ifelse(grepl(pattern = "^[0-9][0-9]*$", x = data$filenameNoExtension), "EU", "US")
data$realFlightId <- ifelse(data$flightArea == "EU", data$filenameNoExtension, data$flightId)

data[c("filename", "flightId", "realFlightId", "flightArea")]

data$firstEventDateTime <- as.POSIXct(as.character(data$firstEventDateTime))
data$lastEventDateTime <- as.POSIXct(as.character(data$lastEventDateTime))
data$predictionDateTime <- as.POSIXct(as.character(data$predictionDateTime))

# FIX: force seconds explicitly -- a bare POSIXct difference picks its units
# automatically (secs/mins/hours/days depending on magnitude), which would
# silently break the day/hour/minute arithmetic in format_dhms().
data$timeSaved <- as.numeric(difftime(data$lastEventDateTime, data$predictionDateTime, units = "secs"))
data$timeSavedHhMmSs <- format_dhms(data$timeSaved)
# data[1:20,c("firstEventDateTime","lastEventDateTime","predictionDateTime","timeSavedHhMmSs","timeSaved")]

print("Time saved w.r.t. the actual landing")
meanTimeSaved <- mean(data$timeSaved)
medianTimeSaved <- median(data$timeSaved)
minTimeSaved <- min(data$timeSaved)
maxTimeSaved <- max(data$timeSaved)
sprintf("Average: %s", format_dhms(meanTimeSaved))
sprintf("Median: %s", format_dhms(medianTimeSaved))
sprintf("Minimum: %s", format_dhms(minTimeSaved))
sprintf("Maximum: %s", format_dhms(maxTimeSaved))

# print("Alerts")
# data[c("filename","flightId","realFlightId","departureCode","arrivalCode","predictionDateTime","timeSaved","timeSavedHhMmSs")]
# print("Predicted diversions' filenames")
# data$filename
# print("Predicted diversions' flightIds")
# data$flightId

flightCodesForAlertsEUFile <- "../flights/flightCodes-forAlerts-EU.csv"
# print("Queries for diverted flightIds")
# paste("SELECT trackId, flightNumber, origin, destination FROM FlightEvents WHERE trackId in ('", paste( data[data$flightArea == "EU", c("realFlightId")], collapse="','" ), "') GROUP BY trackId, flightNumber", ";", sep="")

# Planned flight times (HH:MM:SS) per area, parsed as difftimes in minutes.
plannedFlightTimesUS <- read.csv2(
  file = "../flights/plannedfFlightTimes-forAlerts-US.csv",
  sep = ","
)
plannedFlightTimesUS$flightTime <- as.difftime(as.character(plannedFlightTimesUS$flightTime), format = "%H:%M:%S", units = "mins")

plannedFlightTimesEU <- read.csv2(
  file = "../flights/plannedfFlightTimes-forAlerts-EU.csv",
  sep = ","
)
plannedFlightTimesEU$flightTime <- as.difftime(as.character(plannedFlightTimesEU$flightTime), format = "%H:%M:%S", units = "mins")
plannedFlightTimesEU$stopFlightTime <- as.difftime(as.character(plannedFlightTimesEU$stopFlightTime), format = "%H:%M:%S", units = "mins")

# Join each alert with its planned flight time, per area, then recombine.
dataEU <- data[data$flightArea == "EU",]
dataUS <- data[data$flightArea == "US",]
dataEU <- merge(x = dataEU, y = plannedFlightTimesEU[c("trackId","flightTime")], by.x = "realFlightId", by.y = "trackId", all.x = TRUE)
dataUS <- merge(x = dataUS, y = plannedFlightTimesUS[c("filenameNoXml","flightTime")], by.x = "filenameNoExtension", by.y = "filenameNoXml", all.x = TRUE)
data <- rbind(dataEU, dataUS)

# Time saved w.r.t. the expected landing (first event + planned flight time).
# FIX: pin the difference to minutes so the "* 60" conversion to seconds
# below is always correct regardless of difftime's automatic unit choice.
data$timeSavedWrtETA <- difftime(data$firstEventDateTime + data$flightTime, data$predictionDateTime, units = "mins")

meanTimeSavedWrtETA <- as.numeric(mean(data$timeSavedWrtETA[!is.na(data$timeSavedWrtETA)])) * 60
medianTimeSavedWrtETA <- as.numeric(median(data$timeSavedWrtETA[!is.na(data$timeSavedWrtETA)])) * 60
minTimeSavedWrtETA <- as.numeric(min(data$timeSavedWrtETA[!is.na(data$timeSavedWrtETA)])) * 60
maxTimeSavedWrtETA <- as.numeric(max(data$timeSavedWrtETA[!is.na(data$timeSavedWrtETA)])) * 60

print("Time saved w.r.t. the expected landing")
sprintf("Average: %s", format_dhms(meanTimeSavedWrtETA))
sprintf("Median: %s", format_dhms(medianTimeSavedWrtETA))
sprintf("Minimum: %s", format_dhms(minTimeSavedWrtETA))
sprintf("Maximum: %s", format_dhms(maxTimeSavedWrtETA))

# data[data$filenameNoExtension == "24936159",]
data[order(data$flightTime - data$timeSavedWrtETA), c("realFlightId","filename","departureCode","arrivalCode","firstEventDateTime","lastEventDateTime","flightTime","predictionDateTime","timeSavedWrtETA","timeSavedHhMmSs")]
# write.csv2(data[order(data$flightTime-data$timeSavedWrtETA),c("realFlightId","filename","departureCode","arrivalCode","flightTime","timeSavedWrtETA","timeSavedHhMmSs")])
names(data)
|
fa62bb6ef21694f2418a02013b95b1363d2791e7
|
70fc84a220fff7fc26d6610abb2c5995e613947d
|
/Scripts/PlottingScripts/Chapter 2 Plotting Script.r
|
332fd952fd79b8d81930c0f63a808bb7fba08dec
|
[] |
no_license
|
mohriner/flowBook
|
b8b33a90a3cc9ae52b66ae49abec8f15adeb4bfd
|
faaec23176acf7af339fdda5b119f4aa586bbbda
|
refs/heads/master
| 2020-04-01T21:57:44.605875
| 2020-02-13T15:29:46
| 2020-02-13T15:29:46
| 153,684,015
| 13
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,285
|
r
|
Chapter 2 Plotting Script.r
|
source("Scripts/loadLibraries.r")
source("Scripts/PlottingScripts/PlotVerse.r")
load("DerivedData/FlowBookCorpus.rdata")
meta <- read.delim("SourceData/verseMetadata.txt", header = TRUE)

# Example 2.2. Chronological and geographical distribution of the corpus sample ----
# (n=225) and sub-sample (n=75)
# a = the population (tranche contains "genre"); b = the sub-sample ("genre1").
a <- meta %>% filter(grepl("genre", tranche)) %>%
  select(artistOriginYear, region)
b <- meta %>% filter(grepl("genre1", tranche)) %>%
  select(artistOriginYear, region)

# NOTE(review): quartz() is macOS-only; other platforms need x11()/windows().
quartz(width = 4.55, height = 4.55)
par(mfrow = c(2, 2), mar = c(3.5, 3.5, 3, 0), mgp = c(2.5, 1, 0), cex = .65, las = 1, bty = 'n',
    family = "Times New Roman")

# Plot 1: career-start years, population
a$artistOriginYear %>% hist(main = "", ylim = c(0, 50))
title(main = "Year of career beginning", font.main = 1, line = 2)
title(main = "(Population)", font.main = 1, line = 1)

# Plot 2: career-start years, sample
b$artistOriginYear %>% hist(main = "", ylim = c(0, 15))
title(main = "Year of career beginning", font.main = 1, line = 2)
title(main = "(Sample)", font.main = 1, line = 1)

# Plot 3: region frequencies, population
a1 <- table(a$region)
a1 <- a1[which(a1 > 0)]
a1 <- sort(a1, decreasing = TRUE)
barplot(a1, xaxt = "n", ylab = "Frequency")
text(x = seq(.5, length(a1), length.out = length(a1)), y = rep(-12, length(a1)),
     names(a1),
     srt = 45, xpd = TRUE)
title(main = "Geographic region", font.main = 1, line = 2)
title(main = "(Population)", font.main = 1, line = 1)

# Plot 4: region frequencies, sample. Appending one of each region then
# subtracting 1 keeps zero-count regions in the table.
b1 <- c(as.character(b$region), "east", "south", "west", "midwest", "non-US")
b1 <- table(b1) - 1
b1 <- sort(b1, decreasing = TRUE)
barplot(b1, xaxt = "n", ylab = "Frequency")
text(x = seq(.5, length(b1), length.out = length(b1)), y = rep(-5, length(b1)),
     names(b1),
     srt = 45, xpd = TRUE)
title(main = "Geographic region", font.main = 1, line = 2)
# FIX: this panel plots the sample (b1) but was mislabelled "(Population)".
title(main = "(Sample)", font.main = 1, line = 1)

quartz.save(file = "Examples/Chapter 2/Example 2.2.pdf", type = "pdf"); dev.off()
remove(list = c("a", "b", "a1", "b1"))

# Example 2.5 Bubba Sparxxx, Deliverance, mm. 1-4 -------------------------
PlotVerse("deliverance", m.range = 0:3, plot.accent = FALSE, plot.rhymeClasses = FALSE, plot.phrase = FALSE)
quartz.save(file = "Examples/Chapter 2/Example 2.5.pdf", type = "pdf"); dev.off()

# Example 2.4. 8.Lil’ Wayne featuring Nikki, “Weezy Baby” (2005, 0:37–0:50, Excerpt 1.6) ----
PlotVerse("weezyBaby", m.range = 0:1, plot.accent = FALSE, plot.rhymeClasses = FALSE, plot.phrase = FALSE)
# There's more to making this example, done in Excel and Word, so it isn't saved here.
|
0c40ba231bfe56c5ac6415b75cf0a75f7327ad27
|
1aced36f0193b1cd8f5cc6b180d40b60d08ce994
|
/man/compare.Rd
|
ba865f60cc91fec1e1f76962124bebe41973eb59
|
[] |
no_license
|
gasse/bnlearn-clone-3.1
|
28ffd5b50ca97ec7fe54fa6037bc4a7757a81315
|
2bd270be53eafb08e13bdadce448db27442e2978
|
refs/heads/master
| 2020-12-24T18:03:29.368563
| 2013-11-14T16:34:04
| 2013-11-14T16:34:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,478
|
rd
|
compare.Rd
|
\name{compare}
\alias{compare}
\alias{all.equal.bn}
\alias{shd}
\alias{hamming}
\title{ Compare two different Bayesian networks }
\description{
Compare two different Bayesian networks; compute the Structural
Hamming Distance (SHD) between them or the Hamming distance
between their skeletons.
}
\usage{
compare(target, current, arcs = FALSE)
\method{all.equal}{bn}(target, current, ...)
shd(learned, true, debug = FALSE)
hamming(learned, true, debug = FALSE)
}
\arguments{
\item{target, learned}{an object of class \code{bn}.}
\item{current, true}{another object of class \code{bn}.}
\item{\dots}{extra arguments from the generic method (currently ignored).}
\item{debug}{a boolean value. If \code{TRUE} a lot of debugging output
is printed; otherwise the function is completely silent.}
\item{arcs}{a boolean value. See below.}
}
\value{
\code{compare} returns a list containing the number of true positives
(\code{tp}, the number of arcs in \code{current} also present in
\code{target}), of false positives (\code{fp}, the number of arcs in
\code{current} not present in \code{target}) and of false negatives
(\code{fn}, the number of arcs not in \code{current} but present in
\code{target}) if \code{arcs} is \code{FALSE}; or the corresponding
arc sets if \code{arcs} is \code{TRUE}.
\code{all.equal} returns either \code{TRUE} or a character string
describing the differences between \code{target} and \code{current}.
\code{shd} and \code{hamming} return a non-negative integer number.
}
\examples{
data(learning.test)
e1 = model2network("[A][B][C|A:B][D|B][E|C][F|A:E]")
e2 = model2network("[A][B][C|A:B][D|B][E|C:F][F|A]")
shd(e2, e1, debug = TRUE)
# * arcs between A and F do not match.
# > the learned network contains A - F.
# > the true network contains A -> F.
# * arcs between E and F do not match.
# > the learned network contains F -> E.
# > the true network contains E -> F.
# [1] 2
unlist(compare(e1,e2))
# tp fp fn
# 5 1 1
compare(target = e1, current = e2, arcs = TRUE)
# $tp
# from to
# [1,] "A" "C"
# [2,] "B" "C"
# [3,] "B" "D"
# [4,] "C" "E"
# [5,] "A" "F"
#
# $fp
# from to
# [1,] "F" "E"
#
# $fn
# from to
# [1,] "E" "F"
}
\references{
Tsamardinos I, Brown LE, Aliferis CF (2006). "The Max-Min Hill-Climbing
Bayesian Network Structure Learning Algorithm". \emph{Machine Learning},
\strong{65}(1), 31-78.
}
\author{ Marco Scutari }
\keyword{graphs}
|
177a74e83d025ff40eb32481d833446c9d9ecfd1
|
650e9a743bb3a72bc39c32509716651a1febe0c8
|
/man/load_elec_data.Rd
|
3a6aedc4045e0255690791aa6442d3e12348d539
|
[
"MIT"
] |
permissive
|
JGCRI/superwell
|
dc14b4b83933f8efdf5fc4fc6c29c4bb9100cb10
|
4d5e0c55936e784b438821769663ae1dfbb45910
|
refs/heads/main
| 2023-08-03T15:08:20.436680
| 2023-06-25T01:58:57
| 2023-06-25T01:58:57
| 199,896,734
| 2
| 2
|
MIT
| 2023-06-25T01:59:00
| 2019-07-31T17:00:59
|
R
|
UTF-8
|
R
| false
| true
| 535
|
rd
|
load_elec_data.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/superwell.R
\name{load_elec_data}
\alias{load_elec_data}
\title{Load electricity cost data for each country}
\usage{
load_elec_data(el_cost_file)
}
\arguments{
\item{el_cost_file}{Full path with file name and extension to the input GCAM electrical rates YAML file}
}
\value{
electricity energy cost for a country
}
\description{
Read in the cost of electricity by GCAM basin (USD/Unit for 172 countries)
}
\author{
Superwell team; hassan.niazi@pnnl.gov
}
|
ebd56091fe1c77a09db1d3754633551525d200fb
|
ce645e050e24c0b982ae6c3b45eb686bcfa36202
|
/Scripts/05a_predicted_versus_observed.R
|
f1b912b51a9049d5d9d6bf294d630807c01eaf33
|
[] |
no_license
|
dmcalli2/adverse_events_older_people
|
fe2d008fc53a3e3737a4459a06c99d53665bd37b
|
9ec0a84e32d2d35b5e2f2e37d9cd44238c18b7ed
|
refs/heads/master
| 2023-05-10T00:30:55.312685
| 2021-06-17T16:33:37
| 2021-06-17T16:33:37
| 218,378,533
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,590
|
r
|
05a_predicted_versus_observed.R
|
library(tidyverse)
# library(msm)
library(truncnorm)

## data ----
# Fractional-polynomial model coefficients and their variance-covariance
# matrix, exported from the SAIL safe haven.
coefs <- read_csv("SAIL_data/age_18_plus_fp_coef.csv")
vcov <- read_csv("SAIL_data/age_18_plus_fp_vcov.csv")
# drop the name column, label the remaining 4 columns by coefficient name
vcov <- subset(vcov, select = c(2:5))
names(vcov) <- coefs$X1
vcov <- as.matrix(vcov)

# trial-level age/sex/BMI/AE/SAE summaries; list elements become
# top-level objects (trials, tots, ...) in the global environment
dfs <- readRDS("Processed_data/age_sex_bmi_ae_sae.Rds")
list2env(dfs, envir = .GlobalEnv)
rm(dfs)

# manual SAE-count corrections for two trials
# NOTE(review): source of these corrected counts not visible here -- confirm
tots$sae[tots$nct_id == "NCT00134160"] <- 144
tots$sae[tots$nct_id == "NCT00454662"] <- 594
## Expectation of a normal(mu, sd) distribution truncated to [lower, upper].
## Standard closed form: mu + sd * (phi(a) - phi(b)) / (Phi(b) - Phi(a)),
## where a, b are the standardised truncation bounds.
## (Rewritten with `<-` assignment and without a local named `mean`, which
## shadowed base::mean.)
mean.tnorm <- function(mu, sd, lower, upper){
  lower.std <- (lower - mu) / sd
  upper.std <- (upper - mu) / sd
  mu + sd * (dnorm(lower.std) - dnorm(upper.std)) /
    (pnorm(upper.std) - pnorm(lower.std))
}
## Variance of a normal(mu, sd) distribution truncated to [lower, upper],
## using the standard closed form in terms of the standardised bounds.
## Note: infinite bounds yield NaN here (Inf * dnorm(Inf)), same as the
## original implementation -- call with finite bounds.
## (Rewritten with `<-` assignment and without the redundant local.)
var.tnorm <- function(mu, sd, lower, upper){
  lower.std <- (lower - mu) / sd
  upper.std <- (upper - mu) / sd
  sd^2 * (1 + (lower.std * dnorm(lower.std) - upper.std * dnorm(upper.std)) /
            (pnorm(upper.std) - pnorm(lower.std)) -
            ((dnorm(lower.std) - dnorm(upper.std)) /
               (pnorm(upper.std) - pnorm(lower.std)))^2)
}
## Predicted event rate (per 365 days) for one covariate pattern under the
## fractional-polynomial model.
##
## coefs:   numeric vector of length 4 -- intercept, (age/100)^0.5 term,
##          (age/100)^2 term, GNDR_CD term (order must match `vcov`).
## vcov:    4x4 variance-covariance matrix of those coefficients.
## age:     age in years.
## GNDR_CD: sex code (0/1).
## output:  "point", "lower", "upper" or "verbose"; anything else returns NA.
predict_fn <- function(coefs, vcov, age, GNDR_CD, output){
  ## design vector for this covariate pattern
  ## (renamed from `cov`, which shadowed stats::cov)
  xvec <- c(1, I((age/100)^0.5), I((age/100)^2), GNDR_CD)
  lp <- coefs[1] + I((age/100)^0.5)*coefs[2] + I((age/100)^2)*coefs[3] + GNDR_CD*coefs[4]
  ## FIX: x' V x is the *variance* of the linear predictor; the original
  ## used it directly as the standard error, so the 95% CI was computed
  ## on the variance scale. Take the square root.
  se <- sqrt(c(t(xvec) %*% vcov %*% xvec))
  pred <- exp(lp)*365
  lci <- exp(lp - 1.96*se)*365
  uci <- exp(lp + 1.96*se)*365
  if (output == "point") {
    pred
  } else if (output == "lower") {
    lci
  } else if (output == "upper") {
    uci
  } else if (output == "verbose") {
    list(lp = lp, se = se, lci = lci, uci = uci)
  } else {
    NA
  }
}
## Recover the (mu, sd) parameters of a truncated normal whose *observed*
## mean/SD match a trial's reported age distribution on [minage, maxage],
## then return the implied probability mass at each integer age 0-100.
## Relies on mean.tnorm()/var.tnorm() defined above, on dplyr being attached,
## and on the msm package (via msm::ptnorm).
get_parameters <- function(maxage, minage, meanage, sdage){
  lower <- minage
  upper <- maxage
  # a missing maximum age is treated as a cap of 100
  upper <- ifelse(is.na(upper), 100L, upper)
  trial_mean <- meanage
  trial_sd <- sdage
  trial_var <- sdage^2
  ## Create a grid of candidate (mu, sd) pairs at 0.05 resolution
  mu_x <- seq(lower, upper, 0.05)
  sd_x <- seq(1, upper-lower, 0.05)
  full_grid <- expand.grid(mu_x = mu_x, sd_x = sd_x)
  ## Observed moments implied by each candidate (vectorised, so fast)
  full_grid$mean_x <- mean.tnorm(full_grid$mu_x, full_grid$sd_x, lower, upper)
  full_grid$var_x <- var.tnorm(full_grid$mu_x, full_grid$sd_x, lower, upper)
  ## Rank candidates by closeness to the trial's reported moments:
  ## absolute mean error + absolute variance error, ties broken on each
  full_grid <- full_grid %>%
    as_tibble() %>%
    mutate(mean_diff = abs(trial_mean - mean_x),
           var_diff = abs(trial_var - var_x),
           total_diff = mean_diff + var_diff) %>%
    arrange(total_diff, mean_diff, var_diff)
  ## Keep the 10 best fits alongside the original trial parameters
  ## (only row 1 -- the best fit -- is actually used below)
  estimate <- full_grid %>%
    slice(1:10) %>%
    mutate(trial_mean = trial_mean,
           trial_var = trial_var,
           trial_lower = lower,
           trial_upper = upper,
           trial_sd = trial_sd) %>%
    select(trial_mean, mean_x, trial_var, var_x, mu_x, sd_x, trial_sd, everything())
  ## Convert the best-fitting truncated normal into per-year probabilities:
  ## CDF at ages 0..100, then first differences give the mass in each
  ## one-year age band
  freqs <- msm::ptnorm(seq(0, 100, 1),
                       estimate$mu_x[1], estimate$sd_x[1],
                       estimate$trial_lower[1], estimate$trial_upper[1])
  tibble(age = 0:100, age_p = freqs - lag(freqs, 1, default = 0))
}
## Restrict all trials ----
# Drop within/between-class-7 comparisons and mixed-phase trials, then join
# the remaining trial metadata onto the SAE totals.
trials <- trials %>%
  filter(!type_comparison %in% c("same_class7", "diff_class7"),
         !phase == "Phase 2/Phase 3")
tots <- tots %>%
  filter(!is.na(sae)) %>%
  inner_join(trials %>% select(nct_id, aliskiren, minimum_age, maximum_age, hard_outcome, type_comparison, fu_days, phase))
# manual denominator correction for one trial
# NOTE(review): source of the corrected value not visible here -- confirm
tots$subjects_at_risk[tots$nct_id == "NCT00553267"] <- 947
# person-time: full follow-up for event-free participants plus half the
# follow-up for those with an SAE; rate expressed per 365 days
tots <- tots %>%
  mutate(pt = fu_days * (subjects_at_risk-sae) + 0.5 * sae*fu_days,
         older = as.integer(minimum_age >=60),
         rate = 365*sae/pt)
names(tots)
# keep only trials with complete sex counts and age mean/SD
tots <- tots %>%
  filter(!is.na(male),
         !is.na(female),
         !is.na(age_m),
         !is.na(age_s))
# replace missing maximum ages with 120 (effectively no upper limit).
# All trials have minimum ages.
tots <- tots %>%
  mutate(maximum_age = if_else(is.na(maximum_age), 120, as.double(maximum_age)))
# # get parameters of truncated normal distribution, quite slow so run only when have to ----
# tots$age_dist <- pmap(list(tots$age_m, tots$age_s, tots$minimum_age, tots$maximum_age),
#                       function(mean, sd, min, max) get_parameters(maxage = max, minage = min, meanage = mean, sdage = sd))
# saveRDS(tots, "Scratch_data/age_distributions2.Rds")
tots <- readRDS("Scratch_data/age_distributions2.Rds")

## limit data to minimum and maximum ages across all datasets then estimate rates for each of these ----
# ages where each trial carries at least ~1% of its mass; take overall range
min_age <- map_dbl(tots$age_dist, ~ .x %>% filter(.x$age_p >=0.01) %>% pull(age) %>% min()) %>% min()
max_age <- map_dbl(tots$age_dist, ~ .x %>% filter(.x$age_p >0.01) %>% pull(age) %>% max()) %>% max()
# fractional-polynomial age terms for every integer age in range
mycovs <- tibble(age = min_age:max_age) %>%
  mutate(age1 = (age/100)^0.5,
         age2 = (age/100)^2)
# 10,000 coefficient draws to propagate model-estimation uncertainty
smpl_coefs <- mvtnorm::rmvnorm(10000, coefs$model3.coefficients, sigma = vcov)
colnames(smpl_coefs) <- coefs$X1
# estimate linear predictors for men and women at each age, per draw
mycovs$men <- map2(mycovs$age1, mycovs$age2, function(age1, age2){
  smpl_coefs[, "(Intercept)"] +
    smpl_coefs[, "I((age/100)^0.5)"] *age1 +
    smpl_coefs[, "I((age/100)^2)"] *age2 +
    0})
mycovs$women <- map2(mycovs$age1, mycovs$age2, function(age1, age2){
  smpl_coefs[, "(Intercept)"] +
    smpl_coefs[, "I((age/100)^0.5)"] *age1 +
    smpl_coefs[, "I((age/100)^2)"] *age2 +
    smpl_coefs[, "GNDR_CD"]})
# reshape to one long table: (age, iter) rows with both sexes' predictors
mycovs$both <- map2(mycovs$men, mycovs$women, ~ tibble(iter = 1:10000, men = .x, women = .y))
mycovs <- mycovs %>%
  select(-men, -women)
mycovs <- mycovs %>%
  unnest(cols = both)
mycovs <- mycovs %>%
  arrange(iter)
# convert to rate per person-year
mycovs <- mycovs %>%
  mutate_at(vars(men, women), function(x) exp(x))
mycovs <- mycovs %>%
  select(age, iter, men, women)
# expected rate per trial: weight each age's rate by that trial's age mass
tots$smpls <- map(tots$age_dist, ~ inner_join(.x, mycovs))
tots$smpls <- map(tots$smpls, ~ .x %>%
                    group_by(iter) %>%
                    summarise(men = weighted.mean(men, age_p),
                              women = weighted.mean(women, age_p)) %>%
                    ungroup())
tots$age_dist <- NULL
tots <- tots %>%
  mutate(male_p = male/(male + female))
# combine sexes using each trial's observed male proportion
tots$smpls <- map2(tots$smpls, tots$male_p, ~ .x %>%
                     mutate(both = .y*men + (1-.y) * women))
## sample from a beta distribution and multiply by the number at risk to get
## a CI for the observed SAE count
tots$sae_smpls <- map2(tots$sae, tots$subjects_at_risk, ~ rbeta(10000, .x + 0.005, .y-.x+0.005) * .y)
## or sample from Poisson
tots$sae_smpls_pois <- map(tots$sae, ~ rpois(10000, .x))
# compare beta and Poisson sampling distributions for the observed counts
pois <- map(tots$sae_smpls_pois, ~ tibble(m = mean(.x), s = sd(.x))) %>%
  bind_rows(.id = "trial")
beta <- map(tots$sae_smpls, ~ tibble(m_beta = mean(.x), s_beta = sd(.x)))%>%
  bind_rows(.id = "trial")
cmpr <- bind_cols(pois, beta %>% select(-trial))
# diagnostic plots: points on the identity line mean the two schemes agree
plot(cmpr$m, cmpr$m_beta)
abline(a = 0, b = 1)
plot(cmpr$s, cmpr$s_beta)
abline(a = 0, b = 1)

## Divide by person time to get rate
tots$sae_smpls <- map2(tots$sae_smpls, tots$pt, ~ 365 * .x/.y)

## Produce ratios ----
# expected (community) rate per year over the observed trial rate, per draw
tots$smpls <- map(tots$smpls, ~ .x$both*365)
tots$ratios <- map2(tots$sae_smpls, tots$smpls, ~ .y/.x)

## Summarise rates (posterior mean, SD and 95% interval across draws)
tots$rate_mean <- map_dbl(tots$smpls, mean)
tots$rate_se <- map_dbl(tots$smpls, sd)
tots$rate_lci <- map_dbl(tots$smpls, ~ quantile(.x, 0.025))
tots$rate_uci <- map_dbl(tots$smpls, ~ quantile(.x, 0.975))
## Summarise ratios
tots$ratio_mean <- map_dbl(tots$ratios, mean)
tots$ratio_se <- map_dbl(tots$ratios, sd)
tots$ratio_lci <- map_dbl(tots$ratios, ~ quantile(.x, 0.025))
tots$ratio_uci <- map_dbl(tots$ratios, ~ quantile(.x, 0.975))

tots <- tots %>%
  select(nct_id, subjects_at_risk, sae, fu_days, pt, rate,
         rate_mean, rate_se, rate_lci, rate_uci,
         ratio_mean, ratio_se, ratio_lci, ratio_uci,
         older, phase)
tots$v_line <- 1
tots$label <- ifelse(tots$older==1, "Older-people trials", "Standard trials")

# forest-style plot of the ratios, coloured by trial phase
a <- ggplot(tots, aes(x = nct_id, y = ratio_mean, colour = phase))+
  geom_point()+
  geom_pointrange(aes(ymin = ratio_lci, ymax = ratio_uci))+
  facet_grid(label~., scales = "free", space = "free")+
  geom_hline(aes(yintercept = v_line))+
  coord_flip()+
  ggtitle("Ratio of observed to expected counts")+
  ylab("Observed count / Expected count")+
  xlab("Trial ID")
a
# zoomed version of the same plot
a <- a +
  coord_flip(ylim = c(0.9, 15))
a

saveRDS(tots, "Data/SAE_ratio_observed_expected.Rds")
tots <- readRDS("Data/SAE_ratio_observed_expected.Rds")
# cap upper CIs at 20 so extreme trials do not stretch the axis
tots$ratio_uci <- ifelse(tots$ratio_uci>20, 20, tots$ratio_uci)

# final figure: ratios ordered by size, coloured by older/standard trials
a <- ggplot(tots%>%filter(ratio_mean<15), aes(x = reorder(nct_id, ratio_mean), y = ratio_mean, colour = label))+
  geom_point()+
  geom_pointrange(aes(ymin = ratio_lci, ymax = ratio_uci))+
  scale_color_manual(values = c("#00BFC4", "#F8766D"))+
  facet_grid(label~., scales = "free", space = "free")+
  geom_hline(aes(yintercept = v_line))+
  coord_flip()+
  ggtitle("Ratio of expected to observed SAE count")+
  ylab("Expected hospitalisations and deaths / Observed SAEs")+
  xlab("Trial ID")+
  labs(colour = "Older vs Standard trials", shape = "Phase")
a
a <- a +
  coord_flip()
a
|
1fe4347598dfb36edbad41b186b9269ec0cc0bd0
|
5bb91efaaac1ebc48723a2504e95333851f6a281
|
/Rfunctions/CalculateAltitude.R
|
8bc375a1dbe0317dc87e2f569a0c6491cd645cc8
|
[] |
no_license
|
RikvH/Project_WijdoenMEE
|
365834d82e099021b7d5dbaa7fbd27ed5e7681e3
|
bad4aa55ae96acc1fde8d24131be166bcc287b54
|
refs/heads/master
| 2021-01-13T05:31:12.409323
| 2017-02-02T16:29:05
| 2017-02-02T16:29:05
| 80,189,608
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 624
|
r
|
CalculateAltitude.R
|
###################################
## Wij doen MEE ##
## Rik van Heumen & Dillen Bruil ##
## 30 January 2017 ##
###################################

## Extract the altitude along a route from the `ahn` DEM raster.
## NOTE(review): despite the earlier description, this function does not
## print min/max altitudes or return a plot -- it simply returns the raster
## values extracted at the route points. It also depends on a global `ahn`
## object (the DEM) already being loaded -- confirm upstream.
altitude <- function(routepoints){
  # Transform the route points to the projection of the DEM
  projroute <- spTransform(routepoints, CRS(proj4string(ahn)))
  # Extract the DEM data for the points
  alt <- extract(ahn, projroute, along = TRUE)
  return (alt)
}
|
a71c5eaef2e9734e7eb5b615951feccabe0cc67f
|
ced776f0ae6d8a5605bde05590ee7cb33ac9569b
|
/data/examples/example-parties.R
|
a2d7ba00237d9914a78b7e8d926282acbf47b752
|
[] |
no_license
|
pdwaggoner/methods
|
e9ff0ff3cc03a059c309c5c9a1c4e0b03952f56a
|
81b7904572c54422daba6d645445b528d9f60b09
|
refs/heads/master
| 2020-03-18T18:10:20.031335
| 2018-01-08T21:34:39
| 2018-01-08T21:34:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 465
|
r
|
example-parties.R
|
#' ---
#' title: "An Example with the `parties` Data Set"
#' author: "Carlisle"
#' ---

# set working directory to the project root
# NOTE(review): setwd() in scripts is generally discouraged; it is only
# needed here because import() below uses a project-relative path.
setwd(here::here())

# load packages
library(tidyverse)
library(rio)

# load data (project-relative path)
parties_df <- import("data/parties.rds")

# quick look at column names, types, and a few values
glimpse(parties_df)

# scatterplot of eneg vs enep with a per-panel linear fit,
# faceted by electoral system
ggplot(parties_df, aes(x = eneg, y = enep)) +
  geom_point() +
  geom_smooth(method = "lm", se = FALSE) +
  facet_wrap(~ electoral_system)
|
35c41171af73af0f5369a36b82a00bf94b2f3993
|
8eb87e4dea4f73397d0340be06abd2697a7a57e3
|
/man/hg_converter.Rd
|
15e77557b539f5ccf955b9d81cab49f2040a8971
|
[] |
no_license
|
BacemDataScience/cancereffectsizeR
|
110c4c13ba27c0dff90b256d29e84d485b5a59f8
|
741db1c938993e551cc9f0150d2420e0e35dee4a
|
refs/heads/master
| 2020-08-27T01:51:26.433520
| 2019-10-02T11:36:56
| 2019-10-02T11:36:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,433
|
rd
|
hg_converter.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/hg_converter.R
\name{hg_converter}
\alias{hg_converter}
\title{hg_converter}
\usage{
hg_converter(chain, maf_to_convert, chrom_col_name = "Chromosome",
new_build_name = "Converted_from_GRCh38_to_hg19")
}
\arguments{
\item{chain}{A directory on your system with the "*over.chain" file to be used with liftOver. For instance, the chain to convert from hg38 to hg19 can to be downloaded from UCSC \url{http://hgdownload.cse.ucsc.edu/gbdb/hg38/liftOver/} (hg38ToHg19.over.chain.gz).}
\item{maf_to_convert}{MAF file, with relevant fields consistent with the GDC MAF specification (\url{https://docs.gdc.cancer.gov/Data/File_Formats/MAF_Format/}), used in the conversion.}
\item{chrom_col_name}{Name of the column header containing chromosome information.}
\item{new_build_name}{This will be the string in the "NCBI_Build" column header of the new MAF file.}
}
\value{
The returned output will be a MAF file with all the same data fields as the original but with "Start_Position" and "End_Position" converted to the build specified by the 'chain' input.
}
\description{
Convert MAF files from one genomic coordinate system to another.
Developed for use with MAF files found on the
NCI Genome Data Commons. Adapted from the liftOver vignettes page at \url{https://master.bioconductor.org/packages/release/workflows/vignettes/liftOver/inst/doc/liftov.html}
}
|
7e9367068b7b9b631b81c30884d1e2a512d8cf50
|
f35b869a012fb5881a232ff6ca1d784b4e76e992
|
/ui.R
|
6cae234da06b17a3aaeb02cff229deddaf1ba9db
|
[] |
no_license
|
Quentin62/jeia
|
ed0ae6382c0e82704900f2469fcc88478bd0c848
|
f482d8242079327bd31843fc62b9952281ca5acf
|
refs/heads/master
| 2020-04-23T02:32:26.808770
| 2019-03-12T08:38:07
| 2019-03-12T08:38:07
| 170,849,791
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,841
|
r
|
ui.R
|
library(shiny)
library(shinyjs)
library(shinydashboard)
library(DT)
library(plotly)

# Shiny dashboard UI for the JEIA 2019 demo app: three tabs illustrating
# coin flips (probability), height/weight (density fitting, clustering,
# regression) and the iris data (visualisation, PCA).
#
# Fix: corrected the iris variable names quoted in the PCA explanation
# ("Pepal.Width"/"Pepal.Length"/"Setal.Length" -> Petal.Width / Petal.Length /
# Sepal.Length) and a few obvious French typos in user-facing text
# ("d'originies", "le femmes", "un comportements").
dashboardPage(
  dashboardHeader(title = "JEIA 2019"),

  # Left-hand navigation between the three demos.
  dashboardSidebar(
    sidebarMenu(
      id = "tabs",
      menuItem("Pile ou face", tabName = "pile", icon = icon("coins")),
      menuItem("Taille et poids", tabName = "taille", icon = icon("ruler-vertical")),
      menuItem("Iris", tabName = "iris", icon = icon("leaf"))
    )
  ),

  dashboardBody(
    useShinyjs(),
    tabItems(

      # ---- Tab 1: coin flips ----
      tabItem(
        tabName = "pile",
        br(),
        column(12,
               sliderInput("nObsPF", "Nombre d'observations :", min = 10, max = 1000, value = 50, step = 10),
               verbatimTextOutput("donneesPF")),
        column(10, offset = 1,
               actionButton("showPlotPF", "Graphiques"),
               br(), br(),
               # Hidden until the button is pressed (shinyjs::hidden).
               hidden(uiOutput("plotPF"), uiOutput("expPF")))
      ),

      # ---- Tab 2: height and weight ----
      tabItem(
        tabName = "taille",
        tabsetPanel(
          tabPanel("Données",
                   br(),
                   column(6, offset = 3, dataTableOutput("donnees"))
          ),
          tabPanel("Visualisation",
                   br(),
                   fluidRow(column(8, offset = 2, box("On ajuste une loi de probabilités sur les variables afin de déterminer et comprendre le comportement des individus.", status = "primary", width = 12))),
                   sidebarLayout(
                     sidebarPanel(selectInput("selectedVar", choices = c("Height", "Weight"), label = NULL),
                                  checkboxGroupInput("gender", "Genre :", c("Male", "Female"), selected = c("Male", "Female")),
                                  uiOutput("slider")),
                     mainPanel(plotOutput("distPlot"),
                               br(),
                               actionButton("show", "Montrer les paramètres optimaux"),
                               br(), br(),
                               hidden(uiOutput("param"), uiOutput("expTP"))
                     )
                   )
          ),
          tabPanel("Visualisation (II)",
                   br(),
                   plotlyOutput("plotTP")),
          tabPanel("Classification",
                   br(),
                   fluidRow(column(12, box("La classification consiste à créer des groupes d'individus avec des comportements similaires.",
                                           "Dans le cas présent, on cherche des comportements différents des personnes en fonction de la taille et du poids sans prendre en compte l'information sur le genre.",
                                           width = 12, status = "primary"))),
                   br(),
                   plotOutput("plotClassif"),
                   br(),
                   box(tableOutput("paramClassifTP"), title = "Moyennes estimées", status = "primary"),
                   box(tableOutput("compPartition"), title = "Comparaison de la partition avec le genre", status = "primary"),
                   box("Pour 2 classes, l'algorithme de classification a trouvé une classe comprenant principalement les femmes et une autre les hommes.",
                       " Cela sans connaissances préalables sur le sexe. C'est la structure la plus vraisemblable dans les données en se basant sur des lois normales.", width = 12, status = "primary")),
          tabPanel("Régression",
                   br(),
                   h2("Prédire la taille en fonction du poids"),
                   sidebarLayout(
                     sidebarPanel(checkboxGroupInput("gender2", "Genre :", c("Male", "Female"), selected = c("Male", "Female")),
                                  uiOutput("slider2")),
                     mainPanel(plotlyOutput("plotTP2"),
                               br(),
                               actionButton("show2", "Montrer les paramètres optimaux"),
                               br(), br(),
                               hidden(uiOutput("param2"))
                     )
                   )
          )
        )
      ),

      # ---- Tab 3: iris ----
      tabItem("iris",
              tabsetPanel(
                tabPanel("Données",
                         br(),
                         column(10, dataTableOutput("dataIris"))),
                tabPanel("Visualisation",
                         fluidRow(column(6, selectInput("abscisseIris", "Abscisse", colnames(iris)[-5], selected = "Sepal.Width")),
                                  column(6, selectInput("ordonneesIris", "Ordonnée", colnames(iris)[-5]))),
                         fluidRow(column(12, plotlyOutput("plotIris")))),
                tabPanel("Visualisation (II)",
                         br(),
                         fluidRow(column(10, offset = 1, box("Créer un nouveau repère orthogonal qui maximise la variabilité des données. Les nouveaux axes sont des combinaisons linéaires des variables d'origines.",
                                                             title = "Analyses en composantes principales", solidHeader = TRUE, status = "primary", width = 10))),
                         fluidRow(column(12, box(plotlyOutput("plotIris2"), width = 12, status = "primary"))),
                         fluidRow(column(8, offset = 2, box("Ici le nouveau repère représente environ 96% de l'information du jeu de données de base.",
                                                            " On a donc une représentation en 2 dimensions qui contient quasiment la même information que les 4 dimensions du jeu de données de base. C'est une méthode très utile pour visualiser des données.", width = 12, status = "primary"))),
                         fluidRow(column(8, offset = 2, box(plotOutput("plotIris3"), width = 12, status = "primary"))),
                         fluidRow(column(8, offset = 2, box("Les nouveaux axes sont des combinaisons linéaires des variables. Regarder la corrélation de ces axes avec les variables permet d'interpréter les nouveaux axes.",
                                                            " Ainsi, le 1er axe est corrélé positivement avec Petal.Width, Petal.Length et Sepal.Length, une valeur élevé sur le 1er axe est le signe de valeurs élevés sur ces 3 variables.", width = 12, status = "primary"))))
              )
      )
    )
  )
)
|
65f77756a357abe8e4a31c920c280fda6a1ba107
|
efb268d2e8648286b87ca51dee410f3130ace448
|
/R/sandbox/plots.R
|
b03e417404ddcd9252ac12c63375705a00a2cb9f
|
[] |
no_license
|
mkyriak/FRONTIER
|
05be342b6a8d743a047e52af35db49854be1cad5
|
360c703a506a771e138934d3d1fc6806ae6cf508
|
refs/heads/master
| 2023-06-27T02:53:55.163078
| 2021-05-11T16:33:55
| 2021-05-11T16:33:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,069
|
r
|
plots.R
|
# FRONTIER: violin plots of tumour purity and distance-to-tumour-surface
# by predicted methylation class, for the VUmc samples.

# Fix: attach packages BEFORE any code that uses them -- the original called
# `%>%`/filter() on the line before library(tidyverse), which fails in a
# fresh session.
library(tidyverse)
library(ggplot2)

# NOTE(review): pData() comes from Biobase, which is never attached here;
# presumably the saved session / a profile loads it -- confirm.
load('results/FRONTIER.QC.filtered.normalized.anno.final.meta.Rdata')
meta <- as.data.frame(pData(all_data)) %>% filter(Dataset == "VUmc")

# Purity by methylation class.
pdf(file = "figures/violin1.pdf", width = 11, height = 8.5)
ggplot(meta, aes(x = Cell_Predict, y = purity)) +
  geom_jitter() +
  geom_violin(draw_quantiles = c(0.25, 0.5, 0.75), fill = "tan", alpha = 0.7) +
  theme_minimal(base_size = 18, base_family = "sans") +
  theme(axis.text.x = element_text(angle = 15, hjust = 1)) +
  labs(x = "Methylation Class", y = "Purity")
dev.off()

summary(lm(purity ~ Cell_Predict, data = meta))

# Distance to tumour surface by methylation class.
# Fix: reference the column directly inside aes() instead of
# meta$Dist_to_tumor_surface -- mixing external vectors into aes() is an
# anti-pattern (breaks with subsetting/facetting), though the values here
# are the same.
pdf(file = "figures/violin2.pdf", width = 11, height = 8.5)
ggplot(meta, aes(x = Cell_Predict, y = Dist_to_tumor_surface)) +
  geom_jitter() +
  geom_violin(draw_quantiles = c(0.25, 0.5, 0.75), fill = "tan", alpha = 0.7) +
  theme_minimal(base_size = 18, base_family = "sans") +
  theme(axis.text.x = element_text(angle = 15, hjust = 1)) +
  labs(x = "Methylation Class", y = "Distance to tumor surface")
dev.off()

summary(lm(Dist_to_tumor_surface ~ Cell_Predict, data = meta))
|
1c97e1461c93b41f24b938ab68f5020331c2bd3b
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/saturnin/examples/account.for.prior.Rd.R
|
9fdf3254ef544228d257fcef81c9f874cf93f86d
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 360
|
r
|
account.for.prior.Rd.R
|
# Example for saturnin::account.for.prior
# (accounting for prior edge appearance probability).
# Keywords: posterior edge probability
library(saturnin)

data(data_gaussian)

# Log-scale edge weights for the Gaussian sample.
W <- lweights_gaussian(data_gaussian)

# Posterior edge probabilities (computed on the log scale), then adjusted
# for a prior edge probability of q0 = 0.5.
prob <- edge.prob(W, log = TRUE)
prob.q0 <- account.for.prior(prob, q0 = 0.5)
|
6abda93c841de6b4a7cb597beb5ec5b39b64871f
|
e83edc59f6bb2424ab86375762b0c116000d0167
|
/R/sfQuickInit.R
|
5a493bcb9b61aa9462d933cfa06572125930cfa6
|
[] |
no_license
|
gearslaboratory/spatial.tools
|
5f919f4474ef0792c37ba88bf47ce60229f65221
|
905954e12ed3092a56afaaa6f356d3f0a7e572c1
|
refs/heads/master
| 2021-01-04T02:51:21.597561
| 2020-02-13T19:34:33
| 2020-02-13T19:34:33
| 240,344,310
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,010
|
r
|
sfQuickInit.R
|
#' Quickly initializes a parallel cluster and registers it with foreach.
#'
#' @param cpus Number of cpus. Will default to half of the available cpus.
#' @param methods Logical. Load the methods package? (if FALSE, faster startup). Default=FALSE.
#' @param ... parameters to pass to \code{makeCluster()}
#' @author Jonathan A. Greenberg
#' @details (Even more) quickly start a parallel PSOCK cluster with half of
#' available cpus and register it with foreach. Any cluster previously
#' started by this package (tracked in the "spatial.tools.current.cl"
#' option) is stopped first.
#' @examples
#' sfQuickInit(cpus=2)
#' sfQuickStop()
#' @import doParallel parallel
#' @export

# TODO: What to do if a cluster is already running
sfQuickInit <- function(cpus,methods=FALSE,...)
{
	# Default to half the detected cores.
	if(missing("cpus"))
	{
		cpus <- floor(detectCores()/2)
	}

	# Tear down any cluster this package previously registered.
	if(!is.null(getOption("spatial.tools.current.cl"))) sfQuickStop()

	# Fix: forward ... to makeCluster() -- it was documented but never used.
	cl <- makeCluster(spec=cpus,type="PSOCK",methods=methods,...)

	# Remember the cluster, make it the default, and register it with foreach.
	options(spatial.tools.current.cl=cl)
	setDefaultCluster(cl=cl)
	registerDoParallel(cl)
	return(cl)
}
|
afdbee6e135c2e91a7c5abcce9f572c63c275c1b
|
1585bd45e45049644c851b762119203873fc18b3
|
/R/bs_repositories.R
|
a8b93071e3c19fa90c83d25c911feeca4ae3f2a2
|
[
"MIT"
] |
permissive
|
project-renard-survey/rbace
|
8ac1c0f03f03b2864691d44a2da6e4b4cea268c6
|
f0161ffea24c05c841e69df4372a1759bd08e4a3
|
refs/heads/master
| 2021-06-17T02:26:21.357924
| 2017-06-07T00:56:48
| 2017-06-07T00:56:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 781
|
r
|
bs_repositories.R
|
#' List repositories
#'
#' @export
#' @param coll (character) collection code. For existing, pre-defined
#' collections see Appendix, section "Collection-related queries"
#' @param parse (character) One of 'list' or 'df'
#' @param ... curl options passed on to [crul::HttpClient()]
#' @return XML as character string if `parse = FALSE` or data.frame
#' @examples \dontrun{
#' res <- bs_repositories(coll = "ceu")
#' bs_repositories(coll = "ceu")
#'
#' bs_repositories(coll = "denw")
#' bs_repositories(coll = "de")
#' }
bs_repositories <- function(coll, parse = "df", ...) {
  # Respect the BASE rate limit, and record this request's timestamp
  # (read by enforce_rate_limit on the next call) when we return.
  enforce_rate_limit()
  on.exit(Sys.setenv(rbace_time = as.numeric(Sys.time())))

  # Build the query, perform the GET, and parse the response as requested.
  params <- ct(list(func = 'ListRepositories', coll = coll))
  raw <- bs_GET(params, list(...))
  bs_parse_repo(raw, parse)
}
|
a901bc3637e40fd2d0f96925ebea359a34aee8c7
|
95a1a0b30a0bdbf716eca125edb92ca3111a49f6
|
/man/plot.linreg.Rd
|
ee884943722d58938bfd269af799c7d6a8ad1293
|
[
"MIT"
] |
permissive
|
Marbr987/bonus_lab
|
a8bec3429f04d6ef2402f11070ca59cb67113c54
|
8b316fd2774bdf474ad6962e027a34ef74927e28
|
refs/heads/main
| 2023-08-27T22:02:13.508670
| 2021-10-29T23:58:24
| 2021-10-29T23:58:24
| 417,931,605
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 453
|
rd
|
plot.linreg.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot.R
\name{plot.linreg}
\alias{plot.linreg}
\title{plot S3 method}
\usage{
\method{plot}{linreg}(x, ...)
}
\arguments{
\item{x}{An object of class linreg}
\item{...}{other arguments}
}
\value{
List containing the two plots.
}
\description{
S3 method for objects of class linreg providing two plots: the residuals and the standardized residuals, each plotted against the fitted values
}
|
84f2defb897baeb05e65ef1cda68c34ec444e18b
|
233044bb5dae762c818e0b5a0996e2186dcb4d45
|
/man/NBR2.Rd
|
ab39f8dc7904230d5aed192ae4eb958a0d66178a
|
[] |
no_license
|
cran/nightmares
|
52a4f1cc463806b3c7a25e19fd7d0f5b9545842f
|
5262c05af76aa6843fbd2e4266a3fe7e44c5cc76
|
refs/heads/master
| 2022-12-18T14:24:07.441720
| 2020-09-21T05:50:10
| 2020-09-21T05:50:10
| 295,363,997
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,016
|
rd
|
NBR2.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/NBR2.R
\name{NBR2}
\alias{NBR2}
\title{NBR2 - Normalized Burn Ratio 2}
\usage{
NBR2(SWIR1, SWIR2)
}
\arguments{
\item{SWIR1}{A raster layer object with the reflectance values for the Short Wave Infrared 1 band.}
\item{SWIR2}{A raster layer object with the reflectance values for the Short Wave Infrared 2 band.}
}
\value{
NBR2 - Normalized Burn Ratio 2.
}
\description{
NBR2 modifies the Normalized Burn Ratio to highlight water sensitivity in vegetation and may be useful in post-fire recovery studies.
}
\examples{
library(raster)
path_files <- system.file("extdata/", package="nightmares")
bands <- stack(list.files(path_files,".tif", full.names=TRUE))
x <- ref_oli(bands, sun.elev= 67.97)
NBR2(x[[6]], x[[7]])
}
\references{
\url{https://www.usgs.gov/core-science-systems/nli/landsat/landsat-surface-reflectance-derived-spectral-indices}.
\url{https://www.geo.university/pages/spectral-indices-with-multispectral-satellite-data}.
}
|
ba87c24872289a890ad03a21a5c0cdab2f719fce
|
a6dbd574caa4be4796ee99765b8e43e3c3b6d838
|
/man/region.Rd
|
844d8aa602ab87edc46af4d51ccdf036359ec919
|
[] |
no_license
|
heike/rotations
|
2356a863c2acfe6a70c85b358b64c3bfac3b1ce8
|
0040f4f673b328aa629f396d09365a76f935ac8e
|
refs/heads/master
| 2020-05-18T00:06:21.259519
| 2013-06-18T20:53:32
| 2013-06-18T20:53:32
| 4,933,660
| 8
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,085
|
rd
|
region.Rd
|
\name{region}
\alias{region}
\alias{region.Q4}
\alias{region.SO3}
\title{Confidence Region for Mean Rotation}
\usage{
region(Qs, method, alpha, ...)
\method{region}{Q4} (Qs, method, alpha, ...)
\method{region}{SO3} (Rs, method, alpha, ...)
}
\arguments{
\item{Rs,Qs}{A \eqn{n\times p}{n-by-p} matrix where each
row corresponds to a random rotation in matrix (p=9) or
quaternion form (p=4)}
\item{method}{Character string specifying which type of
interval is required}
\item{alpha}{The alpha level desired, e.g. 0.95 or 0.90}
\item{...}{Additional arguments}
}
\value{
radius of the confidence region centered at the projected
mean
}
\description{
Find the radius of a \eqn{100\alpha%} confidence region
for the projected mean
}
\examples{
Rs <- ruars(20, rcayley, kappa = 100)
region(Rs, method = 'rancourt', alpha = 0.9)
}
\references{
Rancourt D, Rivest L and Asselin J (2000). "Using
orientation statistics to investigate variations in human
kinematics." _Journal of the Royal Statistical Society:
Series C (Applied Statistics)_, *49*(1), pp. 81-94.
}
|
75f6e64a80d5b68b4c5b8705398469d41dca3ec8
|
f88009980e71fda893d0b0d9315eb9091e435eb1
|
/r-scripts/mkt1_T_posteriorplot.R
|
74903d386b983043efb8e6883d85eb4ae13a9d24
|
[] |
no_license
|
samiramarx/Bayesian-elasticities
|
5ac12c5b263e12c41e28784b23bed6b765d1ef91
|
8c82ba1917cb4c54797cb71b2b6f00d671835181
|
refs/heads/master
| 2020-03-28T15:50:24.994884
| 2018-09-16T14:52:36
| 2018-09-16T14:52:36
| 148,630,953
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,837
|
r
|
mkt1_T_posteriorplot.R
|
#=========================================
# MSc Transporte Economics
# TRAN 5911 Dissertation
# Samira Marx
#=========================================
# Plots posterior densities of the Bayesian demand-model coefficients for
# market 1 (two fitted stanfit objects: V2F and V2R), overlaying the OLS
# point estimate (red line) and the posterior mean with its credible
# interval (solid/dotted black lines), one facet per coefficient.
#
# NOTE(review): the reference values in `ref_values` (OLS estimates,
# posterior means and interval bounds) are hard-coded from earlier runs --
# confirm they match the loaded .RData fits before reuse.
# NOTE(review): dplyr::data_frame() is deprecated in current dplyr
# (use tibble()); kept as-is here.
#--------------------------------------
# library
#--------------------------------------
library(xtable)
library(ggplot2)
library(plyr)
library(dplyr)
library(reshape2)
#--------------------------------------
# PLOTTING POSTERIOR DISTRIBUTIONS
#V2F
# Load the fitted stanfit object `mkt1_v2f`.
load("bayes_simulation_mkt1_v2f_T.RData")
#rename model components
names(mkt1_v2f)[1] <- "Constant"
names(mkt1_v2f)[2] <- "f_2F"
names(mkt1_v2f)[3] <- "f_2R"
names(mkt1_v2f)[4] <- "g"
names(mkt1_v2f)[5] <- "gamma"
names(mkt1_v2f)[6] <- "sigma"
names(mkt1_v2f)
# Hard-coded reference lines per coefficient (see NOTE above).
ref_values <- data_frame(variable= c("Constant", "f_2F", "f_2R", "g", "gamma"),
ols_value = c(9.13, -1.28,0.17, 0.49, -0.98),
mean_value = c(6.10, -1.33, 0.17, 0.77, -0.89),
lower_bound = c(4.97, -1.42, 0.07, 0.67, -0.98),
upper_bound = c(7.11, -1.23, 0.26, 0.87, -0.80))
# Extract params as a (draws * number of chains * number of params) array
temp <- rstan::extract(mkt1_v2f, permuted = F, pars = c("beta")) %>%
# Stack the chains on top of one another and drop the chains label
plyr::adply(2) %>%
dplyr::select(-chains) %>%
# Convert from wide form to long form (stack the columns on one another)
melt() %>%
# Perform a left join with the known parameters
left_join(ref_values, by = "variable")
# Order the facets: elasticities first, intercept last.
temp$variable = factor(temp$variable, levels=c('f_2F','f_2R', 'g','gamma', 'Constant'))
temp %>%
# Generate the plot
ggplot(aes(x = value)) +
geom_density(fill = "grey", alpha = 0.5) + # Make it pretty
facet_wrap(~ variable, scales = "free", ncol=3) +
geom_vline(aes(xintercept = mean_value)) +
geom_vline(aes(xintercept = lower_bound), linetype="dotted")+
geom_vline(aes(xintercept = upper_bound), linetype="dotted")+
geom_vline(aes(xintercept = ols_value), colour ="red")+
xlab("") + ylab("Density")
# theme(axis.text.y=element_blank(), axis.ticks.y=element_blank())
# ggtitle("Actual parameters and estimates\ncorrectly specified model\n")
#V2R
# Same pipeline for the second model specification (V2R).
load("bayes_simulation_mkt1_v2r_T.RData")
#rename model components
names(mkt1_v2r)[1] <- "Constant"
names(mkt1_v2r)[2] <- "f_2F"
names(mkt1_v2r)[3] <- "f_2R"
names(mkt1_v2r)[4] <- "g"
names(mkt1_v2r)[5] <- "gamma"
names(mkt1_v2r)[6] <- "sigma"
names(mkt1_v2r)
ref_values <- data_frame(variable= c("Constant", "f_2F", "f_2R", "g", "gamma"),
ols_value = c(17.58, 0.81, -0.89, -0.52, -1.06),
mean_value = c(11.27, 0.70, -0.89, 0.06, -0.88),
lower_bound = c(10.32, 0.61, -1, 0.00, -0.97),
upper_bound = c(12.06, 0.80, -0.80, 0.14, -0.80))
# Extract params as a (draws * number of chains * number of params) array
temp <- rstan::extract(mkt1_v2r, permuted = F, pars = c("beta")) %>%
# Stack the chains on top of one another and drop the chains label
plyr::adply(2) %>%
dplyr::select(-chains) %>%
# Convert from wide form to long form (stack the columns on one another)
melt() %>%
# Perform a left join with the known parameters
left_join(ref_values, by = "variable")
temp$variable = factor(temp$variable, levels=c('f_2F','f_2R','g','gamma', 'Constant'))
temp %>%
# Generate the plot
ggplot(aes(x = value)) +
geom_density(fill = "grey", alpha = 0.5) + # Make it pretty
facet_wrap(~ variable, scales = "free", ncol=3) +
geom_vline(aes(xintercept = mean_value)) +
geom_vline(aes(xintercept = lower_bound), linetype="dotted")+
geom_vline(aes(xintercept = upper_bound), linetype="dotted")+
geom_vline(aes(xintercept = ols_value), colour ="red")+
xlab("") + ylab("Density")
# ggtitle("Actual parameters and estimates\ncorrectly specified model\n")
|
3faa534cb63f9a25198a691ca977c6618151da3c
|
6270ce269c59f9dbcbd27980ad4621c08a442681
|
/activity7/activity7_script.r
|
07fde9603ee38c8ae85186770d33ff96ad58ae17
|
[] |
no_license
|
Jbeniers/ENVST206
|
13f23145550b37d752a06a63f373b12fbcdf06cc
|
ec8fa5e11fc2b26a91b91bf67672e76a2aa2a0be
|
refs/heads/master
| 2023-01-23T20:38:59.676015
| 2020-11-24T18:10:43
| 2020-11-24T18:10:43
| 292,412,106
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 777
|
r
|
activity7_script.r
|
## Activity 7 ##

# Questions 1-4 are answered in the accompanying word document.

## Question 5 ##

# Read the U.S. Drought Monitor export for New York state.
drought <- read.csv("/Users/JonBeniers/Desktop/ENVST206/Activity 7 Data/dm_export_20191022_20201022.csv")

# The fifth column holds the cumulative % area in D1-level drought.
d1_area <- drought[, 5]

# Mean & SD of NY D1 level drought from 10/22/2019 to 10/22/2020
mean(d1_area)
sd(d1_area)

# plot of NY D1 level drought from 10/22/2019 to 10/22/2020
plot(d1_area,
     type = "b",
     pch = 19,
     ylab = "Cumulative % Area of NY in D1 Level of Drought",
     xlab = "53 Observations Recorded between 10/22/2019 - 10/22/2020 ")

## Questions 6-8 are answered in the word document.
|
3fd2e6206144540ffaba03d3008d9b5635cf30cc
|
12ccdd95c02ca34822d0a3862a28bb17170955f5
|
/R/cm.R
|
656ee79437ebb8034d6b36f71f4b5460c155af91
|
[] |
no_license
|
cran/iRegression
|
d156b210e062f4548d5cd1fa8fc64cb00832b4f1
|
4d2c16e000ed423412f980b5fe7ff27519216fe3
|
refs/heads/master
| 2021-01-19T01:41:43.944423
| 2016-07-18T20:09:24
| 2016-07-18T20:09:24
| 17,696,732
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,144
|
r
|
cm.R
|
# Generic for center-method (CM) regression on interval-valued data.
# Dispatches on the class of `formula1`; see cm.default() / cm.formula().
cm <- function(formula1,formula2, data, ...){
UseMethod("cm")
}
cm.default <- function (formula1, formula2, data,...)
{
  # Center-method (CM) regression for interval-valued data: fit ordinary
  # least squares to the interval midpoints of the responses/predictors,
  # then score both interval bounds with the shared midpoint coefficients.
  #
  # formula1, formula2: model formulas for the lower and upper bounds.
  # data: data frame holding the interval bounds.
  # Returns an object of class "cm" with coefficients, vcov, sigma, df,
  # fitted values and residuals for both bounds, and the matched call.
  #
  # Fix: the original also computed the half-ranges (x2 - x1, y2 - y1) and
  # never used them; that dead code has been removed.

  # OLS on the midpoints via the QR decomposition.
  fit_midpoints <- function(x_low, x_up, y_low, y_up)
  {
    x_mid <- (x_low + x_up) / 2
    y_mid <- (y_low + y_up) / 2
    x_qr <- qr(x_mid)
    beta <- solve.qr(x_qr, y_mid)
    dof <- nrow(x_mid) - ncol(x_mid)
    sigma2 <- sum((y_mid - x_mid %*% beta)^2) / dof
    covmat <- sigma2 * chol2inv(x_qr$qr)
    colnames(covmat) <- rownames(covmat) <- colnames(x_mid)
    list(coefficients = beta,
         vcov = covmat,
         sigma = sqrt(sigma2),
         df = dof)
  }

  # Design matrices and responses for the lower (1) and upper (2) bounds.
  mf1 <- model.frame(formula = formula1, data = data)
  x1 <- as.matrix(model.matrix(attr(mf1, "terms"), data = mf1))
  y1 <- as.numeric(model.response(mf1))
  mf2 <- model.frame(formula = formula2, data = data)
  x2 <- as.matrix(model.matrix(attr(mf2, "terms"), data = mf2))
  y2 <- as.numeric(model.response(mf2))

  est <- fit_midpoints(x1, x2, y1, y2)

  # Both bounds are scored with the midpoint coefficients.
  est$fitted.values.l <- as.vector(x1 %*% est$coefficients)
  est$fitted.values.u <- as.vector(x2 %*% est$coefficients)
  est$residuals.l <- y1 - est$fitted.values.l
  est$residuals.u <- y2 - est$fitted.values.u
  est$call <- match.call()
  class(est) <- "cm"
  est
}
# Print method for "cm" objects: shows the call, then the coefficient
# vector, the midpoint residual standard error and the residual degrees
# of freedom.
print.cm <- function(x, ...)
{
cat("Call:\n")
print(x$call)
cat("\n")
# Relabel every coefficient after the intercept with its positional index;
# this only modifies the local copy of `x`, not the fitted object.
names(x$coefficients)[-1] <- seq(1,length(names(x$coefficients)[-1]))
print(list(coefficients = x$coefficients,
sigma = x$sigma, df = x$df))
}
summary.cm <- function(object, ...)
{
  # Summary method for "cm" fits: a coefficient table (estimate and
  # standard error from the midpoint model's covariance matrix) plus the
  # root-mean-square error of the lower- and upper-bound residuals.
  #
  # Fix: the original computed a t-value vector that was never reported;
  # that dead computation has been removed.
  rmse.l <- sqrt(mean(object$residuals.l^2))
  rmse.u <- sqrt(mean(object$residuals.u^2))
  se <- sqrt(diag(object$vcov))
  TAB <- cbind(Estimate = coef(object),
               StdErr = se)
  res <- list(call = object$call,
              coefficients = TAB,
              RMSE.l = rmse.l,
              RMSE.u = rmse.u)
  class(res) <- "summary.cm"
  res
}
# Print method for "summary.cm" objects: the call, the coefficient table
# (non-intercept rows relabelled with positional indices), then the lower-
# and upper-bound RMSE values.
print.summary.cm <- function(x, ...)
{
cat("Call:\n")
print(x$call)
cat("\n")
# Relabel predictor rows with 1..p; modifies only the local copy of `x`.
rownames(x$coefficients)[-1] <- seq(1,length(rownames(x$coefficients)[-1]))
print(x$coefficients)
cat("\n")
cat("RMSE.L:\n")
print(x$RMSE.l)
cat("RMSE.U:\n")
print(x$RMSE.u)
}
fitted.cm <- function(object, ...)
{
  # Fitted lower/upper interval bounds of a "cm" fit, rounded to three
  # decimals and returned as a two-column matrix (fit.Min, fit.Max)
  # carrying class "fitted.cm".
  out <- cbind(fit.Min = object$fitted.values.l,
               fit.Max = object$fitted.values.u)
  out <- round(out, digits = 3)
  class(out) <- "fitted.cm"
  out
}
residuals.cm <- function (object, ...)
{
  # Lower/upper residuals of a "cm" fit, rounded to three decimals and
  # returned as a two-column matrix (resid.Min, resid.Max) carrying class
  # "residuals.cm".
  out <- round(cbind(resid.Min = object$residuals.l,
                     resid.Max = object$residuals.u),
               digits = 3)
  class(out) <- "residuals.cm"
  out
}
cm.formula <- function(formula1, formula2, data = list(), ...)
{
  # Formula-interface entry point for cm(): delegate the fit to
  # cm.default() (which builds its own model frames), then stamp the
  # matched call and the two formulas onto the returned object.
  #
  # Fix: the original also built model frames/matrices here and then
  # discarded them; cm.default() recomputes everything it needs, so that
  # dead work has been removed.
  est <- cm.default(formula1, formula2, data, ...)
  est$call <- match.call()
  est$formula1 <- formula1
  est$formula2 <- formula2
  est
}
|
c8f7b3e252ac60474ab4baf3bdbd1b360511942e
|
4deb3ec8ef4f5f24b0a9fe82c6802c61e87e46ec
|
/Supervised Learning in R - Classification.R
|
e748407910cd610c2719f6eedc04aee15e27ecb7
|
[] |
no_license
|
wcaughey1984/datacamp
|
52647f52867093626f80999faad227e0123a11e2
|
f950a6b4a3e0a7283591dacd6956cf78c8e6dfd7
|
refs/heads/master
| 2023-02-23T12:24:09.351470
| 2021-01-27T16:00:53
| 2021-01-27T16:00:53
| 282,528,594
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 13,489
|
r
|
Supervised Learning in R - Classification.R
|
#
# Title: Supervised Learning in R - Classification
# Purpose: (Personal Development) Code for the Supervised Learning in R Course
# Author: Billy Caughey
# Date: 2021.01.26 - Initial Build
#
# NOTE(review): this file transcribes DataCamp exercises.  The data objects
# it uses (signs, next_sign, test_signs, signs_test, where9am, thursday9am,
# saturday9am, locations, weekday_afternoon, weekday_evening,
# weekend_afternoon, donors, loans, good_credit, bad_credit) are supplied
# by the course environment and are never defined or loaded here, so the
# script will not run as-is.
##### Chapter 1 - k-Nearest Neighbors #####
# Classification: tasks that require predicting a categorical outcome
# Nearest Neighbors: points that look like each other should have the same type
# I can use the euclidean distance to determine similarity distances
# library(class) - library for knn
# pred <- knn(training_data, testing_data, training_labels)
##### Recognizing a road sign with kNN #####
# Load the 'class' package
library(class)
# Create a vector of labels
sign_types <- signs$sign_type
# Classify the next sign observed
knn(train = signs[-1], test = next_sign, cl = sign_types)
##### Exploring the traffic sign dataset #####
# Examine the structure of the signs dataset
str(signs)
# Count the number of signs of each type
# NOTE(review): the column is called `sign_type` everywhere else in this
# script; `sign_types` here is likely a transcription typo -- confirm.
table(signs$sign_types)
# Check r10's average red level by sign type
aggregate(r10 ~ sign_type, data = signs, mean)
##### Classigying a collection of road signs #####
# Use kNN to identify the test road signs
sign_types <- signs$sign_type
signs_pred <- knn(train = signs[-1], test = test_signs[-1], cl = signs$sign_type)
# Create a confusion matrix of the predicted versus actual values
# NOTE(review): signs_actual is a one-column data frame and signs_pred[1]
# is only the FIRST prediction, so the next two lines look garbled --
# presumably they should be table(signs_actual$sign_type, signs_pred) and
# mean(signs_actual$sign_type == signs_pred).  Confirm against the course.
signs_actual <- test_signs[1]
table(signs_actual, signs_pred[1])
# Compute the accuracy
mean(signs_actual == signs_pred)
##### What about the 'k' in kNN? #####
# It is not always the case that large k's are better
# No universal pattern for deciding k
##### Testing other 'k' values #####
# Compute the accuracy of the baseline model (default k = 1)
# NOTE(review): train = signs here includes the label column (earlier
# calls used signs[-1]); likewise signs_test vs signs_test[1] in the
# accuracy lines below is inconsistent -- likely transcription slips.
k_1 <- knn(train = signs, test = signs_test, cl = sign_types)
mean(k_1 == signs_test[1])
# Modify the above to set k = 7
k_7 <- knn(train = signs, test = signs_test, cl = signs$sign_type, k = 7)
mean(k_7 == signs_test)
# Set k = 15 and compare to the above
k_15 <- knn(train = signs, test = signs_test, cl = signs$sign_type, k = 15)
mean(k_15 == signs_test)
##### Seeing how the neighbors voted #####
# Use the prob parameter to get the proportion of votes for the winning class
sign_pred <- knn(train = signs[-1], test = signs_test[-1], cl = signs$sign_type, k = 7, prob = TRUE)
# Get the "prob" attribute from the predicted classes
sign_prob <- attr(sign_pred, "prob")
# Examine the first several predictions
head(sign_pred)
# Examine the proportion of votes for the winning class
head(sign_prob)
##### Data Preparation for kNN #####
# kNN is in NUMERIC FORMAT!
# I will need to scale variables in the data set
##### Understanding Bayesian Methods #####
# Bayesian methods are proability rules based on historical data
# P(A|B) = P(A and B) / P(B)
##### Computing Probabilities #####
# Compute P(A)
p_A <- nrow(subset(where9am, location == "office")) / nrow(where9am)
# Compute P(B)
p_B <- nrow(subset(where9am, daytype == "weekday")) / nrow(where9am)
# Compute the observed P(A and B)
# NOTE(review): the second condition is passed as subset()'s `select`
# argument here, not combined with &; a joint probability needs
# subset(where9am, location == "office" & daytype == "weekday").  Confirm.
p_AB <- nrow(subset(where9am, location == "office", daytype == "weekday")) / nrow(where9am)
# Compute P(A | B) and print its value
p_A_given_B <- p_AB / p_B
p_A_given_B
##### A simple Naive Bayes location Model #####
# Load the naivebayes package
library(naivebayes)
# Build the location prediction model
locmodel <- naive_bayes(location ~ daytype, data = where9am)
# Predict Thursday's 9am location
predict(locmodel, thursday9am)
# Predict Saturdays's 9am location
predict(locmodel, saturday9am)
##### Examining Raw Probabilities #####
# The 'naivebayes' package is loaded into the workspace
# and the Naive Bayes 'locmodel' has been built
# Examine the location prediction model
locmodel
# Obtain the predicted probabilities for Thursday at 9am
predict(locmodel, thursday9am , type = "prob")
# Obtain the predicted probabilities for Saturday at 9am
predict(locmodel, saturday9am , type = "prob")
##### Understanding NB's "naivety" #####
# A naive simplification: The algorithm assumes the events are independent
# The naive assumption doesn't necessarily hold in the real world...
# But the naive model holds true in many really events.
# The Laplace correction is used when a joint probability is 0 in the chain
##### A more sophisticated location model #####
# The 'naivebayes' package is loaded into the workspace already
# Build a NB model of location
locmodel <- naive_bayes(location ~ daytype + hourtype, locations)
# Predict Brett's location on a weekday afternoon
predict(locmodel, weekday_afternoon)
# Predict Brett's location on a weekday evening
predict(locmodel, weekday_evening)
##### Preparing for unforeseen circumstances #####
# The 'naivebayes' package is loaded into the workspace already
# The Naive Bayes location model (locmodel) has already been built
# Observe the predicted probabilities for a weekend afternoon
predict(locmodel, weekend_afternoon, type = "prob")
# Build a new model using the Laplace correction
locmodel2 <- naive_bayes(location ~ daytype + hourtype,
locations,
laplace = 1)
# Observe the new predicted probabilities for a weekend afternoon
predict(locmodel2, weekend_afternoon, type = "prob")
##### Applying Naive Bayes to other problems #####
# naive bayes works best when I have to consider large data
# naive bayes works well in text data
# binning is the way to bin continuous data
# for text data, we use "bag of words"
##### Making binary predictions with regression #####
# Logistic regression for binary outcomes
##### Building simple logistic regression models #####
# Examine the dataset to identify potential independent variables
str(donors)
# Explore the dependent variable
table(donors$donated)
# Build the donation model
donation_model <- glm(donated ~ bad_address + interest_religion + interest_veterans,
data = donors, family = "binomial")
# Summarize the model results
summary(donation_model)
##### making a binary prediction #####
# Estimate the donation probability
donors$donation_prob <- predict(donation_model, type = "response")
# Find the donation probability of the average prospect
mean(donors$donated)
# Predict a donation if probability of donation is greater than average (0.0504)
donors$donation_pred <- ifelse(donors$donation_prob > 0.0504, 1, 0)
# Calculate the model's accuracy
mean(donors$donation_pred == donors$donated)
##### Model performance tradeoffs #####
# There are times a models accuracy is misleading
# ROC curves can help
# Shape of the ROC curve matters
##### Calculating ROC Curves and AUC ####
# Load the pROC package
library(pROC)
# Create a ROC curve
ROC <- roc(donors$donated, donors$donation_prob)
# Plot the ROC curve
plot(ROC, col = "blue")
# Calculate the area under the curve (AUC)
auc(ROC)
##### Dummy variables, missing data, and interactions #####
# Dummy Variables -> categories to binary
# Imputation Strategies -> mean, missing values
# Interaction Effect -> Combination of inputs have greater impact than inputs alone
##### Coding Categorical Features #####
# Convert the wealth rating to a factor
donors$wealth_levels <- factor(donors$wealth_rating,
levels = c(0, 1, 2, 3),
labels = c("Unknown", "Low", "Medium", "High"))
# Use relevel() to change reference category
donors$wealth_levels <- relevel(donors$wealth_levels, ref = "Medium")
# See how our factor coding impacts the model
summary(glm(donated ~ wealth_levels,
data = donors,
family = "binomial"))
##### Handling Missing Data #####
# Find the average age among non-missing values
summary(donors$age)
# Impute missing age values with the mean age
donors$imputed_age <- ifelse(is.na(donors$age),
round(mean(donors$age, na.rm = T),2),
donors$age)
# Create missing value indicator for age
donors$missing_age <- ifelse(is.na(donors$age), 1, 0)
##### Building a sophisticated model #####
# Build a recency, frequency, and money (RFM) model
rfm_model <- glm(donated ~ money + recency * frequency,
data = donors,
family = "binomial")
# Summarize the RFM model to see how the parameters were coded
summary(rfm_model)
# Compute predicted probabilities for the RFM model
rfm_prob <- predict(rfm_model, type = "response")
# Plot the ROC curve and find AUC for the new model
library(pROC)
ROC <- roc(donors$donated, rfm_prob)
plot(ROC, col = "red")
auc(ROC)
##### Automatic feature selection #####
# Backward Selection
# Forward Selection
# Neither backward or forward model may produce the 'best' model
# The outcome model may not be the same either
# Stepwise selection is built in the absence of domain knowledge
##### Buidling a stepwise regression model #####
# Specify a null model with no predictors
null_model <- glm(donated ~ 1, data = donors, family = "binomial")
# Specify the full model using all of the potential predictors
full_model <- glm(donated ~ ., data = donors, family = "binomial")
# Use a forward stepwise algorithm to build a parsimonious model
step_model <- step(null_model,
scope = list(lower = null_model, upper = full_model),
direction = "forward")
# Estimate the stepwise donation probability
step_prob <- predict(step_model, type = "response")
# Plot the ROC of the stepwise model
library(pROC)
ROC <- roc(donors$donated, step_prob)
plot(ROC, col = "red")
auc(ROC)
##### Making decisions with trees #####
# rpart = recursive partitioning
##### Building a simple decision tree #####
# Load the rpart package
library(rpart)
# Build a lending model predicting loan outcome versus loan amount and credit score
loan_model <- rpart(outcome ~ loan_amount + credit_score,
data = loans,
method = "class",
control = rpart.control(cp = 0))
# Make a prediction for someone with good credit
predict(loan_model, good_credit, type = "class")
# Make a prediction for someone with bad credit
predict(loan_model, bad_credit, type = "class")
##### Visualizaing Classification trees #####
# Examine the loan_model object
loan_model
# Load the rpart.plot package
library(rpart.plot)
# Plot the loan_model with default settings
rpart.plot(loan_model)
# Plot the loan_model with customized settings
rpart.plot(loan_model, type = 3, box.palette = c("red", "green"), fallen.leaves = TRUE)
##### Creating random test datasets #####
# Determine the number of rows for training
nrow(loans) * 0.75
# Create a random sample of row IDs
# NOTE(review): 8484 is hard-coded (presumably 75% of nrow(loans) in the
# course data); no set.seed() either, so the split is not reproducible.
sample_rows <- sample(x = nrow(loans), 8484)
# Create the training dataset
loans_train <- loans[sample_rows,]
# Create the test dataset
loans_test <- loans[-sample_rows,]
##### Building and evaluating a larger tree #####
# Grow a tree using all of the available applicant data
loan_model <- rpart(outcome ~ .,
data = loans_train,
method = "class",
control = rpart.control(cp = 0, maxdepth = 6))
# Make predictions on the test dataset
loans_test$pred <- predict(loan_model,
loans_test,
type = "class")
# Examine the confusion matrix
table(loans_test$outcome, loans_test$pred)
# Compute the accuracy on the test dataset
mean(loans_test$outcome == loans_test$pred)
##### Tending to classification trees #####
# Pre-pruning - cleaning before the tree grows
# Post-pruning - cleaning after the tree has grown
##### Preventing overgrown trees #####
# Grow a tree with maxdepth of 6
loan_model <- rpart(outcome ~ .,
data = loans_train,
method = "class",
control = rpart.control(cp = 0, maxdepth = 6))
# Make a class prediction on the test set
loans_test$pred <- predict(loan_model,
loans_test,
type = "class")
# Compute the accuracy of the simpler tree
mean(loans_test$outcome == loans_test$pred)
# Swap maxdepth for a minimum split of 500
loan_model <- rpart(outcome ~ .,
data = loans_train,
method = "class",
control = rpart.control(cp = 0, minsplit = 500))
# Run this. How does the accuracy change?
loans_test$pred <- predict(loan_model, loans_test, type = "class")
mean(loans_test$pred == loans_test$outcome)
##### Creating a nicely pruned tree #####
# Grow an overly complex tree
loan_model <- rpart(outcome ~ .,
data = loans_train,
method = "class",
control = rpart.control(cp = 0))
# Examine the complexity plot
plotcp(loan_model)
# Prune the tree
loan_model_pruned <- prune(loan_model, cp = 0.0014)
# Compute the accuracy of the pruned tree
loans_test$pred <- predict(loan_model_pruned, loans_test, type = "class")
mean(loans_test$pred == loans_test$outcome)
#### Seeing the forest from the trees #####
# Random Forests vs Classification Trees
##### Building a random forest model #####
# Load the randomForest package
library(randomForest)
# Build a random forest model
loan_model <- randomForest(outcome ~ ., data = loans_train)
# Compute the accuracy of the random forest
loans_test$pred <- predict(loan_model,
loans_test,
type = "class")
mean(loans_test$pred == loans_test$outcome)
|
198fa38a32bfd1107265fedc252e6dd404f7e138
|
96119c7b733cb899f9b92696745a481d73ce10d8
|
/RandomForestSnowEmergency.R
|
cfa8f34c68e58316d72fe7d24e09d5f2630d86cf
|
[] |
no_license
|
claraj/snow_emergency
|
c58b52eda2b2bbf75fa3a2d5f75c15ba3fc8b01f
|
7e66c6ab0810b42133bc294ae38fe023df791252
|
refs/heads/master
| 2020-05-20T07:26:32.251562
| 2019-05-13T04:56:51
| 2019-05-13T04:56:51
| 185,451,165
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,367
|
r
|
RandomForestSnowEmergency.R
|
# Using Random Forest prediction on sample snow emergency data
#
# Workflow:
#   1. read the (fake) tag/tow data and recode categorical columns as factors
#   2. bin driving distance and duration into quartile categories 1..4
#   3. fit a random forest predicting Type from location/time/drive features
#   4. rasterize each predictor over a Minneapolis grid and produce a
#      predictive raster layer from the fitted forest
library("randomForest")
library("raster")

# NOTE(review): hard-coded absolute path; this only runs on the original
# author's machine.
setwd("/Users/student1/Development/r/snow_proj/data")
dataframe <- read.csv(file="SNOW_TAG_TOW_TYPES_TESTING_R_SCRIPT_FAKE_FAKE_FAKE_DATA.csv")
head(dataframe)

# Ward (1, 2, 3....), Tow_Zone (1 - 6), Day (1, 2, 3) are numerical and interpreted as numeric type.
# But here, they should be treated as categorical data, so convert to factors
dataframe$Ward <- factor(dataframe$Ward)
dataframe$Tow_Zone <- factor(dataframe$Tow_Zone)
# dataframe$Day <- factor(dataframe$Day)

# Summarise min / quartiles / max of a numeric vector as a one-column matrix
# (kept for inspection, mirroring the original RB_DI / RB_DU tables).
quartile_summary <- function(v) {
  rb <- rbind(per_00  = min(v),
              per_25  = quantile(v, 0.25),
              per_50  = quantile(v, 0.50),
              per_75  = quantile(v, 0.75),  # BUGFIX: was quantile(v, 0.55)
              per_100 = max(v))
  dimnames(rb)[[2]] <- "Value"
  rb
}

# Bin a numeric vector into quartile categories:
# [min, q25) -> 1, [q25, q50) -> 2, [q50, q75) -> 3, [q75, max] -> 4.
# findInterval() reproduces the original ">= lower & < upper" chain exactly.
quartile_bins <- function(v) {
  breaks <- quantile(v, c(0.25, 0.50, 0.75))  # BUGFIX: 75th pct was 0.55
  findInterval(v, breaks) + 1
}

# Create categories for driving distance and driving duration
# Help from http://rcompanion.org/handbook/E_05.html categorizing data
RB_DI <- quartile_summary(dataframe$distance)
dataframe$distanceCat <- quartile_bins(dataframe$distance)
# dataframe$distanceCat <- factor(dataframe$distanceCat)

RB_DU <- quartile_summary(dataframe$duration)
dataframe$durationCat <- quartile_bins(dataframe$duration)
# dataframe$durationCat <- factor(dataframe$durationCat)

# Save categories
summary(dataframe)
write.csv(dataframe, "categorize_snow_emergency.csv")

# Run the random forest model with the columns given
random_forest <- randomForest( Type ~ Ward + Community + Day + Tow_Zone + STREET_TYPE + distanceCat + durationCat, data=dataframe, ntree=500, importance=TRUE, proximity=TRUE)
importance(random_forest)
# dev.off()
varImpPlot(random_forest)

# Create coordinates for dataframe, which converts dataframe to a SpatialPointsDataFrame
coordinates(dataframe) <- ~Longitude+Latitude

############### Creating predictive raster layer ###############
## Create rasters for each column of interest
# Extent of points in Minneapolis
lonMin <- -93.3275270000000035
lonMax <- -93.2050569999999965
latMin <- 44.8912320000000022
latMax <- 45.0509410000000017
cell_size <- 0.0005
ncols <- (( lonMax - lonMin) / cell_size) + 1
nrows <- (( latMax - latMin) / cell_size) + 1
# BUGFIX: extent() takes (xmin, xmax, ymin, ymax); the original passed
# latMax twice, producing a zero-height extent.
ext <- extent(lonMin, lonMax, latMin, latMax)

r_d <- raster(ncols=ncols, nrows=nrows, xmn=lonMin, xmx=lonMax, ymn=latMin, ymx=latMax)
day_raster = rasterize(dataframe, r_d, "Day", fun="min", filename="Day.tif", overwrite=TRUE)
plot(day_raster)

r_di <- raster(ncols=ncols, nrows=nrows, xmn=lonMin, xmx=lonMax, ymn=latMin, ymx=latMax)
distance_raster = rasterize(dataframe, r_di, "distanceCat", fun="min", filename="distanceCat.tif", overwrite=TRUE)
plot(distance_raster)

r_du <- raster(ncols=ncols, nrows=nrows, xmn=lonMin, xmx=lonMax, ymn=latMin, ymx=latMax)
duration_raster = rasterize(dataframe, r_du, "durationCat", fun=mean, filename="durationCat.tif", overwrite=TRUE)

# Factor columns: rasterize the numeric level codes.
r_w <- raster(ncols=ncols, nrows=nrows, xmn=lonMin, xmx=lonMax, ymn=latMin, ymx=latMax)
ward_raster = rasterize(dataframe, r_w, "Ward", fun=function(x, na.rm) { max(as.numeric(x)) }, filename="Ward.tif", overwrite=TRUE)
plot(ward_raster)

r_t <- raster(ncols=ncols, nrows=nrows, xmn=lonMin, xmx=lonMax, ymn=latMin, ymx=latMax)
tow_zone_raster = rasterize(dataframe, r_t, "Tow_Zone", fun=function(x, na.rm) { max(as.numeric(x)) }, filename="Tow_Zone.tif", overwrite=TRUE)

r_c <- raster(ncols=ncols, nrows=nrows, xmn=lonMin, xmx=lonMax, ymn=latMin, ymx=latMax)
community_raster = rasterize(dataframe, r_c, "Community", fun=function(x, na.rm) { max(as.numeric(x)) }, filename="Community.tif", overwrite=TRUE)

r_s <- raster(ncols=ncols, nrows=nrows, xmn=lonMin, xmx=lonMax, ymn=latMin, ymx=latMax)
street_type_raster = rasterize(dataframe, r_s, "STREET_TYPE", fun=function(x, na.rm) { max(as.numeric(x)) }, filename="STREET_TYPE.tif", overwrite=TRUE)

raster_combo <- c(ward_raster, community_raster, day_raster, tow_zone_raster, street_type_raster, distance_raster, duration_raster)
# BUGFIX: the original `for (r in raster_combo) extent(r) <- ext` modified
# only the loop-local copy of each raster; assign back into the list so the
# shared extent actually sticks.
raster_combo <- lapply(raster_combo, function(r) {
  extent(r) <- ext
  r
})
raster_stack <- stack(raster_combo)
# Layer names must match the predictor names used when fitting the forest.
names(raster_stack) <- c("Ward", "Community", "Day", "Tow_Zone", "STREET_TYPE", "distanceCat", "durationCat")
predict_raster_layer <- predict(raster_stack, random_forest, "predictive_snow_emergency_raster.tif", overwrite=TRUE)
#dev.off()
plot(predict_raster_layer)
|
c63c28e6beb8bb5254423602cea4c258ce4ec68e
|
d7b1f6f13781ebf0daa817ac6e328513813db7e6
|
/man/wpd_get_views_for_year.Rd
|
3ce299237bd6b20720094dc2f29b0c6d0e5ea4b3
|
[] |
no_license
|
petermeissner/wikipediadumbs
|
2d381d09d1925c921f753b371b21236177b051f5
|
f8565d9796ee0273efede8f662809df251bafbf7
|
refs/heads/master
| 2020-08-06T11:10:49.340760
| 2018-11-20T09:07:50
| 2018-11-20T09:07:50
| 212,954,772
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 269
|
rd
|
wpd_get_views_for_year.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/wpd_get_views_for_year.R
\name{wpd_get_views_for_year}
\alias{wpd_get_views_for_year}
\title{wpd_get_views_for_year}
\usage{
wpd_get_views_for_year()
}
\description{
wpd_get_views_for_year
}
|
62b59d3a06a40f8b906226c54f1e417e329ce658
|
7d3e7c1d0a80ac881103bef9c1ba8a2a8693fddd
|
/R_Tutorials/Data_Problems/figure_resfit.R
|
851a623bd27f28d37c9f5545d7a022f4be5c3d9f
|
[] |
no_license
|
wagafo/Applied_Statistics
|
aa6c7418e14bad7154b1405847147e962d42cd47
|
3d1b9fe9ec4f8187b1bcb80c0b7a2cfa75b1460a
|
refs/heads/master
| 2021-06-26T21:30:43.063498
| 2020-11-22T12:14:44
| 2020-11-22T12:14:44
| 184,111,016
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 251
|
r
|
figure_resfit.R
|
# Side-by-side residuals-vs-fitted diagnostic plots for two simulated fits:
# Case A (correctly specified linear model) vs Case B (quadratic truth fitted
# with a straight line, which leaves curvature in the residuals).
# NOTE(review): no set.seed(), so the exact plots differ between runs.
parold <- par(no.readonly = TRUE)  # save only restorable graphics settings
par(mfrow = c(1, 2))
x <- runif(100, 0, 100)
y <- 3 + 170 * x + rnorm(100, 0, 1000)
fit <- lm(y ~ x)
plot(fit, which = 1, main = "Case A")
y <- 3 + 170 * x - 2 * x^2 + rnorm(100, 0, 1000)
fit <- lm(y ~ x)
plot(fit, which = 1, main = "Case B")
# BUGFIX: was `par <- parold`, which only created a variable named "par"
# and never restored the graphics state.
par(parold)
|
284ead0f6c2a5eb8f63719488cf41e4a446f89a2
|
4720a0a69ff2b9aa90440339b359f6ba4cdd873c
|
/tests/testthat/test-tau.estimate.R
|
060ef38e9e8f7e99ab71b6c2c62faad86a6025f3
|
[] |
no_license
|
llrs/RGCCA
|
170173c99d0bf686bd6f98206dd09b94f708a46f
|
8a208d581b9dca50b8616938657b0aca250d7fcf
|
refs/heads/master
| 2021-12-11T20:59:17.240348
| 2021-09-17T15:37:29
| 2021-09-17T15:37:29
| 101,992,036
| 2
| 1
| null | 2019-04-26T12:59:04
| 2017-08-31T11:05:13
|
R
|
UTF-8
|
R
| false
| false
| 577
|
r
|
test-tau.estimate.R
|
# Unit tests for tau.estimate() and scale_col() (testthat).
test_that("tau.estimate", {
  # Regression value pinned from a known-good run on the Russett data.
  data("Russett")
  expect_equal(tau.estimate(Russett), 0.114279478757313)
})
test_that("implementations are coherent", {
  data(Russett)
  set.seed(45791)
  # Shrinkage estimated on bias-scaled agriculture variables must not
  # degenerate to the identity target (tau == 1).
  agric_vars <- as.matrix(Russett[, c("gini", "farm", "rent")])
  scaled_agric <- scale2_(agric_vars, bias = TRUE)
  shrinkage <- tau.estimate(scaled_agric)
  expect_true(shrinkage != 1)
})
test_that("test scale_col", {
  set.seed(45792)
  # scale_col() must agree with base scale() up to attributes.
  n_obs <- 150
  n_vars <- 2
  mat <- matrix(rnorm(n_obs * n_vars), n_obs, n_vars)
  scaled_custom <- scale_col(mat)
  scaled_base <- scale(mat, center = TRUE, scale = TRUE)
  expect_equal(scaled_custom, scaled_base, ignore_attr = TRUE)
})
|
728dfef92728de7fa00ca49ebb31d4881e3e844a
|
9c0b7c02ea33811c75ea02a020bbfc31e1a6724a
|
/gconcordopt/R/wrappers.R
|
4e2260c5aeb39a9f0bb963281343d6d4d88119f8
|
[] |
no_license
|
hyoucs/heteropc
|
e458afbe807e1b48d875213b64d06560ddaf331e
|
25d5516b479932955fbbb194c1db2e736925eaad
|
refs/heads/master
| 2021-04-26T22:43:36.958703
| 2018-07-23T06:11:55
| 2018-07-23T06:11:55
| 124,137,515
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,568
|
r
|
wrappers.R
|
#######################################
## CONCORD - ISTA
#######################################
# Elastic-net variant of the CONCORD-ISTA solver. Thin wrapper that forwards
# everything to the compiled 'cc_elastic_ista' routine, then coerces the
# returned estimate to a symmetric sparse matrix and attaches solver metadata
# (lambda, iteration count, diagnostics) as attributes.
# `en` is the elastic-net mixing value passed to the backend; flags are
# coerced to the integer types the C++ code expects. `...` is ignored.
#' @useDynLib gconcordopt
#' @export
concordista_e <- function(D, lam, pMat, penalize.diagonal = 0L, BBstep = 0L,
tol = 1e-5, maxit = 100L, info = FALSE,
trkeigs = FALSE, DisS = FALSE, en = 1.0, ...) {
# All numerical work happens in the compiled backend.
soln <- .Call('gconcordopt_cc_elastic_ista', PACKAGE = 'gconcordopt',
D, lam, pMat, as.integer(penalize.diagonal), as.integer(BBstep),
tol, maxit, as.integer(info), as.integer(trkeigs), as.integer(DisS),
as.double(en))
# Store the estimate as a symmetric sparse matrix (Matrix package class).
soln$omega <- as(soln$omega, 'symmetricMatrix')
# Decorate with metadata so callers can inspect convergence afterwards.
output <- structure(soln$omega,
lambda = lam,
iter = soln$itr,
misc = soln$info)
output
}
#' CONCORD estimate via ISTA
#'
#' Thin wrapper around the compiled \code{cc_ista} routine: runs the solver,
#' coerces the returned estimate to a symmetric sparse matrix and attaches
#' solver metadata as attributes.
#'
#' @param D input matrix handed straight to the backend (interpreted as a
#'   sample covariance when \code{DisS} is \code{TRUE}).
#' @param lam scalar L1 penalty.
#' @param pMat penalty-weight matrix.
#' @param X0 sparse starting value for omega (defaults to the identity).
#' @param penalize.diagonal 0/1 flag: penalize diagonal entries?
#' @param BBstep 0/1 flag: use Barzilai-Borwein step sizes?
#' @param tol convergence tolerance.
#' @param maxit maximum number of iterations.
#' @param info collect solver diagnostics?
#' @param trkeigs track eigenvalues (diagnostic)?
#' @param DisS treat \code{D} as a sample covariance matrix?
#' @param ... ignored; kept for call compatibility with the preset wrappers.
#' @return The estimated omega (symmetric sparse matrix) with attributes
#'   \code{lambda}, \code{iter} and \code{misc}.
#' @useDynLib gconcordopt
#' @export
concordista <- function(D, lam, pMat, X0=as(diag(ncol(D)), 'dgCMatrix'),
                        penalize.diagonal = 0L, BBstep = 0L,
                        tol = 1e-5, maxit = 100L, info = FALSE,
                        trkeigs = FALSE, DisS = FALSE, ...) {
  # (Removed unused local `p <- ncol(D)` from the original.)
  soln <- .Call('gconcordopt_cc_ista', PACKAGE = 'gconcordopt',
                D, lam, pMat, as(X0, 'dgCMatrix'),
                as.integer(penalize.diagonal), as.integer(BBstep),
                tol, maxit, as.integer(info), as.integer(trkeigs),
                as.integer(DisS))
  # Store the estimate as a symmetric sparse matrix and attach metadata.
  soln$omega <- as(soln$omega, 'symmetricMatrix')
  structure(soln$omega,
            lambda = lam,
            iter = soln$itr,
            misc = soln$info)
}
# Convenience presets over concordista(): the numeric suffix encodes the pair
# (penalize.diagonal, BBstep), e.g. concordista_1_0 penalizes the diagonal
# and uses plain (non-BB) step sizes. All other arguments pass through.
#' @useDynLib gconcordopt
#' @export
concordista_0_0 <- function(D, lam, pMat, X0=as(diag(ncol(D)), 'dgCMatrix'),
penalize.diagonal = 0L, BBstep = 0L, ...) {
concordista(D, lam, pMat, X0, as.integer(penalize.diagonal), as.integer(BBstep), ...)
}
#' @useDynLib gconcordopt
#' @export
concordista_1_0 <- function(D, lam, pMat, X0=as(diag(ncol(D)), 'dgCMatrix'),
penalize.diagonal = 1L, BBstep = 0L, ...) {
concordista(D, lam, pMat, X0, as.integer(penalize.diagonal), as.integer(BBstep), ...)
}
#' @useDynLib gconcordopt
#' @export
concordista_0_1 <- function(D, lam, pMat, X0=as(diag(ncol(D)), 'dgCMatrix'),
penalize.diagonal = 0L, BBstep = 1L, ...) {
concordista(D, lam, pMat, X0, as.integer(penalize.diagonal), as.integer(BBstep), ...)
}
#' @useDynLib gconcordopt
#' @export
concordista_1_1 <- function(D, lam, pMat, X0=as(diag(ncol(D)), 'dgCMatrix'),
penalize.diagonal = 1L, BBstep = 1L, ...) {
concordista(D, lam, pMat, X0, as.integer(penalize.diagonal), as.integer(BBstep), ...)
}
#######################################
## CONCORD - FISTA
#######################################
# Accelerated (FISTA) variant of the CONCORD solver. Same shape as
# concordista(): forwards to the compiled 'cc_fista' routine, coerces the
# estimate to a symmetric sparse matrix and attaches solver metadata.
# `steptype` selects the backend's step-size rule (0/1/2); `...` is ignored.
#' @useDynLib gconcordopt
#' @export
concordfista <- function(D, lam, pMat, X0=as(diag(ncol(D)), 'dgCMatrix'),
penalize.diagonal = 0L, steptype = 0L,
tol = 1e-5, maxit = 100L, info = FALSE,
trkeigs = FALSE, DisS = FALSE, ...) {
soln <- .Call('gconcordopt_cc_fista', PACKAGE = 'gconcordopt',
D, lam, pMat, as(X0, 'dgCMatrix'), as.integer(penalize.diagonal), as.integer(steptype),
tol, maxit, as.integer(info), as.integer(trkeigs),
as.integer(DisS))
# Symmetric sparse estimate plus metadata; `convcond` reports which
# convergence condition the backend hit.
soln$omega <- as(soln$omega, 'symmetricMatrix')
output <- structure(soln$omega,
lambda = lam,
convcond = soln$convcond,
iter = soln$itr,
misc = soln$info)
output
}
# Convenience presets over concordfista(): the numeric suffix encodes the
# pair (penalize.diagonal, steptype). All other arguments pass through.
#' @useDynLib gconcordopt
#' @export
concordfista_0_0 <- function(D, lam, pMat, X0=as(diag(ncol(D)), 'dgCMatrix'),
penalize.diagonal = 0L, steptype = 0L, ...) {
concordfista(D, lam, pMat, X0, as.integer(penalize.diagonal), as.integer(steptype), ...)
}
#' @useDynLib gconcordopt
#' @export
concordfista_0_1 <- function(D, lam, pMat, X0=as(diag(ncol(D)), 'dgCMatrix'),
penalize.diagonal = 0L, steptype = 1L, ...) {
concordfista(D, lam, pMat, X0, as.integer(penalize.diagonal), as.integer(steptype), ...)
}
#' @useDynLib gconcordopt
#' @export
concordfista_0_2 <- function(D, lam, pMat, X0=as(diag(ncol(D)), 'dgCMatrix'),
penalize.diagonal = 0L, steptype = 2L, ...) {
concordfista(D, lam, pMat, X0, as.integer(penalize.diagonal), as.integer(steptype), ...)
}
#' @useDynLib gconcordopt
#' @export
concordfista_1_0 <- function(D, lam, pMat, X0=as(diag(ncol(D)), 'dgCMatrix'),
penalize.diagonal = 1L, steptype = 0L, ...) {
concordfista(D, lam, pMat, X0, as.integer(penalize.diagonal), as.integer(steptype), ...)
}
#' @useDynLib gconcordopt
#' @export
concordfista_1_1 <- function(D, lam, pMat, X0=as(diag(ncol(D)), 'dgCMatrix'),
penalize.diagonal = 1L, steptype = 1L, ...) {
concordfista(D, lam, pMat, X0, as.integer(penalize.diagonal), as.integer(steptype), ...)
}
#' @useDynLib gconcordopt
#' @export
concordfista_1_2 <- function(D, lam, pMat, X0=as(diag(ncol(D)), 'dgCMatrix'),
penalize.diagonal = 1L, steptype = 2L, ...) {
concordfista(D, lam, pMat, X0, as.integer(penalize.diagonal), as.integer(steptype), ...)
}
|
baa4d20fb671d0742b3d2b4bf012434e3c703302
|
ac1105166bd4a3616f41d7c9308de4f7ea07afdc
|
/AfricaSoil/xgboostModel.R
|
36ab408f30faa7061abbec3750d00f4651eb4eb6
|
[] |
no_license
|
Shmekor/Kaggle
|
5b6e94f6955bfaaeb164a9b34c2193ad1d0405bc
|
a51832a026633d03689e94e5c695757decc91534
|
refs/heads/master
| 2021-01-13T02:37:57.198732
| 2014-09-06T14:04:18
| 2014-09-06T14:04:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,130
|
r
|
xgboostModel.R
|
# Train one xgboost regression model per AfricaSoil target (Ca, P, pH, SOC, Sand).
#
# ds        : training data frame containing the selected feature columns plus
#             the five target columns Ca, P, pH, SOC and Sand.
# sFeatures : indices selecting the predictor columns of `ds`.
# ...       : extra arguments forwarded to xgboost() (e.g. nrounds).
#
# Returns a list with $sFeatures (selected feature names) and one fitted
# model per target: $caModel, $pModel, $pHModel, $socModel, $sandModel --
# the names kaggle.africa.xgboostModel.predict() looks up.
kaggle.africa.xgboostModel.train <- function(ds, sFeatures, ...) {
  # Fit a single linear-objective xgboost regressor on the selected features.
  trainModel <- function(tDS, target) {
    dtrain <- xgb.DMatrix(as.matrix(tDS), label = target)
    xgboost(data = dtrain, objective = "reg:linear", eval_metric='rmse', ...)
  }
  result <- list()
  result$sFeatures <- names(ds)[sFeatures]
  feature_data <- ds[, result$sFeatures]
  # One model per target column, fitted in a loop instead of the original
  # five copy-pasted calls; list-element order matches the original.
  targets <- c(caModel = "Ca", pModel = "P", pHModel = "pH",
               socModel = "SOC", sandModel = "Sand")
  for (model_name in names(targets)) {
    result[[model_name]] <- trainModel(feature_data, ds[[targets[[model_name]]]])
  }
  result
}
# Predict all five soil properties for new data using a model list produced
# by kaggle.africa.xgboostModel.train().
#
# model : list with $sFeatures and the five fitted models.
# ds    : data frame containing at least PIDN and the feature columns.
#
# Returns a data frame with columns PIDN, Ca, P, pH, SOC, Sand.
kaggle.africa.xgboostModel.predict <- function(model, ds) {
  # Build the feature matrix once; every sub-model uses the same features.
  feature_matrix <- as.matrix(ds[, model$sFeatures])
  result <- data.frame(PIDN = ds$PIDN)
  # Output column name -> name of the fitted model inside `model`.
  model_map <- c(Ca = "caModel", P = "pModel", pH = "pHModel",
                 SOC = "socModel", Sand = "sandModel")
  for (target in names(model_map)) {
    result[[target]] <- predict(model[[model_map[[target]]]], feature_matrix)
  }
  result
}
|
99a084599620f8de631d930ac4a8350bf4d52ed1
|
26f5865f149e0e7f87c074c035ebb10fc1bf3800
|
/man/rfe_result.Rd
|
a1de9c42fb75057556369d4f2d34351d2768b209
|
[] |
no_license
|
elpidiofilho/labgeo
|
bfa3580b67f0f777f1c28e82dd826ee6ae89da32
|
aebd001557b18832a2a6080c54892a3262a45162
|
refs/heads/master
| 2021-01-22T12:38:39.902086
| 2018-08-11T13:42:37
| 2018-08-11T13:42:37
| 102,354,346
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 600
|
rd
|
rfe_result.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/recursive_feature_elimination.R
\name{rfe_result}
\alias{rfe_result}
\title{Recursive Feature Elimination plot results}
\usage{
rfe_result(fit.rfe)
}
\arguments{
\item{fit.rfe}{Results from recursive_feature_elimination function}
}
\description{
This function plots the results of Recursive Feature Elimination (RFE)
}
\details{
details
}
\examples{
\dontrun{
rfe_result(fit.rfe)
}
}
\author{
Elpidio Filho, \email{elpidio@ufv.br}
}
\keyword{Elimination}
\keyword{Feature}
\keyword{Recursive}
\keyword{plot}
\keyword{results}
|
ce3ed0836af27eedefd79c2b5ad614abbee7449a
|
9e7c75c97ab27056531f68ec0990e448d1740eea
|
/R/rle2.R
|
dcd94aef219dff8d5828fd174d3a2a8076b4745b
|
[] |
no_license
|
AkselA/R-ymse
|
e3a235eca7b6b8368c6775af33295254d0ecb093
|
1d66631cc7e0cd36b38abcaee5d8111573723588
|
refs/heads/master
| 2020-03-28T12:42:24.854132
| 2020-03-19T11:25:13
| 2020-03-19T11:25:13
| 148,326,253
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,329
|
r
|
rle2.R
|
#' Tabulate data
#'
#' Quick and simple function for creating contingency tables
#'
#' @param x a vector or factor object
#' @param na.rm should \code{NA} values be dropped before counting?
#'   When \code{FALSE} (default), \code{NA} is counted as its own value.
#' @param order how should the rows be ordered: by descending frequency
#'   (default), by value, or left in order of first appearance ("none")?
#'
#' @return
#' A \code{data.frame} with columns \code{val} (the unique values and class
#' of \code{x}) and \code{freq} (the count of each value in \code{x},
#' integer), with rows ordered according to \code{order}.
#'
#' @export
#'
#' @examples
#' set.seed(1)
#' m <- sample(c(rep(NA, 5), rpois(45, 3)))
#' quick_table(m)
#'
#' x <- LETTERS[c(2, 2, 2, 2, 3, 1, 1)]
#' quick_table(x, order="freq")
#' quick_table(x, order="value")
#' quick_table(x, order="none")
quick_table <- function(x, na.rm=FALSE, order=c("frequency", "value", "none")) {
  # Resolve the ordering choice up front (partial matching allowed, errors
  # on anything unrecognised).
  ord <- match.arg(order)
  if (na.rm) {
    x <- x[!is.na(x)]
  }
  ux <- unique(x)
  # match()/tabulate() counts each unique value; match() pairs NA with NA,
  # so NA is tallied like any other value when na.rm = FALSE.
  freq <- tabulate(match(x, ux))
  # Build the table once, then reorder as requested (the original built it
  # three times inside switch()).
  dtf <- data.frame(val = ux, freq = freq, stringsAsFactors = FALSE)
  dtf <- switch(ord,
                frequency = dtf[order(-freq), ],
                value = dtf[order(ux), ],
                none = dtf)
  rownames(dtf) <- NULL
  dtf
}
#' Run Length Encoding
#'
#' Compute the lengths and values of runs of equal values in a vector.
#' Unlike base \code{rle}, adjacent \code{NA}s are merged into a single run
#' by default; set \code{na.unique = TRUE} for base-\code{rle} behaviour.
#'
#' @param x a numeric or character vector
#' @param na.unique should every \code{NA} be considered unique?
#' @param output what form of output
#'
#' @return
#' Return value depends on \code{output}.
#' \describe{
#'   \item{\code{data.frame}}{A data.frame with lengths and values columns}
#'   \item{\code{rle}}{An object of class \code{"rle"}}
#'   \item{\code{named vector}}{A vector of lengths with values as names}
#'   \item{\code{lengths}}{The lengths as a single vector}
#'   \item{\code{values}}{The values as a single vector}
#' }
#'
#' @export
#'
#' @examples
#' x <- c(NA, NA, 1, 2, 3, 3, NA, NA, NA, 2, 2, 2, NA, 1, 1, NA, NA)
#' rle2(x)
#'
#' # Same result as rle
#' rle2(x, na.unique=TRUE, output="rle")
#' rle(x)
#'
#' # inverse.rle works as long as output is "rle"
#' inverse.rle(rle2(x, output="rle"))
rle2 <- function(x, na.unique = FALSE,
                 output = c("data.frame", "rle", "named vector", "lengths", "values")) {
  if (!is.vector(x) && !is.list(x)) {
    stop("'x' must be a vector of an atomic type")
  }
  n <- length(x)
  # Empty input always yields an empty "rle" object, regardless of `output`.
  if (n == 0L) {
    return(structure(list(lengths = integer(), values = x), class = "rle"))
  }
  # TRUE where consecutive elements differ; NA where either neighbour is NA.
  changed <- x[-1L] != x[-n]
  if (na.unique) {
    # Every NA starts its own run, matching base rle().
    run_ends <- c(which(changed | is.na(changed)), n)
  } else {
    # Merge adjacent NAs: a boundary is suppressed exactly when both
    # neighbours are NA.
    both_na <- is.na(x[-1L]) & is.na(x[-n])
    changed[is.na(changed)] <- TRUE
    run_ends <- c(which(changed & !both_na), n)
  }
  run_lengths <- diff(c(0L, run_ends))
  run_values <- x[run_ends]
  # An unspecified `output` (the full default vector) short-circuits to the
  # data.frame form without going through match.arg().
  if (length(output) > 1) {
    return(data.frame(lengths = run_lengths, values = run_values,
                      stringsAsFactors = FALSE))
  }
  output <- match.arg(output)
  switch(output,
         "data.frame" = data.frame(lengths = run_lengths,
                                   values = run_values,
                                   stringsAsFactors = FALSE),
         "named vector" = setNames(run_lengths, run_values),
         "rle" = structure(list(lengths = run_lengths, values = run_values),
                           class = "rle"),
         "lengths" = run_lengths,
         "values" = run_values)
}
|
ce95972cb7311080838a69c8d68390994f8e42b6
|
c29a2534fb4e5224d49d3fed5e23f6c86987d055
|
/R/moda.R
|
a9380ed17a5f4f1c9b26156f0e3cb182b04712e1
|
[] |
no_license
|
ddeweerd/MODifieRDev
|
8c1ae2cd35c297a5394671e05d3198b6f3b6fcf8
|
5660de4df282b57cd2da20e8fe493e438019b759
|
refs/heads/Devel
| 2020-03-28T18:37:56.271549
| 2019-11-07T10:45:09
| 2019-11-07T10:45:09
| 148,896,901
| 4
| 0
| null | 2019-08-20T14:14:35
| 2018-09-15T11:42:06
|
R
|
UTF-8
|
R
| false
| false
| 5,214
|
r
|
moda.R
|
#' MODA
#'
#' An implementation of MODA co-expression based algorithm.
#' @inheritParams clique_sum_permutation
#' @inheritParams MODA::CompareAllNets
#' @inheritParams MODA::WeightedModulePartitionHierarchical
#' @param group_of_interest Numerical value denoting which group contains the condition of interest (1 or 2)
#'
#' @details
#' This implementation follows a workflow as described in the MODA vignette. First,
#' two separate networks are constructed, a background network containing expression
#' data from all samples and a condition specific network consisting of all samples minus
#' the condition specific samples.
#' Then, hierarchical clustering is performed and cutting height estimated from either
#' maximal average \code{density} or \code{modularity}
#'
#' \emph{Condition} specific co-expression modules are then extracted
#' using the Jaccard index and \code{specificTheta}.
#'
#' The final module will consist of the co-expression module that has the minimal
#' Jaccard index complemented by co-expression modules that have a Jaccard index
#' below this minimal + \code{specificTheta}
#'
#' After analysis, the \code{specificTheta}
#' and thereby the disease module can be adjusted using
#' \code{\link{moda_change_specific_threshold}}
#'
#' @return moda returns an object of class "MODifieR_module" with subclass "MODA".
#' This object is a named list containing the following components:
#' \item{module_genes}{A character vector containing the genes in the final module}
#' \item{group1_modules}{A list containing all co-expression modules in the background network}
#' \item{group2_modules}{A list containing all co-expression modules in the condition specific network}
#' \item{jaccard_table}{A matrix with all Jaccard indexes for all co-expression modules}
#' \item{settings}{A named list containing the parameters used in generating the object}
#'
#' @seealso
#' \url{https://bioconductor.org/packages/release/bioc/vignettes/MODA/inst/doc/MODA.html}
#' @references
#' \cite{Li D, Brown JB, Orsini L, Pan Z, Hu G, He S (2016). MODA: MODA:
#' MOdule Differential Analysis for weighted gene co-expression network. R package version 1.6.0}
#'
#' @author Dirk de Weerd
#'
#' @export
moda <- function(MODifieR_input,
cutmethod = "Density", group_of_interest,
specificTheta = 0.1, conservedTheta = 0.1,
dataset_name = NULL){
# Retrieve settings
# Capture both the evaluated arguments and the literal call so the returned
# object records exactly how it was produced.
evaluated_args <- c(as.list(environment()))
settings <- as.list(stackoverflow::match.call.defaults()[-1])
# Replace unevaluated call expressions with their evaluated values;
# `unevaluated_args` is presumably a package-level constant listing
# arguments to keep as-is -- TODO confirm against package source.
replace_args <- names(settings)[!names(settings) %in% unevaluated_args]
for (argument in replace_args) {
settings[[which(names(settings) == argument)]] <- evaluated_args[[which(names(evaluated_args) ==
argument)]]
}
#Validate the input parameters
check_expression_matrix(MODifieR_input)
validate_inputs(settings)
# `cutmethod` and `group_of_interest` only accept a fixed set of values.
if (!(cutmethod == "Density" | cutmethod == "Modularity")){
stop('Cutmethod is not "Density" or "Modularity"', call. = F)
}
if (!(group_of_interest == 1 | group_of_interest == 2)){
stop("Group of interest is not 1 or 2", call. = F)
}
# Record a human-readable dataset name in the settings if supplied.
if (!is.null(dataset_name)){
settings$MODifieR_input <- dataset_name
}
##Get relevant input data from input object
#Background network, meaning data from all conditions
datExpr1 <- t(MODifieR_input$annotated_exprs_matrix)
#Condition specific network, meaning all samples MINUS the samples belonging to the condition
datExpr2 <- t(MODifieR_input$annotated_exprs_matrix[,-MODifieR_input$group_indici[[group_of_interest]]])
indicator1 = names(MODifieR_input$group_indici)[1] # indicator for data profile 1
indicator2 = names(MODifieR_input$group_indici)[2] # indicator for data profile 2
# Hierarchical co-expression clustering on both networks (WMPH wraps
# MODA::WeightedModulePartitionHierarchical).
modules1 <- WMPH(datExpr = datExpr1, indicatename = indicator1, cutmethod = cutmethod)
modules2 <- WMPH(datExpr = datExpr2, indicatename = indicator2, cutmethod = cutmethod)
# Jaccard indexes between every pair of background / condition modules.
jaccard_table <- compare_modules(module_list1 = modules1, module_list2 = modules2)
# Condition-specific module genes: background modules whose Jaccard index is
# within `specificTheta` of the minimum.
module_genes <- unique(unlist(modules1[moda_extract_modules_index_specific(
jaccard_table = jaccard_table, specificTheta = specificTheta)]))
new_moda_module <- list("module_genes" = module_genes,
"group1_modules" = modules1,
"group2_modules" = modules2,
"jaccard_table" = jaccard_table,
"settings" = settings)
# Third class element propagates the input subtype for downstream dispatch.
class(new_moda_module) <- c("MODifieR_module", "MODA", class(MODifieR_input)[3])
return (new_moda_module)
}
#'
#'Change the specific threshold of an existing MODA module
#'
#'Re-extracts the condition-specific module genes from a previously computed
#'MODA result using a new \code{specificTheta}, without rerunning the
#'clustering itself.
#'@inheritParams MODA::CompareAllNets
#'@param moda_module A \code{MODifieR_input} object created by \code{\link{moda}}
#'
#'@return The input module with updated \code{module_genes} and settings.
#'
#'@author Dirk de Weerd
#'
#'@export
moda_change_specific_threshold <- function(moda_module, specificTheta){
# Recompute the module genes from the stored background co-expression
# modules and Jaccard table, using the new threshold.
module_genes <- unique(unlist(moda_module$group1_modules[moda_extract_modules_index_specific(
jaccard_table = moda_module$jaccard_table, specificTheta = specificTheta)]))
moda_module$module_genes <- module_genes
# Record the threshold actually used so the object stays self-describing.
moda_module$settings$specificTheta <- specificTheta
return(moda_module)
}
|
aa0da909c83e64efa22c0f01d9c124104968688d
|
f19ab11dae48b4442b838eb4b2e367bceedbbcb1
|
/r_programming/progs/rankhospital.R
|
2f6a2d732cb7445b548d526297afad4f9165adc7
|
[] |
no_license
|
chrsaud/datasciencecoursera
|
95be69d405bf80cdf5c8065e0eb3014c0c0e63c3
|
3330592ca3721fd17a3d9229ae49da9297f6ba3f
|
refs/heads/master
| 2021-04-25T08:36:34.505682
| 2018-03-23T10:34:44
| 2018-03-23T10:34:44
| 122,195,294
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,883
|
r
|
rankhospital.R
|
## Week 4 Programming Assigment - function to find hospital with user specified ranking
## in a state
## Author: Chris Aud
## set working directory
# NOTE(review): hard-coded absolute path; the script only runs on the
# original author's machine.
setwd("C:/Users/Chris/Desktop/datasciencecoursera/r_programming/data")

## Find the hospital with the given 30-day mortality rank within a state.
##
## state   : two-letter state abbreviation (must appear in the data)
## outcome : one of "heart attack", "heart failure", "pneumonia"
## num     : integer rank, or "best" / "worst"
##
## Returns the hospital name (character); ties in mortality rate are broken
## alphabetically by hospital name.
rankhospital <- function(state, outcome, num) {
  ## Read outcome data
  data <- read.csv("outcome-of-care-measures.csv", colClasses = "character")
  ## Check that state and outcome are valid
  if (!(state %in% data$State)) {
    stop("invalid state")
  }
  if (!(outcome %in% c("heart attack", "heart failure", "pneumonia"))) {
    stop("invalid outcome")
  }
  ## Map the requested outcome to its mortality-rate column index
  outcome.index <- c("heart attack" = 11, "heart failure" = 17,
                     "pneumonia" = 23)[[outcome]]
  ## Keep hospital name, state and the selected outcome column
  data.cols <- data[, c(2, 7, outcome.index)]
  names(data.cols) <- c("hospital_name", "State", "Outcome")
  ## Limit to only the selected state
  data.limit <- data.cols[data.cols[["State"]] == state, ]
  ## Convert outcome to numeric and drop NAs. suppressWarnings() replaces the
  ## original global options(warn = -1)/restore juggling and silences only
  ## the "NAs introduced by coercion" warning for entries like "Not Available".
  data.limit$Outcome <- suppressWarnings(as.numeric(data.limit$Outcome))
  data.limit <- data.limit[!is.na(data.limit$Outcome), ]
  ## Sort by mortality rate, then alphabetically by hospital name
  data.sort <- data.limit[order(data.limit[["Outcome"]], data.limit[["hospital_name"]]), ]
  ## Rank hospitals (ties already resolved by the sort above)
  data.sort$rank <- rank(data.sort[["Outcome"]], ties.method = "first")
  ## Control structure for output
  if (num == "best") {
    hospital <- data.sort[1, 1]
  } else if (num == "worst") {
    hospital <- data.sort[nrow(data.sort), 1]
  } else {
    hospital <- data.sort[data.sort$rank == num, 1]
  }
  hospital
}
|
40d33a933e4dbffec1c8558ec8b002ecbcf0fa55
|
e8f282cec225e3678e8464050edf7475d6995a91
|
/R/E-retMatrixCtoL.R
|
3aa72954c6744c23e72e666bd8ce7a868059d497
|
[] |
no_license
|
stjordanis/tsconv
|
aca5eaa03c699bbbf4775a960af38dbfe33e8681
|
0d7e9bc63aaecfcbbda79cf588334af663c20102
|
refs/heads/master
| 2020-04-15T15:12:43.422059
| 2017-01-06T05:59:22
| 2017-01-06T05:59:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 397
|
r
|
E-retMatrixCtoL.R
|
#' Convert continuous (log) returns to linear returns for a matrix
#'
#' Applies \code{retVectorCtoL} to every column of the input and merges the
#' converted columns back into a single xts matrix.
#'
#' @param CRetMatrix is a log return xts matrix
#'
#' @return a linear return xts matrix
#'
#' @examples
#' none
#'
#' @export
retMatrixCtoL <- function(CRetMatrix)
{
  # Convert each column independently, then recombine them with merge().
  converted_cols <- lapply(X = CRetMatrix, FUN = retVectorCtoL)
  do.call(merge, converted_cols)
}
|
d473270f62119fba66fb3a49e4411ea3d1c408b3
|
1c6d29bf84112963a95afa7324976d6a1e203d28
|
/Assignments/Assignment 3 - Loess/loess.R
|
2d619dbe1c9d5789e655f34dee421cbc31018773
|
[] |
no_license
|
john-james-ai/Practical-Statistical-Learning
|
eb95d6b590e0accadb22dfc9230acb8cb4abf496
|
6ecb06e222e45bb96480bc093c138f3b579a91dd
|
refs/heads/master
| 2023-04-09T07:22:48.035241
| 2021-04-14T10:31:04
| 2021-04-14T10:31:04
| 346,923,377
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 706
|
r
|
loess.R
|
# Compute the diagonal entries (leverage values) of the loess smoother
# matrix S for predictor vector x1 at smoothing span sp.
# NOTE: assignment skeleton -- the body is intentionally left for the
# student; as written the function returns NULL.
lo.lev <- function(x1, sp){
  ## YOUR CODE: compute the diagonal entries of
  ##            the smoother matrix S
}
# Fit loess(y1 ~ x1, span = sp) and compute leave-one-out CV and GCV scores.
# NOTE: assignment skeleton -- the student's code must define local `cv` and
# `gcv` before the return statement; as written they are undefined and the
# function errors.
onestep_CV <- function(x1, y1, sp){
  ## YOUR CODE:
  ##  1) fit a loess model y1 ~ x1 with span = sp, and extract
  ##     the corresponding residual vector
  ##  2) call lo.lev to obtain the diagonal entries of S
  ##  3) compute LOO-CV and GCV
  return(list(cv = cv, gcv = gcv))
}
# Evaluate LOO-CV and GCV over a grid of candidate loess spans.
#
# x1, y1 : two vectors (predictor and response)
# span   : a sequence of candidate values for "span"
#
# Returns list(cv = <numeric>, gcv = <numeric>), one entry per span value.
myCV <- function(x1, y1, span){
  # Run onestep_CV once per candidate span, then collect both criteria.
  per_span <- lapply(span, function(sp) onestep_CV(x1, y1, sp))
  list(cv  = vapply(per_span, function(res) res$cv,  numeric(1)),
       gcv = vapply(per_span, function(res) res$gcv, numeric(1)))
}
|
d94d9b6cd0e1d724a79691f6cfab679e519e72f6
|
c5d3419daa51b54120a0acd5bdbe638a6591d62d
|
/1.0 Introduction/Exercise1.R
|
a45ce525dafc1ef56aae670dbfcd353699a3a769
|
[
"MIT"
] |
permissive
|
riyaaditya/Data-Science-Using-R
|
5c87f1de9f8805cf74b84ceebc64f84043a954c7
|
78434dca2f763776bdf5ee4400968d6d7e849c57
|
refs/heads/master
| 2022-04-18T04:07:01.129726
| 2020-04-15T17:47:53
| 2020-04-15T17:47:53
| 262,352,618
| 1
| 0
|
MIT
| 2020-05-08T15:00:06
| 2020-05-08T15:00:05
| null |
UTF-8
|
R
| false
| false
| 359
|
r
|
Exercise1.R
|
# Exercise 1.1
# Monte Carlo estimate of E[exp(Z)] for Z ~ N(0, 1); the true value is
# exp(1/2) (lognormal mean), bracketed below by exp(-1/2) and exp(1/2).
exp(0-1/2)
exp(0+1/2)
x<-rnorm(1000)
y<-exp(x)
mean(y)
# Exercise 1.2
# Roots of A*x^2 + B*x + C via the quadratic formula, printed at low
# precision and then compared against the rounded values.
A<-1
B<-3
C<-1
options(digits=1)
my.vector<-c((-B+sqrt(B^2-4*A*C))/(2*A), (-B-sqrt(B^2-4*A*C))/(2*A))
my.vector
options(digits=6)
# Relative error of the rounded roots (-0.4, -2.6) against the exact ones.
c(-0.4,-2.6)/my.vector-1
# Exercise 1.3
# Normal sample: summary statistics, then histograms -- second one with
# suppressed default axes, adding bottom (1) and right (4) axes manually.
x <- rnorm(100, mean=.5, sd=.3)
mean(x)
sd(x)
hist(x)
hist(x,axes=FALSE,ylab="")
axis(1)
axis(4)
|
b7dab2d9d0e0a2a945b042dbedaed4bb8b050c6e
|
d67b72be70e10fa57dfc9f016c28d4babc7577c9
|
/man/get_hue.Rd
|
7386035c54e189de422544d267640307625ead64
|
[] |
no_license
|
ctross/DieTryin
|
10e293ff914232fb9a5734ca6f82e556db96a491
|
45bd367cbdd2bbbd1b75ec10e59b2c948d68b840
|
refs/heads/master
| 2023-08-30T21:17:17.002468
| 2023-08-28T19:36:47
| 2023-08-28T19:36:47
| 132,165,318
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 399
|
rd
|
get_hue.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_hue.R
\name{get_hue}
\alias{get_hue}
\title{get_hue}
\usage{
get_hue(path)
}
\arguments{
\item{path}{Full path to image. file.choose() is often useful. Example: get_hue(file.choose())}
}
\description{
This is a helper function to identify HSL hue values. ust set a path to an image, and click to get a pixel color.
}
|
33bcd784f591a7ea6120db901ec07550f55d0b07
|
6c16bfabf987468553ab281cef9e872a017bbdd6
|
/lucho/random_walk_indice.R
|
9bd6b18bd76c8f8896d480bd794acb439b557d3e
|
[] |
no_license
|
ZhengyangXu/r-strategies
|
4bb714a7c28ef4db32c8793adcc13b21919f2164
|
a7543720bbae975dbeb79202b266a0a294797712
|
refs/heads/master
| 2020-03-14T00:20:07.624730
| 2014-10-24T00:44:26
| 2014-10-24T00:44:26
| 131,354,530
| 1
| 0
| null | 2018-04-27T23:49:01
| 2018-04-27T23:49:01
| null |
UTF-8
|
R
| false
| false
| 1,492
|
r
|
random_walk_indice.R
|
library(PerformanceAnalytics)
library(quantmod)
symbol <- 'AAPL'
.from <- '2005-01-01'
.to <- '2014-04-17'
getSymbols(symbol, from=.from, to=.to)
stock <- get(symbol)
stock <- stock[,1:4]
names(stock) <- c("Open", "High", "Low", "Close")
stock$Lowest <- NA
stock$Highest <- NA
stock$ATR <- NA
k <- 14
columnas <- ncol(stock)
for (i in 1:(k-1)) {
stock$Baja <- NA
stock$Baja <- Lag(stock$Low, i)
names(stock)[columnas+i] <- paste0('Low_',i)
}
stock$Lowest <- pmin(stock[,'Low'],stock[,columnas+1],stock[,columnas+2],stock[,columnas+3],stock[,columnas+4],stock[,columnas+5],stock[,columnas+6],stock[,columnas+7],stock[,columnas+8],stock[,columnas+9],stock[,columnas+10],stock[,columnas+11],stock[,columnas+12],stock[,columnas+13])
stock <- stock[,1:7]
columnas <- ncol(stock)
for (i in 1:(k-1)) {
stock$Alta <- NA
stock$Alta <- Lag(stock$High, i)
names(stock)[columnas+i] <- paste0('High_',i)
}
stock$Highest <- pmax(stock[,'High'],stock[,columnas+1],stock[,columnas+2],stock[,columnas+3],stock[,columnas+4],stock[,columnas+5],stock[,columnas+6],stock[,columnas+7],stock[,columnas+8],stock[,columnas+9],stock[,columnas+10],stock[,columnas+11],stock[,columnas+12],stock[,columnas+13])
stock <- stock[,1:7]
stock$ATR <- ATR(stock)$atr
stock$RWI.High <- (1/sqrt(14))*(stock$High - stock$Lowest) / stock$ATR
stock$RWI.Low <- (1/sqrt(14))*(stock$Highest - stock$Low) / stock$ATR
chart.TimeSeries(stock[,8:9], legend="topleft", col=c("green","red"), main="Random Walk Index")
|
daa15a0e66058e0a243e08a19387915a5eadf500
|
f4a0ad6d68516fd86f764920fb33157162cfbecf
|
/R/bspline.R
|
d77c2b87d16abdd3fd393132cc397c6273673ed5
|
[] |
no_license
|
cran/spBayesSurv
|
c012af5247f90ec25b846ab18ea4bf8775a8652a
|
9bf7201a51153a54f5a6192a5911fab18850a944
|
refs/heads/master
| 2023-05-25T00:36:22.423025
| 2023-05-17T18:00:02
| 2023-05-17T18:00:02
| 20,757,555
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,596
|
r
|
bspline.R
|
# Modified from File src/library/splines/R/splines.R
# Part of the R package, https://www.R-project.org
bspline <- function(x, df = NULL, knots = NULL, Boundary.knots = range(x)){
degree <- 3
intercept <- TRUE
ord <- 1L + (degree <- as.integer(degree))
if(ord <= 1) stop("'degree' must be integer >= 1")
nx <- names(x)
x <- as.vector(x)
nax <- is.na(x)
if(nas <- any(nax))
x <- x[!nax]
outside <- if(!missing(Boundary.knots)) {
Boundary.knots <- sort(Boundary.knots)
(ol <- x < Boundary.knots[1L]) | (or <- x > Boundary.knots[2L])
} else FALSE
if(!is.null(df) && is.null(knots)) {
nIknots <- df+2L - ord + (1L - intercept) # == #{inner knots}
if(nIknots < 0L) {
nIknots <- 0L
warning(gettextf("'df' was too small; have used %d",
ord - (1L - intercept) - 2L), domain = NA)
}
knots <-
if(nIknots > 0L) {
knots <- seq.int(from = 0, to = 1,
length.out = nIknots + 2L)[-c(1L, nIknots + 2L)]
quantile(x[!outside], knots)
}
}
Aknots <- sort(c(rep(Boundary.knots, ord), knots))
if(any(outside)) {
warning("some 'x' values beyond boundary knots may cause ill-conditioned bases")
derivs <- 0:degree
scalef <- gamma(1L:ord)# factorials
basis <- array(0, c(length(x), length(Aknots) - degree - 1L))
e <- 1/4 # in theory anything in (0,1); was (implicitly) 0 in R <= 3.2.2
if(any(ol)) {
## left pivot inside, i.e., a bit to the right of the boundary knot
k.pivot <- (1-e)*Boundary.knots[1L] + e*Aknots[ord+1]
xl <- cbind(1, outer(x[ol] - k.pivot, 1L:degree, "^"))
tt <- splineDesign(Aknots, rep(k.pivot, ord), ord, derivs)
basis[ol, ] <- xl %*% (tt/scalef)
}
if(any(or)) {
## right pivot inside, i.e., a bit to the left of the boundary knot:
k.pivot <- (1-e)*Boundary.knots[2L] + e*Aknots[length(Aknots)-ord]
xr <- cbind(1, outer(x[or] - k.pivot, 1L:degree, "^"))
tt <- splineDesign(Aknots, rep(k.pivot, ord), ord, derivs)
basis[or, ] <- xr %*% (tt/scalef)
}
if(any(inside <- !outside))
basis[inside, ] <- splineDesign(Aknots, x[inside], ord)
}
else basis <- splineDesign(Aknots, x, ord)
if(!intercept)
basis <- basis[, -1L , drop = FALSE]
n.col <- ncol(basis)
if(nas) {
nmat <- matrix(NA, length(nax), n.col)
nmat[!nax, ] <- basis
basis <- nmat
}
dimnames(basis) <- list(nx, 1L:n.col)
a <- list(degree = degree, knots = if(is.null(knots)) numeric(0L) else knots,
Boundary.knots = Boundary.knots, intercept = intercept)
attributes(basis) <- c(attributes(basis), a)
class(basis) <- c("bs", "basis", "matrix")
df0 = df +2L
x.bs <- basis
attr(x.bs, "intercept") <- NULL
x.attr = attributes(x.bs);
x.attr$dim[2] = x.attr$dim[2]-2
x.attr$dimnames[[2]] = (x.attr$dimnames[[2]])[-c(df0-1,df0)];
x.bs2 = x.bs[,-c(1,df0)];
attributes(x.bs2)=x.attr;
a <- list(df = df)
attributes(x.bs2) <- c(attributes(x.bs2), a)
class(x.bs2) <- c("bspline", "basis", "matrix")
x.bs2
}
predict.bspline <- function(object, newx, ...){
if(missing(newx))
return(object)
a <- c(list(x = newx), attributes(object)[
c("df", "knots", "Boundary.knots")])
do.call("bspline", a)
}
makepredictcall.bspline <- function(var, call){
if(as.character(call)[1L] != "bspline") return(call)
at <- attributes(var)[c("df", "knots", "Boundary.knots")]
xxx <- call[1L:2]
xxx[names(at)] <- at
xxx
}
|
799b48c23bdbba5a4ab99b73483f6bc6e666ec15
|
aa69b23062d8926bdd84716653cf961feb96ec1a
|
/plot4.R
|
d0f32f0064e3af146168e2ed7e84ee30fdc255ec
|
[] |
no_license
|
raymiller13/ExData_Plotting1
|
002f0ddba323ace5b324482c5cf14615719af4ff
|
fd780dac9b2825212d17ccb85294a430951deded
|
refs/heads/master
| 2021-07-21T23:42:49.882777
| 2017-10-29T23:37:44
| 2017-10-29T23:37:44
| 108,771,589
| 0
| 0
| null | 2017-10-29T21:09:47
| 2017-10-29T21:09:47
| null |
UTF-8
|
R
| false
| false
| 1,888
|
r
|
plot4.R
|
hcdata <- read.table("./household_power_consumption.txt", sep = ";", na.strings = "?", header = TRUE)
hcdata$dt <- lubridate::dmy_hms(paste(hcdata$Date, hcdata$Time))
hcdata$Date <- lubridate::dmy(hcdata$Date)
hcdata$Time <- strptime(hcdata$Time, "%H:%M:%S")
hcdata$Time <- strftime(hcdata$Time, format="%H:%M:%S")
hcdata <- subset(hcdata, Date >= as.Date("2007-02-01"))
hcdata <- subset(hcdata, Date <= as.Date("2007-02-02"))
# Plot 4
par(mfrow = c(2,2), mar = c(4, 4, 2, 1), oma = c(0, 0, 2, 0))
plot(hcdata$dt, hcdata$Global_active_power, type = "l", xlab = "", ylab = "Global Active Power")
plot(hcdata$dt, hcdata$Voltage, type = "l", xlab = "datetime", ylab = "Voltage")
plot(hcdata$dt, hcdata$Sub_metering_1, type = "l", xlab = "", ylab = "Energy sub metering")
lines(hcdata$dt, hcdata$Sub_metering_2, type = "l", col = "red")
lines(hcdata$dt, hcdata$Sub_metering_3, type = "l", col = "blue")
legend("topright", col = c("black", "red", "blue"), c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lwd = 2)
plot(hcdata$dt, hcdata$Global_reactive_power, type = "l", xlab = "datetime", ylab = "Global_reactive_power")
# Write plots to png file
png(filename="plot4.png")
par(mfrow = c(2,2), mar = c(4, 4, 2, 1), oma = c(0, 0, 2, 0))
plot(hcdata$dt, hcdata$Global_active_power, type = "l", xlab = "", ylab = "Global Active Power")
plot(hcdata$dt, hcdata$Voltage, type = "l", xlab = "datetime", ylab = "Voltage")
plot(hcdata$dt, hcdata$Sub_metering_1, type = "l", xlab = "", ylab = "Energy sub metering")
lines(hcdata$dt, hcdata$Sub_metering_2, type = "l", col = "red")
lines(hcdata$dt, hcdata$Sub_metering_3, type = "l", col = "blue")
legend("topright", col = c("black", "red", "blue"), c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lwd = 2)
plot(hcdata$dt, hcdata$Global_reactive_power, type = "l", xlab = "datetime", ylab = "Global_reactive_power")
dev.off()
|
b37522a43ede04c13926dca642aa7d4b49bd20f8
|
057948809237491836bd0afbb55db34132128f46
|
/data_visualization/squad_usage_gantt_chart.R
|
d1925cfbf14dc52d107ac91b13eeba45a77ea08f
|
[] |
no_license
|
NlIceD/Football
|
4f0e9ed103b8ca137071d10d85220d4b290824d3
|
38b76cdcfd5e0f7a007ecb89d60a1583de02a801
|
refs/heads/master
| 2020-06-18T15:49:00.600030
| 2019-04-10T16:17:54
| 2019-04-10T16:17:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,271
|
r
|
squad_usage_gantt_chart.R
|
library(tidyverse)
source("C:/Users/Ray/Desktop/Football Data/Opta Data/Football/data_visualization/theme_ray.R")
theme_set(theme_ray())
# Code to plot single line gantt chart for team squad usage
df <- read_csv("C:/Users/Ray/Desktop/Football Data/Opta Data/tmp.csv")
# 4 columns: match, start, end, player
# each row contains players start and end time on the field for each match
max_mins = max(df$match)*90
df$match <- as.factor(df$match)
df$player_id <- as.factor(df$player_id)
positions <- unique(df$player)
ggplot(df)+
geom_segment(aes(x=start, xend=end, y=player, yend=player), size=6, alpha=0.45, color='#842742') +
geom_hline(yintercept=seq(1,nrow(df), 1)+.5, color="#e7e7e7")+
scale_y_discrete(limits = positions)+
scale_x_continuous(minor_breaks = (seq(0, max_mins, by = 90)), breaks = (seq(0, max_mins, by = 450)), labels=c('0'='0', '450'='5', '900'='10', '1350'='15', '1800'='20', '2250'='25')) +
ggtitle("Barcelona Squad Usage")+
labs(subtitle="La Liga 2018/19", x='Match')+
theme(panel.grid.major.y = element_blank(),
plot.subtitle=element_text(size=11),
plot.title=element_text(size=14),
axis.text.y=element_text(face='bold'),
axis.title.y=element_blank())
ggsave('tmp.png', width=5, height=5, dpi=1000)
|
bc1f9aed2e69b1f9ca651f5baaa65551923b8a03
|
cb4788572dcb70ce1aea6fc56d2ddf567f4d037b
|
/pkg/R/factory.R
|
58aabde30ab090a24cd136f6707f858636e45ab1
|
[] |
no_license
|
data-cleaning/validate
|
1a287295bccfe517861010d6f2d5e334713b1ae4
|
9e2f4f2e324e16878a9cb299ed2c8c8f3c9544e9
|
refs/heads/master
| 2023-09-03T23:01:20.631922
| 2023-08-21T19:44:01
| 2023-08-21T19:44:01
| 17,057,497
| 394
| 49
| null | 2023-05-01T07:50:21
| 2014-02-21T13:50:42
|
R
|
UTF-8
|
R
| false
| false
| 1,508
|
r
|
factory.R
|
# factory function. Evaluate expressions, catch errors and warnings silently (per option).
factory <- function(fun,opts){
switch(opts('raise')
, 'none' = function(...) { # both errors and warnings are caught
warn <- err <- NULL
res <- withCallingHandlers(
tryCatch(outcheck(fun)(...), error=function(e) {
err <<- conditionMessage(e)
NULL
}), warning=function(w) {
warn <<- append(warn, conditionMessage(w))
invokeRestart("muffleWarning")
})
list(res, warn=warn, err=err)
}
, 'errors' = function(...) { # warnings are caught; errors are raised.
warn <- err <- NULL
res <- withCallingHandlers( outcheck(fun)(...)
, warning=function(w) {
warn <<- append(warn, conditionMessage(w))
#invokeRestart("muffleWarning")
})
list(res, warn=warn, err=err)
}
, 'all' = function(...){
warn <- err <- NULL
res <- outcheck(fun)(...) # errors and warnings are raised.
list(res,warn=warn,err=err)
}
)
}
outcheck <- function(fun){
function(...){
out <- fun(...)
if (!(is.numeric(out) | is.logical(out))){
warning("Expression did not evaluate to numeric or logical, returning NULL"
, call.=FALSE)
return(NULL)
} else {
return(out)
}
}
}
Id <- function(x) x
num_result <- function(x) if (is.list(x)) length(x$result) else length(x)
get_result <- function(x) if (is.list(x)) x$result else x
|
3c08a25600e4e87a3868dcb4c2d87acf866a48ac
|
4a7db6cb5e3adeec233b381bbf0b980aa29f65f1
|
/R/TDR_corrected.R
|
f60e3b0effc1b514640c9c9a65fa3e6f33a9f072
|
[
"MIT"
] |
permissive
|
MiriamLL/spheniscus
|
8a8c3b5a766d38702450c9a7b6ec8bd3c1d285d0
|
2b0c644cf06674693381612465803a64b882aab3
|
refs/heads/master
| 2023-04-07T16:35:03.174003
| 2021-05-19T16:47:09
| 2021-05-19T16:47:09
| 360,213,200
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 820
|
r
|
TDR_corrected.R
|
#' Corrected data from TDRs
#'
#' EN:
#' The data includes pressure data obtained from a Humbolt Penguin at Tilgo, Chile
#' The information was collected during the breeding season 2018.
#' It contains 42388 obs from 6 variables: tStamp, Pressure, numeric_sequence, daytime, depth and corrected_depth
#' ES:
#' Los datos incluyen datos crudos de un pinguino de Humbold muestreado en Isla Tilgo, Chile
#' La information fue colectada durante la temporada reproductiva 2018.
#' Contiene 42388 observaciones de 6 variables: tStamp, Pressure, numeric_sequence, daytime, depth and corrected_depth
#'
#' @docType data
#'
#' @usage data(TDR_corrected)
#'
#' @format A rda data with one column and 42388 rows
#'
#' @keywords datasets
#'
#' @references Lerma et al. 2021
#'
#'
#' @examples
#' data(TDR_corrected)
"TDR_corrected"
|
b19c463cc07616e5711bbfd6c663e4f66792f597
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/MetaPath/R/MAPE_P_gene_KS.R
|
3f9eb1917f813a8d3608b46a911f98b9ecaae22f
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,803
|
r
|
MAPE_P_gene_KS.R
|
MAPE_P_gene_KS <-
function(study,label,censoring.status,DB.matrix,size.min=15,size.max=500,nperm=500,stat=NULL,rth.value=NULL,resp.type){
if (is.null(names(study))) names(study)=paste('study.',1:length(study),sep="")
out=list()
for(t1 in 1:length(study)){
madata=study[[t1]]
testlabel=madata[[label]]
out[[t1]]=list()
if (resp.type=="survival") {
censoring=madata[[censoring.status]]
}
out[[t1]]=Enrichment_KS_gene(madata=madata,label=testlabel,censoring=censoring,DB.matrix=DB.matrix,size.min=size.min,size.max=size.max,nperm=nperm,resp.type=resp.type)
}
set.common=rownames(out[[1]]$pvalue.set.0)
for(t1 in 2:length(study)){
set.common=intersect(set.common,rownames(out[[t1]]$pvalue.set.0))
}
if (is.null(names(study))) names(study)=paste('study.',1:length(study),sep="")
pvalue.B.array=array(data=NA,dim=c(length(set.common),nperm,length(study)),dimnames=names(study))
pvalue.0.mtx=matrix(NA,length(set.common),length(study))
qvalue.0.mtx=matrix(NA,length(set.common),length(study))
for(t1 in 1:length(study)){
pvalue.B.array[,,t1]=out[[t1]]$pvalue.set.B[set.common,]
pvalue.0.mtx[,t1]=out[[t1]]$pvalue.set.0[set.common,]
qvalue.0.mtx[,t1]=out[[t1]]$qvalue.set.0[set.common,]
}
rownames(qvalue.0.mtx)=set.common
rownames(pvalue.0.mtx)=set.common
rm(out)
## statistics for meta-analysis
if(stat=='maxP'){
## maxP statistics
P.0=as.matrix(apply(pvalue.0.mtx,1,max))
rownames(P.0)=rownames(qvalue.0.mtx)
P.B=apply(pvalue.B.array,c(1,2),max)
rownames(P.B)=rownames(qvalue.0.mtx)
} else if (stat=='minP'){
## minP statistics
P.0=as.matrix(apply(pvalue.0.mtx,1,min))
rownames(P.0)=rownames(qvalue.0.mtx)
P.B=apply(pvalue.B.array,c(1,2),min)
rownames(P.B)=rownames(qvalue.0.mtx)
} else if (stat=='rth'){
## rth statistics
P.0=as.matrix(apply(pvalue.0.mtx,1,function(x) sort(x)[ceiling(rth.value*ncol(pvalue.0.mtx))]))
rownames(P.0)=rownames(qvalue.0.mtx)
P.B=apply(pvalue.B.array,c(1,2),function(x) sort(x)[ceiling(rth.value*dim(pvalue.B.array)[3])])
rownames(P.B)=rownames(qvalue.0.mtx)
} else if (stat=='Fisher'){
DF=2*length(study)
## rth statistics
P.0=as.matrix(apply(pvalue.0.mtx,1,function(x) pchisq(-2*sum(log(x)),DF,lower.tail=T) ))
rownames(P.0)=rownames(qvalue.0.mtx)
P.B=apply(pvalue.B.array,c(1,2),function(x) pchisq(-2*sum(log(x)),DF,lower.tail=T) )
rownames(P.B)=rownames(qvalue.0.mtx)
} else { stop("Please check: the selection of stat should be one of the following options: maxP,minP,rth and Fisher") }
## pvalues and qvalues calculation
meta.out=pqvalues.compute(P.0,P.B,Stat.type='Pvalue')
return(list(pvalue.meta=meta.out$pvalue.0, qvalue.meta=meta.out$qvalue.0, pvalue.meta.B=meta.out$pvalue.B,qvalue.set.study=qvalue.0.mtx,pvalue.set.study=pvalue.0.mtx))
}
|
29fa182d16c9df1c78a6d0573a86846de64573de
|
c9e0c41b6e838d5d91c81cd1800e513ec53cd5ab
|
/man/gtkPanedGetChild2.Rd
|
f96e244395267893a17482376511adb652173fb9
|
[] |
no_license
|
cran/RGtk2.10
|
3eb71086e637163c34e372c7c742922b079209e3
|
75aacd92d4b2db7d0942a3a6bc62105163b35c5e
|
refs/heads/master
| 2021-01-22T23:26:26.975959
| 2007-05-05T00:00:00
| 2007-05-05T00:00:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 439
|
rd
|
gtkPanedGetChild2.Rd
|
\alias{gtkPanedGetChild2}
\name{gtkPanedGetChild2}
\title{gtkPanedGetChild2}
\description{Obtains the second child of the paned widget.}
\usage{gtkPanedGetChild2(object)}
\arguments{\item{\code{object}}{[\code{\link{GtkPaned}}] a \code{\link{GtkPaned}} widget}}
\details{ Since 2.4}
\value{[\code{\link{GtkWidget}}] second child, or \code{NULL} if it is not set.}
\author{Derived by RGtkGen from GTK+ documentation}
\keyword{internal}
|
798054cb033f9745e444a8ae4901acfba6811a7d
|
214216cefc96120cd1fbd484f43e4c1c86904ecd
|
/R/blr-data-stepwise.R
|
15b5ea627f3eb28d00109f92c86834871aeefd66
|
[
"MIT"
] |
permissive
|
rsquaredacademy/blorr
|
9fa8a0fc18b4bbbe91d04124aa4a64f3a47051b3
|
073f672bb830080dd666c7cac4ff5d342b3ce0ac
|
refs/heads/master
| 2023-08-30T17:01:49.220745
| 2021-07-08T11:25:36
| 2021-07-08T11:25:36
| 91,309,738
| 18
| 3
|
NOASSERTION
| 2021-07-08T11:25:37
| 2017-05-15T07:49:49
|
R
|
UTF-8
|
R
| false
| false
| 29
|
r
|
blr-data-stepwise.R
|
#' Dummy Data Set
"stepwise"
|
8b6410b272d8359e2927d93c71ef1a3bdbd65065
|
8acec86427472c50b2407efea07d5422b15829c8
|
/R/conditional_dependency.R
|
fc27ace212b72428a8b56b4a0b354bcf1c53c228
|
[] |
no_license
|
tkonopka/ingredients
|
42b381494ebbe79e6f4b612f42084cc6aaf4bd63
|
e55d086a3d8cad65ad79dc7ca070a97c4b0d7f17
|
refs/heads/master
| 2020-06-14T19:15:44.966002
| 2019-07-25T16:25:51
| 2019-07-25T16:25:51
| 195,100,146
| 0
| 0
| null | 2019-07-03T17:32:12
| 2019-07-03T17:32:11
| null |
UTF-8
|
R
| false
| false
| 5,137
|
r
|
conditional_dependency.R
|
#' Conditional Dependency Profiles
#'
#' Conditional Dependency Profiles (aka Local Profiles) average localy Ceteris Paribus Profiles.
#' Function 'conditional_dependency' calls 'ceteris_paribus' and then 'aggregate_profiles'.
#'
#' Find more detailes in \href{https://pbiecek.github.io/PM_VEE/conditionalProfiles.html}{Local Dependency Profiles Chapter}.
#'
#' @param x a model to be explained, or an explainer created with function `DALEX::explain()` or object of the class `ceteris_paribus_explainer`.
#' @param data validation dataset, will be extracted from `x` if it's an explainer
#' @param predict_function predict function, will be extracted from `x` if it's an explainer
#' @param variables names of variables for which profiles shall be calculated. Will be passed to `calculate_variable_splits()`. If NULL then all variables from the validation data will be used.
#' @param N number of observations used for calculation of partial dependency profiles. By default 500.
#' @param ... other parameters
#' @param variable_splits named list of splits for variables, in most cases created with `calculate_variable_splits()`. If NULL then it will be calculated based on validation data avaliable in the `explainer`.
#' @param grid_points number of points for profile. Will be passed to `calculate_variable_splits()`.
#' @param label name of the model. By default it's extracted from the 'class' attribute of the model
#'
#' @references Predictive Models: Visual Exploration, Explanation and Debugging \url{https://pbiecek.github.io/PM_VEE}
#'
#' @return an 'aggregated_profile_explainer' layer
#' @examples
#' library("DALEX")
#' # Toy examples, because CRAN angels ask for them
#' titanic <- na.omit(titanic)
#' model_titanic_glm <- glm(survived == "yes" ~ gender + age + fare,
#' data = titanic, family = "binomial")
#'
#' explain_titanic_glm <- explain(model_titanic_glm,
#' data = titanic[,-9],
#' y = titanic$survived == "yes")
#'
#' pdp_rf <- conditional_dependency(explain_titanic_glm, N = 50)
#' plot(pdp_rf)
#'
#' \donttest{
#' library("titanic")
#' library("randomForest")
#'
#' titanic_small <- titanic_train[,c("Survived", "Pclass", "Sex", "Age",
#' "SibSp", "Parch", "Fare", "Embarked")]
#' titanic_small$Survived <- factor(titanic_small$Survived)
#' titanic_small$Sex <- factor(titanic_small$Sex)
#' titanic_small$Embarked <- factor(titanic_small$Embarked)
#' titanic_small <- na.omit(titanic_small)
#' rf_model <- randomForest(Survived ~ Pclass + Sex + Age + SibSp + Parch + Fare + Embarked,
#' data = titanic_small)
#' explainer_rf <- explain(rf_model, data = titanic_small,
#' y = titanic_small$Survived == "1", label = "RF")
#'
#' pdp_rf <- conditional_dependency(explainer_rf)
#' plot(pdp_rf)
#' }
#' @export
#' @rdname conditional_dependency
conditional_dependency <- function(x, ...)
UseMethod("conditional_dependency")
#' @export
#' @rdname conditional_dependency
conditional_dependency.explainer <- function(x, variables = NULL, N = 500,
variable_splits = NULL, grid_points = 101,
...) {
# extracts model, data and predict function from the explainer
model <- x$model
data <- x$data
predict_function <- x$predict_function
label <- x$label
conditional_dependency.default(model, data, predict_function,
label = label,
variables = variables,
grid_points = grid_points,
variable_splits = variable_splits,
N = N,
...)
}
#' @export
#' @rdname conditional_dependency
conditional_dependency.default <- function(x, data, predict_function = predict,
label = class(x)[1],
variables = NULL,
grid_points = grid_points,
variable_splits = variable_splits,
N = 500,
...) {
if (N < nrow(data)) {
# sample N points
ndata <- data[sample(1:nrow(data), N),]
} else {
ndata <- data
}
cp <- ceteris_paribus.default(x, data, predict_function = predict_function,
ndata, variables = variables,
grid_points = grid_points,
variable_splits = variable_splits,
label = label, ...)
conditional_dependency.ceteris_paribus_explainer(cp, variables = variables)
}
#' @export
#' @rdname conditional_dependency
conditional_dependency.ceteris_paribus_explainer <- function(x, ...,
variables = NULL) {
aggregate_profiles(x, ..., type = "conditional", variables = variables)
}
#' @export
#' @rdname conditional_dependency
local_dependency <- conditional_dependency
|
7af381a70d9499f019df5d4877d79272a499ccc7
|
907aaa2ef40dd8beeb9d533fa519fac0afaf8e37
|
/R/scrapeHTML.r
|
c7c474498a36c9bf247797fb9fd689e50b2919b1
|
[] |
no_license
|
AndreasFischer1985/qqBaseX
|
eaee341155d66d4ff92ca00d6b4d419c3bf1f28a
|
98bec0ce041666d09d2c89a4ddc6b84a2349fa53
|
refs/heads/master
| 2022-09-14T18:58:05.493380
| 2022-08-26T11:52:38
| 2022-08-26T11:52:38
| 189,703,556
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,504
|
r
|
scrapeHTML.r
|
#' Function scrapeHTML
#'
#' Extracts Information from HTML.
#' @param html A character element containing HTML-code.
#' @param short Logical value specifying whether only lines with verbal information or link should be returned. Defaults to F.
#' @param edit Logical value specifying whether the data.frame should be plotted/edited.
#' @param save Logical value specifying whether the HTML-code should be saved to a csv-file.
#' @param plot Logical value specifying whether to plot the frequency of each HTML-tag found in the html-object.
#' @param filename Character value specifying the filename (if save is TRUE). If NULL (default) as.numeric(Sys.time()) is applied.
#' @details Extracts Information from HTML code (as returned by qqBaseX::getHTML, for example). Returns a data.frame with three columns: the first column contains html-code, the second column contains extracted verbal information, and the third column contains extracted links.
#' @keywords scraping
#' @export
#' @examples
#' scrapeHTML(getHTML())
scrapeHTML <- function (html, short = F, edit = T, save = F, plot = F, filename = NULL)
{
if (length(html) > 1)
if (length(dim(html)) > 2)
stop("please provide HTML as a character vector!")
else if (length(dim(html)) > 1) {
warning("two-dimensional input detected. Only first column is used.")
html = paste(as.character(html[, 1]), collapse = " ")
}
else html = paste(as.character(html), collapse = " ")
strings = gsub("(\n|\t)+", " ", gsub("[ ]+", " ", paste0("<",
strsplit(html, "<")[[1]])))
if (plot) {
s1 = sort(table(gsub("(<[/]?|(>| ).*)", "", strings)))
bp(s1[s1 > 2], main2 = "Common Tags")
}
info = gsub("^[ ]*$", "", gsub("^<[^<]*>", "", strings))
links = character(length(info))
links[grep("href=[\"'].*?[\"']", strings)] = gsub("[\"'].*$",
"", gsub("^.*?href=[\"']", "", grep("href=[\"'].*?[\"']",
strings, value = T)))
result = data.frame(entry = as.character(strings), info = as.character(info),
links = as.character(links), stringsAsFactors = F)
if (short)
result = result[nchar(as.character(result[, 2])) > 0 |
nchar(as.character(result[, 3])) > 0, ]
if (edit)
result = edit(result)
if (save)
write.csv2(data.frame(result), paste0(ifelse(is.character(filename),
filename, as.numeric(Sys.time())), ".csv"))
return(invisible(result))
}
|
61038f63dba2973e6f07d4f41e7717c1153b57b5
|
e0d842fd88dfe8511c73cfd41c2b57e945b43106
|
/misc_metrics/setup.R
|
c9ac2ab5f005de347a8318b6f7a86f039c9ff546
|
[] |
no_license
|
meerapatelmd/omop_mapping
|
361feb2ebf0844961d783a3d0f0669588fa67b54
|
e006bc2d241355ca610931d41ffa92027a5f9134
|
refs/heads/master
| 2022-12-11T15:40:28.253098
| 2020-09-05T00:49:21
| 2020-09-05T00:49:21
| 272,284,490
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 212
|
r
|
setup.R
|
if ("/Users/patelm9/GitHub/KMI/termite/Map_to_OMOP/misc_metrics" != getwd()) {
setwd("/Users/patelm9/GitHub/KMI/termite/Map_to_OMOP/misc_metrics")
}
source('utils.R')
path_to_input_dir <- "input/COVID"
|
313a52d0477e364fd90489845401184f0b461854
|
066e60e4c6730a945fcba528328fc646945beaa3
|
/R/access.R
|
c29970c6ad24278ada88dc8c9717fceeb3493a35
|
[
"CC0-1.0"
] |
permissive
|
appling/unitted
|
0b59a79a949e7405cb443306216d7038a30c29ad
|
d1f11723cd274297f44adbb5d1af4990eb0cdf06
|
refs/heads/master
| 2021-01-17T07:34:17.966050
| 2017-07-21T22:38:37
| 2017-07-21T22:38:37
| 18,263,678
| 1
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,794
|
r
|
access.R
|
#' Extract parts of a unitted object
#'
#' The standard accessors \code{x[...]}, \code{x[[...]]}, and \code{x$name} are
#' all available for unitted objects.
#'
#' \code{x[...]} extracts elements with their units.
#'
#' @name unitted_access
#' @aliases [ Extract
#' @rdname unitted_access
#' @export
#' @seealso \code{\link{unitted_assign}} for assignment to parts of objects;
#' \code{\linkS4class{unitted}} for definition of the unitted class
#'
#' @param x The unitted data.frame, vector, etc. to be accessed
#' @param ... Arguments passed to accessors, typically including one or more
#' extraction indices
#' @return A new data.frame, vector, etc. with the right units still attached.
"[.unitted" <- function(x, ...) {
if(isTRUE(is.data.frame(x))) {
new_units <- NA
x <- deunitted(x, partial=TRUE)
} else {
new_units <- get_unitbundles(x)
x <- deunitted(x)
}
vx <- NextMethod("[")
unitted(vx, new_units)
}
#' @details \code{x[[...]]} drops most attributes but retains units.
#'
#' @aliases [[
#' @rdname unitted_access
#' @export
"[[.unitted" <- function(x, ...) {
if(isTRUE(is.data.frame(x))) {
new_units <- NA
x <- deunitted(x, partial=TRUE)
} else {
new_units <- get_unitbundles(x)
x <- deunitted(x)
}
vx <- NextMethod("[[")
unitted(vx, new_units)
}
#' @details \code{x$name} extracts a named column, with units, from a unitted data.frame
#'
#' @aliases $
#' @rdname unitted_access
#' @export
#'
#' @param name The name of the column to be extracted
"$.unitted" <- function(x, name) {
# NextMethod gives errors about promises being an unacceptable type for 'col',
# so use [[ instead of NextMethod. According to ?`$`, x$name is equivalent to
# x[["name", exact = FALSE]]
x[[name, exact=FALSE]]
}
|
346007153e9fe536b0b6776b71275c0dae4ab15b
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/formula.tools/examples/terms.Rd.R
|
e290b3baf3cab01ef06636a8a83528d5bcae3a4b
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 271
|
r
|
terms.Rd.R
|
library(formula.tools)
### Name: terms.call
### Title: terms
### Aliases: terms.call terms.expression terms
### Keywords: manip symbolmath utilities
### ** Examples
terms( quote( A + B ) )
data(iris)
x <- terms( quote( . - Species ) , data=iris )
|
b648a737af8fd26e4824e52299e3c6b24a5f066f
|
2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0
|
/fuzzedpackages/diffman/man/matrix_liens.Rd
|
792b40c3f432a90b91ad3d65b5df43905ceeea47
|
[] |
no_license
|
akhikolla/testpackages
|
62ccaeed866e2194652b65e7360987b3b20df7e7
|
01259c3543febc89955ea5b79f3a08d3afe57e95
|
refs/heads/master
| 2023-02-18T03:50:28.288006
| 2021-01-18T13:23:32
| 2021-01-18T13:23:32
| 329,981,898
| 7
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 722
|
rd
|
matrix_liens.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fct_reshaping.R
\name{matrix_liens}
\alias{matrix_liens}
\title{Créer la matrice de liens (ou matrice de contiguïté)}
\usage{
matrix_liens(m_crois)
}
\arguments{
\item{m_crois}{Matrice de croisement.}
}
\value{
En sortie on a une matrice carré de booléens.
}
\description{
Cette fonction permet de créer une matrice carré de booléens de taille
égale au nombre de zones du zonage z1.
}
\details{
Un élément (i,j) de cette matrice
vaut TRUE (ou 1) si les zones i et j du zonage z1 sont contigues, c'est-à-dire
s'il existe au moins une zone de z2 recouvrant à la fois i et j. Les élements
de la diagonales portent la valeur FALSE.
}
|
eda3d8b9c65d7dc9f4b8e7a6f2748f405c1e75de
|
1be7bdfb44a4b4ae98cdc48046042f0739fefde1
|
/proj/simu/type-L0.p2.R
|
e89a034a77c63c35c45e4e7a153efefa75f9de97
|
[] |
no_license
|
wzhy2000/LSKAT
|
c521ebe4247fb592a2bec3d2110a41a523e5ca2c
|
d1dee4cc874195eaaab854c9f5f812a4f17ad27b
|
refs/heads/master
| 2020-04-15T10:53:49.427329
| 2018-06-22T20:21:00
| 2018-06-22T20:21:00
| 52,325,786
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,116
|
r
|
type-L0.p2.R
|
source("long-simu-test-rare2.r");
source("test-nocorrelation.R")
ncores<-16;
par <- list(
test.type = "Joint",
mu = 1,
a = 0.5,
b = 0.5,
rho = 0.75,
sig_a = 0.8,
sig_b = 0.8,
sig_e = 0.8,
par1 = 0.7,
par2 = 0.8,
times = 8,
intercept = F,
y.cov.time = 0,
par_t = c(0.2, -0.08),
snprange = c(5*1000, 30*1000),
a.level = 10^(-6),
w.common = c(0.5, 0.5),
w.rare = c(1,25),
common.c1 = 0.12,
rare.c1 = 0.08,
effect.sd = 0.05,
common.cause = 3,
rare.effect = 1,
rare.cutoff = 0.05 );
n.rep <- 1000;
n.loop <- 200;
testR( 0, par, "type1.L0.1k.mn.ar1.2500.pp2.rdata", nsample = 2500, phe.dist = "mn", phe.cov = "ar1", nloop = n.loop, nrep = n.rep, ncores = ncores);
testR( 0, par, "type1.L0.1k.mn.ar1.1000.pp2.rdata", nsample = 1000, phe.dist = "mn", phe.cov = "ar1", nloop = n.loop, nrep = n.rep, ncores = ncores);
testR( 0, par, "type1.L0.1k.mn.ar1.500.pp2.rdata", nsample = 500, phe.dist = "mn", phe.cov = "ar1", nloop = n.loop, nrep = n.rep, ncores = ncores);
|
61c5321a60d420236ac7b8f64e118ee39fc2f379
|
e990d7b84874be47f7967383750f3b504a63c580
|
/R/stop_death.R
|
949b0408e3590067d1b94a46bff909a5bfd0aefa
|
[
"MIT"
] |
permissive
|
ronaldoalves-ms/stopdeath
|
2700002bb6a89d39b38ac0ddd7eb2be0462b15aa
|
15c7b898399225d885c9454e52f1b367c9664b87
|
refs/heads/main
| 2023-04-15T23:19:52.525594
| 2021-05-02T23:33:25
| 2021-05-02T23:33:25
| 363,734,428
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,218
|
r
|
stop_death.R
|
#' Codificação de Causas de Óbitos Evitáveis na Infância
#'
#' \code{stop_death} é uma função de codificação de causas de óbitos evitáveis em crianças menores de 5 anos de idade. A função cria um vetor de códigos identificadores (code_class), cuja estrutura compreende variados níveis hierárquicos de classificação de causas de óbitos evitáveis.
#'
#' @param dados Um data frame contendo os dados de interesse.
#'
#' @param var_nome Um vetor de caracteres contendo os códigos da CID-10.
#'
#' @return A função retorna um data frame enriquecido com a variável "code_class".
#'
#' @references Malta DC, Duarte EC, Almeida MF, et al. Lista de causas de mortes evitáveis por intervenções do Sistema Único de Saúde do Brasil. Epidemiologia e Serviços de Saúde, 16(4):233-244, out-dez 2007.\url{http://scielo.iec.gov.br/pdf/ess/v16n4/v16n4a02.pdf}
#' @references Malta DC, Sardinha LMV, Moura L, et al. Atualização da lista de causas de mortes evitáveis por intervenções do Sistema Único de Saúde do Brasil. Epidemiologia e Serviços de Saúde, 19(2):173-176, abr-jun 2010.\url{http://scielo.iec.gov.br/pdf/ess/v19n2/v19n2a10.pdf}
#'
#' @author Ronaldo Fernandes Santos Alves. Fundação Oswaldo Cruz, Instituto de Comunicação e Informação Científica e Tecnológica em Saúde.
#'
#' @examples
#' library(stopdeath)
#' dados <- stop_death(dados, dados$var_nome)
#'
#' @export
# PACOTES INSTALADOS PARALELAMENTE
#usethis::use_package("magrittr", type = "Imports")
#usethis::use_package("dplyr", type = "Imports")
#usethis::use_package("stringr", type = "Imports")
#usethis::use_pipe(export = TRUE)
stop_death <- function(dados, var_nome){
  # Purpose: enrich `dados` with a "code_class" column that maps ICD-10 cause
  # of death codes to the hierarchical avoidable-death classification.
  #
  # Bug fix: the original cleaned the codes into code_class and then
  # immediately overwrote that column with str_to_upper(var_nome), and the
  # case_when below matched against the raw var_nome. As a result, dotted
  # ICD-10 codes such as "G00.0" never matched patterns like "G000".
  # We now normalise once (strip non-alphanumerics, upper-case) and classify
  # on the normalised codes.
  codigo <- stringr::str_to_upper(stringr::str_remove_all(var_nome, "[^[:alnum:]]"))
  # NOTE(review): commas inside character classes (e.g. [2-3,7-9]) match a
  # literal comma; harmless after cleaning, but [2-37-9] was likely intended.
  dados %>%
    dplyr::mutate(
      code_class = dplyr::case_when(
        stringr::str_detect(codigo, "^A17") ~ "01_11_0_003",
        stringr::str_detect(codigo, "^A19") ~ "01_11_0_004",
        stringr::str_detect(codigo, "^A33") ~ "01_11_0_005",
        stringr::str_detect(codigo, "^A35") ~ "01_11_0_006",
        stringr::str_detect(codigo, "^A36") ~ "01_11_0_007",
        stringr::str_detect(codigo, "^A37") ~ "01_11_0_008",
        stringr::str_detect(codigo, "^A80") ~ "01_11_0_009",
        stringr::str_detect(codigo, "^B05") ~ "01_11_0_010",
        stringr::str_detect(codigo, "^B06") ~ "01_11_0_011",
        stringr::str_detect(codigo, "^B16") ~ "01_11_0_012",
        stringr::str_detect(codigo, "^B26") ~ "01_11_0_013",
        stringr::str_detect(codigo, "G000") ~ "01_11_0_014",
        stringr::str_detect(codigo, "P350") ~ "01_11_0_015",
        stringr::str_detect(codigo, "P353") ~ "01_11_0_016",
        stringr::str_detect(codigo, "^A50") ~ "01_12_1_019",
        stringr::str_detect(codigo, "^B2[0-4]") ~ "01_12_1_020",
        stringr::str_detect(codigo, "^P02[2-3,7-9]") ~ "01_12_1_021",
        stringr::str_detect(codigo, "^P0[0,4]") ~ "01_12_1_022",
        stringr::str_detect(codigo, "^P01") ~ "01_12_1_023",
        stringr::str_detect(codigo, "^P05") ~ "01_12_1_024",
        stringr::str_detect(codigo, "^P07") ~ "01_12_1_025",
        stringr::str_detect(codigo, "P220") ~ "01_12_1_026",
        stringr::str_detect(codigo, "^P26") ~ "01_12_1_027",
        stringr::str_detect(codigo, "^P52") ~ "01_12_1_028",
        stringr::str_detect(codigo, "P55[0-1]") ~ "01_12_1_029",
        stringr::str_detect(codigo, "P55[8-9]|^P5[6-7]") ~ "01_12_1_030",
        stringr::str_detect(codigo, "^P77") ~ "01_12_1_031",
        stringr::str_detect(codigo, "P02[0-1]") ~ "01_12_2_033",
        stringr::str_detect(codigo, "P02[4-6]") ~ "01_12_2_034",
        stringr::str_detect(codigo, "^P03") ~ "01_12_2_035",
        stringr::str_detect(codigo, "^P08") ~ "01_12_2_036",
        stringr::str_detect(codigo, "^P1[0-5]") ~ "01_12_2_037",
        stringr::str_detect(codigo, "^P2[0-1]") ~ "01_12_2_038",
        stringr::str_detect(codigo, "P24[0-2,8-9]") ~ "01_12_2_039",
        stringr::str_detect(codigo, "P22[1,8-9]|^P2[3,5,7-8]") ~ "01_12_3_041",
        stringr::str_detect(codigo, "P35[1-2,4-9]|^P3[6-9]") ~ "01_12_3_042",
        stringr::str_detect(codigo, "^P5[0-1,3-4]") ~ "01_12_3_043",
        stringr::str_detect(codigo, "^P5[8-9]") ~ "01_12_3_044",
        stringr::str_detect(codigo, "^P7[0-4]") ~ "01_12_3_045",
        stringr::str_detect(codigo, "^P6[0-1]") ~ "01_12_3_046",
        stringr::str_detect(codigo, "^P7[5-6,8]") ~ "01_12_3_047",
        stringr::str_detect(codigo, "^P8[0-3]") ~ "01_12_3_048",
        stringr::str_detect(codigo, "^P9[0-4]|P96[0-8]") ~ "01_12_3_049",
        stringr::str_detect(codigo, "^A15") ~ "01_13_0_051",
        stringr::str_detect(codigo, "^A16") ~ "01_13_0_052",
        stringr::str_detect(codigo, "^A18") ~ "01_13_0_053",
        stringr::str_detect(codigo, "G00[1-9]|^G03") ~ "01_13_0_054",
        stringr::str_detect(codigo, "^J0[0-6]") ~ "01_13_0_055",
        stringr::str_detect(codigo, "^J1[2-8]") ~ "01_13_0_056",
        stringr::str_detect(codigo, "^J2[0-2]") ~ "01_13_0_057",
        stringr::str_detect(codigo, "J384") ~ "01_13_0_058",
        # NOTE(review): the next pattern also maps to _058 while _059 is
        # unused — possibly a transcription slip in the original lookup.
        stringr::str_detect(codigo, "^J4[0-2,5-7]") ~ "01_13_0_058",
        stringr::str_detect(codigo, "^J6[8-9]") ~ "01_13_0_060",
        stringr::str_detect(codigo, "^A7[0-4]") ~ "01_13_0_061",
        stringr::str_detect(codigo, "^A3[0-2,8-9]|^A4[0-1,6,9]") ~ "01_13_0_062",
        stringr::str_detect(codigo, "E03[0-1]") ~ "01_13_0_063",
        stringr::str_detect(codigo, "^E1[0-4]") ~ "01_13_0_064",
        stringr::str_detect(codigo, "E700") ~ "01_13_0_065",
        stringr::str_detect(codigo, "E730") ~ "01_13_0_066",
        stringr::str_detect(codigo, "^G4[0-1]") ~ "01_13_0_067",
        stringr::str_detect(codigo, "^Q90") ~ "01_13_0_068",
        stringr::str_detect(codigo, "N390") ~ "01_13_0_069",
        stringr::str_detect(codigo, "^I0[0-9]") ~ "01_13_0_070",
        stringr::str_detect(codigo, "^A0[0-9]") ~ "01_14_0_072",
        stringr::str_detect(codigo, "^A2[0-8]") ~ "01_14_0_073",
        stringr::str_detect(codigo, "^A9[0-9]") ~ "01_14_0_074",
        stringr::str_detect(codigo, "^A7[5-9]") ~ "01_14_0_075",
        stringr::str_detect(codigo, "^A82") ~ "01_14_0_076",
        stringr::str_detect(codigo, "^B5[0-9]|^B6[0-4]") ~ "01_14_0_077",
        stringr::str_detect(codigo, "^B6[5-9]|^B7|^B8[0-3]") ~ "01_14_0_078",
        stringr::str_detect(codigo, "^B99") ~ "01_14_0_079",
        stringr::str_detect(codigo, "^D5[0-3]") ~ "01_14_0_080",
        stringr::str_detect(codigo, "^E4[0-9]|^E5[0-9]|^E6[0-4]") ~ "01_14_0_081",
        stringr::str_detect(codigo, "^E86") ~ "01_14_0_082",
        stringr::str_detect(codigo, "^V") ~ "01_14_0_083",
        stringr::str_detect(codigo, "^X4[0-4]") ~ "01_14_0_084",
        stringr::str_detect(codigo, "^X4[5-9]") ~ "01_14_0_085",
        stringr::str_detect(codigo, "^R95") ~ "01_14_0_086",
        stringr::str_detect(codigo, "^W[0-1]") ~ "01_14_0_087",
        stringr::str_detect(codigo, "^X0[0-9]") ~ "01_14_0_088",
        stringr::str_detect(codigo, "^X3[0-9]") ~ "01_14_0_089",
        stringr::str_detect(codigo, "^W6[5-9]|^W7[0-4]") ~ "01_14_0_090",
        stringr::str_detect(codigo, "^W7[5-9]|^W8[0-4]") ~ "01_14_0_091",
        stringr::str_detect(codigo, "^W8[5-9]|^W9") ~ "01_14_0_092",
        stringr::str_detect(codigo, "^X8[5-9]|^X9|^Y0") ~ "01_14_0_093",
        stringr::str_detect(codigo, "^Y[1-2]|^Y3[0-4]") ~ "01_14_0_094",
        stringr::str_detect(codigo, "^W[2-4]") ~ "01_14_0_095",
        stringr::str_detect(codigo, "^Y6") ~ "01_14_0_096",
        stringr::str_detect(codigo, "^Y8[3-4]") ~ "01_14_0_097",
        stringr::str_detect(codigo, "^Y[4-5]") ~ "01_14_0_098",
        stringr::str_detect(codigo, "^R[0-8]|^R9[0-4,6-9]") ~ "99_99_0_100",
        stringr::str_detect(codigo, "^P95") ~ "99_99_0_101",
        stringr::str_detect(codigo, "P969") ~ "99_99_0_102",
        # Fallback: unclassifiable code.
        TRUE ~ "00_00_0_103"))
}
|
2ca9cea9824e4b7e6c418371bd168096e9d5d298
|
ee909cdba5c91d30cc03e12b2d2effacfde365b1
|
/clase_3/clase_3_ejercicio_1.R
|
287e1ef724fdc97bec73d792151f7a1b47558a2f
|
[
"MIT"
] |
permissive
|
ldgaribello/eia-esp-bd-fundamentos-analitica
|
eef3799d7f79ccc5b24f79cfdb044ee7e6b65f63
|
d77c2325525cf70f264ec716599e4240bc27dddf
|
refs/heads/master
| 2022-11-13T17:11:28.350511
| 2020-07-03T00:20:56
| 2020-07-03T00:20:56
| 271,153,115
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 676
|
r
|
clase_3_ejercicio_1.R
|
# Exercise script: vector generation, combination, subsetting and summary.
# NOTE(review): setwd() with an absolute user path makes the script
# non-portable; kept for compatibility with the original workflow.
setwd("~/Workspace/Study/Fundamentos Analitica/clase_3")
#### Load libraries ####
#### Load data ####
#### Data analitics ####
# E1: generate a column vector from a normal distribution,
# mean = 10, variance = 5.
x <- rnorm(50, 10, sqrt(5)) # third argument is the standard deviation
x
# E2: arithmetic sequence of 20 values starting at -5, step 0.5.
# Fix: spell out length.out instead of relying on partial matching of `length`.
y <- seq(from = -5, length.out = 20, by = 0.5)
y
# E3: combine both vectors into one 70-element vector.
# Fix: the original referenced undefined x1/x2; the vectors built above are x and y.
z <- c(x, y)
z
# E4: 70 uniform draws, bound column-wise to z.
w <- runif(70)
wz <- cbind(z, w)
wz
# E5: keep the rows whose uniform value lies in (.2, .4) or (.6, .8).
# Fix: the original indexed w (a plain vector) with w[,2] and dropped the
# row comma, which returned values instead of rows.
wz2 <- wz[(wz[, 2] > .2 & wz[, 2] < .4) | (wz[, 2] > .6 & wz[, 2] < .8), , drop = FALSE]
# E6: report mean and variance of the filtered first column.
# Fix: `w2` was a typo for wz2 (and "colimna" for "columna").
paste(
  "La media de la columna 1 es: ", mean(wz2[, 1]), " ",
  "Y la varianza es: ", var(wz2[, 1])
)
#### Results ####
|
6b7029a2d32890bd17c2805076a45146ec8906e3
|
9aafde089eb3d8bba05aec912e61fbd9fb84bd49
|
/codeml_files/newick_trees_processed/6159_0/rinput.R
|
353fe278c3ce93f3786676573dff8c108e35cb9b
|
[] |
no_license
|
DaniBoo/cyanobacteria_project
|
6a816bb0ccf285842b61bfd3612c176f5877a1fb
|
be08ff723284b0c38f9c758d3e250c664bbfbf3b
|
refs/heads/master
| 2021-01-25T05:28:00.686474
| 2013-03-23T15:09:39
| 2013-03-23T15:09:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 135
|
r
|
rinput.R
|
# Read a Newick tree, remove its root, and write the unrooted tree to disk.
library(ape)
rooted_tree <- read.tree("6159_0.txt")
unrooted_tree <- unroot(rooted_tree)
write.tree(unrooted_tree, file = "6159_0_unrooted.txt")
|
63c6763443b54e0a7c67f32448a0d9d533114c72
|
1d0465c228a1e8fd4e1c271fbde2262bf24ccb59
|
/Data Analysis in R. Part 1/3.2.1.r
|
b048e71cfc7d150e8decbdd80b4debb6ab37f792
|
[] |
no_license
|
framirov/R-course
|
536ab92111a2a54c0160f82fab960ed8047877a9
|
a01439fa0b1e530385c303bedf7f44af239a6683
|
refs/heads/main
| 2023-01-27T20:38:50.428334
| 2020-12-04T08:26:30
| 2020-12-04T08:26:30
| 312,372,536
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 176
|
r
|
3.2.1.r
|
#' Fill missing y values with linear-model predictions.
#'
#' Fits y ~ x_1 + x_2 on the rows where y is observed (na.exclude keeps the
#' fitted object aligned with the original rows) and adds a y_full column:
#' the observed y where available, the model prediction where y is NA.
#'
#' @param x A data frame with numeric columns y, x_1 and x_2.
#' @return The same data frame with an extra y_full column.
fill_na <- function(x){
  model <- lm(y ~ x_1 + x_2, x, na.action = 'na.exclude')
  predicted <- predict(model, x)
  missing_y <- is.na(x$y)
  x$y_full <- x$y
  x$y_full[missing_y] <- predicted[missing_y]
  return(x)
}
|
cd5768264db32fcc021e32c0ee9cee2d8ea6a267
|
68143d94982e05496e9d65e25a5bee81d41451ba
|
/Task 6.2.R
|
064896d488af6ab46eb4ebfd91c0a2e2addc7795
|
[
"MIT"
] |
permissive
|
jedisom/Yelp-projects
|
780669563659fce6a7ce052198a5e756bc5c8bb4
|
774698ae3871157175ddc0a30e967664514118cb
|
refs/heads/master
| 2021-01-21T13:57:12.632306
| 2016-05-20T10:11:20
| 2016-05-20T10:11:20
| 54,270,594
| 6
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,931
|
r
|
Task 6.2.R
|
#Coursera Data Mining Capstone - UIUC
#Task 6
#Jed Isom
#October 5, 2015
#
# Pipeline overview: load Yelp hygiene data, engineer features (F-score,
# per-class term frequencies, LDA topic posteriors, cuisine indicators),
# then train logistic regression and SVM classifiers (LiblineaR) to predict
# pass/fail hygiene inspections and write Kaggle-style submission files.
library("pacman")
pacman::p_load(jsonlite, tm, slam, topicmodels, LiblineaR)
# NOTE(review): rm(list=ls()) and setwd() in a script clobber the caller's
# session state; kept as-is because later relative paths depend on it.
rm(list=ls())
setwd("./Capstone/Raw Data")
#Hygiene.dat file loading: one restaurant's concatenated reviews per line.
filename <- "./Hygiene/hygiene.dat"
reviews <- readLines(filename)
#Hygiene.dat.labels file loading: 0/1 pass/fail; "NA" text becomes NA numeric
#for the unlabeled test portion of the data.
filename <- "./Hygiene/hygiene.dat.labels"
labels <- readLines(filename)
labels <- as.numeric(labels)
#Hygiene.dat.additional file loading: per-restaurant metadata.
filename <- "./Hygiene/hygiene.dat.additional"
X <- read.csv(filename, header=FALSE)
names(X) <- c("categories", "Zip", "review.count", "avg rating")
X[,"categories"] <- as.character(X[,"categories"])
X[,"review.count"] <- as.numeric(X[,"review.count"])
X[,"avg rating"] <- as.numeric(X[,"avg rating"])
#F-score combining review count and avg rating into a single feature.
#normalize the review counts (log of counts) and average ratings to be values between 0 and 1
log.counts <- log(X[, "review.count"])
norm.counts <- (log.counts-min(log.counts)+1)/(max(log.counts)-min(log.counts)+1) #add 1 to prevent 0's
norm.stars <- (X[,"avg rating"]-min(X[,"avg rating"]))/(max(X[,"avg rating"])-min(X[,"avg rating"]))
beta <- 3.0   # F-beta weight: beta > 1 favors the (normalized) star rating
X[,"F1"] <- (1+beta^2)*(norm.counts * norm.stars)/((beta^2 * norm.counts) + norm.stars)
#turn first column into lists of categories, not long characters/factors
X[,1] <- gsub("\\[", "", X[,1])
X[,1] <- gsub("\\]", "", X[,1])
X[,1] <- gsub("'", "", X[,1])
#read text reviews into a "corpora" and clean them up for analysis
reviews <- gsub("&nbsp", "", reviews)
reviews <- paste(reviews, " zaybxc") #add dummy term: helps if document is empty after scrubbing.
corp <- Corpus(VectorSource(reviews))
rm(reviews) #delete reviews to save RAM
corp <- tm_map(corp, removeNumbers)
corp <- tm_map(corp, content_transformer(tolower)) #lower case needs to be before stopwords
corp <- tm_map(corp, removeWords, rev.default(stopwords('english'))) #reverse order to get contractions
corp <- tm_map(corp, removeWords, "'s")
corp <- tm_map(corp, removePunctuation) #remove after stopwords because many contractions are stop words
corp <- tm_map(corp, stripWhitespace)
corp <- tm_map(corp, stemDocument)
dtm <- DocumentTermMatrix(corp)
#create label vectors to split the dtm by pass/fail (NA = unlabeled test rows,
#excluded from both masks)
Labels <- matrix(labels, ncol=1)
Labels1 <- Labels
Labels0 <- Labels
Labels1[is.na(Labels[,1]),1] <- 0
Labels0[is.na(Labels[,1]),1] <- 1
Labels1 <- as.vector((Labels1[,1]==1))
Labels0 <- as.vector((Labels0[,1]==0))
dtm0 <- dtm[Labels0, ]   # restaurants that passed inspection
dtm1 <- dtm[Labels1, ]   # restaurants that failed inspection
#calculate and sort comparative term frequencies for pass/fail reviews;
#terms whose relative frequency differs most between classes are kept as features
length0 <- sum(as.matrix(rollup(dtm0, 2, na.rm=TRUE, FUN = sum)))
length1 <- sum(as.matrix(rollup(dtm1, 2, na.rm=TRUE, FUN = sum)))
terms0 <- as.data.frame(t(as.matrix(rollup(dtm0, 1, na.rm=TRUE, FUN = sum))))
terms1 <- as.data.frame(t(as.matrix(rollup(dtm1, 1, na.rm=TRUE, FUN = sum))))
terms0 <- terms0/length0
terms1 <- terms1/length1
termsDelta <- as.data.frame(terms0-terms1)
names(termsDelta) <- "Delta"
termsDelta[,"terms"] <- row.names(termsDelta)
termsDelta <- termsDelta[termsDelta[,1]!=0,]
termsDelta[,1] <- abs(termsDelta[,1])
termsDelta <- termsDelta[order(-termsDelta$Delta),]
#rm(corp) #delete corpus to save RAM
#Get document lengths based on terms found
X[,"doc.lengths"] <- as.data.frame(as.matrix(rollup(dtm, 2, na.rm=TRUE, FUN = sum)))
term.list <- termsDelta$terms
#term.list <- c(findFreqTerms(dtm, lowfreq=10), "zaybxc")
#restrict all matrices to the discriminative vocabulary
dtm <- dtm[ ,term.list]
dtm0 <- dtm0[ , term.list]
dtm1 <- dtm1[ , term.list]
#create topic models to be turned into X variables (0=passed inspection, 1=failed inspection)
#figure out roughly the best # of topics to create based on knee of perplexity curve
#This takes a long time, but I only ran this code once get the number of topics
for (i in 2:20){
  topics0 <- LDA(dtm0, i, method = "Gibbs") #, seedwords = suspect.terms)
  print(paste(i, ": ", perplexity(topics0, dtm0, estimate_theta=FALSE), sep=""))
}
for (i in 6:18){
  topics1 <- LDA(dtm1, i, method = "Gibbs") #, seedwords = suspect.terms)
  print(paste(i, ": ", perplexity(topics1, dtm1, estimate_theta=FALSE), sep=""))
}
#topic counts chosen from the perplexity sweeps above
zero.topic.count <- 11
one.topic.count <- 13
topics0 <- LDA(dtm0, zero.topic.count, method = "Gibbs") #, seedwords = suspect.terms)
topics1 <- LDA(dtm1, one.topic.count, method = "Gibbs") #, seedwords = good.terms)
#Use topic models from p/f restaurants to create posterior probabilities for unknown p/f rest's
num.docs <- dim(X)[1]
topic0names <- paste("pass_topic_",(1:zero.topic.count), sep="")
topic1names <- paste("fail_topic_",(1:one.topic.count), sep="")
X[, topic0names] <- 0
X[, topic1names] <- 0
#per-document posterior inference; slow row-by-row loop over ~13k documents
for (i in 1:num.docs){
  temp.dtm <- dtm[i, ]
  #print (posterior(topics0, temp.dtm)$topics)
  X[i, topic0names] <- posterior(topics0, temp.dtm)$topics
  X[i, topic1names] <- posterior(topics1, temp.dtm)$topics
  print (paste("row ", i, " of 13299", sep=""))
}
#write to file just in case
write.csv(X,"X.csv")
#turn categories into sparse boolean vectors/columns for analysis in the future
cuisine.list <- as.list(unique(unlist(strsplit(X[,1], split=", "))))
cuisine.list[[match("Restaurants", cuisine.list)]] <- NULL
cuisine.list <- unlist(cuisine.list)
X[,cuisine.list] <- 0 #add empty columns that will be logical if cuisine or not
nrows <- dim(X)[1]
for (i in 1:nrows){
  tempList <- strsplit(X[i,1], split=", ")
  for (j in tempList){
    # j is the character vector of this row's cuisines; assigns all at once
    X[i,j] <- 1
  }
}
#figure out which cuisines might be used as predictors for pass/fail
#rows 1:546 are the labeled training portion — TODO confirm against data docs
tRows <- 1:546
tLabels <- as.matrix(labels,ncol=1)[tRows,1]
cuisine.fail <- matrix(tLabels, ncol=1)
cuisine.fail <- cbind(cuisine.fail, X[1:546, cuisine.list])
names(cuisine.fail) <- c("pass_fail", cuisine.list)
cuisine.pass <- cuisine.fail[cuisine.fail[,1]==0, ]
cuisine.fail <- cuisine.fail[cuisine.fail[,1]==1, ]
cuisine.diff <- colSums(cuisine.pass[, cuisine.list])-colSums(cuisine.fail[, cuisine.list])
pred.cuisines <- names(cuisine.diff[cuisine.diff!=0])
#Add in variables for term count/doc.length for each document (normalize between 0 and 1)
#X[ ,term.list] <- as.data.frame(as.matrix(dtm)/X[,"doc.lengths"])
#X[ ,term.list] <- (X[ ,term.list]-min(X[ ,term.list]))/
#  (max(X[ ,term.list])-min(X[ ,term.list]))
#remove dtm variable to save RAM
#rm(dtm)
###LOGISTIC REGRESSION
#train logistic regression model; sweep the cost constant with 10-fold CV
Xs <- X[,c("review.count", "avg rating", "F1", topic0names, topic1names, pred.cuisines)]
c <- heuristicC(as.matrix(Xs[tRows, ]))
regConstants <- c(.0000001, .0000003, .000001, .000003, .00001, .00003, .0001, .0003, .001, .003,
                  .01, .03, .1, .3, 1, 3, 10, 30, 100, 300, 1000, 3000, 10000, 30000)
set.seed(1)
for (i in regConstants){
  temp <- LiblineaR(data=as.matrix(Xs[tRows, ]), target=tLabels, cost=i, cross=10)
  print(paste(i, cat("\t"), temp))
}
logRegModel <- LiblineaR(data = as.matrix(Xs[tRows, ]), target=tLabels, cost=c)
#get predictions for the rest of the data (the unlabeled rows)
test.rows <- 547:dim(X)[1]
logRegPredict <- as.numeric(as.character(unlist(predict(object=logRegModel,
                                                        newx=as.matrix(Xs[test.rows, ])))))
#create submission file
submission <- matrix(c("jedi623", logRegPredict),ncol=1)
write(submission, file="logRegSubmission6.txt")
#Add the logistic regression model output to the X parameters used to tune future models
X[,"logReg"] <- as.numeric(as.character(unlist(predict(object=logRegModel, newx=as.matrix(Xs)))))
###sUPPORT VECTOR MACHINE
#train svm model (LiblineaR type=1 = L2-regularized L2-loss SVC)
Xs <- X[,c("review.count", "avg rating", "F1", "doc.lengths", topic0names, topic1names, "logReg")]
c <- heuristicC(as.matrix(Xs[tRows, ]))
regConstants <- c(.0000001, .0000003, .000001, .000003, .00001, .00003, .0001, .0003, .001, .003,
                  .01, .03, .1, .3, 1, 3, 10, 30, 100, 300)
set.seed(1)
for (i in regConstants){
  #setting type =1 changes model from logistic regression (default=0) to SVM (1)
  temp <- LiblineaR(data=as.matrix(Xs[tRows, ]), target=tLabels, cost=i, cross=10, type=1)
  print(paste(i, cat("\t"), temp))
}
SVMModel <- LiblineaR(data=as.matrix(Xs[tRows, ]), target=tLabels, cost=.003, type=1)
#get predictions for the rest of the data
SVMPredict <- as.numeric(as.character(unlist(predict(object=SVMModel, newx=as.matrix(Xs[test.rows,])))))
#create submission file
submission <- matrix(c("jedi623", SVMPredict),ncol=1)
write(submission, file="SVMSubmission2.txt")
###
#figure out which cuisines might be used as predictors for pass/fail
#NOTE(review): this block duplicates the cuisine-difference computation above
for the same tRows — likely leftover exploratory code.
cuisine.fail <- matrix(tLabels, ncol=1)
cuisine.fail <- cbind(cuisine.fail, X[1:546, cuisine.list])
names(cuisine.fail) <- c("pass_fail", cuisine.list)
cuisine.pass <- cuisine.fail[cuisine.fail[,1]==0, ]
cuisine.fail <- cuisine.fail[cuisine.fail[,1]==1, ]
cuisine.diff <- colSums(cuisine.pass[, cuisine.list])-colSums(cuisine.fail[, cuisine.list])
pred.cuisines <- names(cuisine.diff[cuisine.diff!=0])
|
3143eea0876144e1c91c35e95a04e02a96e99533
|
0b91e58f2a016009abff9cdabd782a278962ab84
|
/MLwithR/Practice/char3_KNN_R.R
|
46f98bcc7317d19f6c8f5803f537c1bba350945d
|
[] |
no_license
|
staAndZhh/R_WorkSpace
|
4ac7bd6da9948ec7ef1265f7913a8a62bfc22c2f
|
ea7b246c9549026530308702bb5083d9ab77932b
|
refs/heads/master
| 2021-06-06T17:26:59.993751
| 2018-11-06T11:35:50
| 2018-11-06T11:35:50
| 94,954,557
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 448
|
r
|
char3_KNN_R.R
|
# Min-max normalization: rescale a numeric vector onto [0, 1].
normalize <- function(x){
  span <- range(x)
  (x - span[1]) / (span[2] - span[1])
}
# kNN practice script (Machine Learning with R, ch. 3).
# NOTE(review): `data` is never defined in this file — as written this line
# resolves `data` to the base function and errors; a data frame must be
# loaded first. The result is also not assigned, so it is discarded.
as.data.frame(lapply(data,normalize))
# z-score standardization (alternative to min-max)
data_z <- scale(data)
# Placeholders: train/test split and label vectors to be filled in.
# data_train <-
# data_test <-
# data_train_labels <-
# data_test_labels <-
# NOTE(review): install.packages() inside a script re-installs on every run;
# normally done once interactively.
install.packages('class')
install.packages('gmodels')
library(gmodels)
library(class)
# k = 21: odd k near sqrt(n) to avoid ties in majority voting
data_test_pred <- knn(data_train,data_test,data_train_labels,k=21)
# Cross-tabulate predicted vs. actual labels (no chi-square contribution).
CrossTable(data_test_labels,data_test_pred,prop.chisq = FALSE)
|
b1c8fbc62b654a6ea0395ac82278de4db1fb73c8
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/dafs/examples/pvalue.Rd.R
|
796ced78354cf70d6fde431aa26fd3bd4e82464b
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 159
|
r
|
pvalue.Rd.R
|
# Auto-extracted example for dafs::pvalue, which formats a p-value for LaTeX.
library(dafs)
### Name: pvalue
### Title: Formats an P-value figure for LaTeX
### Aliases: pvalue
### Keywords: Book
### ** Examples
# Format an example p-value.
p = 0.04
pvalue(p)
|
76e47356a44cdc5dfd1deccde4a919b4dbce963b
|
09fb2a455921488989b6f311a10231cb9f30665b
|
/Aula 4 - 06 Vetores.R
|
92c1d8842278113fa1aca273da689abecc77d862
|
[] |
no_license
|
Henrique-RS/metedosHeuristicos
|
e872ee1a4f204b5d80cc570bf7f54da1d5ec9ba0
|
753ad6eb6cd50e9d76cf8ff5e392f2e54c10a48a
|
refs/heads/master
| 2023-08-11T02:14:11.164517
| 2021-10-01T23:10:18
| 2021-10-01T23:10:18
| 398,546,159
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 617
|
r
|
Aula 4 - 06 Vetores.R
|
# Character vector
vetor_caracter <- c("Unisinos", "Engenharia", "Produção")
vetor_caracter
# Numeric (double) vector
vetor_numerico <- c(1.90, 45.3, 300.5)
vetor_numerico
# Sequence vector built with seq_len()
vetor1 <- seq_len(10)
vetor1
# The same sequence via rep()
vetor2 <- rep(1:10, times = 1)
vetor2
# Indexing vectors (note: index 6 is out of range and yields NA)
a <- rep(1:5, times = 1)
a
a[1]
a[6]
b <- c("Unisinos", "Engenharia", "Produção")
b
b[1]
b[2]
b[3]
# Combining vectors: mixing numeric and character coerces to character
v1 <- c(2, 4, 6)
v2 <- c('aa', 'bb', 'cc', 'dd')
v3 <- c(v1, v2)
v3
# Element-wise vector arithmetic
x <- c(1, 2, 5, 7)
y <- c(3, 6, 8, 9)
x * 3
x + y
x - y
x * y
x / y
x ^ y
sqrt(x)
|
e16c9ba37d46cbcf19259488eb2f245b5b91d4b3
|
4211e421263bbcc5c59195aba02ada15d1bf75a1
|
/Dependencias/loadDependencies.R
|
30c55d0f214902c9871a7d9a6ef78d8233ab2a84
|
[] |
no_license
|
hallucigeniak/reportesLaboratorios
|
f66e70a781642d7ab75bfebba65990978c4dfa42
|
4a79bab7c1f82e94ed49d3dcc018e4004e3ede14
|
refs/heads/master
| 2020-03-21T08:13:54.861160
| 2019-11-06T17:12:26
| 2019-11-06T17:12:26
| 138,325,499
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,979
|
r
|
loadDependencies.R
|
####################################
###--- MODULO DE DEPENDENCIAS ---###
####################################
#
#--- Declarar las librerias de R necesarias
dependencias<-c("shiny",
"shinydashboard",
"shinycssloaders",
"shinyjs",
"shinyBS",
"odbc",
"sqldf",
"dplyr",
"tidyr",
"xlsx",
"png",
"ggplot2",
"lubridate",
"scales",
"DT")
#--- Verificar que todas las librerias de R necesarias esten instaladas
if (!all(dependencias %in% installed.packages())){
faltantes<-which(!(dependencias %in% installed.packages()))
faltantes<-dependencias[faltantes]
lapply(faltantes, install.packages, character.only=TRUE)
}
#--- Verficar que las librerías de R necesarias esten cargadas
if(!all(dependencias %in% loadedNamespaces())){
faltantes<-which(!(dependencias %in% loadedNamespaces()))
faltantes<-dependencias[faltantes]
lapply(dependencias, require, character.only=TRUE)
}
#
#--- Cargar dependencias definidas por el usuario
#- Funciones
source("Dependencias/FuncionesGenerarQueries.R")
if (!exists("con")){
source("Dependencias/ConnectToDB.R")
}
source("Dependencias/FuncionesGenerarGraficas.R")
source("Dependencias/FuncionesProcesarTablas.R")
#
#- Datos
tablaTodasProfesiones<-read.delim("Dependencias/Profesiones.tsv", sep="\t") #Tabla1 para mapear profesiones
tablaProfesionesAgrupadas<-read.delim("Dependencias/GruposProfesiones.tsv", sep="\t") #Tabla2 para mapear profesiones
tablaEspecialidades<-read.delim("Dependencias/Especialidades.tsv", sep="\t") #Tabla de especialidades mapeadas
catalogoLabs<-read.delim("Dependencias/MapeoLaboratorios.csv", header = T, sep="\t")
listaPaises<-read.delim("Dependencias/ListaPaises.csv", sep = "\t", header = T) #Lista de paises
listaLogos<-read.delim("Dependencias/Logos.csv", header = T, sep="\t")
|
6d0070a1f6a112cac2b22e4df6299e2b3728e304
|
103cefcd0a90175d953b11b1a13a6c76adb28aef
|
/analyses/bb_analysis/maketables.forsupp/mod_table.R
|
a0056857f9db8d4f6ac2fb8e39bfe7bfa290767e
|
[] |
no_license
|
lizzieinvancouver/ospree
|
8ab1732e1245762194db383cdea79be331bbe310
|
9622af29475e7bfaa1b5f6697dcd86e0153a0a30
|
refs/heads/master
| 2023-08-20T09:09:19.079970
| 2023-08-17T10:33:50
| 2023-08-17T10:33:50
| 44,701,634
| 4
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,594
|
r
|
mod_table.R
|
#This file makes a table of model summaries with 95 % credible intervals for the ospree budburst supplement
#would be good to add n_studies
#
# For each of six saved Stan fits (utah/chill-portion units x z-scored or not,
# plus all-species-with-crops variants) this script extracts mean, 50% and 95%
# interval columns for the first 9 parameters, appends the species count, and
# assembles two supplement tables (z-scored and non-z-scored) written to CSV.
#utah units, nonz
load("../../analyses/bb_analysis/stan/output/m2lni_spcompexprampfputah_nonz.Rda") # m2l.ni
fit.nonz <- summary(m2l.ni)$summary
#summary(fit.nonz)# min n_ef: 1104
# Columns: mean, 25%, 75%, 2.5%, 97.5% (positions 1,5,7,4,8 of the Stan summary)
nonztab<-as.data.frame(round(cbind(fit.nonz[1:9,1],fit.nonz[1:9,5],fit.nonz[1:9,7],fit.nonz[1:9,4],fit.nonz[1:9,8]),digits=2))
#add n_sp (species-level intercept count minus the 2 non-species a_sp rows)
nonztab<-rbind(nonztab,c(length(fit.nonz[grep("a_sp", rownames(fit.nonz)),1])-2,"","","","",""))
rownames(nonztab)[10]<-"n_sp"
#utah units, z
load("../../analyses/bb_analysis/stan/output/m2lni_spcompexprampfputah_z.Rda") # m2l.ni
fit.z <- summary(m2l.ni)$summary
#summary(fit.z)# min n_ef: 1198
ztab<-as.data.frame(round(cbind(fit.z[1:9,1],fit.z[1:9,5],fit.z[1:9,7],fit.z[1:9,4],fit.z[1:9,8]),digits=2))
ztab<-rbind(ztab,c(length(fit.z[grep("a_sp", rownames(fit.z)),1])-2,"","","","",""))
rownames(ztab)[10]<-"n_sp"
#cp units, nonz
load("../../analyses/bb_analysis/stan/output/m2lni_spcompexprampfpcp_nonz.Rda") # m2l.ni
fitcp.nonz <- summary(m2l.ni)$summary
#summary(fitcp.nonz)# min n_ef: 1228
nonzcptab<-as.data.frame(round(cbind(fitcp.nonz[1:9,1],fitcp.nonz[1:9,5],fitcp.nonz[1:9,7],fitcp.nonz[1:9,4],fitcp.nonz[1:9,8]),digits=2))
nonzcptab<-rbind(nonzcptab,c(length(fitcp.nonz[grep("a_sp", rownames(fitcp.nonz)),1])-2,"","","","",""))
rownames(nonzcptab)[10]<-"n_sp"
#cp units, z
load("../../analyses/bb_analysis/stan/output/m2lni_spcompexprampfpcp_z.Rda") # m2l.ni
fitcp.z <- summary(m2l.ni)$summary
#summary(fitcp.z)# min n_ef: 1192
zcptab<-as.data.frame(round(cbind(fitcp.z[1:9,1],fitcp.z[1:9,5],fitcp.z[1:9,7],fitcp.z[1:9,4],fitcp.z[1:9,8]),digits=2))
zcptab<-rbind(zcptab,c(length(fitcp.z[grep("a_sp", rownames(fitcp.z)),1])-2,"","","","",""))
rownames(zcptab)[10]<-"n_sp"
#add model with crops and all species and all treatments included, nonz
load("../../analyses/bb_analysis/stan/output/m2lni_allsppwcrop_utah_nonz.Rda") # m2l.ni
fitallsp.nonz <- summary(m2l.ni)$summary
#summary(fitallsp.nonz)# min n_ef: 441.8
nonzallsptab<-as.data.frame(round(cbind(fitallsp.nonz[1:9,1],fitallsp.nonz[1:9,5],fitallsp.nonz[1:9,7],fitallsp.nonz[1:9,4],fitallsp.nonz[1:9,8]),digits=2))
nonzallsptab<-rbind(nonzallsptab,c(length(fitallsp.nonz[grep("a_sp", rownames(fitallsp.nonz)),1])-2,"","","","",""))
rownames(nonzallsptab)[10]<-"n_sp"
#add model with crops and all species and all treatments included, z
load("../../analyses/bb_analysis/stan/output/m2lni_allsppwcrop_utah_z.Rda") # m2l.ni
fitallsp.z <- summary(m2l.ni)$summary
#summary(fitallsp.z)# min n_ef: 432.1
zallsptab<-as.data.frame(round(cbind(fitallsp.z[1:9,1],fitallsp.z[1:9,5],fitallsp.z[1:9,7],fitallsp.z[1:9,4],fitallsp.z[1:9,8]),digits=2))
zallsptab<-rbind(zallsptab,c(length(fitallsp.z[grep("a_sp", rownames(fitallsp.z)),1])-2,"","","","",""))
rownames(zallsptab)[10]<-"n_sp"
#add column names to all sub tables
# colnames(ztab)<-c("utah.mean","utah.25%", "utah.75%", "utah.2.5%","utah.97.5%")
# colnames(zcptab)<-c("cp.mean","cp.25%", "cp.75%","cp.2.5%","cp.97.5%")
# colnames(zallsptab)<-c("allsp.mean","allsp.25%", "allsp.75%","allsp.2.5%","allsp.97.5%")
# colnames(nonztab)<-c("utah.mean","utah.25%", "utah.75%", "utah.2.5%","utah.97.5%")
# colnames(nonzcptab)<-c("cp.mean","cp.25%", "cp.75%","cp.2.5%","cp.97.5%")
# colnames(nonzallsptab)<-c("allsp.mean","allsp.25%", "allsp.75%","allsp.2.5%","allsp.97.5%")
colnames(ztab)<-colnames(zcptab)<-colnames(zallsptab)<-
colnames(nonztab)<-colnames(nonzcptab)<-colnames(nonzallsptab)<-
c("mean","25%", "75%","2.5%","97.5%")
#bind the three unit variants side by side into the supplement tables
zmodtable<-cbind(ztab,zcptab,zallsptab)
row.names(zmodtable)<-c(row.names(fitallsp.z)[1:9],"n_sp")
#next step is to get these in:
#row.names(fitallsp.z)[1]<-c("$\\mu_\\alpha$")
nonzmodtable<-cbind(nonztab,nonzcptab,nonzallsptab)
#LaTeX-style row labels for the supplement rendering
row.names(zmodtable)<-row.names(nonzmodtable)<-c("$\\mu_{\\alpha}$","$\\mu_{forcing}$","$\\mu_{photoperiod}$",
"$\\mu_{chilling}$","$\\sigma_{\\alpha}$", "$\\sigma_{forcing}$"
, "$\\sigma_{photoperiod}$","$\\sigma_{chilling}$","$\\sigma_{y}$","$N_{sp}$")
# NOTE(review): row.names=FALSE discards the LaTeX row labels assigned just
# above — confirm whether the labels are meant to appear in the CSVs.
write.csv(zmodtable,"../../analyses/output/supptables/zmodetable.csv", row.names = FALSE)
write.csv(nonzmodtable,"../../analyses/output/supptables/nonzmodetable.csv", row.names = FALSE)
#zmodtable<-rbind(c("","Utah.units","","","","","Chill.portions","","","","","All.species","","","",""),zmodtable)
#nonzmodtable<-rbind(c("","Utah.units","","","","","Chill.portions","","","","","All.species","","","",""),nonzmodtable)
|
9bffb0002cf49be9dd3d8c65938535c5e868a60e
|
15105f85792b860f76da45e0692d4865fab75402
|
/man/screening-package.Rd
|
50772980242de2fd4557b4b9172c091e05201084
|
[] |
no_license
|
bowerth/screening
|
9ced4bed9502e354b09a126f2871d44d7259a2ae
|
083ab104816dc2cbfee1344737bf1e9f0aa33f67
|
refs/heads/master
| 2020-07-03T12:33:16.526020
| 2014-07-22T23:03:37
| 2014-07-22T23:03:37
| 11,631,729
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 531
|
rd
|
screening-package.Rd
|
\docType{package}
\name{screening-package}
\alias{screening-package}
\title{The screening Package}
\description{
Data Screening.
}
\details{
\tabular{ll}{ Package: \tab screening\cr Type: \tab
Package\cr Version: \tab 0.1\cr Date: \tab 2014-03-20\cr
Depends: \tab R (>= 2.10.0)\cr Encoding: \tab UTF-8\cr
License: \tab GPL (>= 3)\cr LazyLoad: \tab yes\cr URL:
\tab http://www.oecd.org/sti/stan\cr }
Provides functions for Structural Analysis
}
\author{
STAN OECD \email{stan.contact@oecd.org}
}
\keyword{package}
|
bf4f9c54ad4838cdc3c6498d842206e74c9e3b5d
|
6c7e1bead59eb9c4635a58f05eac19ece401ed69
|
/cachematrix.R
|
25a45949b022f954bb4ad02ebd4092bf99852e41
|
[] |
no_license
|
sabthami30/ProgrammingAssignment2
|
2da3ac9085f7e7eba49c670294a65510fec116c8
|
5217867af077f4313c8dc6a00eda50d9bc06cc6f
|
refs/heads/master
| 2021-04-30T17:01:45.866136
| 2017-01-27T10:51:42
| 2017-01-27T10:51:42
| 80,171,528
| 0
| 0
| null | 2017-01-27T01:10:54
| 2017-01-27T01:10:54
| null |
UTF-8
|
R
| false
| false
| 1,214
|
r
|
cachematrix.R
|
## Matrix inversion is usually a costly computation; the pair of functions
## below caches the inverse of a matrix so it is only computed once.
## Create a special "matrix" object that can cache its inverse.
##
## Args:
##   x: a matrix (defaults to an empty 1x1 NA matrix).
##
## Returns a list of four accessor functions:
##   set(y)        -- replace the stored matrix and invalidate the cache
##   get()         -- return the stored matrix
##   setinverse(v) -- store a computed inverse in the cache
##   getinverse()  -- return the cached inverse, or NULL if not yet computed
makeCacheMatrix <- function(x = matrix()) {
  inverse <- NULL ## cached inverse; NULL until computed
  set <- function(y) {
    x <<- y
    ## a new matrix invalidates any previously cached inverse
    inverse <<- NULL
  }
  get <- function() x
  ## BUGFIX: original assigned `inverse <<- inverse` (a no-op), discarding
  ## the value passed in; it must store the argument.
  setinverse <- function(solve)
    inverse <<- solve
  ## BUGFIX: original returned undefined `inv`; return the cached `inverse`.
  getinverse <- function() inverse
  list(set = set, get = get,
       setinverse = setinverse,
       getinverse = getinverse)
}
## Compute the inverse of the special "matrix" returned by makeCacheMatrix.
## If the inverse has already been calculated (and the matrix has not
## changed), the cached inverse is returned and a message is printed.
##
## Args:
##   x:   an object created by makeCacheMatrix (assumed invertible).
##   ...: further arguments passed on to solve().
##
## Returns a matrix that is the inverse of 'x'.
cacheSolve <- function(x, ...) {
  ## fast path: reuse a previously computed inverse
  inverse <- x$getinverse()
  if (!is.null(inverse)) {
    message("getting Matrix in cache")
    ## BUGFIX: original had `return inv)` -- a syntax error referencing an
    ## undefined variable; return the cached value.
    return(inverse)
  }
  mat <- x$get()
  inverse <- solve(mat, ...)
  x$setinverse(inverse)  # cache for subsequent calls
  inverse
}
|
268a99a87a79f5d90cf76f435a44c2b7d9348772
|
3b2242a9eaa35ddbb757300fa1f54c49ce4c5dab
|
/ui.R
|
23669cabb7f9b3517127cba7d00c0f77a2e6c0f7
|
[
"MIT",
"LicenseRef-scancode-public-domain"
] |
permissive
|
wikimedia-research/WhereInTheWorldIsWikipedia
|
0a5915f923ffde99b649801182b83e53f7381bd9
|
53fd54374138acf1b6cebde525e3dcf7601a94a5
|
refs/heads/master
| 2020-06-04T18:28:43.589272
| 2015-03-05T14:29:37
| 2015-03-05T14:29:37
| 31,289,646
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,408
|
r
|
ui.R
|
# Shiny UI: "Where in the world is Wikipedia?" -- explorer for the geographic
# distribution of pageview traffic to Wikimedia projects.
library(shiny)
library(shinythemes)
# Per-project, per-country pageview proportions; loaded once at app start.
# NOTE(review): path is relative to the app directory -- confirm deployment layout.
country_data <- read.delim("./data/language_pageviews_per_country.tsv", as.is = TRUE, header = TRUE)
shinyUI(fluidPage(
titlePanel(title = "", windowTitle = "Where in the world is Wikipedia?"),
h2("Where in the world is Wikipedia?"),
h4("Explore how traffic to Wikimedia projects is distributed around the globe."),
sidebarLayout(
# Sidebar: project picker, download buttons, and explanatory text.
sidebarPanel(
selectInput("project",
"Project:",
choices = unique(country_data$project),
selected = "en.wikipedia.org"),
downloadButton("downloadCountrySubset", "Download this subset"),
h2("About this data"),
p("This dataset contains the proportion of traffic to each public Wikimedia project, from each known country, with some caveats."),
h3("Details"),
p("Wikimedia properties receive 125,000 requests every second,
for myriad projects and from myriad countries. Too little of it is
made available to third-party researchers, due to an understandable and
laudable desire to avoid compromising the privacy of our users.
So instead, we analyse it ourselves."),
p("Part of the analysis we perform is high-level geolocation:
investigating the idea that where our traffic comes from has
implications for systemic bias and reach. This is /also/ work that third-parties
do really well. We've decided to release a high-level dataset of
geodata, to assist these researchers in their work. This tool
represents a simple attempt to visualise it and make it explorable."),
h3("Data preparation"),
HTML("<p>This dataset represents an aggregate of 1:1000 sampled pageviews from the entirety of 2014. The pageviews definition applied
was the Foundation's
<a href = 'https://github.com/wikimedia/analytics-refinery-source/blob/master/refinery-core/src/main/java/org/wikimedia/analytics/refinery/core/Pageview.java'>
new pageviews definition</a>; additionally, spiders and similar automata were filtered out with Tobie's <a href = 'http://www.uaparser.org/'>ua-parser</a>.
Geolocation was then performed using MaxMind's <a href = 'http://dev.maxmind.com/geoip/'> geolocation products</a>.</p>"),
p("There are no privacy implications that we could identify; The data comes from 1:1000 sampled logs, is proportionate rather than raw, and aggregates any nations with <1% of a project's pageviews
under 'Other'."),
h3("Reusing this data"),
HTML("The data is released into the public domain under the
<a href = 'https://creativecommons.org/publicdomain/zero/1.0/'>CC-0 public domain dedication</a>, and can be freely reused
by all and sundry. Iff you decide you want to credit it to people, though, the appropriate citation is:
<br/><br/>
<blockquote>Keyes, Oliver (2015) <em><a href = 'http://dx.doi.org/10.6084/m9.figshare.1317408'>Geographic Distribution of Wikimedia Traffic</a></em></blockquote>
"),
downloadButton("downloadAll", "Download all data")
),
# Show a plot of the generated distribution
mainPanel(
plotOutput("country_distribution"),
br(),br(),br(),br(),br(),br(),br(),br(),br(),
h2(textOutput("project_output")),
dataTableOutput("table")
)
), theme = shinytheme("cosmo")
))
|
ff8630b1c2eaa6fa0ff3a20851cdc8417602378d
|
f60a70640a026dfed8d19651ffe866cef933fce2
|
/scripts/Clustering/mclust_example.R
|
06a1177d145b7faafb108c9e736c7f80ca8668af
|
[] |
no_license
|
cdbarko/R
|
c34aca130615e33b363a093eadac485f95fea308
|
d79613a491148209316562e02c538d49b63a5a59
|
refs/heads/master
| 2021-05-09T20:23:30.989657
| 2018-01-24T07:04:06
| 2018-01-24T07:04:06
| 118,687,688
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,036
|
r
|
mclust_example.R
|
# Example: model-based clustering with mclust, in two parts --
# (1) cluster a random 3-column data frame and write assignments to CSV,
# (2) fit a 1-d Gaussian mixture to a simulated trimodal sample.
library(mclust)
# create some data for example purposes -- you have your read.csv(...) instead.
myData <- data.frame(x=runif(100),y=runif(100),z=runif(100))
# get parameters for most optimal model
myMclust <- Mclust(myData)
# if you wanted to do your summary like before:
mySummary <- summary( myMclust$BIC, data=myData )
# add a column in myData CLUST with the cluster.
myData$CLUST <- myMclust$classification
# NOTE(review): myMclust$z is an n-by-G matrix of membership probabilities;
# assigning it as a single "column" stores a matrix column -- confirm this is
# the intended CSV layout (one probability column per cluster).
myData$PROB <- myMclust$z
# now to write it out:
write.csv(myData[,c("CLUST","PROB","x","y","z")], # reorder columns to put CLUST first
file="out.csv", # output filename
row.names=FALSE, # don't save the row numbers
quote=FALSE) # don't surround column names in ""
# Example data
set.seed(101)
data = c(rnorm(100, mean = 10), rnorm(30, mean = 20), rnorm(50, mean = 15))
hist(data)
# Run Mclust
mixmdl = Mclust(data)
summary(mixmdl)
# Show means of fitted gaussians
print( mixmdl$parameters$mean )
|
9230e252720cb6ef2eb4e69f0219a4cb172bac1f
|
5a51f0ce45b7f2f8f221d71f467950fb94645913
|
/R_analysis/asis06_excess_deaths.R
|
6ea2bd97395ef9aac3845d90a026c0f076b1c837
|
[] |
no_license
|
The-Strategy-Unit/708_effect_of_covid_on_pop_health
|
d2818815c2c2f8028bba5c30ce74e555cda2c68d
|
dc0084507e205a8403061d2dd552f2d2713e5ad2
|
refs/heads/master
| 2023-07-12T12:31:32.225609
| 2021-07-09T13:27:52
| 2021-07-09T13:27:52
| 375,490,786
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,834
|
r
|
asis06_excess_deaths.R
|
# Estimate excess deaths from Covid
# What effect will Covid-19 have on key PH measures? (708)
# author Paul Seamer
# last edit "2021-03-04 09:45:40 GMT"
Sys.time()
# README ----------------------------------------------------------------------
# under-1s omitted from all models because of missing weekly exposure for years after most recent mye
# include weeks with public holidays in models
# in this script --------------------------------------------------------------
# 1 read
# 2 simple counterfactuals
# 3 build models
# 4 model predictions
# 5 summarise
# 6 save
# 1 read ----------------------------------------------------------------------
# NOTE(review): `.datDir`, `str_c` (stringr), and `recentWk`/`waves` used below
# come from an upstream setup script -- confirm it is sourced before this one.
wkDthsAgeGrp <- readRDS(str_c(.datDir, "dat05_age_grp_weekly_deaths_2010_to_2021.RDS"))
popExpWk <- readRDS(str_c(.datDir, "asis03_weekly_exposure_2003_to_2021.RDS"))
# 2 simple counterfactuals ----------------------------------------------------
# Three baseline ("expected deaths") series to compare against observed deaths.
# (1) deaths from 2019 - last normal year before pandemic
expDths2019 <- expand_grid(
# there is a week 53 in 2020
isoYr = c(2020:2021), isoWk = c(1:53)
) %>%
# but no week 53 in 2021
filter(!(isoYr == 2021 & isoWk == 53)) %>%
left_join(
wkDthsAgeGrp %>%
filter(isoYr == 2019, ageGrp != "under1") %>%
select(isoWk, gender:dths)
, by = "isoWk"
) %>%
filter(!(isoYr == 2020 & isoWk == 53)) %>%
# use week 1 2020 as comparator for week 53 2020 (as per CMI)
bind_rows(
wkDthsAgeGrp %>%
filter(isoYr == 2020, isoWk == 1, ageGrp != "under1") %>%
select(isoYr:isoWk, gender:dths) %>%
mutate(isoWk = 53)
) %>%
mutate(isoYrWk = str_c(isoYr, "-W", formatC(isoWk, width = 2, format = "d", flag = "0"))) %>%
# rm future weeks
filter(!(isoYr == 2021 & isoWk > recentWk))
# (2) average of deaths over previous 5 years
expDthsMn5 <- wkDthsAgeGrp %>%
# there is a week 53 in 2015
filter(isoYr %in% c(2015:2019), ageGrp != "under1") %>%
group_by(isoWk, isoYr, isoYrWk) %>%
summarise(dths = sum(dths)) %>%
ungroup() %>%
select(isoWk, dths) %>%
group_by(isoWk) %>%
summarise(dths = mean(dths)) %>%
ungroup() %>%
left_join(
expand_grid(
# there is a week 53 in 2020
isoYr = c(2020:2021), isoWk = c(1:53)
) %>%
# but no week 53 in 2021
filter(!(isoYr == 2021 & isoWk == 53))
, by = "isoWk"
) %>%
mutate(isoYrWk = str_c(isoYr, "-W", formatC(isoWk, width = 2, format = "d", flag = "0"))) %>%
# rm future weeks
filter(!(isoYr == 2021 & isoWk > recentWk))
# (3) average mortality by week, gender, and age group multiplied by exposure (like standardisation)
# see Arburo paper
avgMx <- wkDthsAgeGrp %>%
filter(isoYr %in% c(2015:2019), ageGrp != "under1") %>%
left_join(
popExpWk %>%
left_join(age_group_lookup, by = "ageGrp") %>%
group_by(isoYr, isoYrWk, isoWk, gender, onsAgeGrp) %>%
summarise(exp = sum(exp)) %>%
ungroup()
, by = c("isoYr", "gender", "ageGrp" = "onsAgeGrp", "isoWk", "isoYrWk")
) %>%
# convert annual person-years to weekly exposure
mutate(exp = exp/(365/7)) %>%
group_by(isoWk, gender, ageGrp) %>%
# NOTE(review): column `a` is created but never used afterwards.
mutate(a = dths / exp) %>%
summarise(avgMx = mean(dths / exp)) %>%
ungroup()
expDthsAvgMx <- wkDthsAgeGrp %>%
# retain 2020+ weeks
filter(isoYr >= 2020, ageGrp != "under1") %>%
left_join(
popExpWk %>%
left_join(age_group_lookup, by = "ageGrp") %>%
group_by(isoYr, isoYrWk, isoWk, gender, onsAgeGrp) %>%
summarise(exp = sum(exp)) %>%
ungroup()
, by = c("isoYr", "gender", "ageGrp" = "onsAgeGrp", "isoWk", "isoYrWk")
) %>%
mutate(exp = exp/(365/7)) %>%
left_join(
avgMx, by = c("isoWk", "gender", "ageGrp")
) %>%
# expected deaths = historical average weekly rate x current exposure
mutate(expDths = avgMx * exp) %>%
group_by(isoYr, isoWk, isoYrWk) %>%
summarise(expDths = sum(expDths))
# 3 build models -------------------------------------------------------------
# setup model df
expDthsModelDf <- wkDthsAgeGrp %>%
# no weekly exposure for under-1s from 2019 onward
filter(isoYr %in% c(2015:2021), ageGrp != "under1") %>%
rename(onsAgeGrp = ageGrp) %>%
left_join(
popExpWk %>%
left_join(age_group_lookup, by = "ageGrp") %>%
group_by(isoYr, isoYrWk, isoWk, gender, onsAgeGrp) %>%
summarise(exp = sum(exp)) %>%
ungroup()
, by = c("isoYr", "gender", "onsAgeGrp", "isoWk", "isoYrWk")
) %>%
# NOTE(review): group_indices() is superseded in dplyr >= 1.0; consider
# cur_group_id()/dense_rank() if this script is modernised.
mutate(time = group_indices(., isoYrWk)) %>%
mutate(phWk = case_when(isoWk %in% c(1, 52, 53) ~ 1, TRUE ~ 0)) %>%
# would require updating if under-1s not filtered out at top of chain
mutate(onsAgeGrpN = as.integer(substr(onsAgeGrp, 1, 2))) %>%
mutate(across(c(gender, onsAgeGrp), as.factor)) %>%
# train on pre-2020 (pre-pandemic) weeks only
mutate(train = case_when(isoYr>= 2020 ~ 0L, TRUE ~ 1L))
# (3a) GAM (negBin)
# GAM model with smooth effects for long term trend, age, & seasonality,
# & interaction between age and seasonality (smooth effects stratified by gender)
gamNb <- gam(
dths ~
# public holidays
phWk
# log-linear long-term trend
+ time * gender * onsAgeGrp
# penalized spline for age effect
+ s(onsAgeGrpN, bs = "ps", k = 6, by = gender)
# penalized cyclic spline for seasonality
+ s(isoWk, bs = "cp", k = 8, by = gender)
# smooth interaction between age and seasonality
+ ti(isoWk, onsAgeGrpN, bs = c("cp", "ps"), k = c(8, 6), by = gender)
# population exposure
+ offset(log(exp))
, family = nb(link = "log"), weights = exp, method = "REML"
, data = expDthsModelDf %>% filter(train == 1))
# (3b) GAM (Poisson)
# same as (3a) but Poisson not negative binomial
gamPoi <- gam(
dths ~
# public holidays
phWk
# log-linear long-term trend
+ time * gender * onsAgeGrp
# penalized spline for age effect
+ s(onsAgeGrpN, bs = "ps", k = 6, by = gender)
# penalized cyclic spline for seasonality
+ s(isoWk, bs = "cp", k = 8, by = gender)
# smooth interaction between age and seasonality
+ ti(isoWk, onsAgeGrpN, bs = c("cp", "ps"), k = c(8, 6), by = gender)
# population exposure
+ offset(log(exp))
, family = poisson(link = "log"), weights = exp, method = "REML"
, data = expDthsModelDf %>% filter(train == 1))
# (3c) FluMOMO model
# GLM with ...
# see Arburto paper and scripts (OxfordDemSci-Excess-Deaths-d848e43.zip)
glmFm <- glm(
dths ~
# public holidays
phWk
# log linear long-term trend
+ time * gender * onsAgeGrp
# seasonality by age and sex
# full year period
+ sin(2 * pi * isoWk / (365.25/7)) * gender * onsAgeGrp
+ cos(2 * pi * isoWk / (365.25/7)) * gender * onsAgeGrp
# half year period
+ sin(2 * pi * isoWk / (365.25/2/7)) * gender * onsAgeGrp
+ cos(2 * pi * isoWk / (365.25/2/7)) * gender * onsAgeGrp
# population exposure
+ offset(log(exp))
, family = poisson(link = "log"), weights = NULL, method = "glm.fit"
, data = expDthsModelDf %>% filter(train == 1))
# 4 model predictions ---------------------------------------------------------
# In-sample fit comparison across the three candidate models.
# compare mean absolute error (MAE)
mean(abs(residuals(gamNb, type = "response")))
mean(abs(residuals(gamPoi, type = "response")))
mean(abs(residuals(glmFm, type = "response")))
# compare root mean squared error (RMSE)
sqrt(mean(residuals(gamNb, type = "response")^2))
sqrt(mean(residuals(gamPoi, type = "response")^2))
sqrt(mean(residuals(glmFm, type = "response")^2))
# gam Poisson wins!
# Predict over ALL weeks (incl. 2020+) to get counterfactual expected deaths.
# gam negBin model
gamNbFit <- predict.gam(gamNb, type = "response", newdata = expDthsModelDf)
# gam Poisson model
gamPoiFit <- predict.gam(gamPoi, type = "response", newdata = expDthsModelDf)
# FluMOMO model
glmFmFit <- predict.glm(glmFm, type = "response", newdata = expDthsModelDf)
# model results
expDthsModelRes <- expDthsModelDf %>%
select(-onsAgeGrpN) %>%
mutate(gamNbFit = gamNbFit, gamPoiFit = gamPoiFit, glmFmFit = glmFmFit)
# plot model predictions v. actual
ggplot(expDthsModelRes) +
geom_line(aes(x = time, y = dths, group = onsAgeGrp), color = "grey") +
geom_line(aes(x = time, y = gamNbFit, group = onsAgeGrp), color = "orange") +
geom_line(aes(x = time, y = gamPoiFit, group = onsAgeGrp), color = "red") +
geom_line(aes(x = time, y = glmFmFit, group = onsAgeGrp), color = "blue") +
facet_wrap(vars(gender)) +
scale_x_continuous(breaks = c(2010:2021))
# models weekly predictions
expDthsModelResTot <- expDthsModelRes %>%
group_by(isoYr, isoWk, isoYrWk) %>%
summarise(across(c(dths, gamNbFit, gamPoiFit, glmFmFit), sum)) %>%
ungroup()
# simple counterfactuals weekly predictions
simpleCfResTot <- expDthsMn5 %>%
rename(mn5 = dths) %>%
left_join(
expDths2019 %>%
group_by(isoWk, isoYr, isoYrWk) %>%
summarise(dths = sum(dths)) %>%
ungroup() %>%
rename(dths19 = dths)
, by = c("isoWk", "isoYr", "isoYrWk")) %>%
left_join(
expDthsAvgMx %>%
rename(avgMx = expDths)
, by = c("isoWk", "isoYr", "isoYrWk")) %>%
left_join(
wkDthsAgeGrp %>%
group_by(isoYr, isoYrWk, isoWk) %>%
summarise(dths = sum(dths))
, by = c("isoWk", "isoYr", "isoYrWk"))
# xs deaths v 2019 by age group
xsDthsv2019AgeGrp <- expDths2019 %>%
rename(dths19 = dths) %>%
left_join(
wkDthsAgeGrp, by = c("isoWk", "isoYr", "isoYrWk", "gender", "ageGrp")) %>%
mutate(xsDths = dths - dths19)
ggplot(expDthsModelResTot) +
geom_line(aes(x = isoYrWk, y = dths, group = 1), color = "grey") +
geom_line(aes(x = isoYrWk, y = gamNbFit, group = 1), color = "orange") +
geom_line(aes(x = isoYrWk, y = gamPoiFit, group = 1), color = "red") +
geom_line(aes(x = isoYrWk, y = glmFmFit, group = 1), color = "blue") +
scale_x_discrete(breaks = c(
"2010-W01", "2011-W01", "2012-W01", "2013-W01", "2014-W01", "2015-W01"
, "2016-W01", "2017-W01", "2018-W01", "2019-W01", "2020-W01", "2021-W01"))
ggplot(simpleCfResTot) +
geom_line(aes(x = isoYrWk, y = dths, group = 1), color = "grey") +
geom_line(aes(x = isoYrWk, y = mn5, group = 1), color = "green") +
geom_line(aes(x = isoYrWk, y = dths19, group = 1), color = "black") +
geom_line(aes(x = isoYrWk, y = avgMx, group = 1), color = "gold") +
scale_x_discrete(breaks = c(
"2010-W01", "2011-W01", "2012-W01", "2013-W01", "2014-W01", "2015-W01"
, "2016-W01", "2017-W01", "2018-W01", "2019-W01", "2020-W01", "2021-W01"))
# 5 summarise -----------------------------------------------------------------
# Excess deaths (observed minus expected) per pandemic wave, model-based (a)
# and simple-counterfactual (b), combined into one summary table with a Total row.
a <-
expDthsModelResTot %>%
left_join(
waves, by = c("isoYr", "isoWk")
) %>%
# NOTE(review): this filters the label "preCovid" (no hyphen) while the
# equivalent filter below uses "pre-Covid"; one of the two is likely a no-op --
# confirm against the actual labels in `waves`.
filter(wave != "preCovid") %>%
group_by(wave) %>%
summarise(across(c(dths, gamNbFit, gamPoiFit, glmFmFit), sum)) %>%
# convert fitted totals to excess deaths (observed - expected)
mutate(across(c(gamNbFit , gamPoiFit, glmFmFit), ~ dths - .x)) %>%
pivot_longer(dths:glmFmFit, names_to = "model", values_to = "dths")
b <-
simpleCfResTot %>%
select(-avgMx) %>%
left_join(
waves, by = c("isoYr", "isoWk")
) %>%
filter(wave != "pre-Covid") %>%
group_by(wave) %>%
summarise(across(c(dths, mn5, dths19), sum)) %>%
mutate(across(c(mn5, dths19), ~ dths - .x)) %>%
pivot_longer(dths:dths19, names_to = "model", values_to = "dths") %>%
filter(model != "dths")
xsDthsSumTb <- a %>% bind_rows(b) %>%
pivot_wider(wave, names_from = model, values_from = dths) %>%
bind_rows(
# append an all-wave "Total" row
summarise(.
, across(where(is.numeric), sum)
, across(where(is.character), ~"Total"))) %>%
arrange(factor(wave, levels = c("First wave", "Summer lull", "Second wave", "pre-Covid")))
# 6 save ----------------------------------------------------------------------
# Persist summary table and intermediate results for downstream reporting.
saveRDS(xsDthsSumTb, str_c(.datDir, "asis06_xs_deaths_summary_table.RDS"))
saveRDS(expDthsModelResTot, str_c(.datDir, "asis06_xs_deaths_model_results.RDS"))
saveRDS(expDthsModelRes, str_c(.datDir, "asis06_xs_deaths_model_age_grp_results.RDS"))
saveRDS(simpleCfResTot, str_c(.datDir, "asis06_xs_deaths_simple_counterfactuals.RDS"))
saveRDS(xsDthsv2019AgeGrp, str_c(.datDir, "asis06_xs_deaths_v2019_age_grp.RDS"))
|
2f7b65aafb736b75fd6c0345008ea208b2d9d071
|
dd1eaf4c1f9319bfae0b5ad5a87ac854438399d4
|
/cost_driven_validation.R
|
dde87823ed70a94f5d69f0a50c8254e4c8221efe
|
[] |
no_license
|
siddharthabingi/Credit_card_fraud_detection
|
3207d2d6866d1a6c4745cf16f00dd39db84272c1
|
238824d00f55f86584b6726a1eef9143098ddc1c
|
refs/heads/master
| 2020-08-24T15:26:25.778615
| 2019-10-22T16:07:00
| 2019-10-22T16:07:00
| 216,854,606
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,502
|
r
|
cost_driven_validation.R
|
# Credit-card fraud detection: compare a SMOTE-balanced rpart tree against a
# cost-sensitive rpart tree (loss matrix penalising missed fraud 5:1).
# NOTE(review): depends on `credit.card1` plus caret/DMwR/ROSE being loaded
# upstream -- this script is not self-contained.
cred <- credit.card1
# drop zero-amount transactions and extreme amounts (> 2825)
cred <- credit.card1[!(credit.card1$Amount==0),]
nrow(cred[(cred$Amount>2825),])
cred <- (cred[!(cred$Amount>2825),])
set.seed(2222)
index <- createDataPartition(y = cred$Class, p = 0.70,list = F) #Using caret package
train.credit <- cred[index,]
test.credit <- cred[-index,]
prop.table(table(train.credit$Class))*100 #Train Data Proportion is same as original data
prop.table(table(test.credit$Class))*100 #Test Data Proportion is same as original data
# oversample minority (fraud) class on the training set only
smote.data <- SMOTE(form = Class~. , data = train.credit, perc.over = 200, k = 5,
perc.under = 200)
table(train.credit$Class)
prop.table(table(smote.data$Class))
table(smote.data$Class)
plot(smote.data$Amount[smote.data$Class == 1],
xlab = 'Observations', ylab = 'Amount',
main = "Plot of Feature 'Amount' for fraud transactions")
plot(smote.data$Amount[smote.data$Class == 0],
xlab = 'Observations', ylab = 'Amount',
main = "Plot of Feature 'Amount' for non-fraud transactions")
mean(smote.data$Amount[smote.data$Class == 1])
mean(smote.data$Amount[smote.data$Class == 0])
library(rpart)
# baseline tree on SMOTE-balanced data
fit_samp <- rpart(formula = Class~.,data = smote.data,method = 'class')
pred_test <- predict(object = fit_samp, newdata = test.credit[,-1],type = 'class')
pred_train <- predict(object = fit_samp, newdata = train.credit[,-1], type = 'class')
confusionMatrix(data = pred_train,reference = train.credit$Class, positive = '0')
confusionMatrix(data = pred_test,reference = test.credit$Class, positive = '0')
library(rpart.plot)
# NOTE(review): `fit` is never defined in this script -- presumably `fit_samp`
# was intended; this call will error as written.
rpart.plot(fit)
#####################################################
# alternative split criterion: information gain
fit2 <- rpart(formula = Class~.,data = smote.data,method = 'class',
parms = list(split = 'information'))
pred2 <- predict(object = fit2, newdata = test.credit[,-1],type = 'class')
library(pROC)
# NOTE(review): pROC::roc expects a predictor vector, not the rpart model
# object `fit2` -- likely meant roc(test.credit$Class, as.numeric(pred2)).
roc(test.credit$Class,fit2)
confusionMatrix(data = pred2,reference = test.credit$Class, positive = '1')
library(rpart.plot)
# NOTE(review): undefined `fit` again -- presumably `fit2` here.
rpart.plot(fit)
##############################################################
# cost-sensitive tree: false negatives (missed fraud) cost 5x false positives
cost.driven.model <- rpart(Class~.,data = smote.data,parms=list(loss=matrix(c(0,1,5,0),
byrow=TRUE,
nrow=2)))
pred_cost_test <- predict(object = cost.driven.model, newdata = test.credit[,-1],type = 'class')
pred_cost_train <- predict(object = cost.driven.model, newdata = train.credit[,-1],type = 'class')
confusionMatrix(data = pred_cost_test,reference = test.credit$Class, positive = '0')
confusionMatrix(data = pred_cost_train,reference = train.credit$Class, positive = '0')
roc.curve(response = test.credit$Class,predicted = pred_cost_test)
#ROC Curves - Test Data
roc.curve(response = test.credit$Class, predicted = pred_test,
col = 'blue', lwd = 2, main = 'ROC Curve : Test Data')
roc.curve(response = test.credit$Class, predicted = pred_cost_test,
col = 'red', lwd = 2, add.roc = T)
legend("bottomright", c("SMOTE", "COST Sensitive"),
col=c('blue','red'),lwd=2)
#ROC Curves - Train Data
roc.curve(response = train.credit$Class, predicted = pred_train,
col = 'blue', lwd = 2, main = 'ROC Curve : Train Data')
roc.curve(response = train.credit$Class, predicted = pred_cost_train,
col = 'red', lwd = 2, add.roc = T)
legend("bottomright", c("SMOTE", "COST Sensitive"),
col=c('blue','red'),lwd=2)
|
921c2bc3fefe544dc7e1b37ea07a43f2e1a9eb5b
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/googleVis/examples/Stock.Rd.R
|
b59181bae0fe49310fdd33b522f0791619f89daa
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,189
|
r
|
Stock.Rd.R
|
# Build the googleVis "Stock" example data set: wide per-product columns are
# reshaped to long format with one row per (Date, Device).
library(googleVis)
### Name: Stock
### Title: Stock: googleVis example data set
### Aliases: Stock
### Keywords: datasets
### ** Examples
## Create data as used by Google in their annotated time line example
Date <- as.Date(paste("2008-1-", 1:6, sep=""))
Pencils <- c(3000, 14045, 5502, 75284, 41476, 333222)
# per-series annotation titles/bodies; NA where no annotation applies
Pencils.titles <-c(rep(NA,4), 'Bought pencils', NA)
Pencils.annotation <-c(rep(NA,4), 'Bought 200k pencils', NA)
Pens <- c(40645, 20374, 50766, 14334, 66467, 39463)
Pens.titles <- c(rep(NA, 3), 'Out of stock', NA, NA)
Pens.annotation <- c(rep(NA, 3), 'Ran of out stock of pens at 4pm', NA, NA)
original.df=data.frame(Date, Pencils, Pencils.titles,
Pencils.annotation, Pens, Pens.titles,
Pens.annotation)
# wide -> long: each `varying` triple becomes (Value, Title, Annotation),
# with the series name recorded in the new `Device` column
Stock <- reshape(original.df, idvar="Date", times=c("Pencils", "Pens"),
timevar="Device",
varying=list(c("Pencils", "Pens"),
c("Pencils.titles", "Pens.titles"),
c("Pencils.annotation", "Pens.annotation")),
v.names=c("Value", "Title", "Annotation"),
direction="long")
|
a02bcf5b68335278182b10c8b3951284adfd9869
|
2404d8c85199a306a7d107e0cf7b50623f5381ba
|
/HierarchialClusteringVisualization.R
|
9d1eed5c51418f9aa622560df83237ed89ea37e1
|
[] |
no_license
|
Santosh-Sah/Hierarchial_Clustering_R
|
187c48bd1375eb389831e167d31d01034f42b87a
|
b3c61ab8adda4aefa6ee5bca96422239a7e4233d
|
refs/heads/master
| 2022-05-28T20:16:59.719415
| 2020-05-02T04:03:21
| 2020-05-02T04:03:21
| 260,609,386
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,775
|
r
|
HierarchialClusteringVisualization.R
|
# Load helpers, data set, and the pre-fitted hierarchical clustering model.
source("HierarchialClusteringUtils.R")
library(ggplot2)
library(cluster)
#reading dataset
hierarchialClusteringDataset = importHierarchialClusteringDataset("Hierarchial_Clustering_Mall_Customers.csv")
#reading HierarchialClustering model
hierarchialClusteringModel = readRDS("HierarchialClusteringModel.RDS")
#Using the dendogram method to find the optimal number of clusters
# Fit Ward-linkage hierarchical clustering on Euclidean distances and save the
# resulting dendrogram plot to "HierarchialClusteringDendogram.png".
# (Plot is used to eyeball the optimal number of clusters.)
hierarchialClusteringDendogramMethod <- function(hierarchialClusteringDataset){
  # pairwise Euclidean distances, then Ward's minimum-variance clustering
  distance_matrix <- dist(hierarchialClusteringDataset, method = 'euclidean')
  dendrogram_fit <- hclust(d = distance_matrix, method = 'ward.D')
  # render to PNG rather than an interactive device
  png("HierarchialClusteringDendogram.png")
  plot(dendrogram_fit,
       main = paste("Dendogram"),
       xlab = "Customers",
       ylab = "Euclidean distances")
  dev.off()
}
#visualizing cluster
#if your dataset has multiple dimension then please do not run this plot. This plot is only for two dimensional data only.
#if your dataset has multiple dimension and still you want to draw this plot then we need to create two dimension with the help of PCA.
# Cut the fitted hierarchical clustering into 5 groups and save a 2-D cluster
# plot (cluster::clusplot) to "HierarchialClusteringClusters.png".
# Only valid for two-dimensional data (see note above this function).
hierarchialClusteringClusters <- function(hierarchialClusteringDataset, hierarchialClusteringModel){
  # assign each observation to one of 5 clusters
  cluster_assignments <- cutree(hierarchialClusteringModel, 5)
  png("HierarchialClusteringClusters.png")
  clusplot(hierarchialClusteringDataset,
           cluster_assignments,
           lines = 0,
           shade = TRUE,
           color = TRUE,
           labels = 2,
           plotchar = FALSE,
           span = TRUE,
           main = paste('Clusters of customers'),
           xlab = 'Annual Income',
           ylab = 'Spending Score')
  dev.off()
}
# Driver calls: dendrogram generation is commented out; cluster plot is produced.
#hierarchialClusteringDendogramMethod(hierarchialClusteringDataset)
hierarchialClusteringClusters(hierarchialClusteringDataset, hierarchialClusteringModel)
|
74b6a0f84f337dfdb9be705b57c419014968f3ae
|
acb7f21a7d2ff171e4cf0f550aca30607a8f9866
|
/pattern matching/AgnosticPatternMatching.R
|
a913b8337ae5d6748ae578ed6beddba14e114a3a
|
[] |
no_license
|
ukparliament/data-analysis
|
3e22a01d6e7b931f5bda7bd65bf25080ad829218
|
5f98a1fcca242309ac85d220c1361f5274800726
|
refs/heads/master
| 2020-03-25T17:44:51.790639
| 2019-02-20T16:49:09
| 2019-02-20T16:49:09
| 143,993,401
| 0
| 8
| null | 2018-12-06T12:42:14
| 2018-08-08T09:44:59
|
R
|
UTF-8
|
R
| false
| false
| 491
|
r
|
AgnosticPatternMatching.R
|
# with a set of patterns and names in a data frame names rules, and a vector of strings called strings,
# this code will produce a data frame with the regex and name and all the strings it matches with for
# every match between the rules and the strings.
library(plyr)
library(stringr)
# NOTE(review): self-assignment is a no-op -- presumably a placeholder for
# loading `rules` from somewhere.
rules <- rules
# NOTE(review): depends on a global data frame `df2` with a `uri` column.
strings = df2$uri
rules$row = row.names(rules)
# cross join: merge with no shared columns pairs every string with every rule.
# NOTE(review): `stringsAsFactors` is not a merge() argument (absorbed by ...);
# the as.character() conversion below is what actually guarantees characters.
m<- merge(strings, rules, stringsAsFactors=F)
m[]<-lapply(m,as.character)
# flag rows where the string matches the rule's regex, then keep only matches
m['match']<-str_detect(m$x, m$rule)
m <- m[m$match == TRUE,]
|
9d07b0b71e793a8372766da7f14a85b693b95ca7
|
c58ec63a4bffeaa40546d7aca9cebdb15446430a
|
/dependency-decision-tree.R
|
4e0dd7dff8aa812f60e03deaa519d5eecc8a8986
|
[] |
no_license
|
sudhir-voleti/decision-tree
|
ba4f88c1bae085a45a70fd513d5b9ba4345bd0cb
|
a89a536eaa6dd7d4f86dabfc27c15d583d09542b
|
refs/heads/master
| 2023-01-20T22:44:28.833784
| 2023-01-19T09:28:58
| 2023-01-19T09:28:58
| 102,189,332
| 0
| 2
| null | 2023-01-19T09:29:03
| 2017-09-02T10:32:52
|
R
|
UTF-8
|
R
| false
| false
| 914
|
r
|
dependency-decision-tree.R
|
# Ensure all packages needed by the app are installed and attached.
# NOTE(review): installing at load time is a side effect (network access,
# library writes) -- fine for a self-contained Shiny app, but `require()`
# followed by install is the conventional bootstrap pattern used here.
suppressPackageStartupMessages({
if (!require('shiny')){install.packages('shiny')}; library(shiny)
if (!require('pastecs')){install.packages('pastecs')}; library(pastecs)
if (!require('rpart')){install.packages('rpart')}; library(rpart)
if (!require('dplyr')){install.packages('dplyr')}; library(dplyr)
if (!require('Hmisc')){install.packages('Hmisc')}; library(Hmisc)
if (!require('randomForest')){install.packages('randomForest')}; library(randomForest)
if (!require('hydroGOF')){install.packages('hydroGOF')}; library(hydroGOF)
if (!require('sparkline')){install.packages('sparkline')}; library(sparkline)
if (!require('partykit')){install.packages('partykit')}; library(partykit)
if (!require('visNetwork')){install.packages('visNetwork')};library(visNetwork)
if (!require('DT')){install.packages('DT')};library(DT)
if (!require('tidyr')){install.packages('tidyr')};library(tidyr)
})
|
861083b260333ee7b1e48e8f94cb4ad7bdb408ee
|
0de998cbb0b0e4149c00d4f849933cdead74adaf
|
/man/addGridInfo.Rd
|
1149f7dfb5e4957db475bd981d83a1cd46e00f10
|
[] |
no_license
|
teokaram/Lithics3D
|
a7e696443c2db1fa2b604e01dc9f7d128d99ba32
|
0274a2306c3863e56e37477fc08d634cb115009e
|
refs/heads/master
| 2020-03-27T23:17:24.189443
| 2018-09-03T18:41:50
| 2018-09-03T18:41:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,227
|
rd
|
addGridInfo.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mesh_addGridInfo.R
\name{addGridInfo}
\alias{addGridInfo}
\title{Group coordinates into a 2D grid layout}
\usage{
addGridInfo(coords, c.res, axes = c(1, 2))
}
\arguments{
\item{coords}{a data frame object with coordinates (one per row)}
\item{c.res}{size of the requested grid intervals in mesh units (e.g. if
mesh units are mm, then a value of 0.2 would indicate a grid resolution of
0.2mm).}
\item{axes}{a vector containing 2 indices or names for the input columns to
be used (e.g. c("x","y"), or c(1,2)). Longer vectors will be truncated to
the first two entries (e.g. c("x","y","z") == c("x","y"))}
}
\value{
A list containing the altered input coordinate data frame object and
information on the grid. The output df contains two additional columns:
GDIM1 and GDIM2.
}
\description{
Computes a grid based on a) the maximum distance between
the input coordinates along either of the two requested axes, and b) the
requested size of the grid intervals. Each coordinate is then assigned to a
grid interval along the two requested axes (e.g. x and y) - in essence, to a
grid cell.
}
\note{
1. The grid origin is set to 0,0 along the requested axes. Note that the
GDIM1 and GDIM2 columns refer to x,y locations on a 2d map of the
coordinates, projected according to the specified columns.
2. You should ensure the requested grid resolution makes sense given the
resolution of the mesh (i.e. how many vertices are expected per given
interval)
3. This function is generic - it will work on any data.frame.
4. NA values will result with some bad input (e.g. number of requested
breaks)
}
\examples{
data(demoFlake1)
alignedMesh<-alignMesh.PCA(demoFlake1$mesh)
vertexCoords<-data.frame(t(alignedMesh$vb))
gridded<-Lithics3D:::addGridInfo(vertexCoords, 0.2, axes=c(2,3)) # or c("y","z")
mfval<-par("mfcol")
par(mfcol=c(2,1))
# Plot raw data:
plot(t(alignedMesh$vb)[,2:3], pch=".", asp=1, xlab="y", ylab="z")
# Plot grid: origin points of each grid cell.
plot(gridded$coords[,5:6], pch=".", asp=1, xlab="y", ylab="z")
par(mfcol=mfval) # reset graphic parameters
}
\author{
Cornel M. Pop
}
|
961c276331cbcb72021134d5a0b8848bbba3e4d8
|
b08ce6dc07ef1f6484e395ff32a29a4455deef8d
|
/R/CairoBasic.R
|
2d1d3ddf9c224a1031a21fa41f0625719313c21d
|
[
"MIT"
] |
permissive
|
coolbutuseless/cairobasic
|
fce0bb6b4b8d78c468c19603ad8a19b41e53a543
|
b2012016af3f23c68e2c7196c4e3c26bf3fc9f6d
|
refs/heads/master
| 2022-12-07T11:25:44.548843
| 2020-08-18T05:36:44
| 2020-08-18T05:36:44
| 288,364,319
| 7
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,685
|
r
|
CairoBasic.R
|
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#' Cairo Basic R6 Wrapper
#'
#' A minimal convenience wrapper around an ARGB32 cairo image surface.
#' Provides vectorised drawing primitives (circles, line segments, polygons,
#' rectangles, text) plus raster/array conversion and PNG export. All drawing
#' methods return the object invisibly so calls can be chained.
#'
#' @export
#' @import R6
#' @import cairocore
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
CairoBasic <- R6::R6Class(
  "CairoBasic",
  public = list(
    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    #' @field cr cairo context
    #' @field surface cairo surface
    #' @field width,height dimensions of canvas
    #' @field flipy flip the y axis so origin is at bottom left? default: TRUE
    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    cr      = NULL,
    surface = NULL,
    width   = NULL,
    height  = NULL,
    flipy   = NULL,
    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    #' @description Initialise
    #'
    #' @param width,height dimensions of canvas
    #' @param bg initial background colour
    #' @param flipy flip the y axis such that the origin is at the lower-left.
    #'        default: TRUE.  If FALSE, then origin is at top-left of
    #'        canvas
    #' @param antialias logical. default TRUE. If FALSE, then antialiasing will
    #'        be turned off and the everything will look a lot jaggier, but
    #'        will render much faster.
    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    initialize = function(width, height, bg = 'white', flipy = TRUE, antialias = TRUE) {

      self$width  <- width
      self$height <- height

      #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
      # Create the surface and its drawing context
      #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
      self$surface <- cairocore::cairo_image_surface_create(
        cairocore::cairo_format_t$CAIRO_FORMAT_ARGB32, width, height
      )
      self$cr <- cairocore::cairo_create(self$surface)

      if (!isTRUE(antialias)) {
        cairocore::cairo_set_antialias(self$cr, cairocore::cairo_antialias_t$CAIRO_ANTIALIAS_NONE)
      }

      self$flipy <- isTRUE(flipy)

      #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
      # Flip the y axis if requested: negate the y scale and translate the
      # origin down to the bottom of the canvas.
      #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
      if (isTRUE(flipy)) {
        mat <- cairocore::cairo_matrix_t(1, 0, 0, -1, 0, height)
        cairocore::cairo_set_matrix(self$cr, mat)
      }

      #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
      # Fill the background. col2rgb() returns 0-255; cairo wants 0-1.
      #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
      fill <- grDevices::col2rgb(bg, alpha = TRUE)/255
      cairocore::cairo_set_source_rgba(self$cr, fill[1], fill[2], fill[3], fill[4])
      cairocore::cairo_paint(self$cr)

      invisible(self)
    },


    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    #' Show the surface in the plotting window
    #'
    #' @param interpolate interpolate pixels for plotting the raster
    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    show = function(interpolate = FALSE) {
      raster_out <- cairocore::cairo_image_surface_get_raster(self$surface, nchannel = 3)
      plot(raster_out, interpolate = interpolate)

      invisible(self)
    },


    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    #' Save surface as PNG
    #'
    #' @param filename PNG filename
    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    write_png = function(filename) {
      # Flush first so any pending drawing operations reach the surface
      # before it is serialised to disk.
      cairocore::cairo_surface_flush(self$surface)
      cairocore::cairo_surface_write_to_png(surface = self$surface, filename = filename)

      invisible(self)
    },


    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    #' Get the surface as a raster object
    #'
    #' @param nchannel integer value. 1 = grey, 3 = rgb, 4 = rgba.  Default: 3
    #'
    #' @return Raster object
    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    as_raster = function(nchannel = 3) {
      cairocore::cairo_image_surface_get_raster(self$surface, nchannel = nchannel)
    },


    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    #' Get the surface as an array object
    #'
    #' @param nchannel integer value. 1 = grey, 3 = rgb, 4 = rgba.  Default: 3
    #'
    #' @return Array
    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    as_array = function(nchannel = 3) {
      cairocore::cairo_image_surface_get_array(self$surface, nchannel = nchannel)
    },


    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    #' Add circles
    #'
    #' @param x,y centres of the circles
    #' @param r radius of circles
    #' @param fill,colour fill and stroke colours. set to NA to not stroke or
    #'        fill.  May be an R colour name or a hex colour
    #' @param color same as 'colour'
    #' @param linewidth line width. default: 1
    #'
    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    add_circles = function(x, y, r, linewidth = 1, fill = 'black', colour = NA, color = colour) {

      # NA colours become fully transparent (alpha = 0) via col2rgb(),
      # which effectively disables the corresponding stroke/fill.
      col  <- grDevices::col2rgb(color, alpha = TRUE)/255
      fill <- grDevices::col2rgb(fill , alpha = TRUE)/255

      cairocore::cairo_save(self$cr)
      cairocore::cairo_set_line_width(self$cr, linewidth)

      cairocore::cairo_arc_vec(
        self$cr, x, y, r,
        angle1 = 0, angle2 = 2*pi,
        fill[1,], fill[2,], fill[3,], fill[4,],
        col [1,], col [2,], col[3,], col[4,]
      )

      cairocore::cairo_restore(self$cr)

      invisible(self)
    },


    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    #' Add line segments
    #'
    #' @param x1,y1,x2,y2 segment endpoint coordinates
    #' @param colour segment colour
    #' @param color same as 'colour'
    #' @param linewidth line width. default: 1
    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    add_line_segments = function(x1, y1, x2, y2, linewidth = 1, colour = 'black', color = colour) {

      col <- grDevices::col2rgb(color, alpha = TRUE)/255

      cairocore::cairo_save(self$cr)
      cairocore::cairo_set_line_width(self$cr, linewidth)

      cairocore::cairo_segment_vec(
        self$cr, x1, y1, x2, y2,
        col[1,], col[2,], col[3,], col[4,]
      )

      cairocore::cairo_restore(self$cr)

      invisible(self)
    },


    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    #' Add polygons
    #'
    #' @param x,y polygon coordinates
    #' @param idx An integer vector used to separate locations in x and y into
    #'        multiple polygons. Specifies consecutive blocks of locations which
    #'        make up separate polygons.
    #' @param fill,colour fill and stroke colours. set to NA to not stroke or
    #'        fill.  May be an R colour name or a hex colour
    #' @param color same as 'colour'
    #' @param linewidth line width. default: 1
    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    add_polygons = function(x, y, idx, linewidth = 1, fill = 'black', colour = NA, color = colour) {

      col  <- grDevices::col2rgb(color, alpha = TRUE)/255
      fill <- grDevices::col2rgb(fill , alpha = TRUE)/255

      cairocore::cairo_save(self$cr)
      cairocore::cairo_set_line_width(self$cr, linewidth)

      cairocore::cairo_polygon_vec(
        self$cr, x, y, idx,
        fill[1,], fill[2,], fill[3,], fill[4,],
        col [1,], col [2,], col[3,], col[4,]
      )

      cairocore::cairo_restore(self$cr)

      invisible(self)
    },


    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    #' Add rectangles
    #'
    #' @param x,y,width,height rectangle positions
    #' @param fill,colour fill and stroke colours. set to NA to not stroke or
    #'        fill.  May be an R colour name or a hex colour
    #' @param color same as 'colour'
    #' @param linewidth line width. default: 1
    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    add_rectangles = function(x, y, width, height, linewidth = 1, fill = 'black', colour = NA, color = colour) {

      col  <- grDevices::col2rgb(color, alpha = TRUE)/255
      fill <- grDevices::col2rgb(fill , alpha = TRUE)/255

      cairocore::cairo_save(self$cr)
      cairocore::cairo_set_line_width(self$cr, linewidth)

      cairocore::cairo_rectangle_vec(
        self$cr, x, y, width, height,
        fill[1,], fill[2,], fill[3,], fill[4,],
        col [1,], col [2,], col[3,], col[4,]
      )

      cairocore::cairo_restore(self$cr)

      invisible(self)
    },


    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    #' Add text
    #'
    #' @param text text string
    #' @param x,y position
    #' @param fontsize font size
    #' @param angle angle in degrees
    #' @param center center the text at the give x,y position. default FALSE
    #' @param colour text colour
    #' @param color same as 'colour'
    #' @param family 'sans' or 'serif'
    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    add_text = function(text, x, y, fontsize = 20, angle = 0, center = FALSE,
                        colour = 'black', color = colour,
                        family = 'sans') {

      stopifnot(length(color) == 1)
      stopifnot(length(text) == 1)

      col <- grDevices::col2rgb(color, alpha = TRUE)/255

      #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
      # Select the font face. Fix: honour the 'family' argument rather than
      # hard-coding 'sans' (the parameter was previously accepted but ignored).
      #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
      cairocore::cairo_select_font_face(
        self$cr,
        family = family,
        slant  = cairocore::cairo_font_slant_t$CAIRO_FONT_SLANT_NORMAL,
        weight = cairocore::cairo_font_weight_t$CAIRO_FONT_WEIGHT_NORMAL
      )

      #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
      # Calculate the extents of the text and offset x,y so the text is
      # centred on the requested position (if requested).
      #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
      cairocore::cairo_set_font_size(self$cr, fontsize)
      if (isTRUE(center)) {
        te <- cairocore::cairo_text_extents_t()
        cairocore::cairo_text_extents(self$cr, text, te);
        te <- as.list(te)
        x <- x - te$width  / 2 - te$x_bearing
        y <- y - te$height / 2 - te$y_bearing
      }

      #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
      # Set up a transformation so that the text is rotated correctly.
      # When the canvas y axis is flipped, the font matrix must be flipped
      # back, otherwise glyphs would render upside-down.
      #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
      cairocore::cairo_save(self$cr)
      cairocore::cairo_translate(self$cr, x, y)
      cairocore::cairo_rotate(self$cr, angle * pi/180)

      if (self$flipy) {
        tmat <- cairocore::cairo_matrix_t(fontsize, 0, 0, -fontsize, 0, 0)
        cairocore::cairo_set_font_matrix(self$cr, tmat)
      }

      #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
      # Draw the actual text
      #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
      cairocore::cairo_set_source_rgba(self$cr, col[1], col[2], col[3], col[4])
      cairocore::cairo_move_to(self$cr, 0, 0)
      cairocore::cairo_show_text(self$cr, text)

      #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
      # Restore the transformation state prior to rendering the font
      #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
      cairocore::cairo_restore(self$cr)

      invisible(self)
    }
  )
)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.