blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 327 | content_id stringlengths 40 40 | detected_licenses listlengths 0 91 | license_type stringclasses 2 values | repo_name stringlengths 5 134 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 46 values | visit_date timestamp[us]date 2016-08-02 22:44:29 2023-09-06 08:39:28 | revision_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | committer_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | github_id int64 19.4k 671M โ | star_events_count int64 0 40k | fork_events_count int64 0 32.4k | gha_license_id stringclasses 14 values | gha_event_created_at timestamp[us]date 2012-06-21 16:39:19 2023-09-14 21:52:42 โ | gha_created_at timestamp[us]date 2008-05-25 01:21:32 2023-06-28 13:19:12 โ | gha_language stringclasses 60 values | src_encoding stringclasses 24 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 7 9.18M | extension stringclasses 20 values | filename stringlengths 1 141 | content stringlengths 7 9.18M |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
eb73f09b03e0766987e8927884dbfd9acff7b507 | f86f962e1d3d404be5e24afecdb96f929749b0f3 | /winescript.R | 74828eb2abbb45506c05ae3a744b98a36f49328c | [] | no_license | PedroCadilha/Red-Wine | a897f2227196aa29059edaca7de12473766c97fd | 97b661e42d673d3debc4b8af935956428c029439 | refs/heads/main | 2023-02-11T12:09:07.456089 | 2021-01-08T00:47:33 | 2021-01-08T00:47:33 | 327,744,463 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 8,116 | r | winescript.R | #Installing the libraries needed to run the code if not already installed
if(!require(tidyverse)) install.packages("tidyverse", repos = "http://cran.us.r-project.org")
if(!require(caret)) install.packages("caret", repos = "http://cran.us.r-project.org")
if(!require(ROSE)) install.packages("ROSE", repos = "http://cran.us.r-project.org")
if(!require(DMwR)) install.packages("DMwR", repos = "http://cran.us.r-project.org")
if(!require(randomForest)) install.packages("randomForest", repos = "http://cran.us.r-project.org")
library(tidyverse)
library(caret)
library(ROSE)
library(DMwR)
library(randomForest)
options(digits=3)
# Downloading the data set and reading the file
# Source: UCI Machine Learning Repository, red wine quality data.
wine<-"https://archive.ics.uci.edu/ml/machine-learning-databases/wine-quality/winequality-red.csv"
basename(wine)
download.file(wine,basename(wine))
winedata<-read.csv("winequality-red.csv")
# First look at file to check if we need to wrangle the data
# (the raw file appears ";"-delimited, hence the re-read below)
head(winedata)
# Data wrangling, removing delimiters and renaming columns
winedata<-read_delim("winequality-red.csv",
delim = ";",
locale = locale(decimal_mark = ".",
grouping_mark = ","),
col_names = TRUE)
# Snake-case column names so they are formula-friendly
cnames <- c("fixed_acidity", "volatile_acidity", "citric_acid",
"residual_sugar", "chlorides", "free_sulfur_dioxide",
"total_sulfur_dioxide", "density", "pH",
"sulphates", "alcohol", "quality")
colnames(winedata)<-cnames
winedata<-as.data.frame(winedata)
head(winedata)
dim(winedata)
#Data exploration
#Wine quality histogram
winedata%>%ggplot(aes(quality))+geom_histogram(bins=20)
#Applying cutoff and factorizing on wine quality
# Binary target: "1" = good wine (quality >= 7), "0" = otherwise
winedata<-winedata%>%mutate(quality=ifelse(quality>=7,"1","0"))%>%mutate(quality=factor(quality))
#New distribution of wine quality
# histogram() comes from lattice (attached via caret)
histogram(winedata$quality)
table(winedata$quality)
prop.table(table(winedata$quality))
#Boxplots of features vs. wine quality
winedata%>%gather(wine_properties, values, -quality)%>%
ggplot(aes(quality, values, fill=quality))+geom_boxplot()+
facet_wrap(~wine_properties,scales="free")+
theme(axis.text.x = element_blank())
#modeling
#data partition
# Stratified split on quality: the 20% index becomes the test set
set.seed(2020, sample.kind = "Rounding")
index<-createDataPartition(winedata$quality,times=1, p=0.2, list=FALSE)
test<-winedata[index,]
train<-winedata[-index,]
#Distribution of wine quality in the new datasets
table(train$quality)
table(test$quality)
prop.table(table(train$quality))
prop.table(table(test$quality))
#Logistic Regression model
glm_model<-train(quality~., method="glm", data=train)
glm_model_preds<-predict(glm_model,test)
confusionMatrix(data=glm_model_preds,reference=test$quality, positive = "1")
#K-Nearest Neighbours
# Tune k over odd values 3..31
set.seed(2020,sample.kind = "Rounding")
tuning<-data.frame(k=seq(3,31,2))
knn_model<-train(quality~., method="knn", data=train,tuneGrid = tuning)
plot(knn_model)
knn_model$bestTune
knn_model_preds<-predict(knn_model,test)
confusionMatrix(data=knn_model_preds,reference=test$quality, positive="1")
#Random forest model
# Tune mtry (number of predictors sampled per split) over 1..5
set.seed(2020,sample.kind = "Rounding")
tuning<-data.frame(mtry=c(1:5))
rf_model<-train(quality~.,method="rf",
tuneGrid=tuning,
importance=TRUE,
data=train)
rf_model$bestTune
plot(rf_model)
rf_model_preds<-predict(rf_model,test)
confusionMatrix(data=rf_model_preds,reference=test$quality,positive="1")
# Class-imbalance handling: build four rebalanced training sets
#Upsampling
set.seed(2020,sample.kind = "Rounding")
train_up<-upSample(x=train[,-ncol(train)],y=train$quality)
table(train_up$Class)
#Downsampling
set.seed(2020,sample.kind = "Rounding")
train_down<-downSample(x=train[,-ncol(train)],y=train$quality)
table(train_down$Class)
#ROSE (synthetic rebalancing; keeps a `quality` column)
library(ROSE)
set.seed(2020,sample.kind = "Rounding")
train_rose<-ROSE(quality ~ ., data=train)$data
table(train_rose$quality)
#SMOTE (synthetic minority oversampling; keeps a `quality` column)
library(DMwR)
set.seed(2020,sample.kind = "Rounding")
train_smote<-SMOTE(quality~., data=train)
table(train_smote$quality)
#Using random forest in upsampling
# Note: up/downSample() name the outcome column "Class", hence Class~. here
set.seed(2020,sample.kind = "Rounding")
tuning<-data.frame(mtry=c(1,2,3,4,5))
train_rf_up<-train(Class~.,method="rf",
tuneGrid=tuning,
importance=TRUE,
data=train_up)
train_rf_up$bestTune
rf_preds_up<-predict(train_rf_up,test)
confusionMatrix(data=rf_preds_up,reference=test$quality, positive="1")
varImp(train_rf_up)
plot(train_rf_up)
#Using random forest in downsampling
set.seed(2020,sample.kind = "Rounding")
tuning<-data.frame(mtry=c(1,2,3,4,5))
train_rf_down<-train(Class~.,method="rf",
tuneGrid=tuning,
importance=TRUE,
data=train_down)
train_rf_down$bestTune
rf_preds_down<-predict(train_rf_down,test)
confusionMatrix(data=factor(rf_preds_down),
reference=factor(test$quality),
positive="1")
varImp(train_rf_down)
plot(train_rf_down)
#Using random forest in ROSE
# ROSE/SMOTE keep the original outcome name, hence quality~. here
set.seed(2020,sample.kind = "Rounding")
tuning<-data.frame(mtry=c(1,2,3,4,5))
train_rf_rose<-train(quality~.,method="rf",
tuneGrid=tuning,
importance=TRUE,
data=train_rose)
train_rf_rose$bestTune
rf_preds_rose<-predict(train_rf_rose,test)
confusionMatrix(data=factor(rf_preds_rose),
reference=factor(test$quality),
positive="1")
varImp(train_rf_rose)
plot(train_rf_rose)
#Using random forest in SMOTE
set.seed(2020,sample.kind = "Rounding")
tuning<-data.frame(mtry=c(1,2,3,4,5))
train_rf_smote<-train(quality~.,method="rf",
tuneGrid=tuning,
importance=TRUE,
data=train_smote)
train_rf_smote$bestTune
rf_preds_smote<-predict(train_rf_smote,test)
confusionMatrix(data=factor(rf_preds_smote),
reference=factor(test$quality),
positive="1")
varImp(train_rf_smote)
plot(train_rf_smote)
# Calculation of the F meas for all our Random Forest models, with beta=0.25
# (beta < 1 weights precision more heavily than recall)
b<-0.25
F_meas_rf<-F_meas(rf_model_preds,test$quality, beta=b)
F_meas_rf_up<-F_meas(rf_preds_up,test$quality, beta=b)
F_meas_rf_down<-F_meas(rf_preds_down,test$quality, beta=b)
F_meas_rf_rose<-F_meas(rf_preds_rose,test$quality, beta=b)
F_meas_rf_smote<-F_meas(rf_preds_smote,test$quality, beta=b)
f_meas_values<-c(F_meas_rf,F_meas_rf_up,F_meas_rf_down,
F_meas_rf_rose,F_meas_rf_smote)
model<-c("Random Forest", "Random Forest Upsampling", "Random Forest Downsampling",
"Random Forest ROSE", "Random Forest SMOTE")
data.frame(model,f_meas_values)%>%knitr::kable()
# Calculation of the F meas for all our Random Forest models, with beta=0.5
b<-0.5
F_meas_rf<-F_meas(rf_model_preds,test$quality, beta=b)
F_meas_rf_up<-F_meas(rf_preds_up,test$quality, beta=b)
F_meas_rf_down<-F_meas(rf_preds_down,test$quality, beta=b)
F_meas_rf_rose<-F_meas(rf_preds_rose,test$quality, beta=b)
F_meas_rf_smote<-F_meas(rf_preds_smote,test$quality, beta=b)
f_meas_values<-c(F_meas_rf,F_meas_rf_up,F_meas_rf_down,
F_meas_rf_rose,F_meas_rf_smote)
model<-c("Random Forest", "Random Forest Upsampling", "Random Forest Downsampling",
"Random Forest ROSE", "Random Forest SMOTE")
data.frame(model,f_meas_values)%>%knitr::kable()
# Calculation of the F meas for all our Random Forest models, with beta=1
# (beta = 1 is the balanced F1 score)
b<-1
F_meas_rf<-F_meas(rf_model_preds,test$quality, beta=b)
F_meas_rf_up<-F_meas(rf_preds_up,test$quality, beta=b)
F_meas_rf_down<-F_meas(rf_preds_down,test$quality, beta=b)
F_meas_rf_rose<-F_meas(rf_preds_rose,test$quality, beta=b)
F_meas_rf_smote<-F_meas(rf_preds_smote,test$quality, beta=b)
f_meas_values<-c(F_meas_rf,F_meas_rf_up,F_meas_rf_down,
F_meas_rf_rose,F_meas_rf_smote)
model<-c("Random Forest", "Random Forest Upsampling", "Random Forest Downsampling",
"Random Forest ROSE", "Random Forest SMOTE")
data.frame(model,f_meas_values)%>%knitr::kable()
|
286e7bf71ab257e37c6da10d87d2c342c2bde710 | 1e18cb41f905669f96a2be3c47144a167ad2aed1 | /mummerplot.R | 9916c16726ca468081fb5c7e38136ba0ed165b37 | [] | no_license | georgek/potato-figures | dcca54436acad22ea0ec9fbe2ef039948280b201 | 54cd16794f95de89c68b66b4d3ec9ac8244ef0cc | refs/heads/master | 2021-03-12T20:23:47.277964 | 2017-05-16T12:49:51 | 2017-05-16T12:49:51 | 91,456,931 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,442 | r | mummerplot.R | #!/usr/bin/env Rscript
library("ggplot2")
# Dot plot of alignment coordinates (e.g. from MUMmer show-coords output):
# reference position (x) vs assembly position (y), one segment per alignment,
# alpha scaled by percent identity and coloured by strand (+/-).
#
# Command line: prog output coords xticks yticks
#   output        - prefix for the generated .pdf/.png files
#   coords        - whitespace-separated table: x1 y1 x2 y2 pid dir
#   xticks/yticks - comma-separated "label,break" tables for the axis ticks
args <- commandArgs(TRUE)
# BUG FIX: the script reads four positional arguments (args[4] below), but the
# guard only required three, which let `yticks` silently become NA.
if (length(args) < 4) {
  print("usage: prog output coords xticks yticks")
  quit()
}
output <- args[1]
coords <- args[2]
xticks <- args[3]
yticks <- args[4]
# Alignment segments; `dir` records the strand of each match.
t <- read.table(coords,
                col.names = c("x1", "y1", "x2", "y2", "pid", "dir"))
t$dir <- factor(t$dir, levels = c("+", "-"))
# Replace the tick file names with their parsed contents.
xticks <- read.table(xticks,
                     sep = ",",
                     col.names = c("labels", "breaks"))
yticks <- read.table(yticks,
                     sep = ",",
                     col.names = c("labels", "breaks"))
p <- ggplot(t, aes(x = x1, xend = x2, y = y1, yend = y2, alpha = pid, colour = dir)) +
  geom_segment() +
  geom_point() +
  geom_point(aes(x2, y2)) +
  scale_x_continuous(name = "Reference",
                     breaks = xticks$breaks,
                     labels = xticks$labels,
                     minor_breaks = NULL) +
  scale_y_continuous(name = "Assembly",
                     breaks = yticks$breaks,
                     labels = yticks$labels,
                     minor_breaks = NULL) +
  scale_colour_discrete(guide = FALSE) +
  scale_alpha_continuous(guide = FALSE) +
  theme(axis.text.y = element_text(size = 6, angle = 45),
        axis.text.x = element_text(size = 6, angle = 45, hjust = 1))
# Write both PDF and PNG versions of the plot.
pdf(file = sprintf("%s-gg.pdf", output), width = 7, height = 6.5)
print(p)
dev.off()
png(file = sprintf("%s-gg.png", output), width = 800, height = 800)
print(p)
dev.off()
|
edd2c0c1c10df31a5d300ca20a237d79488581ae | 2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0 | /fuzzedpackages/DstarM/R/getTer.R | f3811d6ef62b5acebc3665d5243d65cf41471af6 | [] | no_license | akhikolla/testpackages | 62ccaeed866e2194652b65e7360987b3b20df7e7 | 01259c3543febc89955ea5b79f3a08d3afe57e95 | refs/heads/master | 2023-02-18T03:50:28.288006 | 2021-01-18T13:23:32 | 2021-01-18T13:23:32 | 329,981,898 | 7 | 1 | null | null | null | null | UTF-8 | R | false | false | 2,396 | r | getTer.R | #' Calculate Mean of the nondecision distribution.
#'
#' @param res An object of class D*M.
#' @param data The data object used to create \code{res}.
#' @param formula Optional formula argument, for when columns names in the data are different from those used to obtain the results.
#'
#' @return A vector containing estimates for the mean of the nondecision densities.
#' @details The object \code{res} can either be output from \code{estDstarM} or output from \code{estND}.
#' If the former is supplied it is also necessary to supply the data used for the estimation.
#' The mean will then be estimated by subtracting the mean of the model densities from the mean of the data density.
#' If the latter is supplied than this is not required; the mean will be calculated by
#' integrating the nondecision distribution.
# calculate Ter according to splits used in analyses; returns scalar |
# groups
#' @export
getTer <- function(res, data, formula = NULL) {
  if (!(is.DstarM.fitD(res) || is.DstarM.fitND(res)))
    stop("res should be output from either estDstarM or estND.")
  # Nondecision output is easy: the mean is the first moment of each
  # estimated nondecision density.
  if (is.DstarM.fitND(res)) {
    return(apply(res$r.hat, 2, nth.momentS, x = res$tt))
  }
  # Sanity check: the supplied data should match the data used for fitting.
  if (dim(data)[1L] != res$n) {
    warning(sprintf("Number of observations used in analysis (%g) does not match number of observations in data provided (%g).",
      res$n, dim(data)[1L]), call. = FALSE, immediate. = TRUE)
  }
  if (is.null(res$splits) && !is.null(res$split))
    res$splits <- res$split # backward compatibility
  # Unpack the data description produced by getData().
  data <- getData(res[["formula"]], data)
  rtime <- data[["rtime"]]
  response <- data[["response"]]
  condition <- data[["condition"]]
  hasConditions <- data[["hasConditions"]]
  data <- data[["data"]]
  ncondition <- res$ncondition
  splits <- res$splits
  # Average the model densities of the two responses within each condition
  # group defined by `splits` (mm2 is a 0/1 group-membership matrix).
  m <- res$modelDist
  group <- groups(ncondition, splits)
  mm2 <- matrix(0, 2 * ncondition, dim(group)[2L])
  for (i in seq_len(dim(group)[2L])) {
    mm2[group[, i], i] <- 1
  }
  m <- m %*% mm2
  m <- m %*% (diag(dim(m)[2L]) / (colSums(mm2) / 2))
  # Map each group back onto the condition labels present in the data.
  uniq <- unique(data[[condition]])
  group <- groups(ncondition, splits, TRUE)
  for (i in seq_along(group)) {
    group[i] <- uniq[i]
  }
  # Mean observed RT per group.
  # BUG FIX: this loop previously read `for (i in dim(group)[2L])`, which
  # iterated over the single value ncol(group) only, leaving every other
  # entry of muDat at its initial 0.
  muDat <- rep.int(0, dim(group)[2L])
  for (i in seq_len(dim(group)[2L])) {
    muDat[i] <- mean(data[[rtime]][data[[condition]] %in% group[, i]])
  }
  # Nondecision mean = mean of the data density minus mean of the model
  # (decision) density, per group.
  muMod <- apply(m, 2, nth.momentS, x = res$tt)
  return(muDat - muMod)
}
|
52e0c88957668c5f3a2e57d01f97dbc286bd7d48 | 011bcf96297cdee5d2f259b75c7dd0141853a68e | /analysis/Funn.R | 851ea0d8ca1bbb765e155e1d85742d5d0a26f874 | [] | no_license | resendislab/scPipeline | db3fadb4e4b609fb020a238985e570cab4fada21 | 6bf72619e043354be57fbd486417d1de3045a46a | refs/heads/master | 2022-12-26T13:01:07.954641 | 2020-10-09T06:00:53 | 2020-10-09T06:00:53 | 239,567,962 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 3,524 | r | Funn.R |
# Draw a single line/scatter plot to ./results/plots/<tit>.png.
#
# Positional arguments (via ...): x, y, tit, xlab, ylab and optionally ng.
# When a sixth argument `ng` is supplied, red dashed guides are drawn at
# x = ng and at the corresponding y value y[ng].
linePlot <- function(...){ ### x, y, tit, xlab, ylab, ng
  a <- list(...)
  # BUG FIX: the original indexed the argument list with single brackets
  # (x[1], x[2], x[2][x[6]], ...), which passes sub-lists instead of vectors
  # to plot()/abline(). A leftover debug print() and a duplicate plot() call
  # were also removed.
  x_vals <- a[[1]]
  y_vals <- a[[2]]
  tit <- a[[3]]
  png(file = paste0('./results/plots/', tit, '.png'))
  on.exit(dev.off(), add = TRUE)  # close the device even if plotting errors
  par(cex.axis = 1.8)
  plot(x_vals, y_vals, xlab = a[[4]], ylab = a[[5]],
       cex.lab = 1.8, ylim = c(0, 100), lty = 1, lwd = 1)
  if (length(a) == 6) {
    ng <- a[[6]]
    abline(v = ng, lty = 2, lwd = 2, col = "red")
    abline(h = y_vals[ng], lwd = 2, lty = 2, col = "red")
  }
}
# Elbow-style selection of the number of clusters.
#
# sse: matrix of SSE values, one row per k = 2..18 and one column per
#      run/condition listed in `i`.
# tit: file-name tag for the saved elbow plot.
# i:   labels for the columns of `sse` (used in the legend).
#
# Saves an SSE-vs-k plot, then for each column picks the k where the second
# difference of the SSE curve is largest (the sharpest "elbow").
# Returns one suggested k per column of `sse`.
k.groups <- function(sse,tit,i){
sse1 <- sse
# NOTE(review): sep = " " puts literal spaces into the file name
# ("SSE_ <tit> .png"); sep = "" was probably intended -- confirm before changing.
png(file=paste('./results/plots/SSE_',tit,'.png',sep = " "))
mar.default <- c(5,4,4,2)
par(cex.axis=1.8,mfrow=c(1, 1),oma=c(0, 0, 0, 0),
mar = mar.default + c(0, 1, 0, 0))
matplot(2:18, sse1, type="o",lty=1,pch = 19, lwd = 2.0,
xlab = "Groups number (k)",
ylab = "SSE",cex.lab=1.8)
legend("topright",
as.character(i),
pch = 21,
pt.bg = c(1:length(i)),
bty = "n",
pt.cex = 1.5,
cex = 2
)
dev.off()
##### Slope of SSE
# First and second differences of each SSE curve; a large second difference
# marks a sharp bend.
m <-diff(sse)
dm <- diff(m)
# Top-3 elbow candidates per column, by decreasing second difference.
maxi<-unlist(lapply(1:length(i),FUN=function(x) order(dm[,x],decreasing = TRUE)[1:3]))
# Map difference indices back onto k values.
# NOTE(review): dm has two fewer rows than sse (k = 2..18 twice differenced),
# while k starts at 3 here -- possible off-by-one in the mapping; verify.
k <- c(3:18)
kmx <- k[maxi]
kmx <- matrix(kmx, nrow=3,ncol=length(i))
# Report only the best (largest second-difference) candidate per column.
return(kmx[1,])
}
# Side-by-side scatter plots of the 2-D coordinates `Y`, coloured by two
# alternative cluster assignments, saved to ./results/plots/.
#
# i:      run identifier used in the file name and panel titles.
# Y:      two-column matrix of point coordinates.
# G1.col: per-point colours for the first clustering; G1.gr its group count.
# G2.col: per-point colours for the second clustering; G2.gr its group count.
# tit:    file-name tag.
#
# Point symbols alternate (pch 21/24); legends show the cluster letters plus
# the D6/D19 sample markers.
# NOTE(review): both panels plot the same coordinates `Y`, although the panel
# titles suggest two different embeddings (Var space vs uMAP) -- confirm.
MyplotScatter <- function(i, Y, G1.col,G1.gr,G2.col,G2.gr,tit){
png(file=paste('./results/plots/Clusters_',tit,
'_',toString(i),'.png',sep = "")
,width = 1200, height = 700,
units = "px", pointsize = 24)
par(mfrow=c(1, 2), mar=c(4.1, 2.1, 1.5, 0.5),
oma=c(0, 0, 0, 0))
plot(Y,main=paste('Var_Space_',toString(i),sep = "")
,pch=c(21,24),bg = G1.col, col=1)
legend("bottomright",
c(LETTERS[1:G1.gr],'D6','D19'),
pch = c(rep(22,G1.gr),19,17),
pt.bg = c(1:G1.gr,1,1),
bty = "n",
pt.cex = 1,
cex = 1
)
plot(Y,main=paste('uMAP_Space_',toString(i),sep = ""),pch=c(21,24),
bg = G2.col, col=1)
legend("bottomright",
c(LETTERS[1:G2.gr],'D6','D19'),
pch = c(rep(22,G2.gr),19,17),
pt.bg = c(1:G2.gr,1,1),
bty = "n",
pt.cex = 1,
cex = 1
)
dev.off()
}
# Percentage of each sample (D6, D19) falling into each of `n` clusters.
#
# clust: integer cluster assignment per cell (values 1..n).
# n:     number of clusters.
# col:   cell identifiers whose names embed the sample ("D6_..." / "D19_...").
#
# Returns a list of two 2 x n tables (rows D6, D19):
#   [[1]] percentage of each sample's cells assigned to each cluster
#         (denominators are hard-coded: 134 D6 cells and 230 D19 cells --
#          update `tot` if the input data change);
#   [[2]] within-cluster composition: [[1]] rescaled so each column sums to 100.
Proportion <- function(clust,n,col){
# Earlier approaches, kept for reference:
# col <- colnames(col.cols)
# nm <- as.data.frame(clust)
# col <- rownames(nm)
tot=c(134,230)
nam=c("D6_","D19_")
prop <- vector(length = n*2)
prop1 <- vector(length = n*2)
# Loop over (cluster, sample) pairs: id = cluster index (advances every two
# iterations), id1 alternates 1 (D6) / 2 (D19) within each pair.
for(i in 0:(n*2-1)){
id <- i%/%2+1
id1 <- i%/%(2*id-1)+1
b=which(clust %in% id)
# Share of this sample's cells that landed in cluster `id`.
prop[i+1]<-length(grep(nam[id1],col[b],value=TRUE))/tot[id1]*100
if (id1 == 2){
# Second entry of the pair: normalise the D6/D19 pair to sum to 100.
prop1[i:(i+1)]=100*prop[i:(i+1)]/sum(prop[i:(i+1)])
}
}
prop <-array(prop,dim=c(2,n))
prop1 <-array(prop1,dim=c(2,n))
prop <- as.table(prop)
prop1 <- as.table(prop1)
rownames(prop) <- c('D6','D19')
# colnames(prop) <- c(toString(1:n))
rownames(prop1) <- c('D6','D19')
# colnames(prop1) <- c(toString(1:n))
return(list(prop,prop1))
}
# Paired bar charts of the tables produced by Proportion(), saved as a PNG
# under ./results/plots/: left panel = per-sample percentages, right panel =
# within-cluster percentages.
MyplotBar <- function(prop,tit,i){
  # Output path (sep = " " reproduces the historical spaced file name).
  out_file <- paste('./results/plots/Proportions_', tit, '_', toString(i),
                    '.png', sep = " ")
  png(file = out_file, width = 1200, height = 700,
      units = "px", pointsize = 24)
  par(mfrow = c(1, 2), mar = c(4.1, 2.1, 1.5, 0.5), oma = c(0, 2, 0, 0))
  bar_cols <- c("darkblue", "red")
  panel_titles <- c("Sample percentage", "Groups percentage")
  for (panel in 1:2) {
    barplot(prop[[panel]], ylim = c(0, 100), col = bar_cols,
            legend = rownames(prop[[panel]]), main = panel_titles[panel])
  }
  dev.off()
}
5e40c2945e62ba1b3a89b0be3619c030dbbf8d9c | 2d34708b03cdf802018f17d0ba150df6772b6897 | /googledataflowv1b3.auto/man/StreamingConfigTask.userStepToStateFamilyNameMap.Rd | aaaf49fb09e59898989f44adbcf11954fe437f46 | [
"MIT"
] | permissive | GVersteeg/autoGoogleAPI | 8b3dda19fae2f012e11b3a18a330a4d0da474921 | f4850822230ef2f5552c9a5f42e397d9ae027a18 | refs/heads/master | 2020-09-28T20:20:58.023495 | 2017-03-05T19:50:39 | 2017-03-05T19:50:39 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 680 | rd | StreamingConfigTask.userStepToStateFamilyNameMap.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dataflow_objects.R
\name{StreamingConfigTask.userStepToStateFamilyNameMap}
\alias{StreamingConfigTask.userStepToStateFamilyNameMap}
\title{StreamingConfigTask.userStepToStateFamilyNameMap Object}
\usage{
StreamingConfigTask.userStepToStateFamilyNameMap()
}
\value{
StreamingConfigTask.userStepToStateFamilyNameMap object
}
\description{
StreamingConfigTask.userStepToStateFamilyNameMap Object
}
\details{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
Map from user step names to state families.
}
\seealso{
Other StreamingConfigTask functions: \code{\link{StreamingConfigTask}}
}
|
0e59a977f86a7e3316f350f1e49bb0d17494047c | a11b09d66d079c143ff6085dd51cee36788e072c | /R/loadICOproduction.R | b174c617a68fd74d52cb92c0635e2f60fbe331b3 | [] | no_license | tomcopple/coffeestats | c9e38d113049b208e562b75d692f98ace62c6a0b | 1c7ff262448391f77eaf000ae76de8604a7ff765 | refs/heads/master | 2020-06-23T05:27:42.204104 | 2018-03-03T14:08:23 | 2018-03-03T14:08:23 | 70,635,158 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 903 | r | loadICOproduction.R | #' Loads ICO production/consumption
#'
#' Just loads data from a local file, which should be in the coffeestats data folder, and have the form "2017-01-01-icoFlow.csv".
#'
#'
#' @return Dataframe called icoFlow to the global environment
#'
loadICOproduction <- function(env = .GlobalEnv) {
library(tidyverse)
coffeestats::setDataDir()
# Assume there's a file called YYYY-MM-DD-icoflow.csv; get the most recent
tryCatch({
fileName <- dplyr::last(
list.files(path = coffeestats, pattern = "icoFlow.csv")
)
icoFlow <- suppressWarnings(readr::read_csv(
file = file.path(coffeestats, fileName), col_types = readr::cols())
)
}, error = function(e) {
stop("Error loading the csv file, check it exists in the coffeestats directory. It should be called 'YYYY-MM-DD-icoFlow.csv'")
}
)
env$icoFlow <- icoFlow
}
|
775e1d6240e9b68039533d6290536d5bf14eccff | 6ba493ca9129518a3a9d52826beb6d3404b140da | /R/CAAPlanetPerihelionAphelion_MercuryAphelion.R | 6a3b5400d1affcb471857dff0ca14da925466b9f | [] | no_license | helixcn/skycalc | a298e7e87a46a19ba2ef6826d611bd9db18e8ee2 | 2d338b461e44f872ceee13525ba19e17926b2a82 | refs/heads/master | 2021-06-16T08:54:46.457982 | 2021-03-25T02:15:38 | 2021-03-25T02:15:38 | 35,885,876 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 122 | r | CAAPlanetPerihelionAphelion_MercuryAphelion.R | CAAPlanetPerihelionAphelion_MercuryAphelion <-
function(k){
.Call("CAAPlanetPerihelionAphelion_MercuryAphelion", k)
}
|
6239aa9515c6ae8a62c9b1080c8e9514c8e0f0d4 | f3edf454f134bcefb824654e1d2b0efca1ef82c8 | /R/fl_function.R | 79fe93106a80a4c975359b56d2dda9c5d007e42a | [] | no_license | jonathanbart/baxtr | 295eb84bb00dd4af7b1e1d491edfbe652051a2ca | 40699b25bd61431a0e1626f7a6505e660f87a297 | refs/heads/master | 2020-03-13T17:34:59.160558 | 2018-04-29T07:03:33 | 2018-04-29T07:03:33 | 131,216,904 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,572 | r | fl_function.R | #' Calculate the Flourishing Scale
#'
#' This function renames columns; converts all columns to numerics; tests if the scores are outside of the scale limits;
#' and calculates an overall flourishing score.
#' @param start_col The column number where the scale begins, reference by number only.
#' @param end_col The column number where the scale end, reference by number only.
#' @param data The reference dataframe.
#' @keywords
#' Wellbeing, Flourishing, The Flourishing Scale
#' @note
#' This function is designed to work with the validated question order as printed in the reference article. This function will
#' give inaccurate results if question order is different from the published validated scale.
#' @references
#' Diener, E., Wirtz, D., Tov, W., Kim-Prieto, C., Choi, D. W., Oishi, S., & Biswas-Diener, R. (2010). New well-being measures: Short scales to assess flourishing and positive and negative feelings. Social Indicators Research, 97(2), 143-156.
#' @examples
#' x <- c(1:7)
#' df <- data.frame(matrix(sample(x, 10*10, replace = TRUE), nrow = 5, ncol = 10))
#' fl(1, 10, df)
#' @export
# Score the Flourishing Scale: rename the ten item columns, coerce them to
# numeric, validate the 1-7 response range, and append the total `fl` score.
fl <- function (start_col, end_col, data) {
    item_cols <- paste0("fl", 1:10)
    names(data)[start_col:end_col] <- item_cols
    # Coerce every item column to numeric (project helper `to_numeric`).
    data[, item_cols] <- apply(data[, item_cols], 2, to_numeric)
    # The published scale only allows responses between 1 and 7.
    out_of_range <- any(data[, item_cols] < 1) | any(data[, item_cols] > 7)
    if (out_of_range) {
        cat(crayon::red("Function error: scale limits exceeded"))
        return()
    }
    # Overall flourishing score = sum of the ten items.
    data$fl <- Reduce(`+`, lapply(item_cols, function(item) data[[item]]))
    data
}
|
18853f85edf82995329cb4fbc1f8e1aa0f23ec68 | c0bf3af652d76aa3463b2787076ddde64fe370fe | /R/reg_plots.R | 28e1688d6dbf082b89cb1d46d2f1630d82a2178d | [] | no_license | atredennick/modselr | 6658ca26be9e97a71261ae6fddc68e0b7eac2845 | c34d8bd154d5c967e47a7d5fd5b8a280996dd20b | refs/heads/master | 2021-06-24T03:37:04.420208 | 2020-12-08T13:08:42 | 2020-12-08T13:08:42 | 130,245,752 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,308 | r | reg_plots.R | #' Generate coefficient path plot.
#'
#' @param coef_df A data frame with 3 columns: \code{covariate}, \code{lambda},
#' \code{value}.
#' @param best_lambda A scalar for the lambda value at which the cv score is
#' lowest. For plotting a vertical line for the selected coefficient values.
#' @param style A character scalar. Possible values are \code{c("base", "clean")}.
#' \code{"base"} returns a ggplot object using the base (gray) theme. \code{"clean"}
#' returns a ggplot object using \code{theme_few}. Default is \code{"base"}.
#' @return A ggplot object.
make_coef_plot <- function(coef_df, best_lambda, style = "base") {
  # Apply the requested global ggplot theme.
  if (style == "base") {
    ggplot2::theme_set(theme_gray())
  }
  if (style == "clean") {
    ggplot2::theme_set(theme_few())
  }
  # Coefficient paths over log(lambda), with a dashed guide marking the
  # cross-validated optimum.
  path_plot <- ggplot2::ggplot(coef_df,
                               aes(x = lambda, y = value, color = covariate)) +
    ggplot2::geom_vline(aes(xintercept = best_lambda),
                        color = "grey65",
                        linetype = "dashed") +
    ggplot2::geom_line() +
    ggplot2::xlab(expression(log(lambda))) +
    ggplot2::ylab("Coefficient value") +
    ggplot2::theme(legend.position = "bottom")
  path_plot
}
#' Generate cross-validation score path plot.
#'
#' @param cvscore_df A data frame with 2 columns: \code{cvscore}, \code{lambda}.
#' @param score_name A string identifying the type of score used, e.g.,
#' \code{"MSE"} for mean square error.
#' @param best_lambda A scalar for the lambda value at which the cv score is
#' lowest. For plotting a vertical line for the selected coefficient values.
#' @param style A character scalar. Possible values are \code{c("base", "clean")}.
#' \code{"base"} returns a ggplot object using the base (gray) theme. \code{"clean"}
#' returns a ggplot object using \code{theme_few}. Default is \code{"base"}.
#' @return A ggplot object.
make_cvscore_plot <- function(cvscore_df, score_name, best_lambda, style = "base") {
  # Apply the requested global ggplot theme.
  if (style == "base") ggplot2::theme_set(theme_gray())
  if (style == "clean") ggplot2::theme_set(theme_few())
  # BUG FIX: the body previously plotted the global `mse_df` with a `score`
  # column, ignoring both the `cvscore_df` argument and its documented
  # `cvscore` column.
  ggplot2::ggplot(cvscore_df, aes(x = lambda, y = cvscore)) +
    ggplot2::geom_vline(aes(xintercept = best_lambda),
                        color = "grey65",
                        linetype = "dashed") +
    ggplot2::geom_line() +
    ggplot2::xlab(expression(log(lambda))) +
    ggplot2::ylab(paste("Cross-validation", score_name))
}
|
01a84a4372897a6fbab1cb845ee0153beceb4d96 | dd1640ee2a8244d6b4626b045b041cbbc9877eb8 | /R/summary.VegX.R | 6d913fa335022e35fb1827e21d17c9f81e6499ed | [] | no_license | Heterocephalus/VegX | 959b8eb846ec4a6d9cfff758bfe17673eac5d86a | 9a1302031de5933eb09bb15eea56f0417db8c7b6 | refs/heads/master | 2022-05-01T16:38:23.488190 | 2022-04-05T18:42:51 | 2022-04-05T18:42:51 | 118,386,196 | 0 | 0 | null | 2018-09-19T22:12:04 | 2018-01-22T00:22:12 | R | UTF-8 | R | false | false | 3,564 | r | summary.VegX.R | #' @describeIn VegX
#'
#' Generates a summary of the Veg-X object (i.e., the number of elements of each type).
#' @param object Veg-X object to be summarized
#' @param ... Additional parameters (for compatibility)
#'
setMethod("summary", signature=c("VegX"), definition = function(object, ...) {
  # Print a human-readable inventory of the Veg-X document: counts of every
  # element type, plus names/details for projects, strata, surface types and
  # methods. Called for its printed output.
  cat(paste0("================================================================\n"))
  # BUG FIX: this string previously began with "\ ", an unrecognized escape
  # sequence that prevents the file from parsing.
  cat(paste0(" Veg-X object (ver 2.0.0) \n"))
  cat(paste0("----------------------------------------------------------------\n"))
  cat(paste0("\n"))
  cat(paste0(" Projects: ", length(object@projects),"\n"))
  if(length(object@projects)>0) {
    for(i in seq_along(object@projects)){
      cat(paste0(" ",i,". ", object@projects[[i]]$title,"\n"))
    }
  }
  cat(paste0("\n"))
  # Plot counts: sub-plots are counted separately via a package-internal helper.
  nplots <- length(object@plots)
  nsubplots <- .getNumberOfSubPlots(object)
  cat(paste0(" Plots: ", nplots," [Parent plots: ", nplots - nsubplots," Sub-plots: ", nsubplots,"]\n"))
  cat(paste0("\n"))
  cat(paste0(" Individual organisms: ", length(object@individualOrganisms),"\n"))
  cat(paste0("\n"))
  cat(paste0(" Organism names: ", length(object@organismNames),"\n"))
  cat(paste0("\n"))
  cat(paste0(" Taxon concepts: ", length(object@taxonConcepts),"\n"))
  cat(paste0("\n"))
  cat(paste0(" Organism Identities: ", length(object@organismIdentities),"\n"))
  cat(paste0("\n"))
  # Strata are listed with their order and the name of the defining method.
  cat(paste0(" Vegetation strata: ", length(object@strata),"\n"))
  if(length(object@strata)>0) {
    for(i in seq_along(object@strata)){
      cat(paste0(" ",i,". ", object@strata[[i]]$stratumName," [",object@strata[[i]]$order,"/",object@methods[[object@strata[[i]]$methodID]]$name,"]\n"))
    }
  }
  cat(paste0("\n"))
  cat(paste0(" Surface types: ", length(object@surfaceTypes),"\n"))
  if(length(object@surfaceTypes)>0) {
    for(i in seq_along(object@surfaceTypes)){
      cat(paste0(" ",i,". ", object@surfaceTypes[[i]]$surfaceName,"\n"))
    }
  }
  cat(paste0("\n"))
  cat(paste0(" Parties: ", length(object@parties),"\n"))
  cat(paste0("\n"))
  cat(paste0(" Literature citations: ", length(object@literatureCitations),"\n"))
  cat(paste0("\n"))
  # Each method is shown with its subject and the number of attributes it defines.
  cat(paste0(" Methods: ", length(object@methods),"\n"))
  if(length(object@methods)>0) {
    for(i in seq_along(object@methods)){
      attIDs <- .getAttributeIDsByMethodID(object, names(object@methods)[i])
      cat(paste0(" ",i,". ", object@methods[[i]]$name," [",object@methods[[i]]$subject," / ",length(attIDs), " ", object@methods[[i]]$attributeType," atts.]\n"))
    }
  }
  cat(paste0("\n"))
  npobs <- length(object@plotObservations)
  nsubpobs <- .getNumberOfPlotObservationsInSubPlots(object)
  cat(paste0(" Plot observations: ", npobs," [in parent plots: ", npobs - nsubpobs," in sub-plots: ", nsubpobs,"]\n"))
  cat(paste0("\n"))
  cat(paste0(" Individual organism observations: ", length(object@individualObservations),"\n"))
  cat(paste0("\n"))
  cat(paste0(" Aggregated organism observations: ", length(object@aggregateObservations),"\n"))
  cat(paste0("\n"))
  cat(paste0(" Stratum observations: ", length(object@stratumObservations),"\n"))
  cat(paste0("\n"))
  cat(paste0(" Community observations: ", length(object@communityObservations),"\n"))
  cat(paste0("\n"))
  cat(paste0(" Site observations: ", length(object@siteObservations),"\n"))
  cat(paste0("\n"))
  cat(paste0(" Surface cover observations: ", length(object@surfaceCoverObservations),"\n"))
  cat(paste0("\n"))
  cat(paste0("================================================================\n"))
})
|
022dc0e31a08294cd58436824511594f2d6aee3f | 1ca8f38d411de1f22ca8640d37ab7071c7f34b07 | /man/makeCanonicalCorrelationAnalysis.Rd | f847c72d1313611d02cc19bad21fd2b7c6e8b29a | [] | no_license | koralgooll/OmicsON | ba2a8164429a41e9edf33fc998d6c41bdfb0b61b | 092e39cbcb306e909c54d022e645b12a9e3acd70 | refs/heads/master | 2021-08-17T03:09:52.084848 | 2020-05-04T12:53:00 | 2020-05-04T12:53:00 | 131,963,461 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 2,074 | rd | makeCanonicalCorrelationAnalysis.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/MapReactome.R
\name{makeCanonicalCorrelationAnalysis}
\alias{makeCanonicalCorrelationAnalysis}
\title{Calculate CCA on data returned from decoration methods.}
\usage{
makeCanonicalCorrelationAnalysis(xNamesVector, yNamesVector, XDataFrame,
YDataFrame, xCutoff = 1, yCutoff = 1, scalingFactor = 1)
}
\arguments{
\item{xNamesVector}{A vector of names from functional interactions data frame. Mostly genes symbols.}
\item{yNamesVector}{A vector of names from functional interactions data frame. Mostly root column.}
\item{XDataFrame}{A data frame with data to CCA analysis.
This argument is strongly connected with xNamesVector arg.}
\item{YDataFrame}{A data frame with data to CCA analysis.
This argument is strongly connected with yNamesVector arg.}
\item{xCutoff}{You can use it to remove highly correlated variables on XDataFrame.}
\item{yCutoff}{You can use it to remove highly correlated variables on YDataFrame.}
\item{scalingFactor}{scaling factor for input data.}
}
\value{
list with CCA analysis data.
}
\description{
This function makes it easy to perform a CCA analysis on the functional interaction
data frames returned by OmicsON::createFunctionalInteractionsDataFrame,
which works directly on data returned by the decoration functions.
To close the workflow, you can then pass the returned data to the
dedicated plotting function, OmicsON::plotCanonicalCorrelationAnalysisResults.
}
\examples{
ontology2GenesSymboleFromEnsembleFunctionalInteractions <- OmicsON::createFunctionalInteractionsDataFrame(
decoratedByReactome,
singleIdColumnName = 'ontologyId',
idsListColumnName = 'genesSymbolsFromEnsemble')
ccaResultsEnsemble <- OmicsON::makeCanonicalCorrelationAnalysis(
xNamesVector = ontology2GenesSymboleFromEnsembleFunctionalInteractions$genesSymbolsFromEnsemble,
yNamesVector = ontology2GenesSymboleFromEnsembleFunctionalInteractions$root,
XDataFrame = transcriptomicsInputData,
YDataFrame = lipidomicsInputData, xCutoff = 0.5, yCutoff = 0.75)
}
|
ac9360e8cf1fbc9a7cf7f2218c07b529cdfe0eb3 | eb6ce5fd7603a6621b014b798c4d659307e29c5c | /R_SHINY_PLSQL_APPS/COVID/server.R | 959ed84d744d7fab8ef5bf1373536e929d47ac21 | [] | no_license | wojteksilesia/SCRIPTS | eda64b4bfd968af2fda47a20c52b652f68f064be | ce0e47a1d8e6aa2586e268dda1ae9ce69b58a2b8 | refs/heads/master | 2021-08-07T15:32:02.099997 | 2021-01-03T19:52:24 | 2021-01-03T19:52:24 | 236,285,624 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 8,930 | r | server.R | library(shiny)
library(shinyjs)
library(httr)
library(ggplot2)
library(dplyr)
shinyServer(
function(input,output,session){
#########################
##### LOAD DATA
#create the URL where the dataset is stored with automatic updates every day
#url <- paste("https://www.ecdc.europa.eu/sites/default/files/documents/COVID-19-geographic-disbtribution-worldwide-",format(Sys.time(), "%Y-%m-%d"), ".xlsx", sep = "")
#download the dataset from the website to a local temporary file
#GET(url, authenticate(":", ":", type="ntlm"), write_disk(tf <- tempfile(fileext = ".xlsx")))
#read the Dataset sheet into โRโ
#data_covid <- read_excel(tf)
GET("https://opendata.ecdc.europa.eu/covid19/casedistribution/csv", authenticate(":", ":", type="ntlm"), write_disk(tf <- tempfile(fileext = ".csv")))
#read the Dataset sheet into R. The dataset will be called "data".
data_covid <- read.csv(tf)
data_covid$dateRep <- as.POSIXct(data_covid$dateRep,format="%d/%m/%Y")
data_covid$country=factor(data_covid$countriesAndTerritories)
df_data_sorted <- data_covid %>% arrange(dateRep)
##################################################################
##### COUNTRY DATASET PREPARATION ##########################
show_country_data <- function(input_df,input_country,input_day_0_cases){
input_df<-input_df%>%arrange(dateRep)
country_df <- input_df %>% filter(countriesAndTerritories==input_country)
## All COVID cases
all_cases<-c()
all_cases[1]<-country_df$cases[1]
for(i in 2:nrow(country_df)){
all_cases[i]<-all_cases[i-1]+country_df$cases[i]
}
country_df$all_cases<-all_cases
## All deaths
all_deaths<-c()
all_deaths[1]<-country_df$deaths[1]
for(i in 2:nrow(country_df)){
all_deaths[i]<-all_deaths[i-1]+country_df$deaths[i]
}
country_df$all_deaths<-all_deaths
## Cases dynamics
cases_dynamic<-c()
cases_dynamic[1]<-0
for(i in 2:nrow(country_df)){
cases_dynamic[i]<-country_df$cases[i]/country_df$cases[i-1]-1
}
country_df$cases_dynamic<-cases_dynamic
## Day 0
for(i in 1:nrow(country_df)){
if(country_df$all_cases[i]<input_day_0_cases){
next
}else{
day_0<-country_df$dateRep[i]
break
}
}
## Subdataset based on day_0
country_df<-country_df%>%filter(dateRep>=day_0)
## Day number
country_df$day_number<-1:nrow(country_df)
return(country_df)
}
######################################################################
### PLOT PREPARATION
print_plot<-function(in_df,x_param,y_param){
if(x_param=="DATA"){
if(y_param=="ZACHOROWANIA"){
p<-ggplot(data=in_df,aes(x=dateRep,y=cases,color=country))
}else if(y_param=="SUMA ZACHOROWAล"){
p<-ggplot(data=in_df,aes(x=dateRep,y=all_cases,color=country))
}else if(y_param=="DYNAMIKA ZACHOROWAล"){
p<-ggplot(data=in_df,aes(x=dateRep,y=cases_dynamic,color=country))
}else if(y_param=="ZGONY"){
p<-ggplot(data=in_df,aes(x=dateRep,y=deaths,color=country))
}else if(y_param=="SUMA ZGONรW"){
p<-ggplot(data=in_df,aes(x=dateRep,y=all_deaths,color=country))
}
}else if(x_param=="DZIEล 0"){
if(y_param=="ZACHOROWANIA"){
p<-ggplot(data=in_df,aes(x=day_number,y=cases,color=country))
}else if(y_param=="SUMA ZACHOROWAล"){
p<-ggplot(data=in_df,aes(x=day_number,y=all_cases,color=country))
}else if(y_param=="DYNAMIKA ZACHOROWAล"){
p<-ggplot(data=in_df,aes(x=day_number,y=cases_dynamic,color=country))
}else if(y_param=="ZGONY"){
p<-ggplot(data=in_df,aes(x=day_number,y=deaths,color=country))
}else if(y_param=="SUMA ZGONรW"){
p<-ggplot(data=in_df,aes(x=day_number,y=all_deaths,color=country))
}
}
pl <- p + geom_line(size=1.2)+ggtitle(paste("COVID-19 - ",y_param))+
theme(plot.title = element_text(hjust=0.5,size=30,colour="blue",margin=margin(b=14)),
axis.title.y = element_text(size=16,margin = margin(r=25)),
axis.title.x=element_text(size=16,margin=margin(t=14)),
legend.title=element_text(size=18,,hjust=0.5),
legend.text = element_text(size=15),
axis.text.x = element_text(size=13),
axis.text.y = element_text(size=13))+
ylab(y_param) + xlab(ifelse(x_param=="DATA","DATA","DZIEล"))+labs(col="KRAJ")
if(x_param=="DZIEล 0"){
pl<-pl+scale_x_continuous(breaks=seq(0,max(in_df$day_number)+5,by=5))
}
return(pl)
}
#####################################################################
        ### Countries list
        # Three country drop-downs sharing the same alphabetically sorted
        # list of countries from the downloaded ECDC dataset.
        # ("KRAJ" is Polish for "country"; labels are shown in the UI.)
        output$out_country_1<-renderUI({
            selectInput(inputId="in_country_list_A",label="KRAJ A",
                        choices = sort(unique(df_data_sorted$countriesAndTerritories)))
        })
        output$out_country_2<-renderUI({
            selectInput(inputId="in_country_list_B",label="KRAJ B",
                        choices = sort(unique(df_data_sorted$countriesAndTerritories)))
        })
        output$out_country_3<-renderUI({
            selectInput(inputId="in_country_list_C",label="KRAJ C",
                        choices = sort(unique(df_data_sorted$countriesAndTerritories)))
        })
### Disable countries list
observeEvent(input$id_number,{
if(as.numeric(input$id_number==1)){
disable("in_country_list_B")
disable("in_country_list_C")
}else if(as.numeric(input$id_number==2)){
disable("in_country_list_C")
enable("in_country_list_B")
}else if(as.numeric(input$id_number==3)){
enable("in_country_list_B")
enable("in_country_list_C")
}
})
### Disable DAY 0 panel if "DATE" choosen for X axis
observeEvent(input$id_x_axis,{
if(input$id_x_axis=="DATA"){
updateTextInput(session=session,inputId="in_day_0",value=0)
disable("in_day_0")
}else{
enable("in_day_0")
}
})
### Action after submitting parameters
observeEvent(input$in_button,{
#### Preparing dataset
## Empty df structure
submited_countries_data<-df_data_sorted %>% filter(cases==-1)
choosen_countries<-c(input$in_country_list_A,input$in_country_list_B,input$in_country_list_C)
for(i in 1:as.numeric(input$id_number)){
one_country_df <-show_country_data(input_df = df_data_sorted,
input_country = choosen_countries[i],
input_day_0_cases =as.numeric(input$in_day_0))
submited_countries_data<-rbind(submited_countries_data,one_country_df)
#### Preparing the plot
output$out_plot<-renderPlot(print_plot(in_df=submited_countries_data,
x_param=input$id_x_axis,
y_param=input$id_y_axis))
}
})
# observeEvent(input$in_button,{
# if(as.numeric(input$id_number)==1){
# country_a_data<-show_country_data(input_df=df_data_sorted,
# input_country=input$in_country_list_A,
# input_day_0_cases=as.numeric(input$in_day_0))
#
# if(input$id_x_axis=="DATA"){
# if(input$id_y_axis=="ZACHOROWANIA"){
# output$out_plot<-renderPlot(ggplot(data=country_a_data,aes(x=dateRep,
# y=cases))+geom_line())
# }else if(input$id_y_axis=="SUMA ZACHOROWAล"){
# output$out_plot<-renderPlot(ggplot(data=country_a_data,aes(x=dateRep,
# y=all_cases))+geom_line())
# }else if(input$id_y_axis=="DYNAMIKA ZACHOROWAล"){
# output$out_plot<-renderPlot(ggplot(data=country_a_data,aes(x=dateRep,
# y=cases_dynamic))+geom_line())
# }else if(input$id_y_axis=="ZGONY"){
# output$out_plot<-renderPlot(ggplot(data=country_a_data,aes(x=dateRep,
# y=deaths))+geom_line())
# }else if(input$id_y_axis=="SUMA ZGONรW"){
# output$out_plot<-renderPlot(ggplot(data=country_a_data,aes(x=dateRep,
# y=all_deaths))+geom_line())
# }
# }
# }
#
# })
}
)
#y=country_a_data[,y_parameter])) |
a3092ac53001880066fa40c1dbb722bb8f55f764 | 5f060fccbaa784350f417d769349189c63fdfbad | /man/afex_options.Rd | b5038ef9148b28fd7f6ddc556600caee597b4936 | [] | no_license | singmann/afex | 6da8bd47b15ebf65dc09f8c252fd3e9c2d99dd1f | b6d844d20a0b2807b5363ee35f4c0c2ac18d63b9 | refs/heads/master | 2023-05-24T19:46:00.855292 | 2023-05-23T08:23:51 | 2023-05-23T08:23:51 | 38,268,278 | 114 | 43 | null | 2023-05-17T19:37:47 | 2015-06-29T20:00:57 | R | UTF-8 | R | false | true | 3,666 | rd | afex_options.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/helpers.R
\name{afex_options}
\alias{afex_options}
\title{Set/get global afex options}
\usage{
afex_options(...)
}
\arguments{
\item{...}{One of four: (1) nothing, then returns all options as a list; (2)
  the name of an option element, then returns its value; (3) a name-value pair,
  which sets the corresponding option to the new value (and returns nothing);
  (4) a list with option-value pairs, which sets all the corresponding
  options. The examples show all possible cases.}
}
\value{
depends on input, see above.
}
\description{
Global afex options are used, for example, by \code{\link{aov_car}} (et al.)
and \code{\link{mixed}}, but can be changed in each function directly using
an argument (which has precedence over the global options).
}
\details{
The following arguments are currently set:
\itemize{
\item \code{check_contrasts} should contrasts be checked and changed to
sum-to-zero contrasts? Default is \code{TRUE}.
\item \code{type} type of sums-of-squares to be used for testing effects,
default is 3 which reports Type 3 tests.
\item \code{method_mixed}: Method used to obtain p-values in
\code{\link{mixed}}, default is \code{"KR"} (which will change to
\code{"LRT"} soon). (\code{mixed()} only)
\item \code{es_aov}: Effect size reported for ANOVAs (see
\code{\link{aov_car}}), default is \code{"ges"} (generalized eta-squared).
\item \code{correction_aov}: Correction used for within-subjects factors with
more than two levels for ANOVAs (see \code{\link{aov_car}} or
\code{\link{nice}}), default is \code{"GG"} (Greenhouse-Geisser correction).
(ANOVA functions only)
\item \code{emmeans_model}: Which model should be used by \pkg{emmeans} for
follow-up analysis of ANOVAs (i.e., objects of class \code{"afex_aov"})?
Default is \code{"univariate"} which uses the \code{aov} model object (if
present). The other option is \code{"multivariate"} which uses the \code{lm}
model object (which is an object of class \code{"mlm"} in case
repeated-measures factors are present).
\item \code{include_aov}: Should the \code{aov} model be included into ANOVA objects of class \code{"afex_aov"}? Setting this to \code{FALSE} can lead to considerable speed improvements.
\item \code{factorize}: Should between subject factors be factorized (with
note) before running the analysis? Default is \code{TRUE}. (ANOVA functions
only)
\item \code{sig_symbols}: Default significance symbols used for ANOVA and
\code{mixed} printing. Default is\code{c(" +", " *", " **", " ***")}.
\item \code{lmer_function}: Which \code{lmer} function should \code{mixed} or
\code{lmer_alt} use. The default is \code{"lmerTest"} which uses
\code{\link[lmerTest]{lmer}}, \code{"lme4"} is also possible which uses
\code{\link[lme4]{lmer}}. Note that \code{mixed} methods \code{"KR"} and
\code{"S"} only work with \code{"lmerTest"}. For the other methods,
\code{"lme4"} could be minimally faster, but does not allow to use
\code{lmerTest::anova()}.
\item \code{return_aov}: Return value of the ANOVA functions (see
\code{\link{aov_car}}), default is \code{"nice"}.
}
}
\note{
All options are saved in the global R \code{\link{options}} with prefix
\code{afex.}
}
\examples{
afex_options() # see all options
afex_options("return_aov") #get single option
aop <- afex_options() # save current options
\dontrun{
# change options
afex_options(return_aov = "nice")
afex_options("return_aov") #get single option
afex_options(return_aov = "nice", method_mixed = "LRT")
afex_options("method_mixed") #get single option
# do something
}
afex_options(aop) # reset options
}
|
c3db416745b3e9f6be7865cc41c24c00f669dea5 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/abjutils/examples/sample_cnj.Rd.R | b0f5beee23d2ef4c7aa6ef41ffff08fe592eb99d | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 646 | r | sample_cnj.Rd.R | library(abjutils)
### Name: sample_cnj
### Title: Generate sample Brazilian lawsuit identification numbers
### Aliases: sample_cnj
### ** Examples
# (auto-extracted example code from the abjutils::sample_cnj help page)
{
# Sampling the parameters: values are drawn from the supplied choices
sample_cnj(3, foros = "0000",
           anos = "2015", orgao = 8, tr = 26,
           first_dig = "0",sample_pars = TRUE, return_df = FALSE)
sample_cnj(10, foros = c("0000","0001"),
           anos = c("2014","2015"), orgao = 8, tr = 26,
           first_dig = "0",sample_pars = TRUE, return_df = FALSE)
# Not sampling the parameters: vectors are used as given, one per draw
sample_cnj(3, foros = c("0000","0001","0002"),
           anos = c("2014","2015","2016"), orgao = rep(8,3), tr = rep(26,3),
           first_dig = "0",sample_pars = FALSE, return_df = FALSE)
}
|
32520e2871baa0eabeacc9bfec223cf0326576bb | 5d3d1b0916535dad8a83a9dad9e23ed77b982d8e | /R/var.variance.R | 7646c3be8642a3f4f3487aee464ee127f577aedd | [] | no_license | cran/agrmt | 3d280f0d45e7dcc141556269548296131f2c43cc | 849caf12caabffb97aba71b2b2a54d2d36d2ec4a | refs/heads/master | 2021-11-25T02:48:56.040559 | 2021-11-17T21:20:02 | 2021-11-17T21:20:02 | 17,694,324 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 406 | r | var.variance.R | var.variance = function(V) {
# Calculate the approximate variance of the categorical variance estimator
# V should be a frequency vector
# Based off of Blair and Lacy 2000: 274
k = length(V)
N = sum (V)
p = V / N
mu = mean(expand(V))
const = -sum(2*p*((1:k)-mu))
a = (N / (N - 1)) * ((1:k) * const + ((1:k) - mu)^2)
var.var = (sum(p * a^2) - (sum(p * a))^2)/N
return(var.var)
}
|
7be7afe0f698e05292850eae828130103d4bfd48 | cb4b8d511a14f1655120bb8737266296c5e46059 | /R/veg/Litter.R | b940775522330fe7094a84b13158cebff0852c1d | [] | no_license | Josh-Lee1/JL_honours | 40361e2f8b78fac9676ff32a8e0ce7a0603f6152 | db6792a039d824fdb518f9e06c3cc27ecca6da8a | refs/heads/master | 2023-03-29T22:28:19.500012 | 2021-04-15T04:40:20 | 2021-04-15T04:40:20 | 295,877,409 | 0 | 0 | null | 2021-03-16T06:17:06 | 2020-09-16T00:02:18 | HTML | UTF-8 | R | false | false | 1,513 | r | Litter.R | library(tidyverse)
#veg <- read.csv("Data/Raw/veg.csv")%>%
#  filter(Point == 10 | Point == 20 | Point == 30 | Point == 40 | Point == 50)
# Read the processed vegetation survey data; Treatment is coded as a factor
# for the ANOVAs below.
veg<- read.csv("Data/Processed/vegfull.csv")
veg$Treatment <- as.factor(veg$Treatment)
# ---- Litter depth ----
## One-way ANOVA on Treatment, then a two-way ANOVA on Formation x Fire.
## Note Ldepth.aov is overwritten: the diagnostics below use the two-way model.
Ldepth.aov <- aov(Litter.Depth ~ Treatment, data = veg)
summary(Ldepth.aov)
Ldepth.aov <- aov(Litter.Depth ~ Formation * Fire, data = veg)
summary(Ldepth.aov)
## Look at the Formation x Fire interaction
interaction.plot(veg$Fire, veg$Formation, veg$Litter.Depth)
## Test ANOVA assumptions (residual normality and homoscedasticity).
## NOTE(review): three diagnostic plots follow a 1x2 layout, so the last
## spills onto a new page/device -- confirm this is intended.
par(mfrow = c(1,2))
hist(Ldepth.aov$residuals)
plot(Ldepth.aov,which=2)
plot(Ldepth.aov,which=1)
## Boxplot of litter depth by treatment combination
boxplot(
  Litter.Depth ~ Formation*Fire,data = veg,
  names = c("Dry Sclerophyll Burnt", "Rainforest Burnt", "Dry Sclerophyll Unburnt", "Rainforest Unburnt"),
  ylab="Litter Depth (mm)",xlab="Treatment",ylim=c(0,100))
##### there is a big outlier at 200mm in unburnt Rainforest
# ---- Litter cover ----
## Same analysis sequence for litter cover (%)
Lcover.aov <- aov(Litter.Cover ~ Treatment, data = veg)
summary(Lcover.aov)
Lcover.aov <- aov(Litter.Cover ~ Formation * Fire, data = veg)
summary(Lcover.aov)
## Look at the Formation x Fire interaction
interaction.plot(veg$Fire, veg$Formation, veg$Litter.Cover)
## Test ANOVA assumptions
par(mfrow = c(1,2))
hist(Lcover.aov$residuals)
plot(Lcover.aov,which=2)
plot(Lcover.aov,which=1)
## Boxplot of litter cover by treatment combination
boxplot(
  Litter.Cover ~ Formation*Fire,data = veg,
  names = c("Dry Sclerophyll Burnt", "Rainforest Burnt", "Dry Sclerophyll Unburnt", "Rainforest Unburnt"),
  ylab="Litter Cover (%)",xlab="Treatment",ylim=c(0,100))
|
7cf5d7c8c2808f5ffa0dc57cffd3208e5b2b7a04 | 7cee004e610147f721dac99e8272652acd4d0fb3 | /plot1.R | 8112acb9842e399965815bb681507001044413fb | [] | no_license | alroru95/ExData_Plotting1 | 2213263895b1504f94e863d8e99715b07b1bd3e3 | 1d6bc83ae0da41e4092b91f1ed9ef7cc61ac127a | refs/heads/master | 2022-11-16T19:19:52.633635 | 2020-07-19T20:13:54 | 2020-07-19T20:13:54 | 280,826,743 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 861 | r | plot1.R | ## Download the file and put it in the data folder:
## Create the data directory if needed and download the zipped dataset:
if (!file.exists("./data")) {dir.create("./data")}
URLzip <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
download.file(URLzip, "./data/Electric.zip", method = "curl")
##Unzip files in the created directory:
unzip("./data/Electric.zip", exdir = "./data")
##Read the .txt file and subset the corresponding dates.
## na.strings = "?" makes the measurement columns numeric on read; without
## it the "?" missing-value placeholders force the columns to
## character/factor and as.numeric() can silently yield wrong values
## (factor level codes) or NAs with a warning.
data <- read.table("./data/household_power_consumption.txt", header = TRUE, sep = ";", dec = ".", na.strings = "?")
February <- data[data$Date %in% c("1/2/2007","2/2/2007") ,]
##Plot Global active power as a histogram and save it as .png:
GAP <- as.numeric(February$Global_active_power) ##x has to be numeric
png("plot1.png", width=480, height=480)
with(February, hist(GAP, col = "red", xlab = "Global Active Power (kilowatts)", main = "Global Active Power"))
dev.off()
|
87d5d37e8d94bcccc2310218a6b47c7d5f3e5d6c | 9d1a7ed561ec9ad5f87ae7b8c52c73c53737e4d8 | /Examenes_R.r | c2b3cba628bb12f9d01243e091a20ba93a14fd5e | [] | no_license | Jasielprogramador/examen_R | f5fea0761ab266daf55eabd475205d7520034716 | deced53ea8aa540241687b54a86a336b062b98e5 | refs/heads/master | 2023-01-18T21:02:09.572521 | 2020-12-01T13:33:55 | 2020-12-01T13:33:55 | 317,506,689 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 396 | r | Examenes_R.r | #Azterketako ariketak R
# From practical 2 (Basque: "2.praktikakoak")
# Exercise 7 ("7.ariketa")
# NOTE(review): mean()/median() are called without arguments and will error
# as written; presumably a data vector ('arrailak'?) was meant -- confirm.
mean()
median()
# NOTE(review): this is not the usual population-variance correction
# var(x)*(n-1)/n; the subtraction here looks like a transcription slip.
variantza<-var(arrailak)-n-1/n
sqrt(variantza)
# Exercise 8 ("8.ariketa")
kotxeak<-read.table("kotxeak.txt",header=T)
kotxeak
# Petrol cars column
gasolina<-kotxeak$Gasolina.kotxeak
gasolina
# Diesel cars column
gasoil<-kotxeak$Gas.oil.kotxeak
gasoil
#Merge 2 arrays into 1
a<-cbind(gasolina,gasoil)
a
mean(a)
median(a)
# NOTE(review): as above, compare with var(a)*(length(a)-1)/length(a)
variantza<-var(a)-(length(a)-1/length(a))
variantza
gasoil<- |
2f4c60b8b9b27e1fd6cef20a2cb0f3ac7fc56441 | 5bc38b34ca80d6e0264a950958fa1910d51cb40b | /scripts/ERSST-grid/ERSST-grid.R | 93a6de4e376071cd880c1fc929fbe97fc0185afc | [
"LicenseRef-scancode-warranty-disclaimer"
] | no_license | BigelowLab/oharvester | a024e49cac462af99d3889f5bcbfc00bafa5325a | 5329ef581a1f5f6b7ea9ea65d59f7ca3cf802bad | refs/heads/master | 2022-06-30T08:51:26.992187 | 2022-05-27T16:32:44 | 2022-05-27T16:32:44 | 228,415,019 | 2 | 2 | null | 2020-12-16T14:53:25 | 2019-12-16T15:16:51 | Python | UTF-8 | R | false | false | 6,701 | r | ERSST-grid.R | library(ncdf4)
library(raster)
#' Convert bounding box [0, 360] longitudes to [-180, 180]
#'
#' Bounding boxes are 4-element vectors of [left, right, bottom, top];
#' only the two longitude elements (left, right) are rewrapped.
#'
#' @export
#' @param x numeric bounding box vector, no check is done for being within the [0, 360] range
#' @return numeric bounding box vector
to180BB <- function(x) {
  x[1:2] <- to180(x[1:2])
  x
}
#' Convert bounding box [-180, 180] longitudes to [0, 360]
#'
#' Bounding boxes are 4-element vectors of [left, right, bottom, top];
#' only the two longitude elements (left, right) are rewrapped.
#'
#' @export
#' @param x numeric bounding box vector, no check is done for being within the [-180, 180] range
#' @return numeric bounding box vector
to360BB <- function(x) {
  x[1:2] <- to360(x[1:2])
  x
}
#' Convert [0, 360] longitudes to [-180, 180]
#'
#' @export
#' @param x numeric vector, no check is done for being within the [0, 360] range
#' @return numeric vector with values above 180 shifted down by 360
to180 <- function(x) {
  wrap <- x > 180
  x[wrap] <- x[wrap] - 360
  x
}
#' Convert [-180, 180] longitudes to [0, 360]
#'
#' @export
#' @param x numeric vector, no check is done for being within the [-180, 180] range
#' @return numeric vector with negative values shifted up by 360
to360 <- function(x) {
  wrap <- x < 0
  x[wrap] <- x[wrap] + 360
  x
}
#' Compute NetCDF navigation info (start/count indices and extent) for ERSST
#'
#' @param x an open ncdf4 object (a character URL is not accepted here --
#'   the stopifnot below requires class "ncdf4"; see \code{ERSST_get_grid}
#'   for opening the connection)
#' @param bb 4 element numeric, bounding box [west, east, south, north] in
#'   [0, 360] longitudes (use \code{to360BB} to convert)
#' @param dates Date-class, sequence of one or more dates to retrieve;
#'   ignored for the 12-month climatology ('ltm') file
#' @param varname the name of the variable to extract
#' @param res numeric, 1 or 2 element resolution [res_x, res_y] in degrees
#' @return list of items needed to extract data: start/count indices,
#'   extent (in [-180, 180] longitudes), CRS string and variable name
ERSST_nc_nav <- function(x,
                         bb = to360BB(c(-88, -48, 24, 52)) ,
                         dates = as.Date(c("2018-01-01", "2018-01-02", "2018-01-03","2018-01-04")),
                         varname = 'sst',
                         res = c(2.0, 2.0)){
  stopifnot(inherits(x, "ncdf4"))
  lat <- x$dim$lat$vals
  lon <- x$dim$lon$vals
  if (!(varname[1] %in% names(x$var))) stop("varname not known:", varname[1])
  if (length(res) == 1) res <- c(res[1],res[1])
  r2 <- res/2   # half-cell padding so the extent covers whole grid cells
  if (x$dim$time$len == 12){
    # exactly 12 time steps => the long-term-mean (climatology) file:
    # take all 12 months regardless of the requested dates
    idx = seq_len(12)
  } else {
    # monthly file: ERSST time is stored as days since 1800-01-01
    time <- ncdf4::ncvar_get(x, "time") + as.Date("1800-01-01")
    # NOTE(review): start/count below assume the matched dates are
    # contiguous (idx[1] .. idx[1]+length(idx)-1); that holds for a
    # monthly date sequence but is not checked here -- confirm.
    idx <- findInterval(dates, time)
  }
  # pad the bounding box by half a cell, then snap to the nearest grid
  # indices; the iN..iS start/count order implies latitudes are stored
  # north-to-south in the file (so iN <= iS)
  bb2 <- bb + c(-r2[1], r2[1], -r2[2], r2[2])
  iW <- which.min(abs(lon - bb2[1]))
  iE <- which.min(abs(lon - bb2[2]))
  iS <- which.min(abs(lat - bb2[3]))
  iN <- which.min(abs(lat - bb2[4]))
  list(bb = bb,
       res = res,
       start = c(iW, iN, idx[1]),
       count = c(iE- iW + 1, iS - iN + 1, length(idx)),
       ext = to180BB(c(lon[iW] - r2[1], lon[iE] + r2[1],
                       lat[iS] - r2[2], lat[iN] + r2[2])),
       crs = "+proj=longlat +datum=WGS84 +ellps=WGS84 +towgs84=0,0,0",
       varname = varname)
}
#' Build the OPeNDAP URL of the ERSST dataset for the requested type
#'
#' @param type character, either 'month' (monthly means, the default) or
#'   'ltm' (1981-2010 long term mean); matching is case-insensitive and any
#'   value other than 'ltm' falls back to the monthly file
#' @param base_url character, the root THREDDS url
#' @return character URL
ERSST_nc_url <- function(type = c("month", "ltm")[1],
                         base_url = "https://www.esrl.noaa.gov/psd/thredds/dodsC/Datasets/noaa.ersst.v5"){
  if (tolower(type[1]) == "ltm") {
    fname <- "sst.mon.ltm.1981-2010.nc"
  } else {
    fname <- "sst.mnmean.nc"
  }
  file.path(base_url, fname)
}
#' Fetch the requested slab from an open ERSST NetCDF and build a raster stack
#'
#' @param x ncdf4 object (it is closed before this function returns)
#' @param nav list of navigation values, as generated by \code{ERSST_nc_nav}
#' @return raster stack with one layer per requested time step
ERSST_fetch <- function(x, nav){
  stopifnot(inherits(x, "ncdf4"))
  m <- ncdf4::ncvar_get(x,
                        varid = nav$varname,
                        start = nav$start,
                        count = nav$count)
  ncdf4::nc_close(x)
  # ncvar_get() drops degenerate (length-1) dimensions, so a single
  # requested time step comes back as a lon x lat matrix; restore the third
  # (time) dimension so the m[,,i] indexing below works for any layer
  # count.  (The original derived n but still indexed m[,,i], which errors
  # on a 2-D matrix.)
  if (length(dim(m)) == 2) dim(m) <- c(dim(m), 1)
  nlayer <- dim(m)[3]
  SS <- lapply(seq_len(nlayer),
               function(i){
                 # transpose: ncdf4 returns [lon, lat], raster wants rows=lat
                 raster::raster(t(m[,,i]),
                                crs = nav$crs,
                                xmn = nav$ext[1],
                                xmx = nav$ext[2],
                                ymn = nav$ext[3],
                                ymx = nav$ext[4])
               })
  raster::stack(SS)
}
#' Retrieve a stack of ERSST grids for either monthly means or the long term mean
#'
#' @param type character, either 'month' (default) or 'ltm' (long term mean)
#' @param bb numeric, 4 element vector of [west, east, south, north] boundaries,
#'        where west and south are negative
#' @param daterange 2 element character in YYYY-mm-dd format or Date,
#'        inclusive start and end dates. Ignored if type is set to 'ltm' in
#'        which case all 12 climatology months are downloaded
#' @param outpath character or NA, optional output path to save rasters
#' @param fmt character either 'raster' or 'GTiff' (default)
#' @param overwrite logical, see \code{raster::writeRaster}
#' @return raster stack with one layer per month
ERSST_get_grid <- function(type = c("month", "ltm")[1],
                           bb = c(-88, -48, 24, 52),
                           daterange = c('2018-01-01', '2018-04-01'),
                           outpath = NA,
                           fmt = c("raster", "GTiff")[2],
                           overwrite = TRUE){
  type <- tolower(type[1])
  ersst_url <- ERSST_nc_url(type = type)
  if (type == 'ltm'){
    # climatology: the 12 months of an arbitrary year, used only for labels
    dates <- seq.Date(from = as.Date("2018-01-01"),
                      to = as.Date("2018-12-01"),
                      by = "month")
  } else {
    if (!inherits(daterange, "Date")) daterange <- as.Date(daterange)
    dates <- seq(from = daterange[1], to = daterange[2], by = "month")
  }
  x <- ncdf4::nc_open(ersst_url)
  nav <- ERSST_nc_nav(x,
                      bb = to360BB(bb),
                      dates = dates)
  SS <- ERSST_fetch(x, nav)   # ERSST_fetch() closes the ncdf4 handle
  SS <- raster::setZ(SS, dates)
  if (type == 'ltm'){
    names(SS) <- format(dates, "%b")
  } else {
    names(SS) <- format(dates, "%b_%Y")
  }
  if (!is.na(outpath)){
    if (!dir.exists(outpath[1])){
      ok <- dir.create(outpath[1], recursive = TRUE, showWarnings = FALSE)
      if (!ok) stop("unable to create outpath:", outpath[1])
    }
    ext <- switch(tolower(fmt[1]),
                  'gtiff' = "tif",
                  'raster' = "grd")
    if (type == 'ltm'){
      ofile <- sprintf("%s-ltm.%s",format(dates, "%m"),ext)
    } else {
      ofile <- sprintf("%s.%s",format(dates, "%Y-%m"),ext)
    }
    # seq_len(), not seq_along(): nlayers() returns a single integer, and
    # seq_along() of a length-1 value is just 1, so the original only ever
    # wrote the first layer to disk.
    for (i in seq_len(raster::nlayers(SS))){
      raster::writeRaster(SS[[i]], file.path(outpath, ofile[i]),
                          format = fmt,
                          overwrite = overwrite)
    }
  }
  SS
}
|
2da383e97e57f43e952d63ee2f8b244b6998f9f7 | b90eeee641deefe6b3fbb981151006e8a7982129 | /Sphaerulina_analysis.R | d8030666352ba9e47d06c21a299c74663b2064b6 | [] | no_license | FionaPaul/Sphaerulina | db62b11ec2ef7c85a056ab22f80eec77fb97c44e | 532a809fe7aeccb8b0203da2cdefd2423462bb6d | refs/heads/master | 2021-01-22T21:58:19.346903 | 2017-07-13T08:33:09 | 2017-07-13T08:33:09 | 92,749,196 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 22,387 | r | Sphaerulina_analysis.R | ###### Pathogen without borders: Sphaerulina populicola is genetically diverse
# and spreads freely through the host treeโs range
### Fiona Paul, Imke Schmitt, Sunil Mundra, Miklรณs Bรกlint
#####################################################
# Contents
# 1. Import dataset and data cleaning
# 2. Calculate and investigate diversity
# 3. Investigate abundance of oligotypes
# 4. Visualization of sample differences with NMDS
# 5. Making a map of the balsam poplar distribution
# 6. Making the haplotype network
################################################
library(plyr) # (Wickham 2011)
library(corrplot) # (Wei and Simko 2016)
library(vegan) # (Oksanen et al. 2016)
library(MASS) # (Venables and Ripley 2002)
library(effects) # (Fox 2003)
library(lme4) # (Bates et al. 2015)
library(mvabund) # (Wang et al. 2016)
library(maps) # (Becker et al. 2016)
library(maptools) # (Bivand and Lewin-Koh 2016)
library(rgdal) # (Bivand et al. 2017)
library(scales) # (Wickham 2016)
library(raster) # (Hijmans 2015)
library(mapplots) # (Gerritsen 2014)
library(ape) # (Paradis et al. 2004)
library(pegas) # (Paradis 2010)
library(dplyr) # (Wickham and Francois 2016)
library(ade4) # (Dray and Dufour 2007)
# Interactive-session helpers: save.image() snapshots the whole workspace
# and load() restores a previous snapshot.  They are meant to be run
# manually as needed, not as a sequential part of the pipeline (running
# both back-to-back simply reloads what was just saved).
save.image("Sphaerulina_oligotypes.image.RData")
load("Sphaerulina_oligotypes.image.RData")
##### 1. Import dataset and data cleaning #####
## Read in the oligotype abundance table (samples in rows, oligotypes in
## columns; whitespace-separated output of the oligotyping pipeline),
## then sort the rows by sample name.
oligotypeabund = read.csv(file="MATRIX-COUNT.txt", header=T, sep='', row.names = 1)
oligotypeabund = oligotypeabund[order(row.names(oligotypeabund)),]
## Sequencing depth and oligotype abundances
# Frequency distribution of number of reads per sample
readsample = apply(oligotypeabund,1,sum)
summary(readsample)
hist(readsample, main = "Histogram of summed reads per sample",
     xlab = "Summed reads per sample")
# Frequency distribution of number of reads per oligotype
readOT = apply(oligotypeabund,2,sum)
hist(readOT, main = "Histogram of summed reads per oligotype",
     xlab = "Summed reads per oligotype")
# Frequency distribution of maximum read number per sample
hist(apply(oligotypeabund, 1, max), main = "Histogram of maximum read number per sample",
xlab = "Max read number per sample", ylab = "Frequency")
# Frequency distribution of highest read count per oligotype
hist(apply(oligotypeabund, 2, max), main = "Histogram of maximum read number per oligotype",
xlab = "Max read number per oligotype", ylab = "Frequency")
## Oligotype abundances
colors = rainbow(43)
pie(readOT, labels = names(readOT), col = colors)
pie(sqrt(readOT), labels = names(readOT), col = colors)
### Clean up negative controls
# For every oligotype, subtract the highest read count observed in any
# blank/negative control from every real sample, on the assumption that up
# to that many reads per oligotype could be contamination/index bleed.
# Blank controls (sample names starting with "B")
Blank = grep("^B.*", row.names(oligotypeabund))
# Negative controls (sample names starting with "N")
Negative = grep("^N.*", row.names(oligotypeabund))
# Per-oligotype maximum read count over all control samples
MaxControl = apply(oligotypeabund[c(Blank, Negative),], 2, max)
# Real samples have lower-case names; subtract the control maximum per
# column (oligotype) with sweep()
negcleanoligotypeabund = oligotypeabund
negcleanoligotypeabund[grep("^[a-z]", row.names(oligotypeabund)),] <-
  sweep(oligotypeabund[grep("^[a-z]", row.names(oligotypeabund)),], 2, MaxControl, "-")
# Clip the resulting negative counts to 0
negcleanoligotypeabund[negcleanoligotypeabund < 0] <- 0
# Keep only the real (lower-case) samples, dropping the control rows
negcleanoligotypeabund = negcleanoligotypeabund[grep("^[a-z]", row.names(oligotypeabund)),]
# removal of 21 negative control samples
### Filter rare observations
## Per oligotype threshold
# Set counts within an oligotype falling below 0.1% of that oligotype's
# highest count to 0, to remove likely sequencing errors.
# Per-oligotype threshold vector (one value per oligotype)
rarethreshold = apply(negcleanoligotypeabund, 2, max)*0.001
# NOTE: colwise() is applied to the TRANSPOSED table, so each column holds
# one sample's counts across all oligotypes; the element-wise comparison
# against rarethreshold (also one value per oligotype) therefore lines up.
rm.small.obs = colwise(function(x){return(ifelse(x <= rarethreshold, 0, x))})
rarecleanedabund = rm.small.obs(as.data.frame(t(negcleanoligotypeabund)))
rarecleanedabund = as.data.frame(t(rarecleanedabund))
colnames(rarecleanedabund) = colnames(negcleanoligotypeabund)
## Global threshold
# Filter out observations that fall below 0.05% of the globally highest
# observation
maxcount = max(rarecleanedabund)
cutoff.global = round(maxcount*0.0005)
# Set the counts falling below the global cutoff to 0
globallycleanedabund = rarecleanedabund
globallycleanedabund[globallycleanedabund < cutoff.global] = 0
## Filter out oligotypes present in fewer than 6 samples/trees
# (strictly more than 5 samples with non-zero counts are required)
low.presence = apply(globallycleanedabund,2,function(vec) sum(vec>0))
IsFreq = low.presence > 5
prescleanedabund = globallycleanedabund[,IsFreq]
## Remove samples with no reads left
finalcleanedabund = prescleanedabund
samples.to.keep = (apply(finalcleanedabund,1,sum)) > 0
finalcleanedabund = finalcleanedabund[samples.to.keep,]
# removal of 393 samples
## Remove oligotypes with no reads left
ots.to.keep = (apply(finalcleanedabund,2,max)) > 0
finalcleanedabund = finalcleanedabund[,ots.to.keep]
# Removal of 14 oligotypes
### Remove problematic samples from the dataset
finalcleanedabund = finalcleanedabund[4:69,]
finalcleanedabund = finalcleanedabund[-c(1,25,48,52,54),]
### Sequencing depth and oligotype abundance in the cleaned dataset
# Frequency distribution of number of reads per sample
# Abundance distribution plots
cleanreadsample = apply(finalcleanedabund,1,sum)
hist(cleanreadsample,breaks = 100 ,main = "Histogram of summed reads per sample",
xlab = "Summed reads per sample")
# Frequency distribution of number of reads per oligotype
cleanreadOT = apply(finalcleanedabund,2,sum)
hist(cleanreadOT,breaks = 100, main = "Histogram of summed reads per oligotype",
xlab = "Summed reads per oligotype")
# Frequency distribution of maximum read number per sample
hist(apply(finalcleanedabund, 1, max), main = "Histogram of maximum read number per sample",
xlab = "Max read number per sample", ylab = "Frequency")
# Frequency distribution of highest read count per oligotype
hist(apply(finalcleanedabund, 2, max), main = "Histogram of maximum read number per oligotype",
xlab = "Max read number per oligotype", ylab = "Frequency")
png(file="histogram.png", units="mm", height=90, width=90,
pointsize=10, bg="white", res=1200)
## Abundance of oligotypes
###### Oligotype pie charts ######
# colors for final oligotype plotting
my_color = c("#d896ff", "#800080", "#ee4035", "#fdf498", "#7bc043", "#0392cf", "#028900",
"#49796b", "#602320", "#011f4b", "#000000", "#a0d6b4", "#ffbf00", "#a67c00",
"#ff0097", "#ff0000")
par(mar=c(0,0,0,0))
pie(cleanreadOT, labels = c("155", "159", "125", "92", "6", "15", "102", "79", "80", "156", "78",
"117", "58", "105", "8", "161"), col = my_color)
pdf("piechart_oligo_freq.pdf",height = 8.27, width= 11.69, pointsize = 18 )
par(mar=c(0,0,0,0))
pie(sqrt(cleanreadOT), labels = c("155", "159", "125", "92", "6", "15", "102", "79", "80", "156",
"78", "117", "58", "105", "8", "161"), col = my_color)
dev.off()
## Print out abundance table into a csv file
write.table(finalcleanedabund, file = "cleaned_abundance_table.csv",sep = ";",
col.names = NA, row.names = TRUE)
##### 2. Calculate and investigate diversity (creating the diversity boxplot) #####
# load in the abundance table with reads of oligotypes per tree
abundance = read.csv(file="cleaned_abundance_table.csv", header=T, sep=';', row.names = 1)
# load in the metadata by sample with raw read numbers
metadata = read.csv(file = "metadata.csv", header = T, sep = ";", row.names = 1)
## Correlation of explanatory variables
###### Correlation plot of environmental variables ######
pdf("correlation_matrix_environment.pdf",height = 8.27, width= 11.69, pointsize = 18 )
par(mar=c(2,0,0,0), oma=c(2,0,0,0))
cor.data = cbind(metadata[,c((2:10),15,16)])
corrplot.mixed(cor(cor.data), upper = "number", lower = "circle", tl.pos = "lt", order = "hclust",
tl.cex= 0.75, tl.col = "black", tl.srt = 45)
dev.off()
### Calculate Hill diversities
# renyi(..., hill = TRUE) returns Hill numbers for scales 0, 1 and 2:
# scale 0 = N0 (richness), scale 1 = N1 (exponential Shannon),
# scale 2 = N2 (inverse Simpson).  The hill.1/hill.2/hill.3 names below
# refer to these three columns in order, not to the Hill scale itself.
OTHill = renyi(abundance, scale=c(0,1,2), hill=T)
# Hill N0: oligotype richness per sample
hill.1 = OTHill$"0"
names(hill.1) = rownames(abundance)
hist(hill.1)
plot(hill.1 ~ metadata$reads, pch = 19, ylab= "richness", xlab="number of raw reads")
shapiro.test(hill.1)
# Hill N1: exponential of Shannon entropy
hill.2 = OTHill$"1"
names(hill.2) = rownames(abundance)
hist(hill.2)
plot(hill.2 ~ metadata$reads, pch = 19)
# Hill N2: inverse Simpson concentration
hill.3 = OTHill$"2"
names(hill.3) = rownames(abundance)
hist(hill.3)
plot(hill.3 ~ metadata$reads, pch = 19)
### GLMs for factors influencing diversity
## Hill 1
# Read number
hill1.glm.reads = glm(hill.1 ~ reads, data = metadata)
summary(hill1.glm.reads)
hill1.reads = glm.nb(hill.1 ~ reads, data = metadata)
summary(hill1.reads)
anova(hill1.glm.reads,hill1.reads)
AIC(hill1.glm.reads,hill1.reads)
# negative binomial is better than normal
plot(allEffects(hill1.reads))
# Site/Location effect
hill1.site = glm.nb(hill.1 ~ reads + site, data = metadata)
summary(hill1.site)
anova(hill1.site)
anova(hill1.reads,hill1.site)
AIC(hill1.reads,hill1.site)
# Read number and site effect plot
plot(allEffects(hill1.site))
# Replication Effect
hill1.rep = glm.nb(hill.1 ~ reads + replicates, data = metadata)
summary(hill1.rep)
anova(hill1.rep)
anova(hill1.reads,hill1.site,hill1.rep)
AIC(hill1.reads,hill1.site,hill1.rep)
plot(allEffects(hill1.rep))
# Region effect, Canada vs Alaska
hill1.reg = glm.nb(hill.1 ~ reads + Region, data = metadata)
summary(hill1.reg)
plot(allEffects(hill1.reg))
# Latitude effect
hill1.lat = glm.nb(hill.1 ~ reads + latitude, data = metadata)
summary(hill1.lat)
anova(hill1.reads,hill1.site,hill1.lat)
AIC(hill1.reads,hill1.site,hill1.lat)
plot(allEffects(hill1.lat))
hill1.lat2 = glm.nb(hill.1 ~ reads + latitude + site, data = metadata)
summary(hill1.lat2)
anova(hill1.lat2)
# Latitude, read number, site effect plot
plot(allEffects(hill1.lat2))
plot(effect("reads", hill1.lat2), main= NULL, xlab= "Sequencing depth (DNA sequences)",
ylab= "Hill's N0")
plot(effect("latitude", hill1.lat2), main= NULL, xlab= "Latitude (ยฐ N)", ylab= "Hill's N0")
# Landuse type effect
hill1.land = glm.nb(hill.1 ~ reads + Landuse_type, data = metadata)
summary(hill1.land)
anova(hill1.reads,hill1.site,hill1.land)
AIC(hill1.reads,hill1.site,hill1.land)
plot(allEffects(hill1.land))
## Hill 2
# Read number
hill2.glm.nb.reads = glm.nb(hill.2 ~ reads, data = metadata)
summary(hill2.glm.nb.reads)
hill2.glm = glm(hill.2 ~ reads, data = metadata)
summary(hill2.glm)
anova(hill2.glm.nb.reads,hill2.glm)
AIC(hill2.glm, hill2.glm.nb.reads)
plot(allEffects(hill2.glm))
# Site/Location effect
hill2.site = glm(hill.2 ~ reads + site, data = metadata)
summary(hill2.site)
anova(hill2.glm,hill2.site)
AIC(hill2.glm,hill2.site)
plot(allEffects(hill2.site))
# Replication Effect
hill2.rep = glm(hill.2 ~ reads + replicates, data = metadata)
summary(hill2.rep)
anova(hill2.rep)
anova(hill2.glm,hill2.site, hill2.rep)
AIC(hill2.glm,hill2.site, hill2.rep)
plot(allEffects(hill2.rep))
# Region effect, Canada vs Alaska
hill2.reg = glm(hill.2 ~ reads + Region, data = metadata)
summary(hill2.reg)
plot(allEffects(hill2.reg))
# Latitude effect
hill2.lat = glm(hill.2 ~ reads + latitude, data = metadata)
summary(hill2.lat)
plot(allEffects(hill2.lat))
# Landuse type effect
hill2.land = glm(hill.2 ~ reads + Landuse_type, data = metadata)
summary(hill2.land)
plot(allEffects(hill2.land))
## Hill 3
# Read number
hill3.glm.nb.reads = glm.nb(hill.3 ~ reads, data = metadata)
summary(hill3.glm.nb.reads)
hill3.glm = glm(hill.3 ~ reads, data = metadata)
summary(hill3.glm)
anova(hill3.glm.nb.reads,hill3.glm)
AIC(hill3.glm, hill3.glm.nb.reads)
plot(allEffects(hill3.glm))
# Site/Location effect
hill3.site = glm(hill.3 ~ reads + site, data = metadata)
summary(hill3.site)
anova(hill3.glm, hill3.site)
AIC(hill3.glm, hill3.site)
plot(allEffects(hill3.site))
# Replication Effect
hill3.rep = glm(hill.3 ~ reads + replicates, data = metadata)
summary(hill3.rep)
anova(hill3.rep)
anova(hill3.glm,hill3.site, hill3.rep)
AIC(hill3.glm,hill3.site, hill3.rep)
plot(allEffects(hill3.rep))
# Region effect, Canada vs Alaska
hill3.reg = glm(hill.3 ~ reads + Region, data = metadata)
summary(hill3.reg)
plot(allEffects(hill3.reg))
# Latitude effect
hill3.lat = glm(hill.3 ~ reads + latitude, data = metadata)
summary(hill3.lat)
plot(allEffects(hill3.lat))
# Landuse type effect
hill3.land = glm(hill.3 ~ reads + Landuse_type, data = metadata)
summary(hill3.land)
plot(allEffects(hill3.land))
###### Making the boxplots for richness and diversity in the two geographic demes ######
shannon = diversity(abundance, index = "shannon", MARGIN = 1)
pdf(file = "richness_diversity_region_boxplot.pdf", height = 6.5, width= 11.69, pointsize = 18)
par(mar=c(2,4,2,2), las=1, oma=c(2,1,1,1), mfrow=c(1,2))
boxplot(hill.1 ~ metadata$Region, ylab="Oligotype richness", fill=TRUE, col="gray")
boxplot(shannon ~ metadata$Region, ylab="Shannon diversity", fill=TRUE, col="gray")
dev.off()
###
### Mixed effects GLMs for diversity patterns
## Hill 1
# Read number, null model
hill.glmer.reads = glmer.nb(hill.1 ~ (scale(reads)) + (1|site), data = metadata)
summary(hill.glmer.reads)
anova(hill.glmer.reads)
AIC(hill.glmer.reads)
plot(allEffects(hill.glmer.reads))
# Replication number effect
hill.glmer.rep = glmer.nb(hill.1 ~ (scale(reads)) + replicates + (1|site),
data= metadata)
summary(hill.glmer.rep)
anova(hill.glmer.reads, hill.glmer.rep)
plot(allEffects(hill.glmer.rep))
# Region effect
hill.glmer.reg = glmer.nb(hill.1 ~ (scale(reads)) + Region + (1|site),
data = metadata)
summary(hill.glmer.reg)
anova(hill.glmer.reads,hill.glmer.reg)
plot(allEffects(hill.glmer.reg))
# Latitude effect
hill.glmer.lat = glmer.nb(hill.1 ~ (scale(reads)) + latitude + (1|site),
data = metadata)
summary(hill.glmer.lat)
anova(hill.glmer.reads,hill.glmer.lat)
plot(allEffects(hill.glmer.lat))
# Landuse type effect
hill.glmer.land = glmer.nb(hill.1 ~ (scale(reads)) + Landuse_type + (1|site),
data = metadata)
summary(hill.glmer.land)
anova(hill.glmer.reads, hill.glmer.land)
plot(allEffects(hill.glmer.land))
# Temperature effect
hill.glmer.temp = glmer.nb(hill.1 ~ (scale(reads)) + temp_annual + (1|site),
data = metadata)
summary(hill.glmer.temp)
anova(hill.glmer.reads, hill.glmer.temp)
plot(allEffects(hill.glmer.temp))
# Precipitation effect
hill.glmer.ppt = glmer.nb(hill.1 ~ (scale(reads)) + scale(prec_annual) +
(1|site), data = metadata)
summary(hill.glmer.ppt)
anova(hill.glmer.reads, hill.glmer.ppt)
plot(allEffects(hill.glmer.ppt))
anova(hill.glmer.reads,hill.glmer.rep,hill.glmer.reg,hill.glmer.lat,hill.glmer.temp,hill.glmer.ppt)
## Hill 2
# Read number effect
hill2.glmer.reads = glmer.nb(hill.2 ~ (scale(reads)) + (1|site),data = metadata)
summary(hill2.glmer.reads)
anova(hill2.glmer.reads)
AIC(hill2.glmer.reads)
## Hill 3
# Read number effect
hill3.glmer.reads = glmer.nb(hill.3 ~ (scale(reads)) + (1|site),data = metadata)
summary(hill3.glmer.reads)
anova(hill3.glmer.reads)
AIC(hill3.glmer.reads)
##### 3. Investigate abundance of oligotypes #####
# Checking the response to factors of the abundance of individual oligotypes
# Investigating abundance patterns with manyglms
abund.mva
# convert the abundance table into mvabund table
abund.mva = mvabund(abundance)
# plot of the mean-variance relationship
# Mean Variance plot for dataset
meanvar.plot(abund.mva, xlab= "Mean", ylab= "Variance", table=T)
# Read number effect
glm.readnum = manyglm(abund.mva ~ reads, family = "negative.binomial",
data = metadata)
glm.readnum.anova = anova.manyglm(glm.readnum, test = "LR",nBoot=1000)
glm.readnum.anova
plot(glm.readnum)
# Site effect
glm.site = manyglm(abund.mva ~ site, family = "negative.binomial",
data = metadata)
glm.site.anova = anova.manyglm(glm.site, test = "LR",nBoot=1000)
glm.site.anova
# Replication effect
glm.repl = manyglm(abund.mva ~ replicates, family = "negative.binomial",
data = metadata)
glm.repl.anova = anova.manyglm(glm.repl, test = "LR", nBoot = 1000)
glm.repl.anova
# Region effect
glm.region = manyglm(abund.mva ~ Region, family = "negative.binomial",
data = metadata)
glm.region.anova = anova.manyglm(glm.region, test = "LR",nBoot=1000)
glm.region.anova
# Latitude effect
glm.lat = manyglm(abund.mva ~ latitude, family = "negative.binomial",
data = metadata)
glm.lat.anova = anova.manyglm(glm.lat, test = "LR",nBoot=1000)
glm.lat.anova
# Landuse type effect
glm.land = manyglm(abund.mva ~ Landuse_type, data = metadata,
family = "negative.binomial")
glm.land.anova = anova.manyglm(glm.land, test = "LR",nBoot=1000)
glm.land.anova
# Temperature effect
glm.temp = manyglm(abund.mva ~ temp_annual, family = "negative.binomial",
data = metadata)
glm.temp.anova = anova.manyglm(glm.temp, test = "LR", nBoot = 1000)
glm.temp.anova
# Precipitation effect
glm.ppt = manyglm(abund.mva ~ prec_annual, family = "negative.binomial",
data = metadata)
glm.ppt.anova = anova.manyglm(glm.ppt, test = "LR", nBoot = 1000)
glm.ppt.anova
##### 4. Visualization of sample differences with NMDS #####
MDS = metaMDS(abundance, distance = "bray")
MDS = metaMDS(abundance, previous = MDS)
stressplot(MDS)
##### NMDS plot with sample dots coloured by sampling location #####
stand.data= read.csv(file="stand.csv", sep=";", header=T, row.names = 1)
attach(stand.data)
head(stand.data)
mode(stand)
stand<-as.factor(stand)
mode(stand)
is.factor(stand)
par(mfrow=c(1,1), mar=c(4,4,2,2))
gnmds1 <- jitter(MDS$points[,1],600)
gnmds2 <- jitter(MDS$points[,2],600)
mds.df<-data.frame(gnmds1,gnmds2)
pdf("NMDS_locations.pdf",height = 8.27, width= 11.69, pointsize = 18 )
par(mar=c(5,5,2,2))
plot(MDS$points, type="n", xlab="NMDS1", ylab="NMDS2", xlim=c(-4,4), ylim= c(-6,3))
points(gnmds1[stand==1],gnmds2[stand==1],cex=1,pch=16,col="firebrick1")
points(gnmds1[stand==5],gnmds2[stand==5],cex=1,pch=16,col="brown")
points(gnmds1[stand==4],gnmds2[stand==4],cex=1,pch=16,col="chocolate")
points(gnmds1[stand==8],gnmds2[stand==8],cex=1,pch=16,col="yellow")
points(gnmds1[stand==6],gnmds2[stand==6],cex=1,pch=16,col="chartreuse")
points(gnmds1[stand==7],gnmds2[stand==7],cex=1,pch=16,col="darkgreen")
points(gnmds1[stand==2],gnmds2[stand==2],cex=1,pch=16,col="violet")
points(gnmds1[stand==12],gnmds2[stand==12],cex=1,pch=16,col="purple4")
points(gnmds1[stand==9],gnmds2[stand==9],cex=1,pch=16,col="blue")
points(gnmds1[stand==10],gnmds2[stand==10],cex=1,pch=16,col="cadetblue")
points(gnmds1[stand==11],gnmds2[stand==11],cex=1,pch=16,col="skyblue4")
points(gnmds1[stand==3],gnmds2[stand==3],cex=1,pch=16,col="darkgrey")
legend(-4, -1, c("Arctic Village","Fairbanks", "Denali N. Park","Hay River", "Fort McMurray",
"Grande Prairie","Boyle", "Cadxyz","Love","Melville","Portage", "Carnduff"),
fill=c("firebrick1","brown","chocolate","yellow","chartreuse","darkgreen",
"violet","purple4","blue","cadetblue", "skyblue4","darkgrey"), cex = 0.7)
dev.off()
##### NMDS plot by region, i.e. Canada and Alaska #####
region.data= read.csv(file="region.csv", sep=";", header=T, row.names = 1)
attach(region.data)
head(stand.data)
mode(reg)
reg<-as.factor(reg)
mode(reg)
is.factor(reg)
newgnmds1 <- jitter(MDS$points[,1],300)
newgnmds2 <- jitter(MDS$points[,2],300)
mds.df<-data.frame(newgnmds1,newgnmds2)
pdf("NMDS_regions.pdf",height = 8.27, width= 11.69, pointsize = 18 )
par(mar=c(5,5,2,2))
plot(MDS$points, type="n", xlab="NMDS1", ylab="NMDS2")
points(newgnmds1[reg==2],newgnmds2[reg==2],cex=1,pch=16,col="blue")
points(newgnmds1[reg==1],newgnmds2[reg==1],cex=1,pch=16,col="red")
legend(3.5, 2, c("Alaska", "Canada"), fill = c("red","blue"))
dev.off()
##### 5. Making a map of the balsam poplar distribution #####
# Load in the balsam poplar distribution shape file
popdistr = readOGR("popubals.shp")
head(popdistr)
# Load in the sampling location coordinates
locations = read.csv("metadata.csv", header = T, sep = ";", row.names = 1)
head(locations)
# Load in the mountain range raster layer shape file
rockys = raster("alt.grd")
rockys
### Piechart preparations
# Abundances summed for locality
local_abundance_input = cbind(abundance, site = metadata$site_code)
local_abundance = aggregate(. ~ site, data = local_abundance_input, FUN = sum)
# Site coordinates
pop_coord_input = data.frame(lon = locations$longitude,
lat = locations$latitude,
site = metadata$site_code)
pop_coord = aggregate(. ~ site, data = pop_coord_input, FUN = mean)
# only the relatively large read numbers
frequent_abund = local_abundance[,2:17]
# create a pdf file map of oligotype frequency distribution across study area
pdf("poplar_distribution_pies.pdf", height = 8.27, width= 10.8, pointsize = 18)
par(mfrow=c(1,1),mar=c(4,4,2,0), oma=c(0,0,0,0))
plot(rockys, maxpixels= 750000, xlim=c(-155,-95), ylim=c(45,70), useRaster=TRUE,
interpolate=TRUE, box=FALSE, axes=FALSE, col=gray.colors(9,start=1,end= 0.1, gamma=0.2),
legend= F, xlab= "Longitude (ยฐ E)", ylab= "Latitude (ยฐ N)", cex.lab=1)
plot(popdistr, add=T, col=alpha("darkgreen", 0.5) , border=FALSE, xlim=c(-155,-95), ylim=c(45,70))
map(database = "world", add= TRUE, xlim = c(-155,-95), ylim = c(45,70), myborder = c(0,0))
axis(side = 1, lwd = 1, lwd.ticks = 1, tck=-0.02, cex.axis=1)
axis(side = 2, lwd = 1, lwd.ticks = 1, tck=-0.02, cex.axis=1)
for (i in 1:nrow(pop_coord)){
add.pie(as.numeric(sqrt(frequent_abund[i,])), pop_coord[i,2], pop_coord[i,3],
radius=log(sum(frequent_abund[i,]))/5,
col = my_color, labels = "")
}
dev.off()
##### 6. Making the haplotype network #####
input <- "repr_seqs_sphaerulina_aligned.fasta"
d <- ape::read.dna(input, format='fasta')
e <- dist.dna(d)
h <- pegas::haplotype(d)
h <- sort(h, what = "label")
(net <- pegas::haploNet(h))
haplo_size = log(apply(frequent_abund,2,sum))/5
pdf("haplotype_network.pdf",height = 8.27, width= 11.69, pointsize = 18 )
plot(net, scale.ratio=1, bg = my_color,
labels = F,
threshold = 0,
size = haplo_size)
dev.off()
|
2d70aa844bb5406fda8790b1b946c760c97f670b | f439a076bc3fcac2c8d7eb72e69dc8d24a00b263 | /Unit 3 Logistic Regression/HealthCare.R | 9a200cff6a37d9a4f2379798c8f150de09d2ebe9 | [] | no_license | jakehawk34/MIT-Analytics | 73f9afb0cbfbbd8202e415f0c50c8e638aa76db1 | daa2ca2eca44ba6c74ba5773d992f68e8c775b90 | refs/heads/main | 2023-05-07T13:54:40.796512 | 2021-05-21T00:31:11 | 2021-05-21T00:31:11 | 344,290,207 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,725 | r | HealthCare.R | # Unit 3: Logistic Regression
# Modeling the Expert: An Introduction to Logistic Regression
quality = read.csv("quality.csv", stringsAsFactors = TRUE)
str(quality)
table(quality$PoorCare)
# 98 patients received good care, 33 patients received poor care
# Our baseline model has an accuracy of about 75%
98/ 131
install.packages("caTools")
library(caTools)
# Use the caTools package to split the data set randomly into a training and test set
# Initialize the random number generator to 88 with set.seed()
set.seed(88)
split = sample.split(quality$PoorCare, SplitRatio = 0.75)
# TRUE means that we should put an observation in the training set
# FALSE means that we should put an observation in the testing set
split
qualityTrain = subset(quality, split == TRUE)
qualityTest = subset(quality, split == FALSE)
# Build a logistic regression model using OfficeVisits and Narcotics as independent variables.
# We'll call our model QualityLog and use the "glm" function for "generalized linear model" to build
# our logistic regression model.
QualityLog = glm(PoorCare ~ OfficeVisits + Narcotics, data = qualityTrain, family=binomial)
# family=binomial tells the glm function to build a logistic regression model
summary(QualityLog)
# AIC value. This is a measure of the quality of the model and is like Adjusted R-squared in that it accounts
# for the number of variables used compared.
# The preferred model is the one with the minimum AIC.
# Make predictions on the training set using predict()
predictTrain = predict(QualityLog, type="response")
summary(predictTrain)
# Let's see if we're predicting higher probabilities for the actual poor care cases as we expect.
tapply(predictTrain, qualityTrain$PoorCare, mean)
# Quick Question 3
Question3 = glm(PoorCare ~ StartedOnCombination + ProviderCount, data = qualityTrain, family=binomial)
summary(Question3)
# We can convert the probabilities to predictions using what's called a threshold value, t.
# If the probability of poor care is greater than this threshold value, t, we predict poor quality care.
# But if the probability of poor care is less than the threshold value, t, then we predict good quality care.
# Confusion matrix / classification matrix
# The rows are labeled with the actual outcome, and the columns are labeled with the predicted outcome.
# Each entry of the table gives the number of data observations that fall into that category.
# So the number of true negatives, or TN, is the number of observations that are actually good care and for which we predict good care.
# The true positives, or TP, is the number of observations that are actually poor care and for which we predict poor care.
# These are the two types that we get correct.
# The false positives, or FP, are the number of data points for which we predict poor care, but they're actually good care.
# And the false negatives, or FN, are the number of data points for which we predict good care, but they're actually poor care.
# We can compute two outcome measures that help us determine what types of errors we are making.
# They're called sensitivity and specificity.
# Sensitivity is equal to the true positives divided by the true positives plus the false negatives,
# and measures the percentage of actual poor care cases that we classify correctly.
# This is often called the true positive rate.
# Specificity is equal to the true negatives divided by the true negatives plus the false positives,
# and measures the percentage of actual good care cases that we classify correctly.
# This is often called the true negative rate.
# A model with a higher threshold will have a lower sensitivity and a higher specificity
# A model with a lower threshold will have a higher sensitivity and a lower specificity
# Compute classification tables using different threshold values
# Threshold value of 0.5
table(qualityTrain$PoorCare, predictTrain > 0.5)
# 70 cases with actual good care and predicted good care (true positive)
# 10 cases with actual poor care and predicted poor care (true negative)
# 4 cases with actual good care, but predicted poor care (false positive)
# 15 cases with actual poor care, but predicted good care (false negative)
# Sensitivity or true positive rate
10 / (10 + 15)
# Specificity or true negative rate
70 / (70 + 4)
# Threshold value of 0.5
table(qualityTrain$PoorCare, predictTrain > 0.7)
# Sensitivity goes down
8 / (8 + 17)
# Specificity goes up
73 / (73 + 1)
# Threshold value of 0.3
table(qualityTrain$PoorCare, predictTrain > 0.3)
# Sensitivity goes up
13 / (13 + 12)
# Specificity goes down
67 / (67 + 7)
# Which threshold value should we choose?
# A Receiver Operator Characteristic curve, or ROC curve, can help you decide which value of the threshold is best.
# Sensitivity of true positive rate on the y-axis
# False positive or 1 - specificity rate on the x-axis
# ROC curve always starts at (0, 0), which corresponds to a threshold value of 1
# ROC curve always ends at (1, 1), which corresponds to a threshold value of 0
# To generate ROC curves in R, we need to install a new package.
install.packages("ROCR")
library(ROCR)
# Use the predictions from predictTrain to create the ROC curve
# The prediction function takes two arguments
# The first argument is the predictions we made with our model
# The second argument is the true outcomes of the data points
ROCRpred = prediction(predictTrain, qualityTrain$PoorCare)
# We'll call the output of this ROCRperf, and use the performance function,
# which takes as arguments the output of the prediction function, and then what we want on the x and y-axes.
# In this case, it's true positive rate, or "tpr", and false positive rate, or "fpr".
ROCRperf = performance(ROCRpred, "tpr", "fpr")
plot(ROCRperf)
# Add colors to the ROCR plot
plot(ROCRperf, colorize = TRUE)
# Add threshold labels to the plot
plot(ROCRperf, colorize = TRUE, print.cutoffs.at=seq(0,1,0.1), text.adj=c(-0.2,1.7))
# So the Area Under the Curve shows an absolute measure
# N = number of observations
# Overall accuracy = (True Positives (TP) + True Negatives (TN)) / N
# Overall error rate = (False Positives (FP) + False Negatives (FN)) / N
# False negative error rate = FN / (FN + TP)
# False positive error rate = FP / (FP + TN)
# Quick Question 5
Question5 = glm(PoorCare ~ OfficeVisits + Narcotics, data = qualityTrain, family=binomial)
predictTest = predict(Question5, type="response", newdata=qualityTest)
# You can compute the test set AUC by running the following two commands in R:
ROCRpredTest = prediction(predictTest, qualityTest$PoorCare)
auc = as.numeric(performance(ROCRpredTest, "auc")@y.values)
auc
# Altogether, there has been 2,400 studies written using the Framingham data.
|
3e295df37b068646ee26b1eedaf911e5badc03d5 | 0e263b651da80bdf942d45fe037fc7b68be51a39 | /plot1.R | fa295ce99144628f41c4cea9363bcda38fd13d2e | [] | no_license | WaleedJaved/ExData_Plotting1 | 1a36035c230241eb1fc224284024b97788fff137 | 04421cccf8ba743e20f9fb1e082504fb089994bd | refs/heads/master | 2021-01-15T14:02:51.285214 | 2015-01-07T02:36:38 | 2015-01-07T02:36:38 | 28,894,495 | 0 | 0 | null | 2015-01-07T02:33:51 | 2015-01-07T02:33:51 | null | UTF-8 | R | false | false | 354 | r | plot1.R | library(sqldf)
path = "data.1.txt"
data <- read.csv.sql(path, sep=";",sql = 'select * from file where Date = "1/2/2007" or Date = "2/2/2007"')
data$Date <- strptime(paste(data$Date,data$Time), "%d/%m/%Y %H:%M:%S")
png("plot1.png",width=480,height=480)
hist(data[,3],col="red",main="Global Active Power",xlab="Global Active Power (kilowatts)")
dev.off()
|
dcb6704275c8e9adb666ff1c194694506d62ae04 | 68b027be0bdae0975b7a93656ba4712a6288b78b | /man/acquire.Rd | f336869b111263753d2216066a49dda83cdf3e00 | [] | no_license | sidjai/bbscrapeR | 80cb037001c615e448456f68c1584b8d43529139 | e2f78eec69253e8188cefe929633bfbd0593fb11 | refs/heads/master | 2021-01-17T21:33:42.201309 | 2015-08-28T17:32:21 | 2015-08-28T17:32:21 | 40,446,465 | 0 | 0 | null | 2015-08-09T18:39:13 | 2015-08-09T18:39:11 | null | UTF-8 | R | false | false | 2,899 | rd | acquire.Rd | % Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/acquire.R
\name{acquire}
\alias{acquire}
\title{Acquire player tracking data from http://stats.nba.com/}
\usage{
acquire(type = "shot", PlayerID = "2544", DateFrom = "", DateTo = "",
GameSegment = "", LastNGames = "0", LeagueID = "00", Location = "",
Month = "0", OpponentTeamID = "0", Outcome = "", Period = "0",
Season = "2013-14", SeasonSegment = "", SeasonType = "Regular+Season",
TeamID = "0", VsConference = "", VsDivision = "", PerMode = "PerGame")
}
\arguments{
\item{type}{Required type of tracking data. Eligible values are 'shot', 'rebound',
'pass', 'shotdefend', 'reb', 'shots'. The most useful/interesting are 'shot' and 'rebound'.}
\item{PlayerID}{Required player ID. See \code{data(players)}}
\item{DateFrom}{Optionally set date to begin acquiring data (in the format 'YYYY/MM/DD')}
\item{DateTo}{Optionally set date to stop acquiring data (in the format 'YYYY/MM/DD')}
\item{GameSegment}{Optionally filter by 'First+Half', Second+Half', or 'Overtime'.}
\item{LastNGames}{Optionally filter by the last 'n' number of games.}
\item{LeagueID}{Character vector with any combination of '00', '10', and/or '20'.
These codes stand for NBA, WNBA and D-League, respectively.}
\item{Location}{Optionally filter by 'Home' or 'Road' games?}
\item{Month}{Optionally filter by Month (use '1' for Oct, '2' for Nov, etc.)}
\item{OpponentTeamID}{Optionally filter by opposing team}
\item{Outcome}{Optionally filter by wins (use 'W') or losses (use 'L')}
\item{Period}{Optionally filter by quarter/OT (use '1' for 1st Quarter, '2' for 2nd, etc.)}
\item{Season}{Required filter on season year}
\item{SeasonSegment}{Optionally filter by 'Pre All-Star' or 'Post All-Star'}
\item{SeasonType}{Required filter on either 'Regular Season' or 'Playoffs'}
\item{TeamID}{}
\item{VsConference}{}
\item{VsDivision}{}
\item{PerGame}{Aggregate 'PerGame' or 'Totals'. Only relevant for "dashboards"}
}
\description{
All the arguments to this function
}
\examples{
\dontrun{
# Defaults to Lebron's shots from the 2013-14 regular season
shots <- acquire()
# Lebron's rebounds from the 2013-14 regular season
rebounds <- acquire("rebound")
# The rest of these data types are "dashboards"
# That is, they show summary statistics that will change over
# the season. For this reason, you probably don't want to
# store these in a database.
pass_dash <- acquire("pass")
defense_dash <- acquire("shotdefend")
reb_dash <- acquire("reb")
shot_dash <- acquire("shots")
# All the shots for the 2013-14 season!
ids <- players[, 'PlayerID']
lshots <- lapply(ids, function(x) acquire(PlayerID = x))
allshots <- do.call("rbind", lshots)
# All the rebounds for the 2013-14 season!
lrebounds <- lapply(ids, function(x)
acquire(type = "rebound", PlayerID = x))
allrebs <- do.call("rbind", lrebounds)
}
}
|
1605c12d8cf6e13c40cbcba1490665273976fe1e | 79b935ef556d5b9748b69690275d929503a90cf6 | /man/zgibbsmodel.Rd | 541f1a0a126f9809e967886c2e11c06d0166f173 | [] | no_license | spatstat/spatstat.core | d0b94ed4f86a10fb0c9893b2d6d497183ece5708 | 6c80ceb9572d03f9046bc95c02d0ad53b6ff7f70 | refs/heads/master | 2022-06-26T21:58:46.194519 | 2022-05-24T05:37:16 | 2022-05-24T05:37:16 | 77,811,657 | 6 | 10 | null | 2022-03-09T02:53:21 | 2017-01-02T04:54:22 | R | UTF-8 | R | false | false | 960 | rd | zgibbsmodel.Rd | \name{zgibbsmodel}
\alias{zgibbsmodel}
\title{
Gibbs Model
}
\description{
Experimental code. Creates an object representing a Gibbs point
process model. Typically used for theoretical calculations about
such a model.
}
\usage{
zgibbsmodel(beta = 1, interaction = NULL, icoef = NULL)
}
\arguments{
\item{beta}{
First order trend term. A numeric value, numeric vector,
pixel image, function, or a list of such objects.
}
\item{interaction}{
Object of class \code{"interact"} specifying the interpoint
interaction structure, or \code{NULL} representing the Poisson process.
}
\item{icoef}{
Numeric vector of coefficients for the interpoint interaction.
}
}
\details{
Experimental.
}
\value{
Object belonging to the experimental class \code{zgibbsmodel}.
}
\author{
\adrian.
}
\seealso{
\code{\link{methods.zgibbsmodel}}
}
\examples{
m <- zgibbsmodel(10, Strauss(0.1), -0.5)
}
\keyword{spatial}
\keyword{models}
|
7645acf34929a012f6ae09fbead7891bcf031105 | d7e5433372547d45adcae2abad40b68c6d5cf938 | /cdm_ppt_only.r | 7d4f14e8e0c3d873d00e895fcbc7a0c6f0222563 | [] | no_license | salvelinusbob/cumulative_deviation_precip | 635aacc98c553faf0f3855142ab53ecbb7789e7f | 41891014751c9b27b8bef034cf3182ddb37cfec5 | refs/heads/master | 2021-07-10T09:25:13.688620 | 2021-06-25T20:07:26 | 2021-06-25T20:07:26 | 191,414,549 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,168 | r | cdm_ppt_only.r |
require(tidyverse)
require(lubridate)
require(geoknife)
require(stringr)
require(zoo)
site_lat_dd <- 45.627547
site_lon_dd <- -89.437777
############################################
#This section returns the monthly PRISM PPT data for the site_no lat/long
#using the geoknife package and then combines it with the groundwater level data
############################################
stencil<-simplegeom(c(site_lon_dd,site_lat_dd))
ppt_job<-geoknife(stencil, (list(times = as.POSIXct(c('1895-01-01','2018-01-01')),url = 'http://cida.usgs.gov/thredds/dodsC/prism_v2', variables = 'ppt')), wait = TRUE)
ppt_data = result(ppt_job)[,c(1,2)]
colnames(ppt_data)<-c("obs_mo", "ppt_mo_obs")
ppt_data$obs_mo<-as.Date(as.POSIXct(ppt_data$obs_mo))
############################################
#select rolling mean length... 60 was optimal in WI
############################################
rolling_mean_length <- 60
ppt_data$obs_mo <- ymd(ppt_data$obs_mo)
ppt_data <- ppt_data %>%
mutate(ppt_mo_mean = rollmean(ppt_mo_obs, k=rolling_mean_length, align = "right", fill = FALSE))
ppt_data <- ppt_data %>%
filter(obs_mo> (paste0(1894+(rolling_mean_length/12),"-12-01")))
ppt_data <- ppt_data %>%
mutate(ppt_cdm = cumsum(ppt_mo_obs-ppt_mo_mean),
ppt_cdm_z = ((ppt_cdm-mean(ppt_cdm, na.rm = TRUE))/sd(ppt_cdm, na.rm = TRUE)))
ppt_data %>%
ggplot(aes(x=obs_mo))+
geom_line(aes(y=ppt_cdm_z, color = "CDM"), size = .5, color ="steel blue")+
theme(legend.position = c(.15,.9),
legend.title = element_blank(),
legend.background = element_rect(fill="white",
linetype = "solid",
color="light gray"),
legend.key = element_blank(),
panel.background = element_blank(),
panel.grid.major = element_line(color="light gray"),
axis.line = element_line(color = "gray"))+
labs(y = "CDM60 Z-score",
x="Observation Month")+
scale_x_date(date_breaks = "10 years",
date_labels = "%Y",
limits = as.Date(c('1906-01-01','2018-01-01')))
#####################################################
|
58aa453748780fe6bf68da501e6fb1178ac9073e | 02ac90fd3cb6dd42aa1728ba02aafcaddf59b861 | /Estimation_GVP_20Sept2016_Brooke.R | 92b04227b2b424f9b282b900ff076ff6949c1191 | [] | no_license | brooke-watson/eidr | dfe8d024a3f92a63df88784f88fe4274409425be | c7cee9c55cfee16af121da80ae8be354e8cda145 | refs/heads/master | 2021-01-22T13:03:39.792163 | 2016-11-18T15:57:36 | 2016-11-18T15:57:36 | 68,722,443 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 9,203 | r | Estimation_GVP_20Sept2016_Brooke.R | rm(list = ls())
library(tidyr)
library(ggplot2)
library(dplyr)
library(viridis)
library(deSolve)
library(knitcitations)
library(purrr)
library(animation)
library(ReporteRsjars)
library(ReporteRs)
library(poweRlaw)
library(grid)
# Set working directory - change
setwd("~/Documents/EIDR")
# Read data from csv files in folder
eidr <- read.csv(file = "eidr.csv", header = T, sep = ",")
eidr_pandemics <- read.csv(file= "pandemics.csv", header =T, sep= ",")
# NOTE(review): read.csv2() keeps dec = "," even when sep = "," is overridden, so the
# field separator and the decimal mark collide -- plain read.csv() was probably
# intended; confirm against the file format before changing.
eidr2 <- read.csv2(file = "eidr_with_pandemics.csv", header = T, sep = ",")
# Use only numeric values from eidr$Number.of.Deaths
# NOTE(review): parse_numeric() comes from readr (deprecated in favour of
# readr::parse_number()) and readr is not attached above -- confirm it is available.
eidr2$Number.of.Deaths2 <- parse_numeric(eidr2$Number.of.Deaths)
# Drop missing values and zero-death events before fitting the power law.
Number.of.Deaths3 <- eidr2$Number.of.Deaths2[!is.na(eidr2$Number.of.Deaths2)]
Number.of.Deaths4 <- Number.of.Deaths3[Number.of.Deaths3!=0]
# SMU = Number.of.Deaths4/World.population
# Fit a discrete power-law distribution (poweRlaw::displ) to the event death tolls.
m_nofd = displ$new(Number.of.Deaths4)
# The constrained search over pars = seq(1, 2, 0.01) was immediately overwritten by
# the unconstrained estimate below, so it only wasted computation; disabled here.
# est_nofd = estimate_xmin(m_nofd, pars=seq(1, 2, 0.01))
est_nofd = estimate_xmin(m_nofd)
m_nofd$setXmin(est_nofd)
# Plot fitness of Power Law distribution
plot(m_nofd)
lines(m_nofd)
# Set parameters
beta = 6.9e9 # base damage value
gamma = .0124 # convex damage function
# NOTE(review): beta, gamma, dt and mean_events are defined but never used later in
# this chunk -- presumably leftovers from a convex-damage variant; confirm before removal.
years = 100 # length of simulation
reps = 1000 # number of trials
dt = 1 #year
delta = 0.05 #discount rate
wtp = 638e3 # statistical value of each death from WTP=$64 for a 1/10000 risk reduction of death
SMU = 1/10000 # "standard mortality unit": a 1/10000 share of the population at risk
world_pop = 7.35e9 # world population (persons)
years_data = 2013 - 1940 # span of the historical event record, in years
mean_events = 106/years_data # historical mean events per year (106 events / 73 years, ~1.45)
# NOTE(review): the simulation below draws event counts with Poisson mean 2, not
# mean_events (~1.45) -- confirm which rate is intended.
# Draw a number of events (per year) from a Poisson distribution with mean 2
Z1 = matrix(rpois(reps*(years+1),2), reps, years+1)
# Max possible deaths in 1 event - 4 sizes
max_pandemic_size = 110*SMU * world_pop #include pandemics that could kill 1.1% of population
max_pandemic_size2 = 8*SMU * world_pop #include pandemics that could kill 0.08% of population
max_pandemic_size3 = 3*SMU * world_pop #include pandemics that could kill 0.03% of population
max_pandemic_size4 = 220*SMU * world_pop #include pandemics that could kill 2.2% of population
# Draw one common pool of event sizes from the fitted power law, then cap it at each
# scenario's maximum pandemic size.
power_samples_raw = dist_rand(m_nofd, 10*sum(Z1))
# BUG FIX: previously power_samples was truncated at max_pandemic_size first and the
# other pools were subset from it, so power_samples4 (cap 220 SMU > 110 SMU) was
# identical to power_samples and the SMU_220 scenario never differed from SMU_110.
# Filtering every pool from the raw draws leaves pools 1-3 unchanged and fixes pool 4.
power_samples = power_samples_raw[power_samples_raw < max_pandemic_size]
power_samples2 = power_samples_raw[power_samples_raw < max_pandemic_size2]
power_samples3 = power_samples_raw[power_samples_raw < max_pandemic_size3]
power_samples4 = power_samples_raw[power_samples_raw < max_pandemic_size4]
# Shared helper for the four scenario samplers: total deaths for `events` independent
# events in one year, each event size drawn without replacement from `pool`.
# Returns 0 when events == 0 (sum of an empty sample).
sum_event_deaths <- function(events, pool) {
  sum(base::sample(pool, events))
}
# Annual death totals under each truncation scenario. The power_samples* pools are
# free variables resolved when the function is called, exactly as in the original
# four copy-pasted definitions (now deduplicated through sum_event_deaths()).
deaths_per_year <- function(events) {
  sum_event_deaths(events, power_samples)
}
deaths_per_year2 <- function(events) {
  sum_event_deaths(events, power_samples2)
}
deaths_per_year3 <- function(events) {
  sum_event_deaths(events, power_samples3)
}
deaths_per_year4 <- function(events) {
  sum_event_deaths(events, power_samples4)
}
#Matrices with total number of deaths for all events in a year (it takes some time to run this part, ~15 min )
# D1..D4 are reps x (years+1) matrices: element [r, t] = total deaths in year
# t of trial r, one matrix per maximum-pandemic-size scenario.
D1 = plyr::aaply(Z1, c(1,2), deaths_per_year, .progress = "time")
D2 = plyr::aaply(Z1, c(1,2), deaths_per_year2, .progress = "time")
D3 = plyr::aaply(Z1, c(1,2), deaths_per_year3, .progress = "time")
D4 = plyr::aaply(Z1, c(1,2), deaths_per_year4, .progress = "time")
# NOTE(review): wtp_D1..wtp_D4 are not referenced again in the visible code
# (damage is recomputed from Z via damage_fn below) -- possibly dead.
wtp_D1 = wtp*D1 # multiply each number of deaths per year by the value of each life
wtp_D2 = wtp*D2 # multiply each number of deaths per year by the value of each life
wtp_D3 = wtp*D3 # multiply each number of deaths per year by the value of each life
wtp_D4 = wtp*D4 # multiply each number of deaths per year by the value of each life
# Equations for damage function - linear
# damage = value of a statistical life (wtp) * number of deaths (Z)
damage_fn <- function(Z) {
wtp *(Z)
}
# NOTE(review): lives_fn has an empty body and always returns NULL; it looks
# unfinished and is never used in the visible code.
lives_fn <- function(Z) {
}
# Reshape the four death matrices into one long data frame with columns
# rep / time / Z (deaths) / method (scenario label), then attach linear
# damages and their discounted counterparts. set_colnames is magrittr's;
# gather is tidyr's (superseded by pivot_longer in newer tidyr).
Z_all <-list(SMU_110=D1, SMU_8=D2, SMU_3=D3, SMU_220=D4) %>%
{ map2(., names(.), function(Z, method_name) {
Z %>%
as.data.frame() %>%
set_colnames(0:(ncol(Z) - 1)) %>%
mutate(rep = 1:n()) %>%
gather("time", "Z", -rep) %>%
mutate(time = as.numeric(time)) %>%
arrange(rep, time) %>%
mutate(method = method_name)
})} %>%
bind_rows() %>%
mutate(damage = damage_fn(Z)) %>%
group_by(method, rep) %>%
mutate(damage_discounted = damage * exp(-delta * time))
# Per-scenario, per-time-step summary of the death counts: mean, SD, and a
# mean + 2*SD upper band (lower band pinned at 0), with damages computed from
# each via damage_fn(). NOTE(review): `se` is the SD of Z, not a standard
# error, despite the name.
Z_ave = Z_all %>%
  group_by(method, time) %>%
  summarise(Z_ave = mean(Z), se = sd(Z), Z_upper = Z_ave+2*se, Z_lower=0) %>% #lower bound of damages is equal to zero
  mutate(damage = damage_fn(Z_ave), d_lower = damage_fn(Z_lower), d_upper = damage_fn(Z_upper)) #%>%
# BUG FIX: in the original, the pipe above was commented out but the line
#   mutate_each(funs(. * exp(-delta * time)), damage, d_lower, d_upper)
# was left behind as a free-standing statement; with no data argument it
# errors and aborts the script (and mutate_each()/funs() are defunct in
# current dplyr anyway). It is removed here, preserving the undiscounted
# Z_ave the script actually produced. To discount the averaged damages
# instead, re-attach the pipe and use:
#   mutate(across(c(damage, d_lower, d_upper), ~ .x * exp(-delta * time)))
# Per-scenario totals: discounted total damage per trial, then mean / sd /
# max / median of those per-trial totals across trials.
Z_tot = Z_all %>%
group_by(rep, method) %>%
summarize(total = sum(damage_discounted), damage_mean=mean(damage) ) %>%
group_by(method) %>%
summarize(method_total = mean(total), method_sd = sd(total), method_max = max(total), median(total))
# Worst single year per scenario: maximum damage and maximum death count.
Z_max = Z_all %>%
group_by(method) %>%
summarize(maxdam = max(damage), maxdeath = max(Z))
# --- Plots ------------------------------------------------------------------
# Section 1: per-scenario mean damage over time (black line) overlaid with a
# handful of individual trial trajectories (coloured lines). grid.arrange /
# ggsave come from gridExtra / ggplot2, assumed loaded earlier -- confirm.
#using damages - 4 simulations
#sim 1 - SMU 110
Z1_reps = filter(Z_all, method=="SMU_110", rep %in% 280:284)
plot110 <- ggplot(filter(Z_ave, method=="SMU_110"),
aes(x = time, y = damage))+
theme(legend.position="none")+
labs(title="Maximum SMU= 110")+
# geom_ribbon(mapping=aes(ymin = d_lower, ymax = d_upper)) +
geom_line() +
geom_line(data=Z1_reps, mapping=aes(color=as.factor(rep)))
plot110
#sim 2: SMU 8
Z2_reps = filter(Z_all, method=="SMU_8", rep %in% 90:94)
plot8 <- ggplot(filter(Z_ave, method=="SMU_8"),
aes(x = time, y = damage))+
theme(legend.position="none")+
labs(title="Maximum SMU= 8")+
# geom_ribbon(mapping=aes(ymin = d_lower, ymax = d_upper)) +
geom_line() +
geom_line(data=Z2_reps, mapping=aes(color=as.factor(rep)))
plot8
#sim 3: SMU 3
Z3_reps = filter(Z_all, method=="SMU_3", rep %in% 90:94)
plot3 <- ggplot(filter(Z_ave, method=="SMU_3"),
aes(x = time, y = damage))+
theme(legend.position="none")+
labs(title="Maximum SMU= 3")+
# geom_ribbon(mapping=aes(ymin = d_lower, ymax = d_upper)) +
geom_line() +
geom_line(data=Z3_reps, mapping=aes(color=as.factor(rep)))
plot3
#sim 4: SMU 4
Z4_reps = filter(Z_all, method=="SMU_220", rep %in% 90:94)
plot220 <- ggplot(filter(Z_ave, method=="SMU_220"),
aes(x = time, y = damage))+
theme(legend.position="none")+
labs(title="Maximum SMU= 220")+
# geom_ribbon(mapping=aes(ymin = d_lower, ymax = d_upper)) +
geom_line() +
geom_line(data=Z4_reps, mapping=aes(color=as.factor(rep)))
plot220
#save
ga <- grid.arrange(plot3, plot8, plot110, plot220, nrow=2, top = "Random simulations modelling damages over 100 years")
ggsave("Simulations.pdf", ga)
# Section 2: averages only. NOTE(review): this section reuses the names
# plot110/plot8/plot3/plot220, overwriting the simulation plots -- harmless
# only because those were already saved to Simulations.pdf above.
#plotting averages
#sim 1 - SMU 110
plot110 <- ggplot(filter(Z_ave, method=="SMU_110"),
aes(x = time, y = damage))+
theme(legend.position="none")+
labs(title="Maximum SMU= 110")+
# geom_ribbon(mapping=aes(ymin = d_lower, ymax = d_upper)) +
geom_line() # +
#geom_line(data=Z1_reps, mapping=aes(color=as.factor(rep)))
plot110
#sim 2: SMU 8
plot8 <- ggplot(filter(Z_ave, method=="SMU_8"),
aes(x = time, y = damage))+
theme(legend.position="none")+
labs(title="Maximum SMU= 8")+
# geom_ribbon(mapping=aes(ymin = d_lower, ymax = d_upper)) +
geom_line() #+
#geom_line(data=Z2_reps, mapping=aes(color=as.factor(rep)))
plot8
#sim 3: SMU 3
plot3 <- ggplot(filter(Z_ave, method=="SMU_3"),
aes(x = time, y = damage))+
theme(legend.position="none")+
labs(title="Maximum SMU= 3")+
# geom_ribbon(mapping=aes(ymin = d_lower, ymax = d_upper)) +
geom_line() #+
#geom_line(data=Z3_reps, mapping=aes(color=as.factor(rep)))
plot3
#sim 4: SMU 4
plot220 <- ggplot(filter(Z_ave, method=="SMU_220"),
aes(x = time, y = damage))+
theme(legend.position="none")+
labs(title="Maximum SMU= 220")+
# geom_ribbon(mapping=aes(ymin = d_lower, ymax = d_upper)) +
geom_line() #+
#geom_line(data=Z4_reps, mapping=aes(color=as.factor(rep)))
plot220
#plot all four in same panel, save as PDF
gave <- grid.arrange(plot3, plot8, plot110, plot220, nrow=2, top = "Averages damages over 100 years by maximum pandemic size")
ggsave("Averages.pdf", gave)
# Section 3: histograms. NOTE(review): these histogram the per-time-step MEAN
# death count (Z_ave, averaged over trials), not the raw annual death counts
# in Z_all -- confirm that is the intended quantity.
# histogram of number of deaths
h3<- ggplot(filter(Z_ave, method=="SMU_3"), aes(Z_ave)) +
geom_histogram(col="black",
fill="darkblue", bins=30)+
labs(title="Maximum SMU = 3", x="Deaths", y="Count")
h3
h8<-ggplot(filter(Z_ave, method=="SMU_8"), aes(Z_ave)) +
geom_histogram(col="black",
fill="darkblue", bins=30)+
labs(title="Maximum SMU = 8", x="Deaths", y="Count")
h8
h110<-ggplot(filter(Z_ave, method=="SMU_110"), aes(Z_ave)) +
geom_histogram(col="black",
fill="darkblue", bins=30)+
labs(title="Maximum SMU = 110", x="Deaths", y="Count")
h110
h220<-ggplot(filter(Z_ave, method=="SMU_220"), aes(Z_ave)) +
geom_histogram(col="black",
fill="darkblue", bins=30)+
labs(title="Maximum SMU = 220", x="Deaths", y="Count")
h220
hs <- grid.arrange(h3,h8,h110,h220, nrow=2, top = "Frequency of years with a given death count, based on maximum pandemic size")
ggsave("DeathHisto.pdf", hs)
# All four scenarios' mean annual death-count distributions overlaid as
# frequency polygons, coloured by scenario (method), then saved to PDF.
chs <- ggplot(Z_ave, aes(Z_ave, colour = method)) +
  geom_freqpoly(bins = 40, size = 1, alpha = 3/4) +
  # FIX: corrected typo in the displayed title ("Distrubtion" -> "Distribution")
  labs(title = "Distribution of annual death counts over 100 years, based on max pandemic size") +
  labs(x = "Deaths")
chs
ggsave("ColorfulDeathHisto.pdf", chs)
bf62efd32b50d474ce1e91f0a8f3887291645b7d | 42c460c1bad109f85222a704265ef54a28192f85 | /man/getCyanoAbund.Rd | 67c07781d798a5603fe21fd1473e5fc943528dc7 | [] | no_license | BKreakie/LakeTrophicModelling | bcc601df843a3db37a4b6f1837f622e439673212 | 5d4ab5fef6487e6aa68bb0c00949cd7dcd94b342 | refs/heads/master | 2021-01-17T14:12:19.493706 | 2015-06-10T15:50:46 | 2015-06-10T15:50:46 | 34,123,223 | 1 | 0 | null | 2015-04-17T15:07:52 | 2015-04-17T15:07:52 | null | UTF-8 | R | false | false | 750 | rd | getCyanoAbund.Rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/getCyanoAbund.R
\name{getCyanoAbund}
\alias{getCyanoAbund}
\title{Pulls down cyano abundance from NLA website}
\usage{
getCyanoAbund()
}
\value{
a data.frame with four fields: SITE_ID=the NLA site id for the lake,
cyanoCellsPerML=for each SITE_ID the sum of abundance (cells/ml)
of all phytoplankton for Division='Cyanophyta',
cyanoCat=cyano abundance category based on quartile distribution of cyanoCellsPerML
('LOW'<= Q1; 'MED' >Q1 and <Q4; 'HIGH' >=Q4), and
mcyst_conc in ug/l
}
\description{
This function downloads the NLA2007 phytoplankton data and calculates the
abundance (cells/ml) of cyanobacteria by lake
}
|
679387e62cb44e28e28fe6747f630aab431ea85c | 40e70931d7ea0efc47c7076034eb32c0fa0c7557 | /server.R | 3831638599c40f33c1184d650078345bb5ede633 | [] | no_license | btingski/HW-9 | 76045ab2466ea9ed42f47e3cdb42ff3686e331d0 | ada6fddec9109236ae123390f3b1ddc4f7f5137a | refs/heads/master | 2020-05-07T16:41:51.009594 | 2019-04-17T22:12:45 | 2019-04-17T22:12:45 | 180,693,332 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,034 | r | server.R | library(tidyverse)
library(shiny)
library(xgboost)
# Shiny server: scores a user-specified iris flower with a pre-trained xgboost
# model (`IrisModel`, assumed to be loaded elsewhere, e.g. global.R -- TODO
# confirm) and shows where the new observation falls relative to the iris data.
server <- function(input, output) {
  # Reactive 1-row data frame holding the flower measurements from the UI
  # inputs sep.length / sep.width / p.length / p.width.
  newPredict <- reactive({
    data.frame(
      Sepal.Length = input$sep.length,
      Sepal.Width = input$sep.width,
      Petal.Length = input$p.length,
      Petal.Width = input$p.width)
  })
  # Predicted class probabilities for the three iris species.
  # NOTE(review): assumes IrisModel returns one probability per class, in
  # setosa/versicolor/virginica order -- confirm against the training code.
  output$predictions <- DT::renderDataTable({
    data.frame(
      iris = c("setosa", "versicolor", "virginica"),
      Probs = predict(IrisModel, as.matrix(newPredict()))
    )
  }, options = list(ordering = TRUE))
  # Scatter of the iris data with the new observation highlighted in red
  # (points() uses the first two columns of newPredict(): Sepal.Length and
  # Sepal.Width, matching the plot axes).
  output$scatter_plot <- renderPlot({
    plot(x = iris$Sepal.Length, y = iris$Sepal.Width,
         xlab = "Sepal Length", ylab = "Sepal Width",
         main = "Sepal Length vs. Width")
    points(newPredict(), col = "red", pch = 19)
  })
  # Shared helper for the four marginal plots: histogram + density curve of
  # one iris measurement, with a vertical red line at the user-supplied value.
  # (Replaces four copy-pasted renderPlot bodies in the original.)
  density_hist <- function(values, new_value, xlab) {
    den <- density(values)
    hist(values, breaks = 10, prob = TRUE, xlab = xlab,
         main = "Histogram & Density Curve")
    lines(den, lty = 2, col = "blue")
    abline(v = new_value, col = "red")
  }
  output$plot1 <- renderPlot(density_hist(iris$Sepal.Width, newPredict()$Sepal.Width, "Sepal Width"))
  output$plot2 <- renderPlot(density_hist(iris$Sepal.Length, newPredict()$Sepal.Length, "Sepal Length"))
  output$plot3 <- renderPlot(density_hist(iris$Petal.Length, newPredict()$Petal.Length, "Petal Length"))
  output$plot4 <- renderPlot(density_hist(iris$Petal.Width, newPredict()$Petal.Width, "Petal Width"))
}
b72af89fcb3cb220fe3c2bf556dafd4e985d2531 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/mix/examples/da.mix.Rd.R | ecf0637efd7d4ca2dc29f43f213e2c796569b2f8 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 457 | r | da.mix.Rd.R | library(mix)
# Extracted example from the `mix` package's da.mix help page: data
# augmentation for the unrestricted general location model, run on the
# `stlouis` data set shipped with the package.
### Name: da.mix
### Title: Data Augmentation for Unrestricted General Location Model
### Aliases: da.mix
### Keywords: models
### ** Examples
data(stlouis)
s <- prelim.mix(stlouis,3) # preliminary manipulations
thetahat <- em.mix(s) # find ML estimate
rngseed(1234567) # set random number generator seed
newtheta <- da.mix(s, thetahat, steps=100, showits=TRUE) # take 100 steps
ximp1 <- imp.mix(s, newtheta) # impute under newtheta
|
f0b643a794ef863b91b8bbb273292cd6e1dbb47f | 4c63d31a4bd06b085ba805b31cb3484c72f9ef13 | /R/tab_all.R | 56d3f551d4194bb66d0cafca0b694c323046cfa2 | [] | no_license | squiebble/EpiFunc | fd7b5693fda0531dc86280b4f0f5f8d280741ff0 | 04a31e98643f921bc7fc208a6989be825a2896e3 | refs/heads/master | 2022-02-06T06:03:57.006183 | 2019-07-23T09:45:39 | 2019-07-23T09:45:39 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,867 | r | tab_all.R | #' Tabulate all variables
#'
#' @import tidyverse
#' @author Daniel Gardiner (daniel.gardiner@phe.gov.uk)
#'
#' @param data a data.frame
#' @param var a character specifying the variable to tabulate, if set to NULL the first variable will be used
#' @param by a character specifying the variable to stratify by, if set to NULL no stratification will be used
#' @param complete a logical specifying whether to use all levels for factor variables
#' @param arrange.factor.by a character with value either "level" or "value" describing how a factor variable should be ordered
#' @param show.percentage a character either 'column' or 'row' or NULL to indicate whether to show percentages in output
#' @param row.break.value a character specifying the value to use as row breaks
#' @param show.na.percentage a logical specifying whether to show percentages for NA values
#' @param n.decimals a numeric specifying the number of decimal places to show
#' @param trunc.length a numeric specifying the maximum character length to be shown in the output
#' @param plotit a logical specifying whether to plot the output
#' @return either (1) a data.frame or (2) a list including a data.frame and a ggplot figure
#'
#' @export
#' @examples
#'
#' # set dummy data
#'
#' set.seed(4)
#'
#' data = data.frame(onset.date = sample(seq(as.Date("2017-01-01"), as.Date("2018-06-01"), 1), 200, replace = TRUE),
#' sex = factor(c("M", "M", "F", NA, NA), c("F", "M", "Unk")),
#' age = sample(c(0:100), 200, replace = TRUE),
#' conf = sample(c("Confirmed", "Probable", "Probable"), 200, replace = TRUE),
#' status = sample(c("Student", "Staff", NA), 200, replace = TRUE),
#' geog = sample(c("South", "North", NA), 200, replace = TRUE))
#'
#' # apply function
#'
#' tab_all(data, var = c("sex", "age", "onset.date", "geog"))
#'
#' tab_all(data, var = c("sex", "age", "onset.date", "geog"), by = "conf")
#'
#' # using dplyr syntax
#'
#' data %>%
#' select(conf, sex, onset.date, geog) %>%
#' tab_all(by = "conf")
#'
#' data %>%
#' select(conf, sex, onset.date, geog) %>%
#' tab_all(by = "conf",
#' show.percentage = "row",
#' row.break.value = "_____")
#'
#' data %>%
#' select(conf, sex, onset.date, geog) %>%
#' tab_all(by = "conf",
#' show.percentage = "row",
#' plotit = TRUE)
#'
# Tabulate every variable in `data` (optionally stratified by `by`) by calling
# tab_var() once per variable and stacking the results into one character
# data.frame, with break rows (row.break.value) between variables. When
# plotit = TRUE, returns a list holding the table plus one plot per variable.
# Signature and behaviour are unchanged; internals are modernised:
#  - select_(.dots = ...) (deprecated)      -> select(all_of(...))
#  - mutate_all(funs(...)) (funs is defunct) -> mutate(across(...))
#  - removed a dead bare `temp` statement before the plotit branch
tab_all <- function(data,
                    var = NULL,
                    by = NULL,
                    complete = FALSE,
                    arrange.factor.by = "value",
                    show.percentage = "column",
                    show.na.percentage = TRUE,
                    row.break.value = " ",
                    trunc.length = 60,
                    n.decimals = 0,
                    plotit = FALSE){
  # coerce (e.g. a tibble) to a plain data.frame so `data[, name]` indexing
  # below behaves consistently
  data = as.data.frame(data)
  # if var is supplied, restrict to those columns (plus the stratifier)
  if(!is.null(var)){
    data = data %>%
      select(all_of(c(var, by)))
  }
  # variables to tabulate: every column except the `by` column
  vars = colnames(data)
  vars = vars[!(vars %in% by)]
  # accumulator for the stacked tables
  temp = NULL
  for(i in seq_along(vars)){
    # tabulate one variable; the unnamed arguments are passed positionally in
    # the same order as tab_var's signature
    x = data %>%
      tab_var(var = vars[i],
              by = by,
              complete = complete,
              arrange.factor.by,
              show.percentage,
              show.na.percentage,
              n.decimals,
              trunc.length,
              plotit = FALSE)
    # give any NA column an explicit name so it survives renaming below
    colnames(x)[is.na(colnames(x))] = ".NA"
    # prepend variable-name and class columns, blanking repeats so the
    # stacked table reads cleanly
    x = x %>%
      mutate(.variable = vars[i],
             .variable = ifelse(duplicated(.variable), "", .variable),
             .type = class(data[, vars[i]])[1],
             .type = ifelse(duplicated(.type), "", .type)) %>%
      select(.variable, .type, everything())
    colnames(x)[1:3] = c("variable", "type", "level")
    # make every column character so tables of different variable types can
    # be row-bound together
    x = x %>%
      mutate(across(everything(), as.character))
    # append, separated by a break row filled with row.break.value (the
    # scalar recycles across all columns)
    temp = rbind(temp, x, row.break.value)
  }
  if(plotit) {
    # return the combined table plus one plot per variable
    output = list()
    output[["table"]] = temp
    for(i in seq_along(vars)){
      p = data %>%
        tab_var(var = vars[i],
                by = by,
                complete = complete,
                arrange.factor.by,
                show.percentage,
                show.na.percentage,
                n.decimals,
                trunc.length,
                plotit = TRUE)
      # p is a list(table, plot); store the plot under its own name
      output[[names(p[2])]] = p[[2]]
    }
    return(output)
  } else {
    return(temp)
  }
}
|
55a08bc42d25f139fcdbfd6e00cb1eefee9ab731 | d2b6515695aa04df268a313ef8cbacf62d9ddec4 | /Size_Of_Adaptation_Space/DistinctFeasibilityDomains/PlotFunction.R | a035715d91e4ac845b306a6baf80958db9170e2f | [] | no_license | MITEcology/J-Th.Bio-Cenci-et-al-2017 | fa237f0e8f31385f35972ba90b89aba0c6578334 | 70a1288653d8496b2f6c5593bee6c691df8a1ab3 | refs/heads/master | 2021-07-23T17:23:35.524823 | 2017-11-02T11:31:20 | 2017-11-02T11:31:20 | 108,122,915 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 827 | r | PlotFunction.R | Tr = as.matrix(read.table('Transition.txt'))
# Plot the mean fraction of distinct feasibility domains against the mean
# structural constraint. `Tr` (read from Transition.txt above) holds one row
# per realisation: column 1 = structural constraint, column 2 = number of
# distinct domains, column 3 = maximum possible number of configurations.
# Consecutive groups of `realisation` rows belong to one data point.
Tr <- Tr[-1, ]                     # drop the first row
realisation <- 15                  # number of realisations for each data point
# group index: rows 1..15 -> group 1, rows 16..30 -> group 2, ...
grp <- ceiling(seq_len(nrow(Tr)) / realisation)
str_ctr_groups <- split(Tr[, 1], grp)
distinct_groups <- split(Tr[, 2], grp)
max_conf_groups <- split(Tr[, 3], grp)
# per-group means as a plain numeric vector
group_mean <- function(g) unname(vapply(g, mean, numeric(1)))
StrCtr <- group_mean(str_ctr_groups)
# normalise the mean distinct-domain count by the mean maximum configuration
# count within each group
Fraction_Distinct <- group_mean(distinct_groups) / group_mean(max_conf_groups)
plot(StrCtr, Fraction_Distinct, pch = 20, col = 'blue',
     xlab = 'Structural Constraint', ylab = 'Fraction of Distinct Domains',
     cex = 1.5)
|
e554065f685b25681171a08e6e72301d8dfff019 | 0878528229e41d3e6f8bacf179fe94572fa2b2f4 | /R/errors.R | 4a75782e421de031d9fbc9051e8679b51c51aca6 | [] | no_license | cran/jsontools | 6e2bc7dc3dc50b762fdae91f06fef1a6bea145e9 | a08f6832bd0103cdce6d9cbf4a370daf4029a2cb | refs/heads/master | 2023-03-15T04:43:32.763878 | 2021-03-22T08:40:09 | 2021-03-22T08:40:09 | 350,398,862 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,024 | r | errors.R | stop_jsontools <- function(message, ..., error_type = NULL) {
# Every jsontools condition carries the base class "jsontools_error"; when an
# error_type is supplied, a more specific "jsontools_error_<error_type>"
# class is prepended so callers can catch subtypes. abort() is rlang's
# classed-condition replacement for stop(); extra arguments in `...` are
# forwarded to it (e.g. condition fields).
class <- "jsontools_error"
if (!is.null(error_type)) {
class <- c(paste0("jsontools_error_", error_type), class)
}
abort(
message = message,
class = class,
...
)
}
# Build the user-facing message for a jsontools invalid-JSON error: a header
# with the (possibly abbreviated) list of invalid locations, followed by up
# to 5 per-location detail lines and a "... (N more errors)" trailer.
# NOTE(review): assumes the condition object `c` carries fields offsets,
# locations, errors and x_arg attached by the code that signals this
# condition -- confirm against the error constructor.
#' @export
conditionMessage.jsontools_error_invalid_json <- function(c) {
# show at most this many per-location detail lines
errors_shown <- 5
n_invalid <- length(c$offsets)
# abbreviate the location list when it is long: first 5, "...", last 5
if (n_invalid > 10) {
locs <- c(head(c$locations, 5), "...", utils::tail(c$locations, 5))
} else {
locs <- c$locations
}
locs <- paste0(locs, collapse = ", ")
# prefix the message with the backticked argument name, when one was given
if (c$x_arg != "") {
c$x_arg <- paste0("`", c$x_arg, "` has ")
}
head <- glue("
{c$x_arg}invalid JSON at {n_invalid} locations:
{locs}
")
# one "{location}: offset {offset}\n{error}" line per error, capped
size <- min(length(c$errors), errors_shown)
body <- glue::glue_data(c, "{locations}: offset {offsets}\n{errors}")[1:size]
if (n_invalid > errors_shown) {
body <- c(body, paste0("... (", n_invalid - size, " more errors)"))
}
body <- paste0(body, collapse = "\n")
# the last expression is the return value: header + blank line + details
paste0(head, "\n\n", body)
}
|
bf0e211b906dfdbf358be360111cab8f71d2bc3a | c37e17c403b5da91754459221ad9793b9f049386 | /03 Visualizations/innerJoin.R | c60fdacc140409dc1a0c27592dd1c061e72488e9 | [] | no_license | AnnaPeng/DV_RProject3 | 3528d331366ae0a3bf182877c2202479ef3e9063 | 143a97d55320fa343301d46437202ed4d0f3254d | refs/heads/master | 2016-09-01T12:01:36.007158 | 2015-10-17T05:06:22 | 2015-10-17T05:06:22 | 44,152,400 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 360 | r | innerJoin.R | df <- data.frame(dplyr::inner_join(baltimore, florida, by="CURRENT_ANNUAL_SALARY"))
# Scatter of annual salary by job title, coloured by gender, for the joined
# rows produced above. POSITION_TITLE.y is the florida-side title from the
# join's default ".x"/".y" suffixing. NOTE(review): the join above matches on
# CURRENT_ANNUAL_SALARY itself, so only coincidentally-equal salaries across
# the two cities survive -- confirm that is the intended comparison.
df %>% select(POSITION_TITLE.y, CURRENT_ANNUAL_SALARY, GENDER) %>% ggplot(aes(x = POSITION_TITLE.y, y = CURRENT_ANNUAL_SALARY, color = GENDER)) + theme(axis.text.x = element_text(angle = 90, hjust = 1)) + geom_point() + ggtitle("Annual Salary of Each Job's Position in 2014")
|
55779cff169fbc3633f67d91b63332af2d4517cb | cf4759b3b1c2f328081796f962612e2409511a31 | /man/mega_model.Rd | 20efb8a008d63dc29b887bb0ef2f1606ffea76ea | [
"MIT"
] | permissive | tsoleary/proteomixr | c9af171e90abb37411fdfeffe16adb0d540206b7 | 5351c9894c5af6a8fca54b23f23c1ea420c78429 | refs/heads/master | 2020-04-19T20:28:43.148698 | 2019-07-18T14:12:52 | 2019-07-18T14:12:52 | 168,416,162 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,192 | rd | mega_model.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mega_model.R
\name{mega_model}
\alias{mega_model}
\title{MEGA MODEL}
\usage{
mega_model(peptide, deg_old, deg_new, syn, t0_abun, per_lab, time)
}
\arguments{
\item{peptide}{peptide containing Leucine}
\item{deg_old}{degradation rate constant (k) of the "old" material around at the beginning of the experiment}
\item{deg_new}{degradation rate constant (k) of newly synthesized material}
\item{syn}{synthesis rate (amount/time)}
\item{t0_abun}{initial abundance}
\item{per_lab}{percent of D3-Leu in the labeled growth media}
\item{time}{vector of time to be modeled (\emph{e.g.} 0:168)}
}
\description{
Creates a data frame with the change of individual isotopes over time
}
\examples{
# initial conditions
peptide <- "SAMPLLLLLLER"
# initial total abundance at T0
t0_abun <- 1000
# fraction of labeled Leucine in the media
per_lab <- 0.50
# rates
deg_old <- 0.0500
deg_new <- 0.0500
syn <- 50
# length of time
time <- 0:168
mod <- mega_model(peptide, deg_old, deg_new, syn, t0_abun, per_lab, time)
}
\keyword{degradation}
\keyword{mega-model,}
\keyword{peptide}
\keyword{synthesis,}
\keyword{turnover,}
|
44964855b5b52cce3e79fc1b49e9241f76b68805 | fdfcd4b5d5175b7de9add1eee1cdf3335c52126c | /source/usecase2.R | 4b03b259afafeecde48714d7ad838caee609d8ed | [] | no_license | geng-lee/cell_types_signatures | 42b5626c6369d6822139ababcbb8e7f8aeb84d06 | efe497383a9ba7d5f1d0d3710f635b4f23d0dc02 | refs/heads/master | 2023-03-15T20:08:25.483935 | 2017-12-11T15:29:12 | 2017-12-11T15:29:12 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,547 | r | usecase2.R | #hypomethylated regions
# Rank each genomic region's score for every cell type against the other cell
# types, under three increasingly strict comparisons, and also return the
# worst (largest) of the three ranks.
#
# Args:
#   mean_score_matrix: regions x cell-types matrix of mean scores (average
#     DNA methylation, per the comments below).
#   sd_score_matrix: matching regions x cell-types matrix of SDs.
#   invert: FALSE counts cell types scoring LOWER than a given cell type
#     (hypomethylated regions); TRUE flips the direction via sign_for_score.
# Returns: a named list of four regions x cell-types rank matrices; each
#   entry is the number of competing cell types that score better.
# NOTE(review): %dopar% requires a registered foreach parallel backend
# (e.g. doParallel) -- confirm one is registered by the caller.
compute_cell_type_scores <- function(mean_score_matrix, sd_score_matrix, invert = FALSE){
if(invert){
sign_for_score <- -1
direction <- "higher"
sign_for_label <- "+"
}
else{
sign_for_score <- 1
direction <- "lower"
sign_for_label <- "-"
}
#the three metrics described above. The first one is simply the rank
# (rank with ties.method = "max", minus 1, so a value of 0 means "no other
# cell type scores better")
mean_score <- t(apply(sign_for_score * mean_score_matrix,
1, rank, ties.method = "max") - 1)
#the other two are more complicated since we need to get the rank of the average
#DNA methylation in the matrix that was modified by subtracting the SD...
#We thus add the average DNA methylation to the SD modified matrix and rank them together
#This has to be done for each cell type individually.
compute_ranks_in_sd_matrix <- function(multiplier){
overall_result <- foreach(cell_type_index = seq_len(ncol(mean_score_matrix)),
.export = c("mean_score_matrix", "sd_score_matrix", "sign_for_score"),
.combine = cbind,
.inorder = TRUE, #otherwise the cell types would be mixed up in parallelization
.final = function(x) {
colnames(x) <- colnames(mean_score_matrix)
rownames(x) <- rownames(mean_score_matrix)
return(x)
}) %dopar%
{
# rank this cell type's mean against all OTHER cell types' mean - k*SD
current_cell_type <- mean_score_matrix[,cell_type_index]
result <- t(apply(sign_for_score * cbind(current_cell_type,
(mean_score_matrix[,-cell_type_index] - multiplier * sd_score_matrix[,-cell_type_index])),
1, rank, ties.method = "max") - 1)
return(result[,1]) #return only first column with ranks of the respective cell type
}
return(overall_result)
}
mean_1_sd <- compute_ranks_in_sd_matrix(1)
mean_2_sd <- compute_ranks_in_sd_matrix(2)
#the worst rank (element-wise maximum across the three metrics)
worst_rank <- pmax(mean_score, mean_1_sd, mean_2_sd)
#combine regions with ranks
result <- list(mean_score, mean_1_sd, mean_2_sd, worst_rank)
names(result) <- c(paste("number of cell types with score", direction, "than average"),
paste("number of cell types with score", direction, "than (average", sign_for_label, "SD)"),
paste("number of cell types with score", direction, "than (average", sign_for_label, "2*SD)"),
"worst rank")
return(result)
}
#generate a list of cell type signatures
# For each biosource (cell type), keep the regions whose rank column -- the
# "number of cell types scoring better" -- is among the min.num.of.regions
# smallest; these regions form that cell type's signature. When
# max.num.of.regions is given, a random subsample caps the signature size.
# Returns a list of data frames, named by biosource via foreach's .final.
generate_cell_type_signatures <- function(unique_biosources, regions, ranks,
min.num.of.regions = 500,
max.num.of.regions = NULL){
# sanity check: the cap must not be below the minimum
if(!is.null(max.num.of.regions))
if(min.num.of.regions > max.num.of.regions)
stop("max.num.of.regions needs to be equal to or larger than min.num.of.regions.")
foreach(biosource = unique_biosources,
.final = function(x) setNames(x, unique_biosources)) %do% {
# NOTE(review): this cbind() is invariant across iterations and could be
# hoisted out of the loop.
regions_ranks <- cbind(regions, ranks)
result <- regions_ranks %>%
select(CHROMOSOME, START, END, celltypes_scoring_better = UQ(biosource)) %>%
# top_n with a negated column keeps the n SMALLEST ranks; ties are kept,
# so the result can be slightly larger than min.num.of.regions
top_n(min.num.of.regions, -celltypes_scoring_better)
if(!is.null(max.num.of.regions)){
#return a random sample of regions
result <- result %>% sample_n(min(nrow(result), max.num.of.regions))
}
return(result)
}
}
|
4d68c22d87f3d7a2b3cf3f53d3b08506c05cf391 | 6ca25d99a3212631a8f8558ce601a519d1f9417d | /databricks-connect/R/poc.R | 3680a44c4a862208229c8f4b2f3d76816370100e | [] | no_license | blairj09/databricks-exp | 561f161a23290b0fbcdbdb6ce504dfd1e6978002 | a204bf03a793e33b9e476b25fa027a32bf15ce72 | refs/heads/master | 2022-11-03T01:30:08.297479 | 2022-10-25T17:13:32 | 2022-10-25T17:13:32 | 244,766,431 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 265 | r | poc.R | library(sparklyr)
# Proof-of-concept: connect sparklyr to a Databricks cluster through
# databricks-connect, copy mtcars up, print the remote table, disconnect.
# Ask the databricks-connect CLI where its bundled Spark distribution lives.
spark_home <- system("databricks-connect get-spark-home", intern = TRUE)
sc <- spark_connect(method = "databricks",
spark_home = spark_home)
# Copy the local mtcars data frame into Spark (replacing any existing copy).
cars_tbl <- copy_to(sc, mtcars, overwrite = TRUE)
cars_tbl
spark_disconnect(sc)
|
fdbb98cb77c5915439d5296c752c2ec30760913d | a10e0c583478ff199d4c9f2db8fb3df5d99a2076 | /data/create_alc.R | 78fa3dad65d23243afc621a7bec3a781d9ce3de9 | [] | no_license | bramberntzen/IODS-project | 34fce96fce2655800d9c57386cf1caa319d2e8ec | 997faea1b065060414f4360ad1fd5fca777669a4 | refs/heads/master | 2020-04-02T12:56:08.029137 | 2018-12-09T14:49:43 | 2018-12-09T14:49:43 | 154,459,049 | 0 | 0 | null | 2018-10-24T07:39:30 | 2018-10-24T07:39:30 | null | UTF-8 | R | false | false | 3,471 | r | create_alc.R | #Bram Berntzen, 7.11.2018, student performance data. data wrangling week 3.
#Read student_mat.csv and student_por.csv into R
# NOTE(review): these two read.csv() calls use the default comma separator,
# but the corresponding course files are semicolon-separated (the
# read.table() calls below use sep = ";"); student_mat/student_por are never
# used again, so this looks like leftover exploration -- confirm.
student_mat <- read.csv(file = "student-mat.csv")
student_por <- read.csv(file = "student-por.csv")
#data structure
str(student_mat)
str(student_por)
#data dimensions
dim(student_mat)
dim(student_por)
#Check column names of mat and por
colnames(student_mat)
colnames(student_por)
# base URL of the hosted copies of the two questionnaire data sets
url <- "http://s3.amazonaws.com/assets.datacamp.com/production/course_2218/datasets"
# web address for math class data
url_math <- paste(url, "student-mat.csv", sep = "/")
# print out the address
url_math
# read the math class questionaire data into memory
math <- read.table(url_math, sep = ";" , header=TRUE)
# web address for portuguese class data
url_por <- paste(url, "student-por.csv", sep ="/")
# read the portuguese class questionaire data into memory
por <- read.table(url_por, sep = ";", header = TRUE)
# look at the column names of both data
colnames(math)
colnames(por)
# access the dplyr library
library(dplyr)
#Join the two data sets using the variables "school", "sex", "age", "address", "famsize", "Pstatus", "Medu", "Fedu", "Mjob", "Fjob", "reason", "nursery","internet" as (student) identifiers.
join_by <- c("school","sex","age","address","famsize","Pstatus","Medu","Fedu","Mjob","Fjob","reason","nursery","internet")
# join the two datasets by the selected identifiers; the inner join keeps
# only students present in BOTH the math and portuguese questionnaires
math_por <- inner_join(math, por, by = join_by, suffix = c(".math", ".por"))
# see the new column names
colnames(math_por)
# glimpse at the data
glimpse(math_por)
str(math_por)
dim(math_por)
glimpse(math_por)
colnames(math_por)
# --- Collapse the duplicated (non-identifier) columns into single columns ---
# dplyr, math_por, join_by are available
# print out the column names of 'math_por'
colnames(math_por)
# create a new data frame with only the joined columns
# NOTE(review): one_of() is superseded by all_of()/any_of() in recent
# tidyselect; it still works but is no longer the recommended form.
alc <- select(math_por, one_of(join_by))
# columns that were not used for joining the data
notjoined_columns <- colnames(math)[!colnames(math) %in% join_by]
# print out the columns not used for joining
notjoined_columns
# for every column name not used for joining...
for(column_name in notjoined_columns) {
# select two columns from 'math_por' with the same original name
# (they carry the ".math" / ".por" suffixes added by the join)
two_columns <- select(math_por, starts_with(column_name))
# select the first column vector of those two columns
first_column <- select(two_columns, 1)[[1]]
# if that first column vector is numeric...
if(is.numeric(first_column)) {
# take a rounded average of each row of the two columns and
# add the resulting vector to the alc data frame
alc[column_name] <- round(rowMeans(two_columns))
} else { # else if it's not numeric...
# add the first column vector to the alc data frame
alc[column_name] <- first_column
}
}
# glimpse at the new combined data
glimpse(alc)
#create a new column called alc_use by taking the average of the weekday and weekend alcohol use.
library(ggplot2)
# BUG FIX: the original `mutate(alc, alc_use + (Dalc + Walc) / 2)` was missing
# the `=`, so it referenced a not-yet-existing `alc_use` column and errored.
alc <- mutate(alc, alc_use = (Dalc + Walc) / 2)
# flag students whose average consumption exceeds 2 as high users
alc <- mutate(alc, high_use = alc_use > 2)
# 7. Glimpse at the joined and modified data to make sure everything
# is in order. The joined data should now have 382 observations of 35
# variables. Save the joined and modified data set to the 'data'
# folder, using for example write.csv() or write.table() functions.
# (1 point)
# (BUG FIX: the five lines above were raw, un-commented assignment text in
# the original file and made the script unparseable; kept here as comments.)
#take a glimpse at the alc data
library(tidyr)
gather(alc) %>% glimpse
alc
str(alc)
dim(alc)
# BUG FIX: the original wrote an undefined object `modified_alc_dataset`;
# save the joined and modified data frame `alc` instead.
write.csv(alc, file = "modified_alc_dataset")
|
97d047987f7c7044fb651cbae9b339c48e4d0727 | 1fb788e32550fe3f4add9b3df28ae69404943e8a | /man/outersect.Rd | 630d61bc8a8aecad1201a77f89e05ca352a9ac77 | [] | no_license | chrk623/usefulFunc | 985ffbef746fb3c36d7cd79c4deb934897553e46 | df486a36def86bd78fc3fc43d22bc8b380888d30 | refs/heads/master | 2020-03-19T20:23:00.118265 | 2019-08-14T23:17:56 | 2019-08-14T23:17:56 | 136,899,077 | 1 | 0 | null | null | null | null | UTF-8 | R | false | true | 278 | rd | outersect.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/outersect.R
\name{outersect}
\alias{outersect}
\title{outersect}
\usage{
outersect(x, y)
}
\arguments{
\item{x}{x}
\item{y}{y}
}
\description{
Opposite of intersect.
}
\examples{
outersect(1:3, 3)
}
|
a3b3a6a4f35dedacf14dc40acf85ed3d0b62e63b | d2a3b0d54bd54e488bf0a8a98af7b205e1d912c0 | /man/rename_excel2R.Rd | 5f32648192d1f64ae4d2e13d61e5b19ce9105a47 | [] | no_license | Ajfrick/ajfhelpR | 05feaa2e7625540455af4447271d5b77ddfd32bd | 64d8e60c64d47a9b8bef2ef5f0ecb653ac99d976 | refs/heads/master | 2023-06-24T23:24:15.562243 | 2023-06-15T15:36:12 | 2023-06-15T15:36:12 | 161,224,735 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 972 | rd | rename_excel2R.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rename_excel2R.R
\name{rename_excel2R}
\alias{rename_excel2R}
\title{Pipeable column header rename function}
\usage{
rename_excel2R(dat)
}
\arguments{
\item{dat}{Dataset to rename}
}
\value{
dataframe or tibble containing the updated names
}
\description{
This function takes a data frame or tibble object and replaces
any 'bad' column names (containing whitespace, dashes, slashes, etc.) with
more R-friendly column names. It is most often used when importing manually
entered Excel data into R for further analysis.
}
\examples{
library(tibble)
library(stringr)
set.seed(1234)
dat = tibble(
"Column 1" = 1:10,
"Column#2" = sample(1:10, size = 10, replace = T),
"Col1/Col2" = `Column 1`/`Column#2`,
"| New~Column |" = "test"
)
newdat = rename_excel2R(dat)
newdat
newdat2 = dat \%>\% rename_excel2R
all.equal(newdat, newdat2)
pipe_test = dat \%>\%
rename_excel2R \%>\%
select_if(is.double)
}
|
e4a4ea72ca629768830117496f41df87ba3c0a09 | cae6bb069c740e1d68716562f286dcaaa5d52942 | /R/zzz.R | 5dd2719cd53bb8cfbfdf0d7bb8ba2031468aeb51 | [] | no_license | briandconnelly/bdcR | 274a5a256798687e62526af2925e4b9be1d9b3ee | b1d7989aaa36f6ae487bd3c70b3ed86d2c51cc36 | refs/heads/master | 2020-06-03T15:05:45.678834 | 2017-07-26T22:26:41 | 2017-07-26T22:26:41 | 24,081,215 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 283 | r | zzz.R |
.onLoad <- function(libname, pkgname)
{
    # Install the GitHub-only dependency on first load if it is missing.
    # NOTE(review): installing packages at load time is a side effect that
    # CRAN policy disallows -- consider documenting the dependency instead.
    if (requireNamespace('ggplot2bdc', quietly=TRUE))
    {
        return(invisible(NULL))
    }
    message('ggplot2bdc not present. Installing...')
    devtools::install_github('briandconnelly/ggplot2bdc')
}
|
d07fed1f78a5d1c81d204ba216acc3abaea3be72 | cbadd8a51c648af6a3857fb51501f5231b85bddc | /604/Mod 2/Mod 2 (3) continuous distributions.R | 888dcd2b79955ae06c6a13521b18673337503448 | [] | no_license | lukehenslee/Coursework | 90b757805c7f6816361fe8dc0566d98ae374f19b | 6e7e8cf42691a06cca5db68e86dd5a8e1e7e99fa | refs/heads/master | 2023-08-14T10:26:15.992183 | 2021-09-24T20:33:32 | 2021-09-24T20:33:32 | 354,631,358 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,929 | r | Mod 2 (3) continuous distributions.R | #####################################################
#
# FISH 604
# Module 2 (part 3): Continuous distributions
# Franz Mueter
# Last modified: August 3, 2021
#####################################################
# Probability distributions - Continuous distributions
##################### Normal distribution:
## Probability density function
# pdf of N(0, 3^2)
x <- seq(-15, 15, length=100) # Define range of values for which to compute density
y <- dnorm(x, mean=0, sd=3)
par(mfrow=c(2,1), mar=c(2,2,1,1))
plot(x,y,type="l", col=4)
# Add other normal pdfs:
lines(x, dnorm(x, 0, 5), col=2)
lines(x, dnorm(x, 0, 8), col=3)
lines(x, dnorm(x, mean=8, sd=3), col=6)
## Cumulative distribution function:
# cdf of N(0, 3^2)
x <- seq(-15, 15, length=100) # Define range of values for which to compute density
y <- pnorm(x, mean=0, sd=3)
plot(x,y,type="l", col=4)
abline(h=c(0,1), lty=2)
# Add other normal cdfs:
lines(x, pnorm(x, 0, 5), col=2)
lines(x, pnorm(x, 0, 8), col=3)
lines(x, pnorm(x, mean=8, sd=3), col=6)
## Random number generation:
par(mfrow=c(1,1))
y <- rnorm(n=40, mean=0, sd=3)
# Histogram showing frequency of observations within automatically selected bins:
hist(y)
# Histogram showing density of observations within automatically selected bins
# (i.e. area under histogram sums to 1 like a pdf):
hist(y, prob=T, col=5)
# Add "true" pdf (i.e. pdf from which data were drawn at random:
lines(x, dnorm(x, mean=0, sd=3), col=4, lwd=3)
# Add normal pdf using parameters estimated from data
lines(x, dnorm(x, mean=mean(y), sd=sd(y)), lwd=2, lty=2)
# Add empirical density estimate:
lines(density(y), col="purple", lwd=3)
# Repeat above code several times to see variability in random draws!)
# Repeat with larger n (e.g. 1000)
##################### Uniform distribution:
## Simulate spatially random process
# (e.g. distribution of flatfish on sandy substrate):
x <- runif(100)
y <- runif(100)
plot(x,y)
# Repeat to see variability:
par(mfrow=c(3,3))
for(i in 1:9) {
x <- runif(100)
y <- runif(100)
plot(x,y)
}
##################### Log-normal distribution:
# Example: Abundances (where present) of jellyfish
# in Kodiak trawl samples (n = 36)
# BUG FIX: the original used scan(nlines=2) followed by two bare data lines,
# which only works when the lines are pasted interactively; define the data
# directly so the script can also be source()d.
jelly <- c(2, 79, 110, 42, 40, 21, 164, 82, 13, 361, 205, 17, 68, 49, 37, 44,
           7, 28, 4, 2, 6, 18, 40,
           7, 8, 2, 7, 23, 12, 29, 2, 93, 15, 27, 40, 32)

# After log-transformation, the estimated mean abundance
# and its variance are:
(mu <- mean(log(jelly)))
(Var <- var(log(jelly)))

par(mfrow=c(1,1))
hist(log(jelly), prob=TRUE, xlab="", ylab="Probability",
     main="log-transformed jellyfish abundances", col="slateblue")
x <- seq(0,6,by=0.2)
# Fitted normal distribution:
lines(x, dnorm(x, mean=mu, sd=sqrt(Var)), lwd=2)

hist(jelly, prob=TRUE, xlab="", ylab="Probability", nclass=20,
     main="'raw' jellyfish abundances", col="slateblue", ylim=c(0,0.035))
x <- c(seq(0,5,by=0.01), 6:400)
# Fitted log-normal distribution.
# FIX: spell out dlnorm's real argument names (meanlog/sdlog); the original
# relied on partial matching of mean=/sd=.
lines(x, dlnorm(x, meanlog=mu, sdlog=sqrt(Var)), lwd=2)

# Estimated mean and standard deviation of abundances on the raw scale:
(exp(mu + Var/2))                         # Mean
sqrt((exp(Var) - 1) * exp(2*mu + Var))    # Standard deviation (FIX: was mislabelled "Variance")
# Compare to the observed standard deviation of the data:
sqrt(var(jelly))
##################### Gamma distribution
# Gamma(shape = r, rate = beta): waiting time until the rth event for a
# process in which events occur randomly at rate beta (mean = shape/rate):
x <- seq(1,10, length=100)
y <- dgamma(x, shape=5, rate=2) # Mean = 5/2 = 2.5
plot(x, y, type="l", col=4)
lines(x, dgamma(x, 5, 1), col=2) # Mean = 5/1 = 5
lines(x, dgamma(x, 10, 2), col=3) # Mean = 10/2 = 5
# Cumulative probability that the 20th event has occurred by time x (hours),
# for events that occur randomly at an average rate of 3 per hour:
y <- pgamma(x, 20,3)
plot(x, y, type="l", col=4)
# Waiting time (the 0.95 quantile) such that we have at least a 95%
# probability of observing 20 random events at an average rate of 3 per hour:
wt <- qgamma(.95, 20, 3)
abline(h=0.95, v=wt, lty=2)
|
0e9e905f766d89702e8326182d0f08f2625c14a0 | 94aed35f1f7cca636419b88a53799f34e5c5dfee | /R/sizeFactors-methods.R | 8ac651b27bb6b014e0b5aba641ddcad0fa2f58db | [
"MIT"
] | permissive | trichelab/basejump | a4a3b9e58016449faeb9b3d77cf1c09d4eafe4c7 | 6724b10dbf42dd075c7db5854a13d9509fe9fb72 | refs/heads/master | 2020-12-12T11:54:17.660956 | 2020-01-08T13:24:07 | 2020-01-08T13:24:07 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,088 | r | sizeFactors-methods.R | #' Size factors
#'
#' @name sizeFactors
#' @note Updated 2019-08-19.
#'
#' @importMethodsFrom SingleCellExperiment sizeFactors sizeFactors<-
#'
#' @inheritParams acidroxygen::params
#' @inheritParams SingleCellExperiment::sizeFactors
#' @param ... Additional arguments.
#'
#' @return `numeric`.
#' Names correspond to object column names.
#'
#' @seealso
#' - `DESeq2::sizeFactors()`.
#' - `DESeq2::estimateSizeFactors()`.
#' - `DESeq2::estimateSizeFactorsForMatrix()`.
#' - `SingleCellExperiment::sizeFactors()`.
#'
#' @examples
#' data(
#' RangedSummarizedExperiment,
#' SingleCellExperiment,
#' package = "acidtest"
#' )
#'
#' ## SummarizedExperiment ====
#' object <- RangedSummarizedExperiment
#' object <- estimateSizeFactors(object)
#' head(sizeFactors(object))
#' mean(sizeFactors(object))
#'
#' ## SingleCellExperiment ====
#' object <- SingleCellExperiment
#' object <- estimateSizeFactors(object)
#' head(sizeFactors(object))
#' mean(sizeFactors(object))
NULL
#' @rdname sizeFactors
#' @name sizeFactors
#' @importFrom BiocGenerics sizeFactors
#' @usage sizeFactors(object, ...)
#' @export
NULL
#' @rdname sizeFactors
#' @name sizeFactors<-
#' @importFrom BiocGenerics sizeFactors<-
#' @usage sizeFactors(object, ...) <- value
#' @export
NULL
## If exporting a numeric value signature for SE, the SE method will mask SCE
## ANY value method. In this case, we need to export a corresponding SCE numeric
## method.
##
## See also:
## - https://github.com/drisso/SingleCellExperiment/pull/34
## nolint start
##
## SE methods are modified versions of the DESeqDataSet methods.
##
## > getMethod(
## > f = "sizeFactors",
## > signature = "DESeqDataSet",
## > where = asNamespace("DESeq2")
## > )
##
## > getMethod(
## > f = "sizeFactors<-",
## > signature = signature(
## > object = "DESeqDataSet",
## > value = "numeric"
## > ),
## > where = asNamespace("DESeq2")
## > )
##
## nolint end
## Updated 2019-08-06.
`sizeFactors,SummarizedExperiment` <- # nolint
    function(object) {
        ## Size factors are stored in the "sizeFactor" colData column;
        ## return NULL when they have not been estimated yet.
        cd <- colData(object)
        if (!"sizeFactor" %in% names(cd)) {
            return(NULL)
        }
        ## Name the vector by column (sample) names before returning.
        setNames(cd[["sizeFactor"]], colnames(object))
    }



#' @rdname sizeFactors
#' @export
setMethod(
    f = "sizeFactors",
    signature = signature("SummarizedExperiment"),
    definition = `sizeFactors,SummarizedExperiment`
)
## Updated 2019-08-06.
## Setter: stores (or clears, when value is NULL) the per-sample size
## factors in the "sizeFactor" colData column.
`sizeFactors<-,SummarizedExperiment,ANY` <- # nolint
    function(object, value) {
        if (!is.null(value)) {
            ## Size factors must be fully defined, finite, and strictly
            ## positive before they are stored.
            assert(
                all(!is.na(value)),
                all(is.finite(value)),
                all(value > 0L)
            )
            ## Drop names; colData rows already carry the sample names.
            value <- unname(value)
        }
        colData(object)[["sizeFactor"]] <- value
        validObject(object)
        object
    }



#' @rdname sizeFactors
#' @export
setReplaceMethod(
    f = "sizeFactors",
    signature = signature(
        object = "SummarizedExperiment",
        value = "ANY"
    ),
    definition = `sizeFactors<-,SummarizedExperiment,ANY`
)
|
ba2232988cf1e560fcc6b7bce2a03bf3f6c0a1fa | b7817c19197fc6020e9468bad14246b9cd7401b4 | /R/options.R | 814ea10bafc1fb274a31d6bd6c853a7f7981d812 | [] | no_license | cloudyr/gcloudR | bf929ee4988464a8b099a66127eb0fe92a1da964 | aa7a76c934f5fd95ca5bca52be4b58258e5abdaf | refs/heads/master | 2021-05-10T18:31:07.805485 | 2018-04-30T08:05:39 | 2018-04-30T08:05:39 | 118,128,129 | 5 | 1 | null | null | null | null | UTF-8 | R | false | false | 502 | r | options.R | to_load <- c("bigQueryR",
# "googleAnalyticsR",
"googleAuthR",
"googleCloudStorageR",
"googleComputeEngineR",
"googleKubernetesR",
"googleLanguageR"
# "searchConsoleR"
)
.onAttach <- function(...) {
needed <- to_load[!is_attached(to_load)]
if (length(needed) == 0)
return()
crayon::num_colors(TRUE)
attach_me()
}
is_attached <- function(x) {
paste0("package:", x) %in% search()
}
|
852735543931015502e1e912cd07724122d685cb | d11dba6dafe5f5204743e03662d8d6d216672393 | /man/validate_range.Rd | 805bf39312640991ef5891c0ca84d5fa0093157a | [] | no_license | ktargows/iptools | d7b6e260296750198444b0edde26a09df4ad3630 | d3d85680cd85d276672a42f4bbdeb8fac3d8758e | refs/heads/master | 2021-01-11T01:55:03.682784 | 2016-10-06T01:54:41 | 2016-10-06T01:54:41 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 825 | rd | validate_range.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RcppExports.R
\name{validate_range}
\alias{validate_range}
\title{check whether IPv4 ranges are valid}
\usage{
validate_range(ranges)
}
\arguments{
\item{ranges}{a vector of IPv4 ranges}
}
\value{
a logical vector, where TRUE indicates that the
provided entry is valid, and FALSE that it is not (or
isn't an IP range at all)
}
\description{
\code{validate_range} checks whether
a vector of IPv4 CIDR ranges ("127.0.0.1/32") are valid or not.
}
\examples{
validate_range("127.0.0.1/32")
#[1] TRUE
validate_range("127.0.0.1/33")
#[1] FALSE
}
\seealso{
\code{\link{ip_classify}} for classifying
(and, incidentally, validating) IPv4 and IPv6 addresses, or
\code{\link{range_boundaries}} for identifying the minimum
and maximum IPs within a range.
}
|
fe2455ed4ef300614922d08cb2b268838cadcfa0 | f77235eab62c884fd19a97c15edd495cfd94dd06 | /MediCo.R | 3e18ea31296b1dabc28194f06e951c701ab031fe | [] | no_license | corentincamous/medical_multivariate | 750eae406c3fa9e629c3c3fb4400f43016ac2303 | 51f9a1dde284e110dc5d29e88c7374762e35816f | refs/heads/master | 2020-08-06T15:41:21.709148 | 2020-08-04T10:52:46 | 2020-08-04T10:52:46 | 213,060,558 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,588 | r | MediCo.R | ### Loading libraries ----
library(dplyr)
library(ggcorrplot)
library(magrittr)
library(arsenal)
library(MASS)
#' Summarize a data frame.
#'
#' Builds a descriptive-statistics table via \code{arsenal::tableby}: counts,
#' missing values, mean (sd), median/range and quartiles for numeric columns;
#' counts and percentages for categorical columns.
#'
#' @param df_var data frame to be processed.
#' @param nums_var columns to select from the data frame; the default "."
#'   keeps all columns.
#' @param by_var variable on which the analysis is split (default "" gives
#'   overall, unsplit statistics).
#' @return a data frame summarizing descriptive statistics of the data frame.
#' @examples
#' \dontrun{
#' library(gapminder)
#' descriptive_statistic(gapminder)
#' }
descriptive_statistic <- function(df_var, nums_var = '.', by_var = ''){
  mycontrols <- tableby.control(test=FALSE, total=FALSE,
                               numeric.test="kwt", cat.test="chisq",
                               numeric.stats=c("N","Nmiss", "meansd", "medianrange", "q1q3"),
                               cat.stats=c("countpct"),
                               stats.labels=list(N='Count', Nmiss = 'Missing Values', meansd='Mean (sd)', medianrange='Median, Min, Max', q1q3='Q1,Q3'))
  # BUG FIX: the original subset ROWS (df_var[nums_var, ]) even though the
  # documented intent is to select COLUMNS; also honor the "." default,
  # which means "keep every column".
  if (identical(nums_var, '.')) {
    df_var_temp <- df_var
  } else {
    df_var_temp <- df_var[, nums_var, drop = FALSE]
  }
  f <- as.formula(paste(by_var, '.', sep = " ~ "))
  tableone <- arsenal::tableby(f, data = df_var_temp, control = mycontrols)
  return(as.data.frame(summary(tableone, title = paste('Descriptive Statistics by', by_var, sep = ' ')), text = TRUE))
}
#' Provide a univariate analysis given a data frame and a target variable.
#'
#' Fits one binomial GLM per explanatory column (\code{by_var ~ column}) and
#' stacks coefficient estimates, Wald statistics, odds ratios and their 95%
#' confidence intervals into a single table.
#'
#' @param df_var data frame to be processed.
#' @param by_var name of the binary outcome column each predictor is
#'   regressed against.
#' @return a data frame with one row per non-intercept coefficient:
#'   coefficient, sd error, z value, p-value, odds ratio, its 95% CI, and a
#'   `flag` column (1 when the CI excludes 1, 0 otherwise).
#' @examples
#' \dontrun{
#' d <- data.frame(y = rbinom(50, 1, 0.5), x = rnorm(50))
#' univariate_analysis(d, by_var = "y")
#' }
univariate_analysis <- function(df_var, by_var){
  # BUG FIX: `df_results` was used inside rbind() before it was ever
  # created (an error on first use), and the loop also regressed by_var on
  # itself; accumulate into a list and skip the outcome column.
  results <- list()
  for (col in setdiff(colnames(df_var), by_var)) {
    f <- as.formula(paste(by_var, col, sep = " ~ "))
    model1 <- glm(f, data = df_var, family = 'binomial')
    model1_sum <- summary(model1)
    # Odds ratios with Wald 95% confidence intervals.
    table_model1 <- as.data.frame(exp(cbind("Odds ratio" = coef(model1), confint.default(model1, level = 0.95))))
    table_model2 <- as.data.frame(model1_sum$coefficients)
    table_model1 <- cbind(table_model1, table_model2)
    table_model1$variable <- col
    results[[length(results) + 1L]] <- table_model1
  }
  df_results <- do.call(rbind, results)
  df_results$rownames <- rownames(df_results)
  # Drop intercept rows; keep only predictor coefficients.
  df_results <- df_results[!grepl("(Intercept)", df_results$rownames, fixed = TRUE), ]
  # flag = 1 when the 95% CI for the odds ratio excludes 1 (i.e. the CI does
  # NOT straddle 1), 0 otherwise.
  df_results$flag <- ifelse(df_results$`97.5 %` > 1 & df_results$`2.5 %` < 1, 0, 1)
  df_results[, c('variable','rownames','Estimate','Std. Error','z value','Pr(>|z|)','Odds ratio','2.5 %','97.5 %','flag')]
}
#' Provide a multivariate analysis given a data frame, explanatory
#' variables, and a target variable.
#'
#' Fits a binomial GLM of \code{by_var} on the variables that appear in both
#' \code{x_variables} and \code{scope_variables}, then runs bidirectional
#' stepwise (AIC) selection.
#'
#' @param df_var data frame to be processed.
#' @param by_var name of the binary outcome column.
#' @param x_variables candidate explanatory variables (e.g. those flagged by
#'   \code{univariate_analysis}).
#' @param scope_variables variables kept in the modelling data frame;
#'   assumed to include \code{by_var} -- TODO confirm with callers.
#' @return the stepwise-selected \code{glm} model object.
multivariate_analysis_model <- function(df_var, by_var, x_variables, scope_variables){
  f <- as.formula(paste(by_var, paste(intersect(x_variables, scope_variables), collapse = " + "), sep = " ~ "))
  df_var2 <- df_var[, scope_variables, drop = FALSE]
  model1 <- glm(f, data = df_var2, family = 'binomial')
  # BUG FIX: the original called step(model, ...) on an undefined object
  # `model`; the fitted model is `model1`.
  stepw <- step(model1, direction = "both")
  return(stepw)
}
#' Provides the coefficient estimates and statistics for a model
#' (multivariate_analysis_model output).
#'
#' @param model a fitted \code{glm} object (e.g. the output of
#'   \code{multivariate_analysis_model}).
#' @return a data frame with coefficient, sd error, z value, p-value, odds
#'   ratio and its 95% CI, one row per model term.
multivariate_analysis_summary <- function(model){
  model_sum <- summary(model)
  table_model1 <- as.data.frame(model_sum$coefficients)
  # BUG FIX: the original computed confint.default(stepw, ...) against a
  # global `stepw` object instead of the `model` argument.
  table_model2 <- as.data.frame(exp(cbind("Odds ratio" = coef(model), confint.default(model, level = 0.95))))
  cbind(table_model1, table_model2)
}
#' Provides the ROC curve for a model (multivariate_analysis_model output).
#'
#' Sweeps classification thresholds from 0 to 1 in steps of 0.01 and
#' computes sensitivity and specificity at each threshold from the model's
#' fitted probabilities and the observed outcomes (\code{model$y}).
#'
#' @param model a fitted binomial \code{glm} object.
#' @return a data frame with one row per threshold and columns Threshold,
#'   Sensitivity and Specificity (0/0 cells yield NaN).
multivariate_analysis_ROC <- function(model){
  threshold_list <- seq(0, 1, by = 0.01)
  # Fitted probabilities and observed outcomes do not change with the
  # threshold, so compute them once outside the loop.
  fitted_probs <- predict(model, type = "response")
  actual_values <- model$y
  rows <- vector("list", length(threshold_list))
  for (i in seq_along(threshold_list)) {
    threshold <- threshold_list[i]
    predicted_values <- ifelse(fitted_probs > threshold, 1, 0)
    # BUG FIX: the original mislabelled every confusion-matrix cell (its
    # "true negative" line actually counted true positives, etc.), which
    # swapped the Sensitivity and Specificity columns in the output.
    TP <- sum(predicted_values * actual_values)         # predicted 1, actual 1
    TN <- sum((1 - predicted_values) * (1 - actual_values))  # predicted 0, actual 0
    FN <- sum((1 - predicted_values) * actual_values)   # predicted 0, actual 1
    FP <- sum(predicted_values * (1 - actual_values))   # predicted 1, actual 0
    sensi <- TP / (TP + FN)
    speci <- TN / (TN + FP)
    rows[[i]] <- data.frame(Threshold = threshold, Sensitivity = sensi, Specificity = speci)
  }
  do.call(rbind, rows)
}
|
a227743f15005efb031315172dd3de8eb062dbfb | 29585dff702209dd446c0ab52ceea046c58e384e | /FitAR/R/DetAR.R | 8629d623242f6a36e082b55468c57f28a655627c | [] | no_license | ingted/R-Examples | 825440ce468ce608c4d73e2af4c0a0213b81c0fe | d0917dbaf698cb8bc0789db0c3ab07453016eab9 | refs/heads/master | 2020-04-14T12:29:22.336088 | 2016-07-21T14:01:14 | 2016-07-21T14:01:14 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 84 | r | DetAR.R | `DetAR` <-
function(phi){
z<-ARToPacf(phi)
1/prod((1-z^2)^(1:length(phi)))
}
|
96eb22b8474032ec486b441cd6c05b09ab324087 | 6410990169c60f2379927dae2eb4a0eea417023d | /NeuralNet.R | b76123c5ded4c0e95ba1e4f70d8303c5a34a86e6 | [] | no_license | vkrit/chula_datamining | 67bafc0d02eeb0515222ede47f0d439ea90ffa9d | 09c3ec45292ee3b6dfe95c5a019ff900efbee8ae | refs/heads/master | 2021-01-18T22:47:34.363881 | 2018-04-18T09:39:06 | 2018-04-18T09:39:06 | 30,960,597 | 9 | 11 | null | null | null | null | UTF-8 | R | false | false | 2,405 | r | NeuralNet.R | <<<<<<< HEAD
#install.packages('neuralnet')
library(neuralnet)
?neuralnet
iris <- read.csv("iris.data.csv", header=TRUE)
# Prepare iris
set.seed(567)
ind <- sample(2, nrow(iris), replace=TRUE, prob=c(0.7, 0.3))
trainData <- iris[ind==1,]
testData <- iris[ind==2,]
nnet_iristrain
nnet_iristrain <- trainData
#Binarize the categorical output
nnet_iristrain <- cbind(nnet_iristrain, trainData$Species == 'Iris-setosa')
nnet_iristrain <- cbind(nnet_iristrain, trainData$Species == 'Iris-versicolor')
nnet_iristrain <- cbind(nnet_iristrain, trainData$Species == 'Iris-virginica')
names(nnet_iristrain)[6] <- 'setosa'
names(nnet_iristrain)[7] <- 'versicolor'
names(nnet_iristrain)[8] <- 'virginica'
set.seed(567)
nn <- neuralnet(setosa+versicolor+virginica ~ Sepal.Length+Sepal.Width+Petal.Length+Petal.Width,
data=nnet_iristrain, hidden=c(3))
plot(nn)
mypredict <- compute(nn, testData[-5])$net.result
# Put multiple binary output to categorical output
maxidx <- function(arr) {
return(which(arr == max(arr)))
}
idx <- apply(mypredict, c(1), maxidx)
idx
prediction <- c('setosa', 'versicolor', 'virginica')[idx]
prediction
table(prediction, testData$Species)
=======
install.packages('neuralnet')
library(neuralnet)
?neuralnet
iris <- read.csv("iris.data.csv", header=TRUE)
# Prepare iris
set.seed(567)
ind <- sample(2, nrow(iris), replace=TRUE, prob=c(0.7, 0.3))
trainData <- iris[ind==1,]
testData <- iris[ind==2,]
nnet_iristrain <- trainData
#Binarize the categorical output
nnet_iristrain <- cbind(nnet_iristrain, trainData$Species == 'setosa')
nnet_iristrain <- cbind(nnet_iristrain, trainData$Species == 'versicolor')
nnet_iristrain <- cbind(nnet_iristrain, trainData$Species == 'virginica')
names(nnet_iristrain)[6] <- 'setosa'
names(nnet_iristrain)[7] <- 'versicolor'
names(nnet_iristrain)[8] <- 'virginica'
nn <- neuralnet(setosa+versicolor+virginica ~ Sepal.Length+Sepal.Width+Petal.Length+Petal.Width,
data=nnet_iristrain, hidden=c(3))
plot(nn)
mypredict <- compute(nn, testData[-5])$net.result
# Put multiple binary output to categorical output
maxidx <- function(arr) {
return(which(arr == max(arr)))
}
idx <- apply(mypredict, c(1), maxidx)
idx
prediction <- c('setosa', 'versicolor', 'virginica')[idx]
prediction
table(prediction, testData$Species)
>>>>>>> 0dc063d26dd6533029c726c4c7dd851ef93710f5
|
24e1e0098acff61c2a07b41c23a2c22719ec7fdb | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/depth/examples/sdepth.Rd.R | 4dcceae8ac192d3cc34501be2366538f663bdb55 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 566 | r | sdepth.Rd.R | library(depth)
### Name: sdepth
### Title: Calculation of spherical depth
### Aliases: sdepth
### Keywords: multivariate nonparametric robust directional
### ** Examples
## Tukey spherical depth for a dataset on the circle
set.seed(2011)
sdepth(pi,runif(50,min=0,max=2*pi))
## Tukey spherical depth for data in spherical coordinates.
sdepth(c(pi,pi/2),cbind(runif(50,min=0,max=2*pi),runif(50,min=0,max=pi)))
## Tukey spherical depth for data in Eudlidean coordinates.
x=matrix(rnorm(150),ncol=3)
x=t(apply(x,1,function(y){y/sqrt(sum(y^2))}))
sdepth(x[1,],x)
|
f43aaa767572d0f0655b3788042aa65b689f4ecc | 133459423391690024e97d4ec3c652742edca9e7 | /R/configure_logging.R | 87df63a521887d20314126ce77ae55cd6e54f0c6 | [] | no_license | mjlassila/logtime | 7fa2f1357c9e9635f61dcfe46a30dbaa5aa9e1a0 | 4515434d5a0bd5aa2d73a29d9eaf9e579d8cc956 | refs/heads/master | 2021-05-07T05:23:44.005628 | 2017-02-05T11:04:55 | 2017-02-05T11:04:55 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,569 | r | configure_logging.R | #' Configure logging options
#'
#' @param threshold_level Defines threshold level for printing logs.
#' Should be one of 'DEBUG', 'INFO', 'WARNING' or 'ERROR'. Default to 'DEBUG'.
#' It defines which logs should be printed. It prints logs which have level higher or equal to threshold_level.
#' The hierarchy is 'DEBUG' < 'INFO' < 'WARNING' < 'ERROR'.
#' @param output_file Either a path to a file or a connection. With default settings "" logs are printed to console.
#'
#' @details \code{configure_logging} sets global behaviour for logging and acts as a filter of how detailed logs should appear in the output.
#'
#' @seealso \code{\link{create_logger}}
#' @examples
#'
#' # set logging threshold level to INFO
#' configure_logging(threshold_level = 'INFO')
#'
#' # set logging threshold level to WARNING and
#' # set writing all logs to 'log.log' file
#' configure_logging(threshold_level = 'WARNING', output_file = 'log.log')
#'
#' @export
#'
configure_logging <- function (threshold_level = 'DEBUG', output_file = '') {
set_logging_level(level = threshold_level)
set_logging_file(file = output_file)
}
#' Get logging configs
#'
#' Returns the logging configuration previously set via
#' \code{configure_logging}.
#'
#' @return A list with elements 'logging_level' and 'file'.
#' @seealso \code{\link{configure_logging}}
#'
#' @examples
#'
#' get_logging_configs()
#' # $logging_level
#' # [1] "DEBUG"
#' #
#' # $file
#' # [1] ""
#'
#'
#' @export
get_logging_configs <- function() {
  # Read both settings back from the package-level state getters.
  list(
    logging_level = get_logging_level(),
    file = get_logging_file()
  )
}
|
515f8faa81f40d7f7c669076eea6191b51faca98 | 41c5ead4d095362fc41a7f4cfb3570dd40076084 | /man/PairWiseOverlappingIdents.Rd | 515dacb92dba21177f0966081210edfdc0acb0c7 | [
"MIT"
] | permissive | crazyhottommy/scclusteval | b5532a561cc49d5fe75013e3cf6cd007a76c26b1 | b1b22c794c6603aaa4469d94fb6ee4d81f445d4d | refs/heads/master | 2021-08-03T04:34:47.308572 | 2021-07-28T01:25:46 | 2021-07-28T01:25:46 | 159,884,093 | 73 | 11 | NOASSERTION | 2021-04-26T03:10:38 | 2018-11-30T22:10:26 | R | UTF-8 | R | false | true | 822 | rd | PairWiseOverlappingIdents.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/scclusterboot.R
\name{PairWiseOverlappingIdents}
\alias{PairWiseOverlappingIdents}
\title{Calculate pair-wise overlapping cluster identities for @ident slots from two Seurat objects}
\usage{
PairWiseOverlappingIdents(ident1, ident2)
}
\arguments{
\item{ident1}{a named factor vector. names are the cell names, the values are
the cluster id.}
\item{ident2}{a named factor vector. names are the cell names, the values are
the cluster id.}
}
\value{
A matrix of pairwise number of common cell identities for each cluster.
}
\description{
Calculate pair-wise overlapping cluster identities for two named factor vector. e.g.
seurat_obj1@ident and seurat_obj2@ident
}
\examples{
\dontrun{
PairWiseOverlappingIdents(pbmc@ident, pbmc_small@ident)
}
}
|
51fc78a4c355f6f2548df585bef5e212e0fa367a | 184180d341d2928ab7c5a626d94f2a9863726c65 | /issuestests/RJafroc/man/datasetBinned124.Rd | 9939004a921c37acf6527952f4920f0b7f00fe6f | [] | no_license | akhikolla/RcppDeepStateTest | f102ddf03a22b0fc05e02239d53405c8977cbc2b | 97e73fe4f8cb0f8e5415f52a2474c8bc322bbbe5 | refs/heads/master | 2023-03-03T12:19:31.725234 | 2021-02-12T21:50:12 | 2021-02-12T21:50:12 | 254,214,504 | 2 | 1 | null | null | null | null | UTF-8 | R | false | true | 1,421 | rd | datasetBinned124.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/datasets.R
\docType{data}
\name{datasetBinned124}
\alias{datasetBinned124}
\title{Binned dataset suitable for checking \code{\link{FitCorCbmRoc}}; seed = 124}
\format{A list with 8 elements:
\itemize{
\item{\code{NL}}{ Ratings array [1, 1:2, 1:10000, 1], of non-lesion localizations, NLs}
\item{\code{LL}}{ Ratings array [1, 1:2, 1:5000, 1], of lesion localizations, LLs}
\item{\code{lesionVector}}{ array [1:5000], number of lesions per diseased case, all set to one}
\item{\code{lesionID}}{ array [1:5000, 1], lesions labels on diseased cases, all set to one}
\item{\code{lesionWeight}}{ array [1:5000, 1], weights, all set to one}
\item{\code{dataType}}{ "ROC", the data type}
\item{\code{modalityID}}{ "1", treatment label}
\item{\code{readerID}}{ [1:2] "1" "2", reader labels}
}}
\usage{
datasetBinned124
}
\description{
A binned dataset suitable for analysis by \code{\link{FitCorCbmRoc}}. It was generated by
\code{\link{DfCreateCorCbmDataset}} by setting the \code{seed} variable to 124.
Otherwise similar to \code{\link{datasetBinned123}}.
}
\examples{
str(datasetBinned124)
}
\references{
Zhai X, Chakraborty DP (2017). A bivariate contaminated binormal model for robust
fitting of proper ROC curves to a pair of correlated, possibly degenerate,
ROC datasets. Medical Physics. 44(6):2207--2222.
}
\keyword{datasets}
|
520ede41d4c2f9fb836fa6b2b588621f6c70a656 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/deTS/examples/correction_factor.Rd.R | 681352c2c69a18ebc7f216f4c7c046d309f76449 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 226 | r | correction_factor.Rd.R | library(deTS)
### Name: correction_factor
### Title: Gene average expression level and standard deviation in GTEx
### data
### Aliases: correction_factor
### Keywords: datasets
### ** Examples
data(correction_factor)
|
6428835fa7936117dcb310489c612c2111923180 | 9b9cfe21efe8d9b29f7dbd38da3849ec329ce30e | /ui.r | 234d23c5f7c9bb9f87b80c43bffacb5edae7d0b2 | [] | no_license | Naams/HealthyTweet | b0d5562db3efc0619020ab2863b4ca7b8ca72bc9 | 72516dfecd8a49bc93e9ddfd816f527632eeb063 | refs/heads/master | 2021-01-15T18:58:56.998337 | 2014-11-25T11:37:36 | 2014-11-25T11:37:36 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,722 | r | ui.r | library(markdown)
shinyUI(navbarPage("Health Tweet!",
tabPanel("Graph Output",
sidebarLayout(
sidebarPanel(
selectInput("selection", "Choose a dataset:" ,choices = datas),
#actionButton("update", "Change"),
hr(),
helpText("This is data of tweets on some infectious diseases. Select the disease on the drop down and see what people said on
social twiiter --> For more click on the About tab under More")
),#end sidebar panel
mainPanel(
div(class="span6" , "On the Tweet", plotOutput('plot', width="auto")),
div(class="span6", "Word cloud", plotOutput("wordPlot",width="auto"))
)#end main panel
)#end sidebar layout
),#tabpanel
navbarMenu("More",
tabPanel("Table",
dataTableOutput("table")
),
tabPanel("About",
fluidRow(
column(6,includeMarkdown("about.md")
))
)#end of tabpanel
)#end of navbar menu
)) |
b6d70f7c86c428a7502c0dae532a781f354d432c | 27673f3fde5cad0cc54f989615ae00d18e90c900 | /WCAS Econometrics/hw3 code.R | 4092c1c636c428d1612f07e5eab05187eee2b5a5 | [] | no_license | jw388743/WCAS | a3ea287e64447914ba772ef6909b6fd27ab32661 | 10eeafeb80e1b0266049d1e42be643ddd211c1be | refs/heads/master | 2023-08-25T16:57:01.584229 | 2021-11-11T19:07:31 | 2021-11-11T19:07:31 | 205,045,411 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,925 | r | hw3 code.R | wine=read.csv("desktop/WCAS.ECONOMETRICS/wine.training.data.csv", na.strings=c(""," ","NA", "n.a.", "-", "--"))
head(wine)
wine.test=read.csv("desktop/WCAS.ECONOMETRICS/wine.test.data.csv", na.strings=c(""," ","NA", "n.a.", "-", "--"))
# Which columns contain missing values, and how many each (largest first).
na.cols <- which(colSums(is.na(wine)) > 0)
sort(colSums(sapply(wine[na.cols], is.na)), decreasing = TRUE)
# Exploratory histograms: the response (TARGET) followed by every predictor.
hist(wine$TARGET)
hist(wine$FixedAcidity)
hist(wine$VolatileAcidity)
hist(wine$CitricAcid)
hist(wine$ResidualSugar)
hist(wine$Chlorides)
hist(wine$FreeSulfurDioxide)
hist(wine$TotalSulfurDioxide)
hist(wine$Density)
hist(wine$pH)
hist(wine$Sulphates)
hist(wine$Alcohol)
hist(wine$LabelAppeal)
hist(wine$AcidIndex)
hist(wine$STARS)
# Descriptive statistics table (psych::describe).
require(psych)
describe(wine)
require(corrplot)
# corrplot() expects a correlation matrix, not a raw data.frame: passing
# `wine` directly errors out. Compute correlations over the numeric columns
# first; NAs are still present at this point, so use pairwise-complete
# observations.
corrplot(cor(wine[sapply(wine, is.numeric)], use = "pairwise.complete.obs"))
# Impute missing values: STARS falls back to 2 (a mid-scale rating); every
# other affected column falls back to its own column mean. The loop replaces
# seven copy-pasted ifelse() lines with identical behaviour.
wine$STARS=ifelse(is.na(wine$STARS),2,wine$STARS)
for (cl in c("Sulphates", "TotalSulfurDioxide", "Alcohol", "FreeSulfurDioxide",
             "Chlorides", "ResidualSugar", "pH")) {
  wine[[cl]] <- ifelse(is.na(wine[[cl]]), mean(wine[[cl]], na.rm = TRUE), wine[[cl]])
}
# Drop the first (index) column now that imputation is done.
wine=wine[,-1]
head(wine)
cor(wine)
# Recode STARS 4..1 into ordered quality labels (column becomes character).
wine$STARS[wine$STARS == 4] <- "Great"
wine$STARS[wine$STARS == 3] <- "Good"
wine$STARS[wine$STARS == 2] <- "Fair"
wine$STARS[wine$STARS == 1] <- "Poor"
# NOTE(review): wine.subset is not defined until further below -- this call
# fails if the script is run top to bottom; move it after the definition.
head(wine.subset)
# Recode LabelAppeal 2..-2 into labels ("Exeptional" typo kept: it is a level
# name other code may match on).
wine$LabelAppeal[wine$LabelAppeal == 2] <- "Exeptional"
wine$LabelAppeal[wine$LabelAppeal == 1] <- "Great"
wine$LabelAppeal[wine$LabelAppeal == 0] <- "Good"
wine$LabelAppeal[wine$LabelAppeal == -1] <- "Fair"
wine$LabelAppeal[wine$LabelAppeal == -2] <- "Poor"
# Records with at least one case sold.
wine.subset <- wine[which( wine$TARGET > 0), ]
describe(wine.subset)
# Model 1: OLS on all predictors, with residual diagnostics.
mod1 <- lm(wine$TARGET ~. , data = wine)
summary(mod1)
hist(mod1$residuals)
plot(mod1$residuals)
qqnorm(mod1$residuals)
qqline(mod1$residuals)
skew(mod1$residuals)
require(MASS)
# Model 2: reduced OLS. NOTE(review): using wine$-prefixed terms instead of
# bare names with `data =` prevents predict() from using new data later.
mod2=lm(wine$TARGET~wine$VolatileAcidity+wine$Chlorides+wine$FreeSulfurDioxide+wine$TotalSulfurDioxide+wine$pH+wine$Sulphates+wine$Alcohol+wine$AcidIndex+wine$LabelAppeal+wine$STARS)
summary(mod2)
hist(mod2$residuals)
plot(mod2$residuals)
qqnorm(mod2$residuals)
qqline(mod2$residuals)
skew(mod2$residuals)
summary(mod2)
# How many zero-sale records (motivates zero-inflated models below).
sum(wine$TARGET==0)
# Model 3: Poisson GLM.
mod3=glm(wine$TARGET~ wine$VolatileAcidity+wine$FreeSulfurDioxide+wine$Sulphates+wine$AcidIndex+wine$LabelAppeal+wine$STARS, family="poisson")
summary(mod3)
hist(mod3$residuals)
plot(mod3$residuals)
qqnorm(mod3$residuals)
qqline(mod3$residuals)
skew(mod3$residuals)
install.packages("pscl")
require(pscl)
help(zeroinfl)
# Model 4: zero-inflated Poisson.
mod4= zeroinfl(wine$TARGET~ wine$VolatileAcidity+wine$FreeSulfurDioxide+wine$pH+wine$AcidIndex+wine$LabelAppeal+wine$STARS, data=wine, dist = 'poisson', link = 'log')
summary(mod4)
hist(mod4$residuals)
plot(mod4$residuals)
qqnorm(mod4$residuals)
qqline(mod4$residuals)
skew(mod4$residuals)
# NOTE(review): `target` is never defined (should probably be wine$TARGET).
hist(target)
# Model 5: negative binomial GLM (MASS::glm.nb).
mod5=glm.nb(wine$TARGET~wine$Chlorides+wine$TotalSulfurDioxide+wine$pH+wine$Alcohol+wine$LabelAppeal+wine$STARS)
summary(mod5)
hist(mod5$residuals)
plot(mod5$residuals)
qqnorm(mod5$residuals)
qqline(mod5$residuals)
skew(mod5$residuals)
# Model 6: zero-inflated negative binomial.
mod6= zeroinfl(wine$TARGET~wine$Chlorides+wine$TotalSulfurDioxide+wine$Sulphates+wine$Alcohol+wine$LabelAppeal+wine$STARS, data=wine, dist = 'negbin')
summary(mod6)
hist(mod6$residuals)
plot(mod6$residuals)
qqnorm(mod6$residuals)
qqline(mod6$residuals)
skew(mod6$residuals)
# Model 7: like model 6. NOTE(review): wine$Sulphates appears twice in the
# formula; the duplicate term is redundant.
mod7=zeroinfl(wine$TARGET~wine$Chlorides + wine$TotalSulfurDioxide+wine$Sulphates+wine$Alcohol+wine$Sulphates+wine$LabelAppeal+wine$STARS, data=wine, dist = 'negbin')
summary(mod7)
# NOTE(review): dispersiontest() comes from the AER package, which is never
# loaded in this file -- confirm it is attached elsewhere.
dispersiontest(mod3)
plot(mod7$residuals)
qqnorm(mod7$residuals)
qqline(mod7$residuals)
skew(mod7$residuals)
# Compare candidate models by AIC.
cbind(AIC(mod2, mod6, mod7))
# Impute the hold-out set with the same rules as the training set: STARS
# defaults to 2, other affected columns fall back to their column mean.
# NOTE(review): the means are computed from the test set itself; using the
# training-set means would be the more orthodox choice.
wine.test$STARS=ifelse(is.na(wine.test$STARS),2,wine.test$STARS)
for (cl in c("Sulphates", "TotalSulfurDioxide", "Alcohol", "FreeSulfurDioxide",
             "Chlorides", "ResidualSugar", "pH")) {
  wine.test[[cl]] <- ifelse(is.na(wine.test[[cl]]),
                            mean(wine.test[[cl]], na.rm = TRUE),
                            wine.test[[cl]])
}
head(wine.test)
# Drop the index and (empty) TARGET columns from the hold-out set.
wine.test=wine.test[,-c(1,2)]
head(wine)
# NOTE(review): cor() fails here -- STARS/LabelAppeal are character by now.
cor(wine)
# Apply the same categorical recodings used on the training data.
wine.test$STARS[wine.test$STARS == 4] <- "Great"
wine.test$STARS[wine.test$STARS == 3] <- "Good"
wine.test$STARS[wine.test$STARS == 2] <- "Fair"
wine.test$STARS[wine.test$STARS == 1] <- "Poor"
wine.test$LabelAppeal[wine.test$LabelAppeal == 2] <- "Exeptional"
wine.test$LabelAppeal[wine.test$LabelAppeal == 1] <- "Great"
wine.test$LabelAppeal[wine.test$LabelAppeal == 0] <- "Good"
wine.test$LabelAppeal[wine.test$LabelAppeal == -1] <- "Fair"
wine.test$LabelAppeal[wine.test$LabelAppeal == -2] <- "Poor"
# Sanity check: no missing values should remain.
na.cols <- which(colSums(is.na(wine.test)) > 0)
sort(colSums(sapply(wine.test[na.cols], is.na)), decreasing = TRUE)
length(wine.test$TARGET)
# NOTE(review): the models were fit with wine$-prefixed formulas, so
# predict(mod, wine.test) cannot map the new data -- it silently returns the
# training fits. Refit with bare column names and `data =` before trusting
# these predictions.
pred.mod2=predict(mod2, wine.test)
pred.mod6=predict(mod6, wine.test)
pred.mod7= predict(mod7, wine.test)
head(wine)
pred.mod2
head(wine.test)
# NOTE(review): rmse() is presumably Metrics::rmse (never loaded here), and
# comparing training TARGETs against "test" predictions is only meaningful
# because of the predict() issue above.
rmse(wine$TARGET[1:3335], pred.mod2)
rmse(wine$TARGET[1:3335], pred.mod6)
rmse(wine$TARGET[1:3335], pred.mod7)
write.csv(pred.mod7,file= "prediction.model7.csv", row.names = F)
|
4d71e4bcc697cb420ae3a0e74aa9e73440aadeea | 4cac01fec2de5704e3c18eb5aa194e4edc0a6ebe | /qdm/footballScrape.R | e83691b60a714eb66ecb5a46dbc185e2f0f23056 | [] | no_license | wilsone21/courses | 45b97e691cef7f058cf19ecf5bc81f77af81fcff | 8630a2378fb9bd430df2f236c1af045148c2b425 | refs/heads/master | 2023-03-30T20:30:43.517674 | 2021-04-01T15:43:12 | 2021-04-01T15:43:12 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,708 | r | footballScrape.R | library(dplyr)
library(rvest)
# Scrape the 2018 college-football schedule and take the first HTML table.
scores = read_html("https://www.sports-reference.com/cfb/years/2018-schedule.html") %>%
  html_table(., header = FALSE) %>%
  `[[`(1)
# Clean the table: strip "(rank)" prefixes from team names; the "@" marker in
# X8 means the game was played at the X9 team's venue, which determines which
# side is home/away and which score belongs to whom.
scoresGood = scores %>%
  select(X6, X7, X8, X9, X10) %>%
  filter(X6 != "Winner") %>%
  mutate(X6 = sub("\\([0-9]+\\)", "", X6),
         X6 = stringr::str_trim(X6),
         X9 = sub("\\([0-9]+\\)", "", X9),
         X9 = stringr::str_trim(X9),
         homeTeam = ifelse(X8 == "@", X9, X6),
         awayTeam = ifelse(homeTeam == X9, X6, X9),
         homeScore = ifelse(X8 == "@", X10, X7),
         awayScore = ifelse(homeTeam == X9, X7, X10)) %>%
  select(homeTeam, awayTeam, homeScore, awayScore)
# Keep only the first 844 games (hard-coded cut-off for this season's page).
scoresGood = scoresGood[1:844, ]
# Non-FBS opponents to exclude from the dataset.
cutTeams = c("William & Mary", "Wofford", "Towson", "Mercer", "McNeese State", "Nicholls State",
             "Incarnate Word", "Liberty", "Elon", "Gardner-Webb", "Bethune-Cookman",
             "Kennesaw State", "Prairie View A&M", "Wagner", "Central Connecticut State",
             "Colgate", "Delaware State", "Indiana State", "Grambling State", "Howard",
             "Holy Cross")
scoresGood = scoresGood[-c(which(scoresGood$awayTeam %in% cutTeams)), ]
scoresGood = scoresGood[-c(which(scoresGood$homeTeam %in% cutTeams)), ]
# writeClipboard(sort(unique(unlist(c(scoresGood$homeTeam, scoresGood$awayTeam)))))
#
# writeClipboard(scoresGood$homeTeam)
#
# writeClipboard(scoresGood$awayTeam)
#
# writeClipboard(scoresGood$homeScore)
#
# writeClipboard(scoresGood$awayScore)
# Pad the unique team list with "NA" strings (609 is hard-coded so the column
# matches the number of game rows) and prepend it as an extra column.
teams = c(sort(unique(unlist(c(scoresGood$homeTeam, scoresGood$awayTeam)))), rep("NA", 609))
scoresGood = cbind(teams, scoresGood)
write.csv(scoresGood, "scoresGood.csv", row.names = FALSE, na = "")
|
1a89e74062642336aae7f185afbddec8a0bf5a45 | d4918568929a592a40ee705dc91614be17603c2c | /man/dict2xcat.Rd | 8ea6db2a98130914e33ae533f6b9ce14f97189e7 | [] | no_license | kevin05jan/iop | d722f6c8520cd457872f9a4f2d83294e1a3dc675 | 8a8b391976982985f1cfe66535d58a1606d4099b | refs/heads/master | 2020-08-02T14:25:27.256169 | 2019-10-19T09:43:06 | 2019-10-19T09:43:06 | 211,387,761 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 365 | rd | dict2xcat.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tmp.R
\name{dict2xcat}
\alias{dict2xcat}
\title{Extract categorical x variables}
\usage{
dict2xcat(model, newx, ...)
}
\arguments{
\item{model}{: data2dict object}
\item{newx}{: data.frame}
}
\value{
character vector
}
\description{
Extract categorical x variables
}
|
443dff575c34947ca902fef1e658c21a0ac2c53c | 912b41188b1cc689fd3d0dc868303bafd8d16144 | /man/get_evidence_1.Rd | b75703a8d203a71bfd74d8f85badc2b6791d1cb3 | [] | no_license | liesb/BIITE | 05bb0ae4960e7c93d657782f43e0f96cc72a4c3a | 068c9b3a261ebf5afa91c13e90c567e5007d5243 | refs/heads/master | 2021-01-17T09:49:12.360545 | 2016-06-03T11:15:03 | 2016-06-03T11:15:03 | 41,549,687 | 0 | 2 | null | null | null | null | UTF-8 | R | false | false | 609 | rd | get_evidence_1.Rd | % Generated by roxygen2 (4.1.1.9000): do not edit by hand
% Please edit documentation in R/immunogen_evidence.r
\name{get_evidence_1}
\alias{get_evidence_1}
\title{Evidence function}
\usage{
get_evidence_1(eli.dat, H, mol.names, pep)
}
\arguments{
\item{eli.dat}{the dataframe containing the outcome of the ELISpot experiments}
\item{H}{hypothesis, a string of values between 0 and 1 separated by commas. There should be length(mol.names) values}
\item{mol.names}{Names of the molecules in your ELISpot subjects}
\item{pep}{Name of the peptide currently being processed}
}
\description{
Computes P(D|H)
}
|
d68b70b28ac82859ee9807d5e031ee00702c600f | 9fe6d9a146dec62cda87efda4510b8b7563a0ed3 | /Mode/roadster/spaces/_DEV SANDBOX/Activity By Hour.abe869456dbb/notebook/cell-number-6.fe0ba3f83d2e.r | 844f1bd4d5a0e2c8f20a62119c15e266bfe8e2f8 | [] | no_license | DMilmont/mode | 666087e1d7d93b907a4d510712cfc71d66fb99e1 | f69a425900719c9854281565135f2a9ec8fa9999 | refs/heads/master | 2020-09-13T13:47:59.863926 | 2019-11-18T21:45:38 | 2019-11-18T21:45:38 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 960 | r | cell-number-6.fe0ba3f83d2e.r | options(repr.plot.width=13, repr.plot.height=5.5)
# Per (open/closed, weekday) event totals; `data` and `totals` come from
# earlier notebook cells.
data_graph <- data %>%
  mutate(open_char = ifelse(open == 1, 'Open', 'Closed')) %>%
  group_by(open_char, day_week) %>%
  summarize(total_events_by_open_close = sum(exist)) %>%
  ungroup()
data_graph <- data_graph %>%
  left_join(totals)
# Share of each weekday's events that happened while open vs closed.
data_graph <- data_graph %>%
  mutate(perc_time_day = total_events_by_open_close / total_events)
# head(data_graph)
# Create Bar Graph
# NOTE(review): theme_minimal() is applied AFTER theme(text = ...), so the
# Gotham font setting is discarded; swap the two calls to keep the font.
data_graph %>%
  ggplot(aes(x = fct_rev(fct_relevel(day_week, c('Sunday', 'Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday'))),
             y = perc_time_day, fill = open_char)) + geom_col() +
  theme(text = element_text(family="Gotham")) +
  theme_minimal() +
  scale_y_continuous(breaks = NULL) +
  geom_text(aes(label = scales::percent(perc_time_day)), position = position_stack(vjust = 0.5), color = 'white') +
  labs(x = '', y = '', fill = '') + scale_fill_grey(end = .6) +
  coord_flip()
302febc498a0f7d5d7b9257e60c0619270e6fd59 | ae92050ef3669b8a0727bd229be894181a822416 | /Lab05 - descriptive statistics and data preprocessing/lab5.R | 767bb4a1aa3d69a7cc9cf3c439660e2e2177b4aa | [] | no_license | Lauralicja/Advanced-Statistics-Laboratories | 8c6a98fe2189d1821fe8b718deaad2681db03671 | 1f0d70840d03aa7bf822adefccc2ef873108dff6 | refs/heads/master | 2021-05-21T03:28:31.376258 | 2020-09-07T11:08:30 | 2020-09-07T11:08:30 | 252,522,968 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 909 | r | lab5.R | # Exercise 1. and 2.
# Exercise 1 & 2: impute the missing values of `df` three different ways and
# print the first column of each imputed copy for comparison.
# NOTE(review): impute() is not base R; this looks like Hmisc::impute(), but
# no library() call loads it in this file -- confirm the caller attaches Hmisc.
fill_data_diferent_methods <- function(df){
  A <- impute(df, fun="random")  # NAs replaced by random observed values
  B <- impute(df, fun=mean)      # NAs replaced by the mean
  C <- impute(df, fun=median)    # NAs replaced by the median
  print("Random method")
  print(A[1])
  print("By mean")
  print(B[1])
  print("By median")
  print(C[1])
}
# Exercise 3. (FALSE = 0, TRUE = 1)
# Centre and scale the columns of `df` and print the result.
# `mean` and `sd` follow scale()'s conventions: logicals or per-column
# numeric vectors giving the centring/scaling values.
data_scaling <- function(df, mean, sd) {
  standardized <- scale(df, center = mean, scale = sd)
  print(standardized)
}
# Exercise 4.
# Exercise 4: intended to convert a data-frame column to a factor.
# NOTE(review): transform(df, col = as.factor(col)) does NOT convert an
# existing column -- it ADDS a new column literally named "col" containing
# as.factor() of the vector passed in. In-place conversion would need
# df[[name]] <- as.factor(df[[name]]); confirm which behaviour the exercise
# expects before changing it.
transform_column_as_factors <- function(df, col){
  E <- transform(df, col = as.factor(col))
  print(E)
}
# Exercise 5.
# Print the interquartile range of col1 and its correlation with col2.
# NOTE: `df` is accepted for interface compatibility but is not used.
find_IQR_and_corr <- function(df, col1, col2) {
  spread <- IQR(col1)
  cat("IQR = ", spread)
  strength <- cor(col1, col2)
  cat(", Correlation = ", strength)
}
# Exercise 6.
# Print the six-number summaries of both inputs and the correlation between
# those summaries; the correlation is also returned invisibly via print().
find_summaries_and_corr <- function(col1, col2) {
  first_summary <- summary(col1)
  second_summary <- summary(col2)
  print(first_summary)
  print(second_summary)
  summary_correlation <- cor(first_summary, second_summary)
  print(summary_correlation)
}
99bbe00c4a3a1178b8426f2efc43999809e97e3c | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/photobiology/examples/waveband.Rd.R | 59af94a57eb6bfdd6710ae174e3ecfa8542c766e | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 183 | r | waveband.Rd.R | library(photobiology)
### Name: waveband
### Title: Waveband constructor method
### Aliases: waveband new_waveband
### ** Examples
# Auto-generated example extracted from the package's Rd file: construct a
# 400-700 nm waveband via both constructor forms.
waveband(c(400,700))
new_waveband(400,700)
|
be81c59ca9d5d1868d638b012e34dbb8003fceac | 7559eb0cfcac2d4977a9ed34972fb5a1166d532d | /2.1_Reformat_TranscriptCounts.R | a74c596df91550bd302cfff7d13d4995de727ff1 | [] | no_license | ACharbonneau/zealous-octo-tanuki | 30d4c4810ab746c08b36d446f6c019f36883b1d9 | 212e1cc0b0de075dac7ede5c743a22f924575fd7 | refs/heads/master | 2021-04-22T06:38:04.884903 | 2020-07-06T14:08:36 | 2020-07-06T14:08:36 | 42,743,649 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 792 | r | 2.1_Reformat_TranscriptCounts.R | # Convert EST counts from Samtools to same format as HTseq count files and naming scheme
rm(list = ls())  # NOTE(review): wipes the whole workspace; avoid in shared scripts.
# Install function for packages
# Loads `x`, installing it from CRAN first if it is not already available.
# match.call() lets the package name be passed unquoted, e.g. packages(dplyr).
packages<-function(x){
  x<-as.character(match.call()[[2]])
  if (!require(x,character.only=TRUE)){
    install.packages(pkgs=x,repos="http://cran.r-project.org")
    require(x,character.only=TRUE)
  }
}
packages(dplyr)
# All samtools-derived count files in the current working directory.
countsDir <- file.path(".")
ALLTHEFILES <- list.files(countsDir, pattern = "*.samtools.*")
# Convert each input file: its first line is used as the output-file stem,
# the remaining tab-separated rows supply columns 1 and 3 for the new
# two-column counts file.
for (X in seq_along(ALLTHEFILES)) {
  # readLines() on a plain path opens and closes its own connection; the
  # original wrapped the path in file(), leaking one open connection per
  # input file. seq_along() also avoids the 1:length() trap on empty input.
  FileName <- readLines(ALLTHEFILES[X], n = 1)
  TempFile <- read.table(ALLTHEFILES[X], sep = "\t", header = FALSE, skip = 1,
                         comment.char = "")
  NewFile <- dplyr::select(TempFile, V1, V3)
  write.table(NewFile, paste0(FileName, ".counts.txt"), sep = "\t",
              row.names = FALSE, col.names = FALSE, quote = FALSE)
}
|
d67e475979df896ea0cf64aac68f659ef34b07fb | 737137f04de987936bb1fef7d5f25e1b77fd58e0 | /code/G_create_gifs/organize-images.R | ceca27b6620bacdf3e57db00343be64e3467ae79 | [] | no_license | ben-tanen/most-font | 271c2ae34d70c48c230a780b3e1c3ea4a8e28166 | 302cf628adeb4c52bf03243bc95921697fb97ca8 | refs/heads/master | 2021-06-17T23:21:36.036000 | 2021-02-20T17:35:53 | 2021-02-20T17:35:53 | 178,571,846 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 760 | r | organize-images.R | library(data.table)
setwd("~/Desktop/Projects/most-font/data")
dt <- data.table(read.csv('letter_font_scores.csv'))
# Rank fonts within each (letter, score) group. NOTE(review): the chained
# [order(...)] only prints a sorted view at top level; it does not reorder dt.
dt[, r := rank(.SD$scale_y_letter, ties.method = 'last'), by = list(letter, score)][order(letter, score, r)]
# Keep the top three ranked fonts per group, sorted.
dt <- dt[r <= 3][order(letter, score, r)]
# organize images for overlap-outline gifs
setwd("~/Desktop/Projects/most-font/images/overlap-outline")
for (l in LETTERS) {
  dt_a <- dt[letter == l]
  dt_a <- dt_a[order(score)]
  # NOTE(review): 1:nrow(dt_a) yields c(1, 0) when a letter has no rows;
  # seq_len(nrow(dt_a)) would be safe.
  for (i in 1:nrow(dt_a)) {
    font <- as.character(dt_a$font[i])
    print(font)
    # Copy into the _gif staging dir, then rename with a zero-padded frame
    # index so the gif frames sort by score.
    file.copy(paste0("overlap-outline_", l, "_", font, ".png"), "_gif")
    file.rename(paste0("_gif/overlap-outline_", l, "_", font, ".png"),
                sprintf("_gif/%03d_overlap-outline_%s_%s.png", i, l, font))
  }
}
|
558711d99da8600b2a08dd6e0a7bbebfd0d46676 | 8a20f077120fcc5e4d5828bbd6eb3c59d98deaaf | /man/get_statevars_ts.Rd | c0ffd0338fd3ec34ba9a4010a2f6dd51ad5be875 | [
"MIT"
] | permissive | diazrenata/cats | 98a6ce9c8918e55d33a46abb76e3abd7ed2865c0 | 545ef1a6d12476242093c8cfba86cf71f2a2fe0d | refs/heads/master | 2020-09-10T15:47:08.180197 | 2019-11-19T17:25:44 | 2019-11-19T17:25:44 | 221,743,982 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 362 | rd | get_statevars_ts.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data_handling.R
\name{get_statevars_ts}
\alias{get_statevars_ts}
\title{Get statevars on a TS}
\usage{
get_statevars_ts(spab)
}
\arguments{
\item{spab}{from make_spab}
}
\value{
dataframe with cols site singletons dat source sim timestep s0 n0
}
\description{
Get statevars on a TS
}
|
f3689ab88c5a68e680a42aac120904168a99f60a | 398a62a62591c973eedbf85975468e4976de40ed | /R/hello.R | 65495d5ec31b5d27731a2d109849d58d8518e1a9 | [] | no_license | uvarc/scratchr | 7a9deb4c68da94d0470d6df9d58b6c5fb303219b | 3fda6880b9a25260a9c9690d0f5f786a709d3275 | refs/heads/master | 2021-09-10T21:29:11.419881 | 2018-03-30T18:53:33 | 2018-03-30T18:53:33 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 680 | r | hello.R | #' Say hello
#' Say hello
#'
#' Delivers a greeting (and a gentle deadline reminder) via a cowsay animal.
#'
#' @param name what the program should call you; defaults to "user"
#' @param animal an optional cowsay animal to deliver the message; a random
#'   one is picked when NULL. See \code{\link[cowsay]{animals}} for choices.
#'
#' @export
#'
#' @examples
#' hello()
#' hello("hal")
hello <- function(name = "user", animal = NULL) {
  greeting <- paste0(
    "hello ", name, " ...\n",
    "you better get busy\n",
    next_year(), " will be here soon :)"
  )
  if (is.null(animal)) {
    animal <- sample(names(cowsay::animals), 1)
  }
  cowsay::say(greeting, by = animal)
}
|
67ab14e1fdbe9948041d04afea30e7f030bda796 | 7f81568d24ab2a116437947f95f69b1376e023b0 | /seqcovid_sequenceFiltered_for_clinicalData.R | d57062ae61fbfb34f2aae62310f62451c8705618 | [] | no_license | eugeUlVe/Nucleotide_Rcode | f6cb1ddabf3e703411e75492a0d028b304306a37 | 233ff85979f9ec17855df2c61c87e7db1066bb22 | refs/heads/main | 2023-09-05T22:29:26.917642 | 2021-11-23T15:24:56 | 2021-11-23T15:24:56 | 431,150,792 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,287 | r | seqcovid_sequenceFiltered_for_clinicalData.R | # **************************************************************************
# *
# * Authors:
# *
# * Center for Biological Research Margarita Salas (CIB), CSIC
# * Biocomputing Unit (BCU) of the National Center for Biotechnology (CNB), CSIC
# * Eugenia Ulzurrun (mariaeugenia.ulzurrun@cib.csic.es)
# *
# Short description: (FIRST DRAFT) This program makes fasta file for those Id's with query column in the dataset with values different to NA, 0 and 99.
#
# **************************************************************************
#!/usr/bin/env Rscript
# Expected arguments: clinical TSV, 1-based column index to filter on,
# input FASTA, output FASTA.
args = commandArgs(trailingOnly=TRUE)
if(length(args) < 4){
  stop("USE: Rscript seqcovid_sequenceFiltered_for_clinicalData.R <BioChemMolCli.tsv> <ColumnSelectedTSVfile> <sequences.fasta> <Output_DNA_sequences.fasta>")
}
data <- read.csv(args[1], header = TRUE, sep = "\t")
# NOTE(review): colum_data is computed but the loops below re-parse
# as.numeric(args[2]) on every access instead of reusing it.
colum_data <- as.numeric(args[2])
# setwd("/home/eugenia/Documents/MIREIA_DATOS/seqcovid")
# data <- read.csv("seqcovid_20210804.tsv", header = TRUE, sep = "\t")
# colum_data <- 11 # this is for Ct/Cq.PCR.gen.S
# NOTE(review): v_id and data_id_filter below are built but never used.
v_id <- c()
for(id in 1:length(data[,1])){
  v_id <- c(v_id, data[id,1])
}
# print(v_id)
data_id_filter <- unique(data[,1])
# Collect the row indices whose selected-column value is present and is
# neither 0 nor 99 (placeholder codes for "no value").
sample_row <- c()
for(fila in 1:length(data$Ct.Cq.PCR.gen.S)){
  if(is.na(data[fila,as.numeric(args[2])]) == FALSE){
    if(data[fila,as.numeric(args[2])] != 0 && data[fila,as.numeric(args[2])] != 99){
      # print(fila)
      sample_row <- c(sample_row, fila)
    }
  }
}
# Sample IDs (first column) of the retained rows.
samples_ID <- c()
for(s in 1:length(sample_row)){
  samples_ID <- c(samples_ID, data[sample_row[s],1])
}
# setwd("/home/eugenia/Documents/MIREIA_DATOS/seqcovid")
# NOTE(review): scan() splits on whitespace, so FASTA headers containing
# spaces would be broken into several tokens -- confirm the headers never do.
secuencias <- scan(args[3], what = character(), quiet = TRUE)
# secuencias <- scan("seqcovid_20210804.fasta", what = character(), quiet = TRUE)
# For each retained ID, find its ">ID" header in the FASTA and append the
# header plus the following token (the sequence) to the output file.
# Assumes each sequence occupies exactly one line after its header.
for(cov in 1:length(samples_ID)){
  sm_sample_ID <- paste(">", samples_ID[cov], sep = "")
  for(line in 1:(length(secuencias)-1)){
    if(sm_sample_ID == secuencias[line]){
      write(secuencias[line], file = args[4], append = TRUE)
      write(secuencias[line + 1], file = args[4], append = TRUE)
      # write(secuencias[line], file = "available_sequence_geneS_filtrado.fasta", append = TRUE)
      # write(secuencias[line + 1], file = "available_sequence_geneS_filtrado.fasta", append = TRUE)
    }
  }
}
print("Done")
|
696af900f7ccea7a779c5ecbf232bed83c088825 | 41d342303cf04be627a3e71e8d76cbcef0085a41 | /man/artifactRejection.Rd | aff278c165c8cdf1ff12ebb66e56ebf688e6e046 | [] | no_license | tdeenes/eegR | 03fe37ba0cb1678be0795d5dcaffe277489ec9be | 8796d980aac9400ad6e9eee269021ecfb502d86e | refs/heads/master | 2021-07-16T14:24:26.920801 | 2021-04-18T16:03:05 | 2021-04-18T16:03:05 | 23,415,305 | 13 | 5 | null | null | null | null | UTF-8 | R | false | true | 1,406 | rd | artifactRejection.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/erp_preprocess.R
\name{artifactRejection}
\alias{artifactRejection}
\title{Artifact rejection}
\usage{
artifactRejection(
dat,
markers = NULL,
artrej_options = artrejOptions(),
return_data = TRUE,
return_details = TRUE,
print_result = TRUE
)
}
\arguments{
\item{dat}{numeric array (EEG-data) with the following named dimensions
(dimension order does not matter): chan, time, trial}
\item{markers}{if not NULL (default), a matrix or data.frame containing the
characteristics of the trials (markers)}
\item{artrej_options}{a named list containing the parameters for the
artifact rejection criteria. See \code{\link{artrejOptions}} for details.}
\item{return_data}{logical value, if TRUE (default), dat and markers without
rejected trials are returned}
\item{return_details}{logical value, if TRUE (default), the full array of
results (e.g., bad trials for each channel and for each criterion) is
returned as an attribute of bad_trials (see Values section)}
\item{print_result}{logical value, if TRUE (default), a summary of the
results is printed to the console}
}
\value{
A named list containing bad_trials (trials identified with artifacts)
and the modified input data (dat and markers without contaminated trials)
}
\description{
\code{artifactRejection} performs artifact rejection on segmented data.
}
|
1b98e53fb08148c0a2b5af62fe21da6936901a1e | 977e25b030bc27e923f52b08305a6dec2cfd02fd | /finance_basics_with_r/intro_r_finance/1_basics/basic_datatypes.R | 3b3226d0d25d96c1decc3c338da45f9d693215c5 | [] | no_license | printfCRLF/rr | d4cd813fafef7d64da2722ade9e14220c12e17ff | 4116f726f5ad7a8cadbe6841d13abbdb998ee294 | refs/heads/master | 2021-04-15T15:08:37.032087 | 2019-07-12T08:29:26 | 2019-07-12T08:29:26 | 126,468,211 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 217 | r | basic_datatypes.R |
data_type_exploration <- function() {
apple_stock <- 150.45 # numeric data
credit_rating <- "AAA" # string
my_answer <- TRUE # boolean
class(my_answer)
print(my_answer)
}
data_type_exploration() |
50101cf6e3bb419fa426a89d8e84ee4a7c7cb04e | 8d34a5846b55474e1db54cc3595ac725b1a96404 | /R/recode.R | d26d2caa3cdb7b93f6d556f6901dbd8b8f0e476f | [] | permissive | federman/poorman | 6f1f76ea1c262a430dbd80b840897d4f1b603b76 | 3cc0a9920b1eb559dd166f548561244189586b3a | refs/heads/master | 2023-05-14T12:33:25.016104 | 2022-12-29T18:35:59 | 2022-12-29T18:35:59 | 243,791,745 | 0 | 0 | MIT | 2020-02-28T15:19:14 | 2020-02-28T15:19:13 | null | UTF-8 | R | false | false | 9,130 | r | recode.R | #' Recode values
#'
#' @description
#' This is a vectorised version of [switch()]: you can replace `numeric` values based on their position or their name,
#' and `character` or `factor` values only by their name. This is an S3 generic: `{poorman}` provides methods for
#' `numeric`, `character`, and `factor`s. For `logical` vectors, use [if_else()]. For more complicated criteria, use
#' [case_when()].
#'
#' You can use `recode()` directly with `factor`s; it will preserve the existing order of levels while changing the
#' values. Alternatively, you can use `recode_factor()`, which will change the order of levels to match the order of
#' replacements.
#'
#' This is a direct port of the `dplyr::recode()` function.
#'
#' @param .x A vector to modify
#' @param ... Replacements. For `character` and `factor` `.x`, these should be named and replacement is based only on
#' their name. For `numeric` `.x`, these can be named or not. If not named, the replacement is done based on position
#' i.e. `.x` represents positions to look for in replacements. See examples.
#'
#' When named, the argument names should be the current values to be replaced, and the argument values should be the new
#' (replacement) values.
#'
#' All replacements must be the same type, and must have either length one or the same length as `.x`.
#' @param .default If supplied, all values not otherwise matched will be given this value. If not supplied and if the
#' replacements are the same type as the original values in `.x`, unmatched values are not changed. If not supplied and
#' if the replacements are not compatible, unmatched values are replaced with `NA`.
#'
#' `.default` must be either length 1 or the same length as `.x`.
#' @param .missing If supplied, any missing values in `.x` will be replaced by this value. Must be either length 1 or
#' the same length as `.x`.
#' @param .ordered `logical(1)`. If `TRUE`, `recode_factor()` creates an ordered `factor`.
#'
#' @return A vector the same length as `.x`, and the same type as
#' the first of `...`, `.default`, or `.missing`.
#' `recode_factor()` returns a factor whose levels are in the same order as
#' in `...`. The levels in `.default` and `.missing` come last.
#'
#' @seealso
#' [na_if()] to replace specified values with a `NA`.
#'
#' [coalesce()] to replace missing values with a specified value.
#'
#' [replace_na()] to replace `NA` with a value.
#'
#' @examples
#' # For character values, recode values with named arguments only. Unmatched
#' # values are unchanged.
#' char_vec <- sample(c("a", "b", "c"), 10, replace = TRUE)
#' recode(char_vec, a = "Apple")
#' recode(char_vec, a = "Apple", b = "Banana")
#'
#' # Use .default as replacement for unmatched values. Note that NA and
#' # replacement values need to be of the same type.
#' recode(char_vec, a = "Apple", b = "Banana", .default = NA_character_)
#'
#' # Throws an error as NA is logical, not character.
#' \dontrun{
#' recode(char_vec, a = "Apple", b = "Banana", .default = NA)
#' }
#'
#' # For numeric values, named arguments can also be used
#' num_vec <- c(1:4, NA)
#' recode(num_vec, `2` = 20L, `4` = 40L)
#'
#' # Or if you don't name the arguments, recode() matches by position.
#' # (Only works for numeric vector)
#' recode(num_vec, "a", "b", "c", "d")
#' # .x (position given) looks in (...), then grabs (... value at position)
#' # so if nothing at position (here 5), it uses .default or NA.
#' recode(c(1, 5, 3), "a", "b", "c", "d", .default = "nothing")
#'
#' # Note that if the replacements are not compatible with .x,
#' # unmatched values are replaced by NA and a warning is issued.
#' recode(num_vec, `2` = "b", `4` = "d")
#' # use .default to change the replacement value
#' recode(num_vec, "a", "b", "c", .default = "other")
#' # use .missing to replace missing values in .x
#' recode(num_vec, "a", "b", "c", .default = "other", .missing = "missing")
#'
#' # For factor values, use only named replacements
#' # and supply default with levels()
#' factor_vec <- factor(c("a", "b", "c"))
#' recode(factor_vec, a = "Apple", .default = levels(factor_vec))
#'
#' # Use recode_factor() to create factors with levels ordered as they
#' # appear in the recode call. The levels in .default and .missing
#' # come last.
#' recode_factor(num_vec, `1` = "z", `2` = "y", `3` = "x")
#' recode_factor(num_vec, `1` = "z", `2` = "y", `3` = "x", .default = "D")
#' recode_factor(num_vec, `1` = "z", `2` = "y", `3` = "x", .default = "D", .missing = "M")
#'
#' # When the input vector is a compatible vector (character vector or
#' # factor), it is reused as default.
#' recode_factor(letters[1:3], b = "z", c = "y")
#' recode_factor(factor(letters[1:3]), b = "z", c = "y")
#'
#' @export
recode <- function(.x, ..., .default = NULL, .missing = NULL) {
  # S3 generic; methods below cover numeric, character and factor inputs.
  # Logical vectors are deliberately unsupported (use if_else()).
  UseMethod("recode")
}
#' @export
recode.numeric <- function(.x, ..., .default = NULL, .missing = NULL) {
  # dotdotdot() is a package-internal helper capturing the replacements in
  # `...` as a list.
  values <- dotdotdot(...)
  # Named replacements match by (numeric) name, unnamed ones by position;
  # mixing the two styles is ambiguous, hence the error.
  nms <- have_name(values)
  if (all(nms)) {
    vals <- as.double(names(values))
  } else if (all(!nms)) {
    vals <- seq_along(values)
  } else {
    stop("Either all values must be named, or none must be named.")
  }
  # Start from an all-NA result typed like the first replacement (or
  # .default/.missing) and track which positions have been rewritten.
  n <- length(.x)
  template <- find_template(values, .default, .missing)
  res <- template[rep(NA_integer_, n)]
  replaced <- rep(FALSE, n)
  # replace_with() (package-internal) assigns with type checks; its last
  # argument is only a label used in error messages.
  for (i in seq_along(values)) {
    res <- replace_with(res, .x == vals[i], values[[i]], paste0("Vector ", i))
    replaced[.x == vals[i]] <- TRUE
  }
  # Unmatched non-missing inputs receive .default (possibly the input
  # itself); missing inputs receive .missing.
  .default <- validate_recode_default(.default, .x, res, replaced)
  res <- replace_with(res, !replaced & !is.na(.x), .default, "`.default`")
  res <- replace_with(res, is.na(.x), .missing, "`.missing`")
  res
}
#' @export
# Character method: replacements must all be named; matching is by name only.
recode.character <- function(.x, ..., .default = NULL, .missing = NULL) {
  .x <- as.character(.x)
  values <- dotdotdot(...)
  val_names <- names(values)
  have_names <- have_name(values)
  if (!all(have_names)) {
    bad <- which(!have_names) + 1L
    # BUGFIX: `collapse` (not `sep`) is required to join the offending
    # argument positions into one string; with `sep` the vector passed
    # through unchanged and stop() ran its elements together ("23" instead
    # of "2, 3").
    stop("Argument", if (length(bad) > 1L) "s", " ",
         paste(bad, collapse = ", "), " must be named, not unnamed.")
  }
  # All-NA result typed like the first replacement, plus a replaced-mask.
  n <- length(.x)
  template <- find_template(values, .default, .missing)
  res <- template[rep(NA_integer_, n)]
  replaced <- rep(FALSE, n)
  # replace_with() is a package-internal type-checked assignment helper.
  for (nm in val_names) {
    res <- replace_with(res, .x == nm, values[[nm]], paste0("`", nm, "`"))
    replaced[.x == nm] <- TRUE
  }
  # Fill unmatched values with .default and missing inputs with .missing.
  .default <- validate_recode_default(.default, .x, res, replaced)
  res <- replace_with(res, !replaced & !is.na(.x), .default, "`.default`")
  res <- replace_with(res, is.na(.x), .missing, "`.missing`")
  res
}
#' @export
# Factor method: recodes the LEVELS of .x, not its elements, so level order
# is preserved. Replacements must be named; .missing is not meaningful here
# because factor levels cannot be NA.
recode.factor <- function(.x, ..., .default = NULL, .missing = NULL) {
  values <- dotdotdot(...)
  if (length(values) == 0) stop("No replacements provided.")
  have_names <- have_name(values)
  if (!all(have_names)) {
    bad <- which(!have_names) + 1
    # Wording/format aligned with recode.character(): say "Argument(s)" and
    # join the positions with ", " (the old message printed the bare indices
    # run together).
    stop("Argument", if (length(bad) > 1) "s", " ",
         paste(bad, collapse = ", "), " must be named, not unnamed.")
  }
  if (!is.null(.missing)) {
    stop("`.missing` is not supported for factors.")
  }
  # One result slot per level; track which levels were rewritten.
  n <- length(levels(.x))
  template <- find_template(values, .default, .missing)
  res <- template[rep(NA_integer_, n)]
  replaced <- rep(FALSE, n)
  for (nm in names(values)) {
    res <- replace_with(res, levels(.x) == nm, values[[nm]], paste0("`", nm, "`"))
    replaced[levels(.x) == nm] <- TRUE
  }
  .default <- validate_recode_default(.default, .x, res, replaced)
  res <- replace_with(res, !replaced, .default, "`.default`")
  if (is.character(res)) {
    # Character replacements: relabel the levels in place, keeping order.
    levels(.x) <- res
    .x
  } else {
    # Non-character replacements: map each element through the level lookup.
    res[as.integer(.x)]
  }
}
# Logical vector marking which elements of `x` carry a non-empty, non-NA name.
have_name <- function(x) {
  labels <- names(x)
  if (is.null(labels)) {
    return(rep(FALSE, length(x)))
  }
  !is.na(labels) & labels != ""
}
# Drop zero-length elements (e.g. NULLs) from a list or vector, keeping names.
compact <- function(.x) {
  .x[lengths(.x) > 0]
}
# Pick the element that seeds the output's type: the first non-empty entry
# among the replacement values, then .default, then .missing; error if all
# are empty/NULL.
find_template <- function(values, .default = NULL, .missing = NULL) {
  candidates <- c(values, .default, .missing)
  candidates <- candidates[lengths(candidates) > 0]
  if (length(candidates) == 0L) {
    stop("No replacements provided.")
  }
  candidates[[1]]
}
# Resolve the effective .default via recode_default() and warn when unmatched
# non-NA inputs will silently become NA (no usable default for this type).
validate_recode_default <- function(default, x, res, replaced) {
  default <- recode_default(x, default, res)
  observed <- !is.na(x)
  if (is.null(default) && sum(replaced & observed) < length(res[observed])) {
    warning(
      "Unreplaced values treated as NA as .x is not compatible. ",
      "Please specify replacements exhaustively or supply .default",
      call. = FALSE
    )
  }
  default
}
# S3 generic: choose the fallback replacement for unmatched entries,
# dispatched on the class of the input `x`.
recode_default <- function(x, default, res) {
  UseMethod("recode_default")
}
# Default method: with no explicit default, keep the original values only if
# their storage type matches the result's; otherwise fall back to NULL.
recode_default.default <- function(x, default, res) {
  if (!is.null(default)) {
    return(default)
  }
  if (identical(typeof(x), typeof(res))) x else NULL
}
# Factor method: with no explicit default, reuse the input's own levels when
# the result is label-like (character/factor); otherwise a typed NA.
recode_default.factor <- function(x, default, res) {
  if (!is.null(default)) {
    return(default)
  }
  label_like <- is.character(res) || is.factor(res)
  if (label_like && is.factor(x)) {
    levels(x)
  } else {
    res[NA_integer_]
  }
}
#' @rdname recode
#' @export
# Like recode(), but returns a factor whose level order follows the order of
# the replacements (then default, then .missing), restricted to levels that
# actually occur in the recoded result.
recode_factor <- function(.x, ..., .default = NULL, .missing = NULL, .ordered = FALSE) {
  recoded <- recode(.x, ..., .default = .default, .missing = .missing)
  replacements <- dotdotdot(...)
  candidate_levels <- unique(c(replacements, recode_default(.x, .default, recoded), .missing))
  observed_levels <- if (is.factor(recoded)) levels(recoded) else unique(recoded)
  factor(recoded, intersect(candidate_levels, observed_levels), ordered = .ordered)
}
|
89411f43a105601fc50b71579a22a18cb35a0490 | 82d13bf9a710db201208b614f0fef141e4cadda3 | /man/I2C2.mcCI.Rd | 382a1d943316af8cb3cc53ace5a3db733a80cd67 | [] | no_license | muschellij2/I2C2 | 64316bbcf30a40e6bf0d0a57c5b66b65c33bfe20 | 3cb224d6c20d3dce240552f0f857623351d7c042 | refs/heads/master | 2021-01-23T12:34:55.622900 | 2019-08-15T19:02:28 | 2019-08-15T19:02:28 | 93,177,059 | 2 | 4 | null | 2019-06-20T19:19:53 | 2017-06-02T15:04:41 | R | UTF-8 | R | false | true | 617 | rd | I2C2.mcCI.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/I2C2.mcCI.R
\name{I2C2.mcCI}
\alias{I2C2.mcCI}
\title{Compute the Confidence Interval}
\usage{
I2C2.mcCI(..., rseed = 1234, R = 100, mc.cores = 1, ci = 0.95)
}
\arguments{
\item{...}{arguments passed to \code{\link{I2C2.rsample}}}
\item{rseed}{Seed number}
\item{R}{The bootstrap repetition size}
\item{mc.cores}{Number of Cores}
\item{ci}{Confidence level, given as a fraction; a 100*ci\% confidence interval is computed (e.g. \code{ci = 0.95} for a 95\% interval)}
}
\value{
List of the lambdas and then the confidence interval
}
\description{
Computing the confidence interval of I2C2 via multicore computing.
}
|
abe9e915e28a96a89407be998b1aab17bcb63c17 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/Fragman/examples/reals.Rd.R | 35127c08210f3017969c29d6b6d8c6e16a6903bb | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 330 | r | reals.Rd.R | library(Fragman)
### Name: reals
### Title: Finding the real peaks
### Aliases: reals
### ** Examples
data(my.plants)  # example trace data shipped with Fragman
# Detect peaks above intensity 100 in the first column of the first
# sample's trace matrix.
x <- big.peaks.col(my.plants[[1]][,1],100)#for any color
#reals(x, panel=c(260,280), shi=1, ploidy=2)
#still needs weight information in order to find the reals,
#works internally of score.easy function
|
b2668d74c2517270d5644ff6ae0a399e72132246 | ae6b48296c0fd2a3b37d6258897a60023145c588 | /src/STATS_GBM.R | 3408ab5410c572cc00992cda12264717af4da4b8 | [
"Apache-2.0",
"GPL-3.0-only"
] | permissive | IBMPredictiveAnalytics/STATS_GBM | 0df356d1b19e5d74a2b372a59817b2c5c8a20d65 | ac6a4eccec8d882103e192a7ecd0f48e8b3c9933 | refs/heads/master | 2022-10-26T02:10:51.075447 | 2022-10-21T01:35:58 | 2022-10-21T01:35:58 | 38,214,177 | 2 | 1 | Apache-2.0 | 2022-04-27T14:14:19 | 2015-06-28T21:04:56 | R | UTF-8 | R | false | false | 19,964 | r | STATS_GBM.R | # STATS GBM extension command
#Licensed Materials - Property of IBM
#IBM SPSS Products: Statistics General
#(c) Copyright IBM Corp. 2014
#US Government Users Restricted Rights - Use, duplication or disclosure
#restricted by GSA ADP Schedule Contract with IBM Corp.
# author= 'jkp, IBM'
# version= '1.0.3'
# history
# 03-30-2013 original version
# 04-10-2013 ignore errors in monkey patching for older R/gbm versions
# 05-24-2013 work around problem in older R that caused Relative Importance table
# to have no variable names in it
# Plain-text syntax summary. The /HELP subcommand no longer prints this
# string (the writeLines(helptext) call in Run() is commented out in favor
# of helper(), which opens the HTML help); it is kept as a reference.
helptext = 'STATS GBM DISTRIBUTION=GAUSSIAN, LAPLACE, TDIST, BERNOULLI,
HUBERIZED, MULTINOMIAL, ADABOOST, POISSON, COXPH, or QUANTILE
ALPHA=quantile TDF=df
DEPENDENT=depvar INDEPENDENT=independent variable list
INTERACTIONS=interaction level OFFSET=offset variable
MONOTONE = list of monotone specifications
/OPTIONS NTREES=integer CVFOLDS=integer
SHRINKAGE=shrinkage factor for steps MINNODESIZE=minimum node size
BAGFRAC = fraction
TRAINFRAC = fraction
CVSTRAT = TRUE or FALSE
MISSING = EXCLUDE or INCLUDE
/SAVE MODEL=file specification WORKSPACE= CLEAR or RETAIN
/OUTPUT MARGINALPLOTS = variable list MARGINALPLOTCOUNT=number
BOOSTPLOT=YES or NO BOOSTPLOTMETHOD=oob or test or cv
RELIMP = YES or NO
/HELP.
All items are optional except DISTRIBUTION, DEPENDENT,
and INDEPENDENT
DISTRIBUTION specifies the distribution to use.
GAUSSIAN - squared error
LAPLACE - absolute error
TDIST - t distribution, requires TDF specifying degrees of freedom
BERNOULLI - logistic regression with dependent 0 or 1.
Variable level should be scale.
HUBERIZED - huberized hinge loss with dependent 0 or 1
MULTINOMIAL - discrete dependent with more than two categories
Variable level should be categorical (nominal or ordinal)
ADABOOST - AdaBoost exponential loss with dependent 0 or 1
POISSON - dependent is count
COXPH - right censored dependent
QUANTILE - quantile of dependent. Requires ALPHA specifying the quantile
as a fraction.
DEPENDENT specifies the dependent variable
INDEPENDENT specifies one or more independent variables
OFFSET - specifies an offset variable for the dependent variable in the equation
DEPENDENT specifies the dependent variable. The nature of the variable
determines which distributions might be appropriate. Cases with missing
values for the dependent variable are discarded.
INDEPENDENT specifies a list of independent variables. Missing values
are allowed.
INTERACTIONS specifies what interaction terms are included in the model.
1 = no interactions, 2 = all two-way interactions, etc. Default is 1.
OFFSET names an offset variable.
MONOTONE specifies monotonicity of the effect for each independent
variable. If used, one value must be specified for each variable.
Use p for a positive effect, n for negative, and z for no assumption.
NTREES specifies the number of trees or iterations. Larger numbers
generally give better accuracy but take more time. Default is 100.
CVFOLDS specifies the number of cross-validation folds. Default is 0.
SHRINKAGE specifies the learning rate between 0 and 1. Values close to
zero tend to do better, but they may require more iterations.
MINNODESIZE is the minimum number of cases in a node. Default is 10.
TRAINFRAC specifies the fraction of the cases used for the training set.
Default is 1, i.e., no holdout set.
BAGFRAC specifies the fraction of the training set to be used to compute
an out-of-bag estimate of the improvement as the number of trees
increases. Default is .5.
CVSTRAT specifies for cross validation whether it should be stratified
by class. Default is yes.
MISSING specifies inclusion or exclusion of user missing values. Default
is exclude.
MODELFILE specifies a file for saving the estimation results.
WORKSPACE specifies whether or not to keep the workspace contents
in memory. The saved model can be used to make predictions for
new data with the STATS GBMPRED command.
MARGINALPLOTS specifies up to three independent variable whose
marginal effect (after integrating out other variables) is plotted.
MARGINALPLOTCOUNT specifies the number of independent variables to
plot up to three. The first n variables in the independent list
are plotted. You can specify either of these keywords but not
both.
BOOSTPLOT specifies a plot of the error against the number
of iterations. The calculation is based on the BOOSTPLOTMETHOD.
That can be OOB (out of bag), TEST (holdout sample), or
CV (cross validation). OOB is always valid but is said
to be less accurate than the other two. The default is OOB.
RELIMP specifies whether to display and plot the relative
importance of the independent variables.
/HELP displays this text and does nothing else.
'
library(gbm)
# override for api to account for extra parameter in V19 and beyond
# From Statistics version 19 on, spsspkg.StartProcedure accepts a display
# name in addition to the OMS identifier; older versions take only omsid.
StartProcedure <- function(procname, omsid) {
    # BUG FIX: compare numerically. The original compared the two-character
    # substring to the number 19, which R coerces to a lexicographic string
    # comparison ("9." >= "19" is TRUE), misclassifying single-digit versions.
    if (as.numeric(substr(spsspkg.GetSPSSVersion(), 1, 2)) >= 19) {
        spsspkg.StartProcedure(procname, omsid)
    }
    else {
        spsspkg.StartProcedure(omsid)
    }
}
# monkey patching lattice to print
# Printing wrapper for lattice::xyplot: renders the trellis object
# immediately (lattice charts do not draw unless print()ed) and returns
# NULL so gbm's code cannot re-use the object.
xyplot <- function(...) {
    print(lattice::xyplot(...))
    return(NULL)
}
# Printing wrapper for lattice::levelplot; same rationale as xyplot above.
levelplot <- function(...) {
    print(lattice::levelplot(...))
    return(NULL)
}
# Printing wrapper for lattice::stripplot; same rationale as xyplot above.
stripplot <- function(...) {
    print(lattice::stripplot(...))
    return(NULL)
}
# There is a bug in gbm in that it does not always wrap calls to lattice functions
# in print, and lattice charts do not print by default. So we monkeypatch the
# relevant lattice functions by wrapping in the functions above to force printing
# With R2.12 and the corresponding gbm package, some of these names do not
# occur, so we ignore any error here.
library(lattice)
# Rebind the three names in the environment gbm resolves them from (the
# parent of gbm's function environment) to the printing wrappers above;
# unlockBinding is needed because namespace bindings are locked.
tryCatch({
    unlockBinding(sym="xyplot", env=parent.env(environment(gbm)))
    assign("xyplot", xyplot, envir= parent.env(environment(gbm)))
    unlockBinding(sym="levelplot", env=parent.env(environment(gbm)))
    assign("levelplot", levelplot, envir= parent.env(environment(gbm)))
    unlockBinding(sym="stripplot", env=parent.env(environment(gbm)))
    assign("stripplot", stripplot, envir= parent.env(environment(gbm)))
}, error=function(e) {return})
# Fit a generalized boosted regression model with the R gbm package and
# render the results (settings table, relative importance, marginal and
# boosting plots) as SPSS Statistics output.
#
# distribution            gbm loss name ("gaussian", "bernoulli", ...)
# dep / indep             dependent variable and independent variable names
# interactions            gbm interaction.depth
# offset                  optional offset variable name
# alpha / tdf             extra parameter for quantile / tdist distributions
# monotone                per-predictor monotonicity codes "p"/"n"/"z"
# ntrees ... cvstrat      passed through to gbm()
# marginalplots /
#   marginalplotcount     mutually exclusive ways to pick up to 3 predictors
#                         whose marginal effect is plotted
# modelfile               optional file for saving the fit (STATS GBMPRED)
# missingvalues           "exclude" or "include" user-missing values
# boostplot /
#   boostplotmethod       optional error-vs-iterations diagnostic
# relimp                  display relative importance table/plot
# workspace               "clear" or "retain" the R workspace afterwards
doGbm <- function(distribution, dep, indep, interactions=1, offset=NULL, alpha=NULL,
    tdf=NULL, monotone=NULL, ntrees=100, cvfolds=0, shrinkage=.001,
    minnodesize=10, bagfrac=.5, trainfrac=1., cvstrat=TRUE, marginalplots=NULL,
    marginalplotcount=NULL, modelfile=NULL, missingvalues="exclude",
    boostplot=FALSE, boostplotmethod="oob",
    relimp=TRUE, workspace="clear") {
    setuplocalization("STATS_GBM")
    tryCatch(library(gbm), error=function(e){
        stop(gtxtf("The R %s package is required but could not be loaded.","gbm"),call.=FALSE)
        }
    )
    indlen = length(indep)
    # dist is the object handed to gbm(); distribution becomes display text;
    # inputdistribution keeps the raw keyword for later logic.
    dist = distribution
    inputdistribution = distribution
    if (distribution == "tdist") {
        if(is.null(tdf))
            stop(gtxt("Degrees of freedom must be specified for the t distribution"),
                call.=FALSE)
        dist = list(name=distribution, df=tdf)
        distribution = sprintf("%s, d.f.=%d", distribution, tdf)
    } else if (distribution == "quantile") {
        if(is.null(alpha))
            stop(gtxt("Quantile value must be specified for quantile distribution"),
                call.=FALSE)
        dist = list(name=distribution, alpha=alpha)
        distribution = sprintf("%s, quantile=%.3f", distribution, alpha)
    }
    if (!is.null(monotone)) {
        if (length(monotone) != indlen)
            stop(gtxt("The number of monotone specifications is different from the number of
independent variables"), call.=FALSE)
        # map monotone syntax values to 1, -1, 0 form
        monotone = unlist(monotone)
        monotoneInts = unlist(c(p=1, n=-1, z=0)[monotone], use.names=FALSE)
    } else {
        monotoneInts = NULL
        monotone = gtxt("Unspecified")
    }
    if (!is.null(marginalplots) && !is.null(marginalplotcount))
        stop(gtxt("Cannot specify both marginalplots list and marginalplotcount"), call.=FALSE)
    # for marginalplotcount, select up to the number of independent variables up to 3
    if (!is.null(marginalplotcount) && marginalplotcount > 0)
        marginalplots = indep[1:min(length(indep), marginalplotcount)]
    if (length(marginalplots) > 3)
        stop(gtxt("No more than three variables can be specified to plot"),
            call.=FALSE)
    if (length(intersect(indep, marginalplots)) != length(marginalplots))
        stop(gtxt("All variables listed for plots must also appear as independent variables"),
            call.=FALSE)
    # Build the model formula text "dep ~ [offset(off)+] ind1+ind2+..."
    allvars = c(dep, indep)
    model = paste(indep, collapse="+")
    if (!is.null(offset)) {
        # BUG FIX: the original used collapse="" where sep="" was meant, so
        # the pieces were joined with the default " " separator; formulas
        # parse either way, but sep="" produces the intended text.
        model = paste("offset(", offset, ")+", model, sep="")
        allvars = c(allvars, offset)
    }
    model = paste(dep, model, sep="~")
    keepUserMissing = ifelse(missingvalues == "exclude", FALSE, TRUE)
    dta <- spssdata.GetDataFromSPSS(allvars, missingValueToNA = TRUE,
        keepUserMissing=keepUserMissing, factorMode = "levels")
    if (distribution == "multinomial" && !is.factor(dta[[1]]))
        stop(gtxt("Dependent variable must be categorical for the multinomial distribution"),
            call.=FALSE)
    if (distribution == "bernoulli" && is.factor(dta[[1]]))
        stop(gtxt("Dependent variable must be scale measurement level for the Bernoulli distribution"),
            call.=FALSE)
    predel = nrow(dta)
    # remove cases where dependent variable is missing
    dta = dta[complete.cases(dta[,1]),]
    casesdel = predel - nrow(dta)
    if (nrow(dta) == 0)
        # BUG FIX: call.=FALSE was previously passed to gtxt() instead of
        # stop() because of a misplaced parenthesis.
        stop(gtxt("All cases are missing for the dependent variable"), call.=FALSE)
    # gbm will complain if class.stratify.cv is supplied for noncategorical model :-)
    # avoiding do.call here in order to avoiding making another copy of the data
    if (inputdistribution %in% c("bernoulli", "multinomial")) {
        res = tryCatch(gbm(distribution=dist, formula=as.formula(model), data=dta,
            var.monotone=monotoneInts, n.trees = ntrees, interaction.depth=interactions,
            n.minobsinnode=minnodesize, shrinkage=shrinkage,
            bag.fraction=bagfrac, train.fraction = trainfrac,
            cv.folds = cvfolds, keep.data=FALSE, verbose=FALSE, class.stratify.cv = cvstrat),
            error=function(e) {stop(e$message, call.=FALSE)})
    }
    else {
        res = tryCatch(gbm(distribution=dist, formula=as.formula(model), data=dta,
            var.monotone=monotoneInts, n.trees = ntrees, interaction.depth=interactions,
            n.minobsinnode=minnodesize, shrinkage=shrinkage,
            bag.fraction=bagfrac, train.fraction = trainfrac,
            cv.folds = cvfolds, keep.data=FALSE, verbose=FALSE),
            error=function(e) {stop(e$message, call.=FALSE)})
    }
    # Settings summary pivot table.
    summarylabels=c(gtxt("Distribution"),
        gtxt("Dependent Variable"),
        gtxt("Independent Variables"),
        gtxt("Interaction Level"),
        gtxt("Offset"),
        gtxt("Monotone Specification"),
        gtxt("Number of Trees"),
        gtxt("Shrinkage"),
        gtxt("Cross-Validation Folds"),
        gtxt("Minimum Node Size"),
        gtxt("Bag Fraction"),
        gtxt("Training Fraction"),
        gtxt("Cases used for Fitting"),
        gtxt("Cross Validation Stratified by Class"),
        gtxt("User Missing Values"),
        gtxt("Cases Discarded Due to System Missing Data"),
        gtxt("Save Model As File"),
        gtxt("Date Fit")
    )
    if (!(inputdistribution %in% c("bernoulli", "multinomial"))) {
        classstrat = gtxt("NA")
    } else {
        classstrat = ifelse(cvstrat, gtxt("Yes"), gtxt("No"))
    }
    # BUG FIX: previously modelfile itself was overwritten with the display
    # string gtxt("Not saved") when NULL, so the !is.null(modelfile) guard
    # below always fired and save() wrote a file literally named "Not saved".
    # Keep the display text in a separate variable.
    modelfiledisplay = modelfile
    if (is.null(modelfiledisplay)) {
        modelfiledisplay = gtxt("Not saved")
    }
    summaryvalues = c(distribution,
        dep,
        paste(indep, collapse=" "),
        interactions,
        ifelse(is.null(offset), "--None--", offset),
        paste(monotone, collapse=" "),
        res$n.trees,
        res$shrinkage,
        res$cv.folds,
        res$n.minobsinnode,
        res$bag.fraction,
        res$train.fraction,
        res$nTrain,
        classstrat,
        missingvalues,
        casesdel,
        modelfiledisplay,
        as.character(Sys.time())
    )
    names(summaryvalues) = summarylabels
    settingsdf = data.frame(cbind(summaryvalues))
    colnames(settingsdf) = gtxt("Values")
    StartProcedure(gtxt("Generalized Boosted Regression"), "STATSGBM")
    spsspivottable.Display(settingsdf,
        title = gtxt("Settings"),
        templateName = "GBMSUMMARY",
        outline=gtxt("Summary"),
        caption = gtxt("Results calculated by the R gbm procedure")
    )
    # Relative importance table (and plot, when plotting succeeds).
    if (relimp) {
        relimpdf = tryCatch(summary.gbm(res, main=gtxt("Variable Relative Importance")),
            error = function(e) {
                print(gtxt("Unable to plot relative importance"))
                return(tryCatch(summary.gbm(res, plotit=FALSE)))
            }
        )
        relimpnames = relimpdf[,"var"]
        relimpdf = relimpdf['rel.inf']
        row.names(relimpdf) = relimpnames
        names(relimpdf) = gtxt("Relative Influence")
        spsspivottable.Display(relimpdf,
            title = gtxt("Variable Relative Importance"),
            templateName = "GBMRELIMP",
            outline = gtxt("Relative Importance"),
            caption = gtxt("Importance normalized to sum to 100")
            )
    }
    # Marginal-effect plots for the selected predictors.
    if (length(marginalplots) > 0) {
        title = paste(gtxt("Marginal Effects of Variables"),
            paste(marginalplots, collapse=" "),collapse="")
        tryCatch(plot(res, i.var=unlist(marginalplots), main=title),
            error = function(e) {
                print(gtxt("Marginal Plots"))
                print(e)})
    }
    # Boosting diagnostic: error vs. iteration count plus best iteration.
    bestiter = NULL
    if (boostplot) {
        if (boostplotmethod == "oob")
            boostplotmethod = toupper(boostplotmethod)
        # No access to title for this plot
        bestiter = tryCatch(gbm.perf(res, oobag.curve=TRUE, method=boostplotmethod),
            error = function(e) {print(e)})
        if (is.numeric(bestiter)) { # could be an error message
            spsspivottable.Display(data.frame("Value"=bestiter, row.names=gtxt("Best Iteration")),
                title=gtxt("Best Number of Iterations"),
                templateName="GBMBESTITER", caption=sprintf(gtxt("Method:%s"), boostplotmethod))
        } else {
            bestiter = NULL
        }
    }
    # save model results for future use in scoring
    modelproperties = list()
    vdict = spssdictionary.GetDictionaryFromSPSS()
    # save dictionary entry (column) for dependent variable
    modelproperties["depvar"] = vdict[match(dep, vdict["varName",])]
    modelproperties["offset"] = ifelse(is.null(offset), "<None>", offset)
    modelproperties["missingvalues"] = keepUserMissing
    modelproperties["bestiter"] = bestiter
    if (!is.null(modelfile)) {
        save(res, settingsdf, modelproperties, file=modelfile, precheck=FALSE)
    }
    # clean up workspace, keeping only what is necessary for predictions
    # if workspace == "retain".
    # The variables are put in the global environment so that they will be retained
    if (workspace == "clear") {
        tryCatch(rm(list=ls()), warning = function(e) {return(NULL)})
    } else {
        rm(list = setdiff(ls(), list("res", "settingsdf", "modelproperties")))
        assign("res", res, envir=.GlobalEnv)
        assign("settingsdf", settingsdf, envir=.GlobalEnv)
        assign("modelproperties", modelproperties, envir=.GlobalEnv)
    }
    spsspkg.EndProcedure()
}
# localization initialization
# Locate the directory on the R library path that contains <domain>.R, then
# bind the translation catalog stored under <that dir>/<domain>/lang.
setuplocalization = function(domain) {
    script.name <- paste(domain, ".R", sep="")
    script.path <- Find(file.exists, file.path(.libPaths(), script.name))
    bindtextdomain(domain, file.path(dirname(script.path), domain, "lang"))
}
# Translate a message via this extension's catalog; returns the input
# unchanged when no translation is installed.
gtxt <- function(...) {
    gettext(..., domain="STATS_GBM")
}
# sprintf-style translated formatting via this extension's catalog.
gtxtf <- function(...) {
    gettextf(..., domain="STATS_GBM")
}
# Parse the STATS GBM extension syntax and dispatch to doGbm.
# args[[1]] is the command name; args[[2]] holds the tokenized subcommands.
Run <- function(args) {
    #Execute the STATS GBM extension command
    cmdname = args[[1]]
    args = args[[2]]
    # Declarative grammar: one Template per syntax keyword, mapping it
    # (under subcommand subc) to the like-named doGbm argument with type
    # (ktype) and, where given, allowed-value (vallist) checking.
    oobj = spsspkg.Syntax(list(
        spsspkg.Template("DISTRIBUTION", subc="", ktype="str", var="distribution",
            vallist=list("gaussian","laplace","tdist","bernoulli","huberized",
            "multinomial", "adaboost", "poisson", "coxph",
            "quantile")),
        spsspkg.Template("DEPENDENT", subc="", ktype="existingvarlist", var="dep"),
        spsspkg.Template("INDEPENDENT", subc="", ktype="existingvarlist", var="indep", islist=TRUE),
        spsspkg.Template("INTERACTIONS", subc="", ktype="int", var="interactions",
            vallist=list(1)),
        spsspkg.Template("OFFSET", subc="", ktype="existingvarlist", var="offset"),
        spsspkg.Template("ALPHA", subc="", ktype="float", var="alpha", vallist=list(0,1.)),
        spsspkg.Template("TDF", subc="", ktype="int", var="tdf", vallist=list(1)),
        spsspkg.Template("MONOTONE", subc="", ktype="str", var="monotone",
            vallist=list("p", "n", "z"), islist=TRUE),
        spsspkg.Template("NTREES", subc="OPTIONS", ktype="int", var="ntrees", vallist=list(1)),
        spsspkg.Template("CVFOLDS", subc="OPTIONS", ktype="int", var="cvfolds", vallist=list(0)),
        spsspkg.Template("SHRINKAGE", subc="OPTIONS", ktype="float", var="shrinkage",
            vallist=list(0,1.)),
        spsspkg.Template("MINNODESIZE", subc="OPTIONS", ktype="int", var="minnodesize",
            vallist=list(1)),
        spsspkg.Template("BAGFRAC", subc="OPTIONS", ktype="float", var="bagfrac",
            vallist=list(0,1.)),
        spsspkg.Template("TRAINFRAC", subc="OPTIONS", ktype="float", var="trainfrac",
            vallist=list(0,1.)),
        spsspkg.Template("CVSTRAT", subc="OPTIONS", ktype="bool", var="cvstrat"),
        spsspkg.Template("MISSING", subc="OPTIONS", ktype="str", var="missingvalues",
            vallist=list("include","exclude")),
        spsspkg.Template("MODELFILE", subc="SAVE", ktype="literal", var="modelfile"),
        spsspkg.Template("WORKSPACE", subc="SAVE", ktype="str", var="workspace",
            vallist = list("retain", "clear")),
        spsspkg.Template("MARGINALPLOTS", subc="OUTPUT", ktype="existingvarlist",
            var="marginalplots", islist=TRUE),
        spsspkg.Template("MARGINALPLOTCOUNT", subc="OUTPUT", ktype="int",
            var="marginalplotcount", vallist=list(0,3)),
        spsspkg.Template("BOOSTPLOT", subc="OUTPUT", ktype="bool", var="boostplot"),
        spsspkg.Template("BOOSTPLOTMETHOD", subc="OUTPUT", ktype="str", var="boostplotmethod",
            vallist=list("oob", "test", "cv")),
        spsspkg.Template("RELIMP", subc="OUTPUT", ktype="bool", var="relimp"),
        spsspkg.Template("HELP", subc="", ktype="bool")
    ))
    # A HELP subcommand overrides all else
    if ("HELP" %in% attr(args,"names")) {
        #writeLines(helptext)
        helper(cmdname)
    }
    else {
        res <- spsspkg.processcmd(oobj, args, "doGbm")
    }
}
# find the html help file on the R library path and display it in the
# default browser; blanks in the command name map to "_" in the install
# folder name.
helper = function(cmdname) {
    folder <- gsub(" ", "_", cmdname, fixed=TRUE)
    helpfile <- Find(file.exists, file.path(.libPaths(), folder, "markdown.html"))
    if (is.null(helpfile)) {
        print("Help file not found")
    } else {
        browseURL(paste("file://", helpfile, sep=""))
    }
}
# If the host provides a central help launcher (newer Statistics versions
# define spsspkg.helper), prefer it over the local fallback above.
if (exists("spsspkg.helper")) {
    assign("helper", spsspkg.helper)
}
3004bb840c037b5255aa661c0d69c3a100240ddb | 476171664236ce5a9173ec11dd06a66187d22ee0 | /man/isolate_sphingoids.Rd | 3d9094fd72f6017b63b59e520506b6b87ead5361 | [] | no_license | michaelwitting/lipidomicsUtils | d03ad4a12f58b7a3728baa4d13e3a25422690dd8 | b818a01bc7ecfc9901cb8ebec381c3c3da444835 | refs/heads/master | 2021-06-13T06:17:53.352351 | 2019-11-06T15:26:43 | 2019-11-06T15:26:43 | 187,207,427 | 2 | 1 | null | 2019-11-06T15:26:44 | 2019-05-17T11:49:28 | R | UTF-8 | R | false | true | 1,074 | rd | isolate_sphingoids.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Parser_sphingoid_base.R
\name{isolate_sphingoids}
\alias{isolate_sphingoids}
\title{Get all sphingoid bases
This function isolates all sphingoid bases from a given lipid shorthand notation and returns them as a vector. Supported modifications are currently hydroxy groups (OH), hydroperoxy groups (OOH), keto groups (O) and amino groups (NH2)}
\usage{
isolate_sphingoids(lipids)
}
\arguments{
\item{lipids}{Vector or list of shorthand notations, e.g. c("Cer(d16:1(4E,1OH,3OH,15Me)/22:0)", "HexCer(d18:1/22:0)")}
}
\description{
Get all sphingoid bases
This function isolates all sphingoid bases from a given lipid shorthand notation and returns them as a vector. Supported modifications are currently hydroxy groups (OH), hydroperoxy groups (OOH), keto groups (O) and amino groups (NH2)
}
\examples{
library(lipidomicsUtils)
lipids <- c("Cer(d16:1(4E,1OH,3OH,15Me)/22:0)", "HexCer(d18:1/22:0)")
isolate_sphingoids(lipids)
}
\author{
Michael Witting, \email{michael.witting@helmholtz-muenchen.de}
}
|
3f4a08d856dacc5d858351c8bf3d693471ce9954 | b39d03b53c47f9e5b7c6e9b251e9996d4ca48450 | /src/jag/generate_single_qq.R | e785da9729f0556f5c6d948b2577419e032d09d4 | [] | no_license | dposthuma/jag | 147e9751868cf22a16975c762f00def5486be873 | 2159719b1a528c94d992b02ce61d7835a2be8d8b | refs/heads/master | 2021-01-18T13:59:42.191489 | 2012-07-17T15:29:55 | 2012-07-17T15:29:55 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 676 | r | generate_single_qq.R |
# Render a Q-Q plot of the supplied p-values into a PDF file.
#
# pvalues  - vector of p-values (GC-adjusted or raw, chosen below)
# group    - label handed to plot_qq (expected to come from the sourced
#            global_functions.R)
# filename - output PDF path
# adjusted - unused here; given a default because the call at the bottom
#            of this script supplies only three arguments
generate_qq <- function(pvalues, group, filename, adjusted = NULL)
{
	pdf.title <- paste(filename)
	pdf(file=pdf.title)
	# Close the PDF device even if plot_qq() fails, so the file handle
	# is not leaked and the document is finalized.
	on.exit(dev.off(), add = TRUE)
	plot_qq(pvalues,group)
}
#read all arguments given by python
#DO NOT REMOVE!
# Each command-line argument is an R assignment string; the eval/parse loop
# is what defines r_path, assocfile, adjusted, header and filename used by
# the rest of the script.
args=(commandArgs(TRUE))
if(length(args)==0){
print("No arguments supplied.")
}else{
for(i in 1:length(args)){
eval(parse(text=args[[i]]))
}
}
#loading R file with all functions (defines plot_qq among others)
source(paste(r_path,"global_functions.R",sep=""))
input <- read.delim(assocfile, sep = '', header = TRUE, colClasses = "character")
# 'adjusted' arrives as the Python literal strings 'True'/'False'; pick the
# genomic-control column (GC) or the raw P column accordingly.
if(adjusted == 'True')
{
pvalues <- input$GC
}
if(adjusted == 'False')
{
pvalues <- input$P
}
generate_qq(pvalues, header, filename)
|
037b8ecb9b314bc39da12945d103604f34c39483 | d87946647dbd59cd48674cc31d9f835162fb053e | /machine_learning_algorithm/em_document_clustering.r | 6edcf85c94a54be81e797caa2819cba93b575aab | [] | no_license | TomSteve1102/HelloWorld | 77ec9478ff5253c28338cc32968f5b367de1cde7 | f03c551d5478fdc99a9e94b3d0eb7302be98f8e3 | refs/heads/master | 2023-09-03T11:26:10.171969 | 2021-10-28T21:31:05 | 2021-10-28T21:31:05 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,010 | r | em_document_clustering.r | library(dplyr)
library(tm)
# Numerical floor used throughout the EM routines: keeps log(mu + eps)
# finite and hard assignments strictly inside (0, 1).
eps=1e-10
# reading the data
# Read a labeled corpus (one "<label>\t<document>" per line), optionally
# subsample it, clean the text with tm, and return the corpus source, its
# document-term count matrix, and the gold labels.
read.data <- function(file.name, sample.size=1000, seed=100, pre.proc=TRUE, spr.ratio= 0.90) {
  # Read the data
  text <- readLines(file.name)
  # select a subset of data if sample.size > 0
  if (sample.size>0){
    set.seed(seed)
    text <- text[sample(length(text), sample.size)]
  }
  ## the terms before the first '\t' are the lables (the newsgroup names) and all the remaining text after '\t' are the actual documents
  docs <- strsplit(text, '\t')
  # store the labels for evaluation
  labels <- unlist(lapply(docs, function(x) x[1]))
  # store the unlabeled texts
  # docs <- data.frame(unlist(lapply(docs, function(x) x[2])))
  # Zero-padded ids keep documents in readLines order ("doc_0001", ...).
  uid <- paste0("doc_", formatC(1:length(text), width = 4, format = "d", flag = "0"))
  docs <- data.frame(doc_id = uid, text = unlist(lapply(docs, function(x) x[2])))
  # create a corpus
  docs <- DataframeSource(docs)
  corp <- Corpus(docs)
  # Preprocessing (order matters: stopwords are removed before stemming):
  if (pre.proc){
    corp <- tm_map(corp, removeWords, stopwords("english")) # remove stop words (the most common word in a language that can be find in any document)
    corp <- tm_map(corp, removePunctuation) # remove pnctuation
    corp <- tm_map(corp, stemDocument) # perform stemming (reducing inflected and derived words to their root form)
    corp <- tm_map(corp, removeNumbers) # remove all numbers
    corp <- tm_map(corp, stripWhitespace) # remove redundant spaces
  }
  # Create a matrix which its rows are the documents and colomns are the words.
  dtm <- DocumentTermMatrix(corp)
  ## reduce the sparcity of out dtm
  dtm <- removeSparseTerms(dtm, spr.ratio)
  ## convert dtm to a matrix
  word.doc.mat <- as.matrix(dtm)
  # Return the result
  return (list("docs" = docs, "word.doc.mat"= word.doc.mat, "labels" = labels))
}
# Numerically stable log(sum(exp(v))) via the log-sum-exp trick: shift by
# the maximum so no exponent overflows.
logSum <- function(v) {
  peak <- max(v)
  peak + log(sum(exp(v - peak)))
}
# Random starting parameters for the mixture-of-multinomials model.
#   rho: K x 1 column of equal mixing proportions.
#   mu:  K x vocab_size matrix; row k is cluster k's word distribution.
initial.param <- function(vocab_size, K=4, seed=123456){
  # BUG FIX: the seed argument was accepted but never used, so runs were
  # not reproducible despite EM() passing its seed through. Note this now
  # (intentionally) sets the global RNG state.
  set.seed(seed)
  rho <- matrix(1/K,nrow = K, ncol=1) # assume all clusters have the same size (we will update this later on)
  mu <- matrix(runif(K*vocab_size),nrow = K, ncol = vocab_size) # initiate Mu
  # BUG FIX: normalize over rows (margin = 1), not columns. margin = 2 made
  # each *column* sum to 1, contradicting both the original comment ("sum of
  # each row is 1") and M.step(), which re-estimates mu row-wise.
  mu <- prop.table(mu, margin = 1) # normalization to ensure that sum of each row is 1
  return (list("rho" = rho, "mu"= mu))
}
# Negative log-likelihood of the corpus under the current model: for each
# document, log-sum-exp over clusters of log rho_k + sum_w counts * log mu_kw
# (eps-smoothed), accumulated with a sign flip.
train_obj <- function(model, counts) {
  n_docs <- dim(counts)[1] # number of documents
  n_clusters <- dim(model$mu)[1]
  total <- 0
  for (doc in 1:n_docs){
    cluster_lp <- matrix(0, ncol = 1, nrow = n_clusters)
    for (k in 1:n_clusters){
      cluster_lp[k, 1] <- sum(counts[doc,] * log(model$mu[k,] + eps))
    }
    total <- total - logSum(cluster_lp + log(model$rho))
  }
  total
}
# Project the document-word matrix with PCA and scatter-plot the documents
# (first two principal components), coloured by cluster assignment.
cluster.viz <- function(doc.word.mat, color.vector, title=' '){
  p.comp <- prcomp(doc.word.mat, scale. = TRUE, center = TRUE)
  plot(p.comp$x, col=color.vector, pch=1, main=title)
}
# E step: compute, in log space, the responsibility gamma[n, k] of cluster k
# for document n (posterior over cluster assignments), then normalise each
# row. mode = "hard" additionally snaps each row to a near-one-hot vector,
# smoothed by eps so later log() calls stay finite.
E.step <- function(gamma, model, counts, mode = "soft"){
  # Model Parameter Setting
  N <- dim(counts)[1] # number of documents
  K <- dim(model$mu)[1]
  # E step:
  for (n in 1:N){
    for (k in 1:K){
      ## calculate the posterior based on the estimated mu and rho in the "log space"
      gamma[n,k] <- log(model$rho[k,1] + eps) + sum(counts[n,] * log(model$mu[k,] + eps))
    }
    # normalisation to sum to 1 in the log space
    logZ = logSum(gamma[n,])
    gamma[n,] = gamma[n,] - logZ
  }
  # converting back from the log space
  gamma <- exp(gamma)
  # implement the hard E step
  if (mode == "hard") {
    # NOTE(review): rows with tied maxima would get several large entries;
    # assumed not to occur with continuous responsibilities.
    max_ind <- gamma == apply(gamma, 1, max)
    gamma[max_ind] <- 1 - (K - 1) * eps
    gamma[!max_ind] <- eps
  }
  return (gamma)
}
# M step: re-estimate the mixing proportions rho (mean responsibility per
# cluster) and each cluster's word distribution mu[k,] (responsibility-
# weighted word counts, normalised to sum to 1 over the vocabulary).
M.step <- function(gamma, model, counts){
  # Model Parameter Setting
  N <- dim(counts)[1] # number of documents
  W <- dim(counts)[2] # number of words i.e. vocabulary size
  K <- dim(model$mu)[1] # number of clusters
  # M step: Student needs to write this part for soft/hard EM
  # calculate rho(Aka. phi)(N_k for all clusters)
  model$rho <- matrix(colSums(gamma), ncol=1) / N
  for (k in 1:K) {
    # calculate mu: gamma[,k] recycles down the columns of counts, weighting
    # each document's counts by its responsibility for cluster k
    model$mu[k,] <- ((gamma[,k] * counts) %>% colSums) / ((gamma[,k] * counts) %>% sum)
  }
  # Return the result
  return (model)
}
# Run EM for the mixture-of-multinomials document model.
# Prints the negative log-likelihood (train_obj) before training and after
# each epoch so convergence can be monitored; returns the fitted model and
# the final responsibility matrix gamma (N x K).
EM <- function(counts, K=4, max.epoch=10, seed=123456, mode = "soft"){
  # Model Parameter Setting
  N <- dim(counts)[1] # number of documents
  W <- dim(counts)[2] # number of unique words (in all documents)
  # Initialization
  model <- initial.param(W, K=K, seed=seed)
  gamma <- matrix(0, nrow = N, ncol = K)
  print(train_obj(model,counts))
  # Build the model
  for(epoch in 1:max.epoch){
    # E Step
    gamma <- E.step(gamma, model, counts, mode)
    # M Step
    model <- M.step(gamma, model, counts)
    print(train_obj(model,counts))
  }
  # Return Model
  return(list("model"=model,"gamma"=gamma))
}
# End-to-end driver: read 'train6.txt' (label<TAB>document per line, as
# parsed by read.data), fit soft or hard EM with K clusters, and visualize
# the estimated (and optionally the true) clusters in PCA space.
EM.main = function(mode, K, epochs=5, seed = 5201, real.label.visualize = FALSE) {
  set.seed(seed)
  data <- read.data(file.name='train6.txt', sample.size=0, seed=seed, pre.proc=TRUE, spr.ratio= .99)
  # word-document frequency matrix
  counts <- data$word.doc.mat
  # calling the EM algorithm on the data
  res <- EM(counts, K=K, max.epoch=epochs, mode = mode, seed = seed)
  # visualization
  ## find the culster with the maximum probability (since we have soft assignment here)
  label.hat <- apply(res$gamma, 1, which.max)
  ## normalize the count matrix for better visualization
  counts <- counts %>% t %>% scale %>% t # only use when the dimensionality of the data (number of words) is large enough
  ## visualize the stimated clusters
  cluster.viz(counts, label.hat, paste0('Estimated Clusters (', mode,' EM)'))
  ## visualize the real clusters
  if (real.label.visualize) cluster.viz(counts, factor(data$label), 'Real Clusters')
}
# Demo runs: soft EM, then hard EM (with the true-label plot) on train6.txt.
# seed = NULL makes EM.main call set.seed(NULL), re-initialising the RNG, so
# each run starts from a different random state.
EM.main("soft", 4, epochs=10, seed = NULL, real.label.visualize = FALSE)
EM.main("hard", 4, epochs=10, seed = NULL, real.label.visualize = TRUE)
|
79201ca422ab6381707a79cf42b2f51e710c668e | 1df696ce96d96c3207b6e455449934efaeffb695 | /cachematrix.R | 84504cb42e0886eae8c8c2f5315d56ccb14a9e85 | [] | no_license | willstanleyus/datasciencecoursera | c30086c42c8f21b63f56aa843d5f26342f83df27 | c45a667e8fd8da4883f7822196903682b2cc5e6f | refs/heads/master | 2022-08-05T02:37:19.141247 | 2020-05-26T12:45:55 | 2020-05-26T12:45:55 | 260,250,201 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 839 | r | cachematrix.R | #The first function, makeCacheMatrix creates a special "matrix"
# Create a special "matrix": a list of closures sharing the matrix `x` and a
# cached value `m` that holds its inverse (NULL until computed/set).
makeCacheMatrix <- function(x = matrix()) {
  m <- NULL
  # set the value of the matrix (and invalidate any stale cached inverse)
  set <- function(y) {
    x <<- y
    m <<- NULL
  }
  # get the value of the matrix
  get <- function() x
  # set the value of the inverse
  setInverse <- function(inverse) m <<- inverse
  # get the value of the inverse
  # BUG FIX: previously returned the undefined variable `j`, so every call
  # to getInverse() raised an error; it must return the cached value `m`.
  getInverse <- function() m
  list(set = set, get = get,
       setInverse = setInverse,
       getInverse = getInverse)
}
# The following function calculates the inverse of the special "matrix" created with the above function.
# On a cache hit it returns the stored inverse; otherwise it computes the
# inverse with solve() and stores it through x$setInverse().
cacheSolve <- function(x, ...) {
  # Return a matrix that is the inverse of 'x'
  m <- x$getInverse()
  if(!is.null(m)) {
    message("getting cached data")
    return(m)
  }
  mat <- x$get()
  # BUG FIX: this function documents (and callers expect) the matrix
  # inverse, but it computed mean(mat, ...) and then called the nonexistent
  # x$setmean(); use solve() and the real setter instead.
  m <- solve(mat, ...)
  x$setInverse(m)
  m
}
|
f6087c5e2c44da6468b957698600dda270414bfb | a055c25d1865cd24f45819d53e6315d03ac24a4a | /Poly Regression - 1.R | 43b80a4818e2323040ce9dd213bb06dc2535e309 | [] | no_license | naazsheena/Machine-Learning-Rep | ad3fb31d7bcfa7f014cc0c04c973b51281b09064 | 42e227454fceb715489f0d3c25ce5505d2c9a1e9 | refs/heads/master | 2020-04-08T10:11:06.422277 | 2018-11-27T01:37:24 | 2018-11-27T01:37:24 | 159,257,964 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,152 | r | Poly Regression - 1.R | #This code is for polynomial regression
# Inline dataset: body Length vs Clutch size (units undocumented).
Input = ("
Length Clutch
284 3
290 2
290 7
290 7
298 11
299 12
302 10
306 8
306 8
309 9
310 10
311 13
317 7
317 9
320 6
323 13
334 2
334 8
")
Data = read.table(textConnection(Input), header = TRUE)
Data$Length = as.numeric(Data$Length)
# Degree-1 (simple linear) fit.
y_linear = lm(Clutch~Length, data = Data)
y_linear
summary(y_linear)
# Quadratic/cubic terms are precomputed as ordinary vectors, so they enter
# the formula as-is (no I() wrapper needed for these).
length2 = Data$Length * Data$Length
length2
y2_linear = lm(Clutch~Length + length2, data = Data)
summary(y2_linear)
length3 = length2 * Data$Length
y3_linear = lm(Clutch~Length + length2+ length3, data = Data)
summary(y3_linear)
summary(y3_linear)$r.squared
# NOTE(review): inside a formula, ^ is the crossing operator rather than
# exponentiation, so Data$Length^4 and Data$Length^5 below collapse to
# Data$Length and add no quartic/quintic terms. Wrap them in I(...) or use
# poly(Length, 5) if genuine power terms were intended.
y4_linear = lm(Clutch~Length + length2+ length3 +Data$Length^4, data = Data)
summary(y4_linear)
y5_linear = lm(Clutch~Data$Length + length2+ length3 +Data$Length^4 + Data$Length^5, data = Data)
summary(y5_linear)
# Scratch comparison of elementwise * and ^ outside a formula (these agree).
len2 = Data$Length * Data$Length
len2
len2_1 = Data$Length^2
len2_1
y2_linear_1 = lm(Clutch~ Data$Length + length2+ length3 +Data$Length^4 + Data$Length^5, data = Data)
|
a2d907235a57e4adc4a759f4f4e1478f76cab3ab | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/RSDA/examples/sym.mds.Rd.R | a301fc9d556662d3483941d23d3a692e68991ed0 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 399 | r | sym.mds.Rd.R | library(RSDA)
### Name: sym.mds
### Title: Symbolic Multidemensional Scaling
### Aliases: sym.mds
### Keywords: MDS Symbolic
### ** Examples
## Not run:
##D data(oils)
##D res<-sym.mds(oils)
##D plot(res,pch = 23, bg = 'red', xlab = 'Score 1', ylab = 'Score 2')
##D res<-sym.mds(oils,distance='centers')
##D plot(res,pch = 23, bg = 'red', xlab = 'Score 1', ylab = 'Score 2')
## End(Not run)
|
2fd07239b8e37ac2f97a01dd2ead39d3a547701e | ff420b886e7d2883fe882b866b3bfe802babccea | /1 - Data Preprocessing/data_precess.R | f397932224f88aa35eaeb670c8e72d0370d01185 | [] | no_license | vlad-danaila/machine-learning-workout | 391e5dd98c61f26c3618b01c01577ddceace9bfb | aeda66b71af6bd3a9b8c5a23de641e5fc3b57141 | refs/heads/master | 2020-07-04T14:26:50.239956 | 2019-11-03T00:15:48 | 2019-11-03T00:15:48 | 202,311,115 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 928 | r | data_precess.R | dataset = read.csv("C:\\DOC\\Workspace\\Machine Learning A-Z Template Folder\\Part 1 - Data Preprocessing\\Data.csv")
# Missing data
# Replace the NA entries of a numeric vector with the mean of its
# observed (non-NA) values.
fill_missing_data = function(features) {
  observed_mean <- mean(features, na.rm = TRUE)
  replace(features, is.na(features), observed_mean)
}
# Mean-impute the two numeric columns that may contain NAs.
dataset$Age = fill_missing_data(dataset$Age)
dataset$Salary = fill_missing_data(dataset$Salary)
# Categorical data
# Re-encode a factor so its labels become "1".."k" in the original level
# order. NOTE: the original category names are discarded.
make_factors = function(features) {
  levels_features = levels(features)
  # IDIOM FIX: seq_len() is safe for zero-level factors, where the original
  # seq(length(...)) would produce c(1, 0) and make factor() error.
  factor(features, levels = levels_features, labels = seq_len(length(levels_features)))
}
dataset$Country = make_factors(dataset$Country)
dataset$Purchased = factor(dataset$Purchased, levels = c('No', 'Yes'), labels = c(0, 1))
# Feature scaling
# Columns 2:3 are standardized (presumably Age and Salary, the numeric
# columns imputed above -- confirm against the CSV layout).
dataset[, 2:3] = scale(dataset[, 2:3])
# Split
# NOTE(review): scaling is applied before the train/test split, so test-set
# statistics leak into the training features; scale after splitting to avoid
# this.
#install.packages('caTools')
library('caTools')
dataset_split = sample.split(dataset$Purchased, SplitRatio = 0.7)
train = subset(dataset, dataset_split)
test = subset(dataset, !dataset_split)
|
0e78006c56ac9870815665484612e42736c9da4e | 652ba47ad0144ab51db6a99f1c8ee1852a4fe341 | /tests/testthat.R | cf7619dbfb0563540c1a13c02cb7c5692cf09896 | [] | no_license | PhoenixTreeember/diffeRenTES | 7d140d6020b9c458cfad0e6d211f7a56c0e9d3db | daada1fce6ad98329ed78efffb5aaeb827f24b2f | refs/heads/master | 2022-01-14T03:34:51.397747 | 2019-06-11T13:49:48 | 2019-06-11T13:49:48 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 43 | r | testthat.R | library(testthat)
test_check("diffeRenTES") |
cbb12cae0731c831821348955c6018c3ea8de776 | edce54a4b71011fa65bd50143f03c95c04acb071 | /r/r-social_media_analytic/src/009_wordcloud_text-network.R | a1ec6234efd095c7d30f97ed528493ad1f1651cb | [] | no_license | andrybrew/all-files | 783dcf62386847ddeb94349cc6c3df67750384ab | 44b402f538a0716492faec11f88c89d785a0b5c0 | refs/heads/master | 2020-12-27T11:05:05.131460 | 2020-04-05T03:23:45 | 2020-04-05T03:23:45 | 237,879,830 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,321 | r | 009_wordcloud_text-network.R | # Import Library
library(readr)
library(e1071)
library(tm)
library(wordcloud)
library(RColorBrewer)
library(igraph)
# Import Data: one row per scraped document, with a `text` column used by wc().
df <- read_csv("data/processed/berita-batubara.csv")
# Create a Wordcloud Function
# Given a data.frame with a `text` column, draw (as side effects) a word cloud
# of TF-IDF-weighted terms and a circular term co-occurrence network.
# Returns the result of the final plot() call (invisible NULL).
wc <- function(df) {
  corpus <- Corpus(VectorSource(df$text))
  # Strip a hand-curated list of stop words, slang, and scraping artifacts
  # (URLs, pins, hashtags) before weighting. NOTE(review): one entry below
  # spans several physical lines (an embedded-newline string) — presumably a
  # paste accident, verify it still matches the intended tokens.
  corpus <- tm_map(corpus, removeWords, c(
    "sdh", "the", "krn", "nya", "udh", "gua", "ttg",
    "liat", "gimana", "cari", "iya", "tolong", "isi", "deh", "gara", "tujuan", "makan",
    "bbrp", "dengan", "yuk", "kau", "dgn", "makanan", "kqf", "cek", "gak", "via",
    "nak", "dpt", "mknn", "with", "pic", "cod", "dri", "hub", "samping", "pin", "ready", "dijamin", "terbukti",
    "best", "hrga", "isu", "bahas", "menemukan", "rilis", "kto", "sms", "our", "kat", "hai",
    "wajahmu", "takyah", "relay", "fda", "komedo", "bcc", "tga", "chitin", "atasi", "bitly",
    "pengen", "diikuti", "jiexpo", "sedia", "arealampung", "areajakarta", "googlovku",
    "idealkanlah", "bihalal", "diada", "bitlyjfysvi", "bitlylkwtt", "bitlyjfchou",
    "bitlylbvun", "bitlyyxqwio", "partymsh", "soon", "salamcoming", "and", "sent", "booth", "areasumut",
    "gemilangmalangorgberitabupati", "gemilangmalangorgberitahalalb", "hadirihalalbihalalpgri ",
    "ihalalkeluargabesardinkes", "hadirihalalbihalalkmbkecamatankepanjen", "hadirisilaturahmidanhalalbihalaldismpsmkislamdonomulyo",
    "ihalalhutrikesimbolsemangatnasionalisme", "pencarian", "dilirik", "ajak", "smswa", "terpercaya",
    "termurah", "apli", "pincb", "fdatga", "aee", "pinbf", "berminyak", " pinaaa", "callsms", "pinff",
    "mengencangkan", "peninggibadanherbalwebid", "flickrcomphotos", "obatpeninggibadanasia", "pindbb ", "sis", "ampuh", "berbpom", "abc", "loh", "only",
    "jgn", "pinbdefb", "pictwittercomcvcggdi ", "pictwittercomaqnqxkvpd ", "pinecb", "bitlyphpwtc", "bed", "hadiahnya", "jepangkoreajepangtaiwanchinese",
    "alhamdulillah", "worry", "promosikan", "menghilangkan ", "more", "busui", "tinyurlcomonj", "bitlywoik", "bitlywojoe", "luncurkan", "apnih", "dtozym",
    "gaesaman", "solusinya", "idealkan", "pinfbbd", "dicari", "mimin", "agen", "oncomcontentviewarticleidamoscozyhotelconventionhalljakartagelarhalalbihalalcatiditemid oncomcontentviewarticleidamoscozyhotelconventionhalljakartagelarhalalbihalalcatiditemid
    travelxposecomindexphpopti", "sertifikasi", "bitlybsplx", "oncomcontentviewarticleidamoscozyhotelconventionhalljakartagelarhalalbihalalcatiditemid 35
    travelxposecomindexphpopti", "oncomcontentviewarticleidamoscozyhotelconventionhalljakartagelarhalalbihalalcatiditemid", "travelxposecomindexphpopti", "pilih", "detikcom", "republikacoidberitagayahi", "lifevivacoidnewsread", "twittercomberitasatusta",
    "gosumbarcomberitabaca", "pustakalewinetmodberitaid", "bitlyrbwlb", "astroawanicomberitamalaysi", "khazanahrepublikacoidberitaduniai", "goriaucomberitapemerin", "republikacoidberitaekonomi", "antaranewscomberita", "bitlyxsnjl", "republikacoidberitaduniai",
    "bitlylnth", "jtan", "all", "butuh", "pictwittercomndbndtzx ",
    "bilang", "trmsk", "inc", "pictwittercomndbnvur", "pictwittercomwglqgxquc", "pictwittercomwglqgprma",
    "maksimalkan", "pinde", "mengganggu", "pictwittercomaqnqxkvpd", "pictwittercomndbndtzx", "pictwittercomcvcggdi", "stp", "info", "klik", "friendly", "gritc", "for", "tinyurlcomybxccqb", "republikaonline", "wisatacehcomalasan", "kotabandaacehjadiwisatahtml", "bitlyspkok", "villadilembangcoid",
    "nasionalnewsvivacoidnewsread", "sikit", "raya", "pustakalewicommodberitaid", "feedproxygooglecomrislamposmed", "pictwittercombbcqckl", "memintamintahrbukhari", "twittercomnasibakarjkt", "pictwittercomprhxrewy"
  ))
  # Term-document matrix with TF-IDF weights; drop terms missing from more
  # than 98% of documents to keep the vocabulary small.
  DTM <- TermDocumentMatrix(corpus, control = list(weighting = weightTfIdf))
  DTM <- removeSparseTerms(DTM, 0.98)
  mat <- as.matrix(DTM)
  # Aggregate weight per term, sorted descending, for the word cloud.
  f <- sort(rowSums(mat), decreasing = TRUE)
  dat <- data.frame(word = names(f), freq = f)
  # NOTE(review): `wc` here shadows this function's own name locally; the
  # wordcloud() return value is not used afterwards.
  wc <- wordcloud(
    words = dat$word,
    freq = dat$freq,
    random.order = FALSE,
    rot.per = 0.30,
    colors = brewer.pal(8, "Dark2")
  )
  # Term-term co-occurrence (term x term) matrix -> undirected weighted graph,
  # simplified (loops/multi-edges removed) and drawn on a circular layout.
  q <- mat %*% t(mat)
  g <- graph.adjacency(q, weighted = T, mode = "undirected")
  g <- simplify(g)
  V(g)$label <- V(g)$name
  V(g)$degree <- degree(g)
  layout <- layout_in_circle(g)
  plot(g, layout = layout)
}
# Apply to Dataset: render the word cloud and co-occurrence network.
wc(df)
|
cb2e11d6cee962e50092d1bbbada9b5dab1dadd0 | 5949335f6c11c836a343139d79847d1e1fdbd3f9 | /R/plots_qrun.R | 3115c59fd2f1ea505b526c2ec4ea24997818fe71 | [] | no_license | alexgenin/econetmodel | 2402a61a7e0bdecdbc584bb718c0f37ce6a9b466 | e715174971c78fc72d8f7bd88f5098394025fa15 | refs/heads/master | 2021-03-27T16:43:39.454549 | 2019-04-08T23:28:18 | 2019-04-08T23:28:18 | 30,194,706 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,438 | r | plots_qrun.R | #
# Quicly run/format/plot results of a system preset
#
# Build (without printing) a ggplot of abundance trajectories: one line per
# replicate `id`, coloured by species `sp`, over `time`.
qplotsys <- function(dat) {
  ggplot(dat) +
    geom_line(aes(time, ab, color = sp, group = id))
}
# Select relevant time ranges
# Keep only the rows whose `time` falls in one of two windows and tag each
# kept row with the window it belongs to.
#
# dat:    data.frame with a numeric `time` column.
# range1: c(min, max) bounds of the "initial" window (defaults reproduce the
#         previously hard-coded 0-300).
# range2: c(min, max) bounds of the "after_removal" window (defaults 2900-3400).
#
# Returns `dat` restricted to the two windows, with an added `ranges` factor
# column whose levels are "initial" and "after_removal".
qselect <- function(dat, range1 = c(0, 300), range2 = c(2900, 3400)) {
  in_range1 <- with(dat, time >= range1[1] & time <= range1[2])
  in_range2 <- with(dat, time >= range2[1] & time <= range2[2])
  # Every row starts as "initial"; rows in the second window are relabelled.
  dat$ranges <- factor(rep("initial", nrow(dat)),
                       levels = c("initial", "after_removal"))
  dat$ranges[in_range2] <- "after_removal"
  # subset() treats NA conditions as FALSE, dropping rows with missing time.
  subset(dat, in_range1 | in_range2)
}
# Format data from a quick run
# Coerce raw qrun() output to a long-format data.frame with one row per
# (time, species) observation.
#
# dat:         matrix or data.frame of simulation output; species columns are
#              identified by their "sp" prefix via is_nodecol().
# is.gathered: set TRUE if `dat` is already in long format to skip reshaping.
#
# Returns a data.frame with `sp` (species id) and `ab` (abundance) columns.
qformat <- function(dat,is.gathered=FALSE) {
  if (!is.data.frame(dat)) {
    dat <- as.data.frame(dat)
  }
  if ( ! is.gathered ) {
    # Reshape the species columns into key/value columns.
    # NOTE(review): tidyr::gather_() is deprecated — consider pivot_longer()
    # the next time this code is touched.
    cols <- colnames(dat)[is_nodecol(colnames(dat), prefix="sp")]
    dat <- gather_(dat, "sp", "ab", cols)
  }
  return(dat)
}
# Quickly runs 10 replicates of a system
# Each replicate restarts the compiled system from uniform-random initial
# abundances, runs it, and the trajectories are stacked with a replicate `id`.
qrun <- function(preset,
                 nruns=10,
                 ...) { # forwarded to preset() below (not to plyr)
  # Load and compile system
  qrun_sys <- preset(...)
  system <- compile.system(qrun_sys)
  # One data.frame chunk per replicate id, row-bound by plyr::ddply.
  ddply(data.frame(id=seq.int(nruns)), ~ id,
        function(id) {
          system %>%
            # Random initial state in [0, 1] for every state variable.
            alter_system(list(state=runif(get_size(system), 0, 1))) %>%
            run %>%
            # Clamp numerical noise: abundances below 1e-10 become zero.
            zero_below(1e-10) %>%
            cbind(id, .) %>%
            adjust_names()
        })
}
|
ac5cacd5e5e18eea7da0cb7b4da26972f322ac33 | 933c674278e2b3b8ebc0a90a70ac4fd629ac72e9 | /man/subsets.Rd | c8c3961d3c34c0a3c74854c3280d4b0d6dbbf1c0 | [] | no_license | dkahle/algstat | bc63249b8adca4005016939a6e7db11f5253ee01 | a705514d3a3c592361cd7ee222d1c743ed8808c9 | refs/heads/master | 2023-05-27T22:17:47.006699 | 2023-05-17T17:18:06 | 2023-05-17T17:18:06 | 27,615,285 | 14 | 11 | null | 2022-08-18T13:44:36 | 2014-12-05T23:50:12 | R | UTF-8 | R | false | true | 807 | rd | subsets.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/subsets.R
\name{subsets}
\alias{subsets}
\title{Compute Subsets}
\usage{
subsets(set, sizes = 1:length(set), include_null = FALSE)
}
\arguments{
\item{set}{the original set}
\item{sizes}{desired size(s) of subsets}
\item{include_null}{should the empty vector be included?}
}
\value{
a list of subsets as vectors
}
\description{
Compute the subsets of a given set.
}
\details{
Note that this algorithm is run in R: it is therefore not
intended to be the most efficient algorithm for computing
subsets.
}
\examples{
subsets(1:3)
subsets(1:3, size = 2)
subsets(1:3, include_null = TRUE)
subsets(c('a','b','c','d'))
subsets(c('a','b','c','d'), include_null = TRUE)
}
\seealso{
\code{\link[utils:combn]{utils::combn()}}
}
|
a8cd2cafd2ae760222500fdb4f577b39e4bd85a7 | 017e1d3c8002e6b0835a97985168d6fb2bb652f0 | /R/gadgets.R | 8a71e52d9d6c5143add49225535c13819f124bcf | [] | no_license | wnk4242/Rcheatsheet | e38baa4b09713c931caaef64eee5505b2b3a17b8 | 70054150c84b00affe6f525ce0f900755dd3e919 | refs/heads/master | 2021-07-26T19:55:23.175155 | 2020-07-03T14:19:32 | 2020-07-03T14:19:32 | 196,735,006 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,084 | r | gadgets.R | ######gadgets##########
#' Clear the console and say goodbye
#' @export
exit <- function() {
  # "\014" is the form-feed control code, which clears the RStudio console;
  # the farewell is emitted immediately after, exactly as the two separate
  # cat() calls did.
  cat("\014", "See ya!", sep = "")
}
#' Ask whether to return to the main menu or quit
#' @export
ask <- function() { # shown after a help topic to offer further navigation
  cat(bold$cyan('Need more help?'))
  picked <- menu(c("Sure! Go back to main menu!", "I'm good."))
  # menu() returns 0 when the user aborts; fall through silently in that
  # case, mirroring switch()'s out-of-range behaviour.
  if (picked == 1) {
    mmenu()
  } else if (picked == 2) {
    exit()
  }
}
#' Ask whether to return to the Environment setup menu, the main menu, or quit
#' @export
ask_setup <- function() {
  cat(bold$cyan('Need more help?'))
  choices <- c("Sure! Go back to the previous menu!",
               "Sure! Go back to the main menu!",
               "I'm good.")
  picked <- menu(choices)
  # menu() returns 0 on abort; do nothing then, like switch() would.
  if (picked == 1) {
    setup()
  } else if (picked == 2) {
    mmenu()
  } else if (picked == 3) {
    exit()
  }
}
#' Ask whether to return to the Data wrangling menu, the main menu, or quit
#' @export
ask_wrangle <- function() {
  cat(bold$cyan('Need more help?'))
  choices <- c("Sure! Go back to the previous menu!",
               "Sure! Go back to the main menu!",
               "I'm good.")
  picked <- menu(choices)
  # menu() returns 0 on abort; do nothing then, like switch() would.
  if (picked == 1) {
    wrangle()
  } else if (picked == 2) {
    mmenu()
  } else if (picked == 3) {
    exit()
  }
}
#' Ask whether to return to the Lifesaving packages menu, the main menu, or quit
#' @export
ask_lifesaver <- function() {
  cat(bold$cyan('Need more help?'))
  choices <- c("Sure! Go back to the previous menu!",
               "Sure! Go back to the main menu!",
               "I'm good.")
  picked <- menu(choices)
  # menu() returns 0 on abort; do nothing then, like switch() would.
  if (picked == 1) {
    lifesaver()
  } else if (picked == 2) {
    mmenu()
  } else if (picked == 3) {
    exit()
  }
}
#' Ask whether to return to the dplyr-package menu, the main menu, or quit
#' @export
ask_dplyrpkg <- function() {
  cat(bold$cyan('Need more help?'))
  choices <- c("Sure! Go back to the previous menu!",
               "Sure! Go back to the main menu!",
               "I'm good.")
  picked <- menu(choices)
  # menu() returns 0 on abort; do nothing then, like switch() would.
  if (picked == 1) {
    dplyrpkg()
  } else if (picked == 2) {
    mmenu()
  } else if (picked == 3) {
    exit()
  }
}
#' After building a cheat sheet, ask whether to add examples, rebuild,
#' or navigate elsewhere
#' @export
ask_csbuilder <- function() {
  cat(bold$cyan('Do you need to add more examples?'))
  choices <- c("Continue adding examples (will not clear console)",
               "Build a new cheat sheet (warning: will clear console!)",
               "Go back to cheat sheet builders",
               "Go back to main menu",
               "I'm good.")
  picked <- menu(choices)
  # menu() returns 0 on abort; do nothing then, like switch() would.
  if (picked == 1) {
    extraeg()
  } else if (picked == 2) {
    csbuilder()
  } else if (picked == 3) {
    builder()
  } else if (picked == 4) {
    mmenu()
  } else if (picked == 5) {
    exit()
  }
}
#' Ask whether to return to the package update diary menu, the main menu, or quit
#' @export
ask_diary <- function() {
  cat(bold$cyan('Need more help?'))
  choices <- c("Sure! Go back to the previous menu!",
               "Sure! Go back to the main menu!",
               "I'm good.")
  picked <- menu(choices)
  # menu() returns 0 on abort; do nothing then, like switch() would.
  if (picked == 1) {
    diary()
  } else if (picked == 2) {
    mmenu()
  } else if (picked == 3) {
    exit()
  }
}
#' Ask whether to return to the cheat sheet building menu, the main menu, or quit
#' @export
ask_builder <- function() {
  cat(bold$cyan('Need more help?'))
  choices <- c("Sure! Go back to the previous menu!",
               "Sure! Go back to the main menu!",
               "I'm good.")
  picked <- menu(choices)
  # menu() returns 0 on abort; do nothing then, like switch() would.
  if (picked == 1) {
    builder()
  } else if (picked == 2) {
    mmenu()
  } else if (picked == 3) {
    exit()
  }
}
#' Ask whether to return to the Statistical analysis menu, the main menu, or quit
#' Currently unused.
#' @export
ask_stanalysis <- function() {
  cat(bold$cyan('Need more help?'))
  choices <- c("Sure! Go back to the previous menu!",
               "Sure! Go back to the main menu!",
               "I'm good.")
  picked <- menu(choices)
  # menu() returns 0 on abort; do nothing then, like switch() would.
  if (picked == 1) {
    stanalysis()
  } else if (picked == 2) {
    mmenu()
  } else if (picked == 3) {
    exit()
  }
}
#' Ask whether to return to the APA formatting menu, the main menu, or quit
#' Currently unused.
#' @export
ask_apastyle <- function() {
  cat(bold$cyan('Need more help?'))
  choices <- c("Sure! Go back to the previous menu!",
               "Sure! Go back to the main menu!",
               "I'm good.")
  picked <- menu(choices)
  # menu() returns 0 on abort; do nothing then, like switch() would.
  if (picked == 1) {
    apastyle()
  } else if (picked == 2) {
    mmenu()
  } else if (picked == 3) {
    exit()
  }
}
|
16b204a3b463b632be5b533f6010b43308d30abd | 321efc730be43541fc612b7a1fb70ac6085d81c8 | /R Nuts and Bolts/SW Midterm.R | fe6965826e71f7d0fc5788804c2f2d6bf0c06d4c | [] | no_license | Markli25/Midterm_repo | 277c1d7d9cae4c1df63c9fe42cb0c2eb80b4110f | 2c7234faeb5360e5b19b81e9156d4f535d2d5d6f | refs/heads/master | 2020-03-22T05:08:31.904402 | 2018-09-18T12:03:42 | 2018-09-18T12:03:42 | 139,544,985 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 877 | r | SW Midterm.R | dataset<- read.csv("midetrmseatwork_data.csv")
# Mean Wind over the observations where Ozone exceeds `min` and Temp exceeds
# `max`. The threshold parameters keep their original names for backward
# compatibility, even though they shadow base::min/base::max inside the body.
# Rows failing the condition (or with NA in the condition or in Wind) are
# excluded from the mean.
subset_data <- function(dataset, min = 25, max = 70) {
  keep <- dataset$Ozone > min & dataset$Temp > max
  mean(dataset$Wind[keep], na.rm = TRUE)
}
subset_data(dataset)
# Mean of column 4 over the rows whose Month (column 5) and Day (column 6)
# match the requested values; NA values in column 4 are ignored. Column
# positions follow the midterm dataset layout used by this script.
MeanFunction <- function(dataset, Month = 9, Day = 8) {
  # Vectorized replacement of the original row loop. This also fixes the
  # original 1:nrow(dataset) iteration, which walked c(1, 0) on an empty
  # data frame, and drops the meaningless col_num <- ncol(dataset) seed.
  matched <- dataset[[5]] == Month & dataset[[6]] == Day
  vals <- ifelse(matched, dataset[[4]], NA)
  mean(vals, na.rm = TRUE)
}
MeanFunction(dataset)
# Minimum of column 1 over the rows whose Month (column 5) matches.
# NA values in column 1 are ignored; if no row matches, min() returns Inf
# with a warning, exactly as the original loop-based version did.
MinFunction <- function(dataset, Month) {
  # Vectorized replacement of the original row loop (also avoids the
  # c(1, 0) iteration of 1:nrow on an empty data frame).
  matched <- dataset[[5]] == Month
  vals <- ifelse(matched, dataset[[1]], NA)
  min(vals, na.rm = TRUE)
}
MinFunction(dataset,5)
MinFunction(dataset,6)
MinFunction(dataset,7)
MinFunction(dataset,8)
MinFunction(dataset,9) |
408ebc86a1cb7e3efaf39db3fcc16d67037f482e | 17599442579623cb1ef00358322170787a8ecc41 | /man/frame_dots.Rd | 2f693849d3f66e13907c52c2eed588da8ce0c63b | [] | no_license | rlugojr/rlang | 792c40f51bfe527810e7e2acfd83193cd14cc669 | 73164435cc3b46069c8f451e78edb28cda1d0c83 | refs/heads/master | 2021-01-13T01:07:59.722058 | 2017-02-22T14:44:45 | 2017-02-22T14:44:45 | 81,410,996 | 0 | 0 | null | 2017-02-09T05:01:54 | 2017-02-09T05:01:54 | null | UTF-8 | R | false | true | 1,047 | rd | frame_dots.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dots.R
\name{frame_dots}
\alias{frame_dots}
\alias{frame_dots_lsp}
\alias{dots}
\alias{dots_lsp}
\title{Extract dots}
\usage{
frame_dots(frame = NULL)
frame_dots_lsp(frame = NULL)
dots(...)
dots_lsp(...)
}
\arguments{
\item{frame}{The environment from which the dots should be
retrieved. Can be a frame, an environment, or a formula from
which to retrieve an environment. If not supplied, the calling
frame is used.}
\item{...}{Arguments to extract. Can be both forwarded dots and
direct arguments.}
}
\description{
\code{frame_dots()} extracts dots from a frame and
\code{dots()} extracts dots from its arguments. The
\code{_lsp()} versions return a pairlist that is ready to be
spliced into a call, while the regular versions return a regular
list that is usually easier to work with.
}
\details{
\code{frame_dots()} and \code{frame_dots_lsp()} never fail, even if
the frame does not contain dots. Instead they return an empty list
or \code{NULL} respectively.
}
|
f460f7c1f0de786087f59afdfd3a9a738fcec065 | f61064bb7d0013f111123206b230482514141d9e | /man/compute_tv_poisbinom_translatedpois.Rd | 5ba8b4ffff61c548204cc1071f6483b930e78bc7 | [] | no_license | nianqiaoju/agents | 6e6cd331d36f0603b9442994e08797effae43fcc | bcdab14b85122a7a0d63838bf38f77666ce882d1 | refs/heads/main | 2023-08-17T05:10:49.800553 | 2021-02-18T23:01:47 | 2021-02-18T23:01:47 | 332,890,396 | 3 | 0 | null | null | null | null | UTF-8 | R | false | true | 611 | rd | compute_tv_poisbinom_translatedpois.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/compute_tv_poisbinom_translatedpois.R
\name{compute_tv_poisbinom_translatedpois}
\alias{compute_tv_poisbinom_translatedpois}
\title{Compute the TV distance between Poisson-Binomial distribution and its translated Poisson approximation}
\usage{
compute_tv_poisbinom_translatedpois(alpha)
}
\arguments{
\item{alpha}{vector of probabilities for the Poisson-Binomial distribution}
}
\value{
total variation distance
}
\description{
Compute the TV distance between Poisson-Binomial distribution and its translated Poisson approximation
}
|
74b602203e537c570d0e54ca05cd7c995619e317 | 6a8fb893e295d2ab65f7d3cbdb7d7790208742e7 | /scripts/04_Bayesian.r | 07775d171080bc494ee848c23467162329dfc6e5 | [
"MIT"
] | permissive | BiodiversityModelling2021/Francis | c38f52db798709ee44dbe1ce814e12ca2015de3b | c42b58ab78cb88f770d5cb9d75f5a3c12bd2faf2 | refs/heads/main | 2023-07-14T20:49:21.842877 | 2021-08-27T19:11:03 | 2021-08-27T19:11:03 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,946 | r | 04_Bayesian.r | #### Biodiversity modeling course 2021
## Day 7 - Bayesian approaches
## Grid search
# Generate data: 10 draws from Binomial(size = 6, p = 0.6).
set.seed(1859)
x_obs <- rbinom(10, 6, 0.6)
x_obs
# Make a vector of probabilities for p (the candidate grid).
p <- seq(from = 0.1, to = 0.99, length.out = 99)
# Visualize a Beta prior in its (mean mu, concentration phi) parameterization;
# with mu = 0.5, phi = 4 this is Beta(2, 2), matching the prior used below.
mu <- 0.5 # average probability
phi <- 4 # concentration around this value
curve(dbeta(x, mu * phi, (1-mu)*phi))
# Find the probability of each of these values in p (prior)
prior_dens <- dbeta(p, 2, 2)
plot(p, prior_dens)
# Find the likelihood for each of these values of p (likelihood)
likelihood_fct <- function(p) prod(dbinom(x = x_obs, size = 6, prob = p, log=FALSE))
likelihood_dens <- sapply(p, likelihood_fct)
# Multiply prior and likelihood together and normalize so the grid values
# sum to 1 (a pmf over the grid points, not a continuous density).
norm <- sum(prior_dens * likelihood_dens)
post_p <- prior_dens * likelihood_dens / norm
plot(p, post_p)
points(p, prior_dens / 50, col = "red") # prior rescaled for visual comparison
## Conjugate priors
# Beta(2, 2) is conjugate to the binomial likelihood: the exact posterior is
# Beta(2 + total successes, 2 + total failures).
post_conj <- function(x) dbeta(x, 2 + sum(x_obs), 2 + sum(6 - x_obs))
curve(post_conj(x))
post_conj_p <- post_conj(p)
# NOTE(review): `factor` shadows base::factor for the rest of this script.
factor <- sum(post_conj_p)
curve(post_conj(x))
points(p, post_p*factor) # grid posterior rescaled onto the density's scale
### Coates & Burton (1999) problem
### DOI:10.1139/cjfr-29-9-1374
library(purrr)
library(truncnorm)
# Number of observations for each species
N <- c(93, 77, 72, 91, 80)
# Parameter estimates for each species (a_mean, a_sd, s_mean, s_sd)
a_estimates <- c(198.5, 260.6, 125.7, 279.0, 506.8)
a_SE <- c(12.1, 23.3, 16.0, 28.6, 71.1)
s_estimates <- c(11.92, 7.56, 2.77, 4.39, 4.55)
s_SE <- c(1.62, 1.04, 0.44, 0.42, 0.37)
# Compute the standard deviations from the standard error for each species
a_sd <- a_SE * sqrt(N)
s_sd <- s_SE * sqrt(N)
# Range of values of L
L <- seq(from = 0, to = 100, length.out = 101)
# Generate fake data for a single light level L: parameters are first drawn
# from their priors, then one observation is sampled around the saturating
# growth curve a*L / ((a/s) + L), which tends to `a` for large L and has
# initial slope `s` at L = 0.
generate_data <- function(L, a_mean, a_sd, s_mean, s_sd) {
  # Sample parameter values from priors truncated to (0, Inf); the noise
  # standard deviation sigma gets an Exp(2) prior.
  a <- rtruncnorm(n = 1, a = 0, b = Inf, mean = a_mean, sd = a_sd)
  s <- rtruncnorm(n = 1, a = 0, b = Inf, mean = s_mean, sd = s_sd)
  sigma <- rexp(n = 1, rate = 2)
  # Compute the average of the distribution (scientific model)
  mu <- (a*L)/((a/s) + L)
  # Generate a value of y (truncated below at 0, so growth is non-negative)
  y <- rtruncnorm(n = 1, a = 0, b = Inf, mean = mu, sd = sigma)
  return (y)
}
# Generate fake data (without sampling parameters): same saturating model but
# with a, s, and sigma fixed by the caller — used to produce a dataset with
# known parameters for the grid-search likelihood below.
generate_data_clean <- function(L, a, s, sigma) {
  # Compute the average of the distribution (scientific model):
  # saturates at `a` for large L, with initial slope `s`.
  mu <- (a*L)/((a/s) + L)
  # Generate a value of y (truncated below at 0)
  y <- rtruncnorm(n = 1, a = 0, b = Inf, mean = mu, sd = sigma)
  return (y)
}
# Simulate data for the first species, with parameters drawn from the priors.
y_sim <- sapply(L, function(x) generate_data(L = x, a_mean = a_estimates[1], a_sd = a_sd[1], s_mean = s_estimates[1], s_sd = s_sd[1]))
plot(L, y_sim)
# Simulate data for the first species with FIXED parameters (a, s, sigma = 10).
y_sim_clean <- sapply(L, function(x) generate_data_clean(L = x, a = a_estimates[1], s = s_estimates[1], sigma = 10))
plot(L, y_sim_clean)
# Likelihood function
# Log-likelihood of (a, s, sigma) given the simulated data. NOTE: this reads
# the globals `L` (light levels) and `y_sim_clean` (observations) from above.
log_likelihood <- function(a, s, sigma) {
  mu <- (a*L)/((a/s) + L)
  sum(log(dtruncnorm(x = y_sim_clean, a = 0, b = Inf, mean = mu, sd = sigma)))
}
# Define a set of parameter values to search over.
a <- seq(from = 185, to = 215, by = 1)
s <- seq(from = 5, to = 15, by = 1)
sigma <- seq(from = 8, to = 15, by = 1)
# Full factorial grid over (a, s, sigma).
params = expand.grid(a = a, s = s, sigma = sigma)
# Compute log-likelihood for all combinations of parameter values
params$ll <- 0
for (i in 1:nrow(params)) {
  params[i, "ll"] <- log_likelihood(params[i, "a"], params[i, "s"], params[i, "sigma"])
}
# Maximum-likelihood row of the grid.
params[which.max(params$ll),]
# Effects of priors on the maximum likelihood estimates: weight each grid
# point's likelihood by the prior densities of its parameters.
params$prior_a <- dtruncnorm(params$a, a = 0, b = Inf, mean = a_estimates[1], sd = a_sd[1])
params$prior_s <- dtruncnorm(params$s, a = 0, b = Inf, mean = s_estimates[1], sd = s_sd[1])
# BUGFIX: use the exponential *density* dexp() as the prior weight, matching
# the dtruncnorm() densities above and the Exp(2) prior used in
# generate_data(); rexp() drew random numbers, making this "posterior"
# non-deterministic and meaningless.
params$prior_sigma <- dexp(params$sigma, rate = 2)
# Unnormalized posterior: likelihood (back-transformed from log) times priors.
params$post <- with(params, exp(ll) * prior_a * prior_s * prior_sigma)
# Maximum a posteriori row of the grid.
params[which.max(params$post), ]
|
6213f4f0d07809654a41652d7aee3bd4259d3285 | 3e1d64105a88959f5ae96f0dc75af781d775bc1e | /man/Dhat.Rd | e25af588e705357e318a9386c721960dc99894ef | [] | no_license | EricMarcon/dbmss | ce396dd12f72f41d8691d32116c10eb068dee9f8 | a814b98c7d7d708f43894045a45d9ae32f2dd684 | refs/heads/master | 2023-08-07T14:45:18.530925 | 2023-07-21T08:53:33 | 2023-07-21T08:53:33 | 95,145,114 | 6 | 6 | null | 2022-01-25T15:25:17 | 2017-06-22T18:30:25 | R | UTF-8 | R | false | false | 2,387 | rd | Dhat.Rd | \name{Dhat}
\alias{Dhat}
\title{
Estimation of the D function
}
\description{
Estimates the \emph{D} function}
\usage{
Dhat(X, r = NULL, Cases, Controls = NULL, Intertype = FALSE, CheckArguments = TRUE)
}
\arguments{
\item{X}{
A weighted, marked, planar point pattern (\code{\link{wmppp.object}}).
}
\item{r}{
A vector of distances. If \code{NULL}, a sensible default value is chosen (512 intervals, from 0 to half the diameter of the window) following \strong{spatstat}.
}
\item{Cases}{
One of the point types.
}
\item{Controls}{
One of the point types. If \code{NULL}, controls are all types except for cases.
}
\item{Intertype}{
Logical; if \code{TRUE}, \emph{D} is computed as \emph{Di} in Marcon and Puech (2012).
}
\item{CheckArguments}{
Logical; if \code{TRUE}, the function arguments are verified. Should be set to \code{FALSE} to save time in simulations for example, when the arguments have been checked elsewhere.
}
}
\details{
The \emph{Di} function allows comparing the structure of the cases to that of the controls around cases, that is to say the comparison is made around the same points. This has been advocated by Arbia et al. (2008) and formalized by Marcon and Puech (2012).
}
\value{
An object of class \code{fv}, see \code{\link{fv.object}}, which can be plotted directly using \code{\link{plot.fv}}.
}
\references{
Arbia, G., Espa, G. and Quah, D. (2008). A class of spatial econometric methods in the empirical analysis of clusters of firms in the space. \emph{Empirical Economics} 34(1): 81-103.
Diggle, P. J. and Chetwynd, A. G. (1991). Second-Order Analysis of Spatial Clustering for Inhomogeneous Populations. \emph{Biometrics} 47(3): 1155-1163.
Marcon, E. and F. Puech (2017). A typology of distance-based measures of spatial concentration. \emph{Regional Science and Urban Economics}. 62:56-67.
}
\note{
The computation of \code{Dhat} relies on spatstat functions \code{\link{Kest}} and \code{\link{Kcross}}.
}
\seealso{
\code{\link{Khat}}, \code{\link{DEnvelope}}, \code{\link{Kest}}, \code{\link{Kcross}}
}
\examples{
data(paracou16)
autoplot(paracou16)
# Calculate D
r <- 0:30
(Paracou <- Dhat(paracou16, r, "V. Americana", "Q. Rosea", Intertype = TRUE))
# Plot (after normalization by pi.r^2)
autoplot(Paracou, ./(pi*r^2) ~ r)
}
|
# Batch gradient descent for a two-parameter linear regression.
#
# X:     m x 2 design matrix (first column of ones, second the feature).
# y:     m x 1 matrix of targets.
# theta: 2 x 1 matrix of initial parameters.
# alpha: learning rate.
# iters: number of gradient-descent iterations.
#
# Returns c(theta, J_history): the fitted parameters followed by the cost
# before training and after each iteration, flattened into one numeric
# vector (same shape as the original implementation). Progress is logged
# with cat() exactly as before.
gradienteDescendente <- function(X, y, theta, alpha, iters) {
  m <- length(y) # number of training examples
  # Preallocate the cost history instead of growing it inside the loop.
  J_history <- numeric(iters + 1)
  # NOTE(review): computeCost is assumed deterministic/pure — it was
  # previously evaluated twice per logging point; it is now computed once
  # and reused for both the history and the log line.
  J_history[1] <- computeCost(X, y, theta)
  cat("Compute cost:", J_history[1])
  # seq_len() fixes the original 1:iters loop, which iterated over c(1, 0)
  # when iters == 0 instead of not running at all.
  for (iter in seq_len(iters)) {
    # Accumulate the two gradient components over all training examples.
    temp0 <- 0
    temp1 <- 0
    for (i in seq_len(m)) {
      residual <- (X[i, 1] * theta[1, 1] + X[i, 2] * theta[2, 1]) - y[i, 1]
      temp0 <- temp0 + residual
      temp1 <- temp1 + residual * X[i, 2]
    }
    # Simultaneous update of both parameters.
    theta[1, 1] <- theta[1, 1] - ((alpha / m) * temp0)
    theta[2, 1] <- theta[2, 1] - ((alpha / m) * temp1)
    # Save the cost J in every iteration.
    J_history[iter + 1] <- computeCost(X, y, theta)
    cat("Compute cost:", J_history[iter + 1])
  }
  c(theta, J_history)
}
3291105566980d51943c154b49229e81712f932f | eabcf2a843656149d303b096a6dbe18ee386d3b3 | /src/TesT๋ฐ์ดํฐ๋ง๋ค๊ธฐ/์ฐ์ ํต๊ณ_TEST๋ฐ์ดํฐ_๋ง๋ค๊ธฐ.R | 17e4fcd0d38b8426ebb7e4574d00e15f4d7db0e9 | [] | no_license | TaegyuHan/Unstructured_data_analysis_finalTest_SCH | b8b4b305bf337e7d44695c11834ae1a2ab49434e | a5d1eeb3ea4de28cbbae9709a2142968b1319523 | refs/heads/main | 2023-06-02T08:16:30.672120 | 2021-06-23T06:56:01 | 2021-06-23T06:56:01 | 377,427,906 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,466 | r | ์ฐ์ ํต๊ณ_TEST๋ฐ์ดํฐ_๋ง๋ค๊ธฐ.R |
# --------------------------------------------------------------------------- #
# Helper functions
predShowConfusionMatrix <- function(TargetData, predData)
{
  # ------------------------------------------------ #
  # Build and plot a confusion-matrix heatmap (cvms)
  # comparing the true labels against predictions.
  # TargetData: vector/factor of ground-truth labels.
  # predData:   vector/factor of predicted labels.
  # Returns the plot object produced by
  # cvms::plot_confusion_matrix().
  # ------------------------------------------------ #
  confusionMatrixData <- tibble("target" = TargetData,
                                "prediction" = predData)
  # Cross-tabulate counts, then reshape to the long format cvms expects:
  # one row per (target, prediction) cell with its count `n`.
  basic_table <- table(confusionMatrixData)
  cfm <- as_tibble(basic_table)
  cvms::plot_confusion_matrix(cfm,
                              target_col = "target",
                              prediction_col = "prediction",
                              counts_col = "n",
                              palette = "Greens" )
}
# --------------------------------------------------------------------------- #
# Pass the instructor-supplied preprocessed Data_set (a list of data.frames)
# as the function argument.
# --------------------------------------------------------------------------- #
# Data_set
makeStatisticsTestData <- function( inputData=Data_set )
{
  # ---------------------------------------------------- #
  # Returns the summary-statistics TEST data: one row per
  # observation, with per-row statistics computed over the
  # 15000 signal columns plus the `event` class label.
  # ---------------------------------------------------- #
  AllData <- data.frame()
  print("Data_set ๋ฐ์ดํฐ๋ฅผ ํ๋๋ก ํฉ์น๊ณ ์์ต๋๋ค.")
  # BUGFIX: iterate over the *argument* (inputData), not the global Data_set,
  # and use seq_along() so an empty list does not loop over c(1, 0).
  for (num in seq_along(inputData)){
    AllData <- rbind(AllData, inputData[[num]])
    print(paste0(num, " > ์๋ฃ"))
  }
  print("statisticsTestData ์ ์ฒ๋ฆฌ๋ฅผ ์์ํฉ๋๋ค.")
  # Hoist the 15000 signal columns once instead of re-slicing per statistic.
  signal <- AllData[, 1:15000]
  statisticsTestData <- data.frame(
    sum = apply(signal, 1, sum),
    mean = apply(signal, 1, mean),
    min = apply(signal, 1, min),
    max = apply(signal, 1, max),
    geometricMean = apply(signal, 1, psych::geometric.mean),
    median = apply(signal, 1, median),
    # BUGFIX: removed the stray trailing comma after stats::sd, which passed
    # an empty argument through apply()'s `...`.
    sd = apply(signal, 1, stats::sd),
    skewness = apply(signal, 1, e1071::skewness),
    event = as.factor(AllData$event)
  )
  print(" statisticsTestData ์ ์ฒ๋ฆฌ ์๋ฃ! ")
  return(statisticsTestData)
}
# Build the TEST data (per-row summary statistics).
s_TestData <- makeStatisticsTestData(Data_set)
# setwd("<path to this file>")  # uncomment and set if the relative path below fails
# Load the pre-trained random-forest model for the statistics features.
RFModelstatistic <- get(load("./model/RFModelstatistic.rda"))
# Predict from the 8 statistic columns, then plot the confusion matrix
# of predictions against the true `event` labels.
predStatistic <- predict(RFModelstatistic, newdata = s_TestData[1:8])
StatisticCM <- predShowConfusionMatrix(s_TestData$event, predStatistic)
|
043caa9aa31d04b6a1bf586eb0c5e7b39963c104 | ebab9db9bb33c548b003d421380b9aa804117e57 | /tests/testthat/test_featureImportance.R | 2249dda28a6916cdaed95e34f6ae815588d3b93e | [] | no_license | xiaoxiaozhangx/featureImportance | 9d6227ba16e8c6e40860bae500b1f5223d0d0118 | cf064ddc757f73db1efbc7e9722869fbfff8e6ba | refs/heads/master | 2023-05-09T08:50:15.609474 | 2021-05-30T08:17:52 | 2021-05-30T08:17:52 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,022 | r | test_featureImportance.R | context("featureImportance with WrappedModel works")
test_that("featureImportance with WrappedModel works", {
  # One single feature and one feature group; exercised under every
  # combination of method (permutation vs. id replacement) and local flag.
  feat = list(features[1], features[2:3])
  local = c(FALSE, TRUE)
  method = c("permute", "replace.id")
  for (m in method) {
    for (loc in local) {
      set.seed(1)
      if (m == "permute") {
        imp = featureImportance(mod, data = d, features = feat, n.feat.perm = n.feat.perm, measures = measures, local = loc)
      } else {
        imp = featureImportance(mod, data = d, features = feat, replace.ids = 1:2, measures = measures, local = loc)
      }
      imp = imp$importance
      # Local importances carry per-observation identifier columns.
      if (loc)
        expect_subset(c("row.id", "replace.id"), colnames(imp))
      # Expected row count: one row per feature set and permutation,
      # times one row per observation when local.
      nrow = length(feat)*n.feat.perm*ifelse(loc, nrow(d), 1)
      expect_output(print.featureImportance(imp), regexp = "Global feature importance")
      expect_data_table(imp, nrows = nrow)
      expect_subset(c("features", "n.feat.perm", mid), colnames(imp))
      # acc and mmce are complementary measures, so importances must mirror.
      expect_equal(imp$acc, -imp$mmce)
      expect_equal(stri_split_fixed(unique(imp$features), ","), feat)
      # check if using mod$learner.model yields the same importances
      set.seed(1)
      if (m == "permute") {
        imp2 = featureImportance(mod$learner.model, data = d, target = target, features = feat, n.feat.perm = n.feat.perm,
          measures = measures.fun, local = loc, predict.fun = predict.fun)
      } else {
        imp2 = featureImportance(mod$learner.model, data = d, target = target, features = feat, replace.ids = 1:2,
          measures = measures.fun, local = loc, predict.fun = predict.fun)
      }
      imp2 = imp2$importance
      expect_identical(imp, imp2)
    }
  }
})
context("featureImportance with ResampleResult works")
test_that("featureImportance with ResampleResult works", {
  # A feature group and a single feature; checked against each resample
  # result / resample instance pair from the fixtures.
  feat = list(features[1:2], features[3])
  for (i in seq_along(res.list)) {
    res = res.list[[i]]
    rin = resampling[[i]]
    imp = featureImportance(res, data = d, features = feat, n.feat.perm = n.feat.perm, measures = measures, local = FALSE)
    imp = imp$importance
    # Global importance: one row per feature set and permutation.
    nrow = length(feat)*n.feat.perm
    expect_data_table(imp, nrows = nrow)
    expect_subset(c("features", "n.feat.perm", mid), colnames(imp))
    # Passing data that differs from what produced the ResampleResult
    # must raise an informative error (wrapped in a warning).
    expect_error(expect_warning(featureImportance(res, data = d[1:2,], features = feat,
      n.feat.perm = n.feat.perm, measures = measures, local = FALSE),
      regexp = "Use the same data that created the ResampleResult"))
    imp.local = featureImportance(res, data = d, features = feat, n.feat.perm = n.feat.perm, measures = measures, local = TRUE)
    imp.local = imp.local$importance
    # Local importance: additionally one row per distinct test observation.
    nrow = length(feat)*length(unique(unlist(rin$test.inds)))*n.feat.perm
    expect_data_table(imp.local, nrows = nrow)
    expect_equal(imp.local$acc, -imp.local$mmce)
    expect_equal(stri_split_fixed(unique(imp.local$features), ","), feat)
    expect_subset(c("features", "n.feat.perm", "row.id", mid), colnames(imp.local)) # local importance must carry a row.id column
    expect_set_equal(res$pred$data$id, imp.local$row.id)
  }
})
|
d20dc6f9de3de7abe2c0fd597083b992364d89bf | 17c22edbc09a276d0af436912d9bc5de803aa0fc | /ImagePlot.r | e901cbf3a99d206c6a08a8c47cd0ed704fd7f904 | [] | no_license | johngfc/ClimatePrimers | 00cef54a891416247ccf3db6ebc252ee746732b2 | b889d703307a93cbfb06d251e906cd0378401f17 | refs/heads/master | 2021-01-22T12:35:15.667420 | 2014-02-20T19:08:03 | 2014-02-20T19:08:03 | 17,331,118 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,421 | r | ImagePlot.r |
ImagePlot<-function(InputDat,Ylab="",Xlab="",Plot=TRUE,Months,Colors,Baseline=c(1890,1980),Main,ColVar="TempChng",cexMult,
                    DisplayOutput=DisplayOutput,OutputGraphics=OutputGraphics){
  # Plot the monthly time series in InputDat@Ts (one value per month, with a
  # matching InputDat@Year entry per value) as a month-by-year anomaly image,
  # relative to the mean monthly value over [Baseline[1], Baseline[2]].
  # NOTE(review): Plot, Months, and Xlab are currently unused — kept only for
  # interface compatibility.
  Call <- match.call()
  # When not displaying interactively, render to a PNG named after the input
  # object and the baseline period; the device is closed on exit.
  if (!DisplayOutput) {
    png(file.path(OutputGraphics,
                  paste(Call$InputDat, "Baseline", min(Baseline), "to", max(Baseline), "Image.png", sep = "_")),
        height = 1000, width = 500)
    on.exit(dev.off())
  }
  if (missing(Main)) Main <- paste(Call$InputDat, "with Baseline\n", min(Baseline), "to", max(Baseline), sep = " ")
  # BUGFIX: the baseline window previously used `Year > Baseline[2]`, which
  # selected the years AFTER the baseline instead of inside it; the Baseline
  # argument and the generated titles both describe an inclusive min-to-max
  # window.
  BsLnDat <- InputDat@Ts[InputDat@Year >= Baseline[1] & InputDat@Year <= Baseline[2]]
  # Per-month (row) means over the baseline years: 12 climatology values.
  BsLnDat <- apply(matrix(data = BsLnDat, nrow = 12, byrow = FALSE), 1, mean)
  # Full series as a 12 x n.years matrix (months in rows); the duplicate
  # computation of this matrix in the original has been removed.
  Dat <- matrix(data = InputDat@Ts, nrow = 12, byrow = FALSE)
  ID <- (Dat - BsLnDat) # anomaly relative to the monthly baseline climatology
  if (missing(Colors)) Colors <- GenerateColors(ColVar)
  # Expand the endpoint colours into a 256-step diverging palette around gray.
  Colors <- two.colors(n = 256, start = Colors[1], end = Colors[length(Colors)], middle = "gray89", alpha = 1.0)
  Breaks <- SetBreaks(ID, "diff", Colors)
  image.plot(z = ID, x = 1:12, y = unique(InputDat@Year), xlab = "Month",
             ylab = Ylab, col = Colors, main = Main, cex.lab = cexMult, cex.axis = cexMult,
             cex.main = cexMult, legend.mar = 7.1, breaks = Breaks)
}
|
cfb57a4a2c4fd7e00e0c8eb6820b9a49aae1b713 | 769898772e7225264fd942b2e5a666af3105d3a1 | /R/fuzzySum.R | 1122df9ea51e5ab598e6b4bc4df3dfb48096297a | [] | no_license | cran/spatialEco | 3fa4393496453b091c547cc7601a984e54bf2be6 | 22944d790b25451c848d420b61d386471073b1ee | refs/heads/master | 2023-07-08T05:04:12.117110 | 2023-06-30T07:40:02 | 2023-06-30T07:40:02 | 30,218,937 | 5 | 3 | null | null | null | null | UTF-8 | R | false | false | 611 | r | fuzzySum.R | #' @title Fuzzy Sum
#' @description Calculates the fuzzy sum of a vector
#'
#' @param x Vector of values to apply fuzzy sum
#'
#' @return Value of fuzzy sum
#'
#' @note
#' The fuzzy sum is an increasing linear combination of values.
#' This can be used to sum probabilities or results of multiple
#' density functions.
#'
#' @author Jeffrey S. Evans <jeffrey_evans@@tnc.org>
#'
#' @examples
#' p = c(0.8,0.76,0.87)
#' fuzzySum(p)
#' sum(p)
#'
#' p = c(0.3,0.2,0.1)
#' fuzzySum(p)
#' sum(p)
#'
#' @export
fuzzySum <- function(x) {
  # Complement of the product of complements: equivalent to combining the
  # values as independent probabilities of "at least one event".
  1 - prod(1 - x)
}
|
4df2b744137981779b1c3edfce24d71978243467 | 9719c43e784f48e79c81c7151ada584c105bbe11 | /man/fDgeSeaAnalysis.Rd | e53ab9c92aa9a7b0e6a5a14d4eb79db5608cb47e | [
"Apache-2.0"
] | permissive | edavidaja/FacileAnalysis | ade3de9b07fb4d614a04dce7783843dfc57d5ce4 | 8f96cdf41904d606f81294f4ff169c658113dd86 | refs/heads/main | 2023-09-04T21:15:56.014307 | 2021-11-17T20:35:35 | 2021-11-17T20:35:35 | 430,765,832 | 0 | 0 | NOASSERTION | 2021-11-22T15:38:31 | 2021-11-22T15:38:30 | null | UTF-8 | R | false | true | 1,478 | rd | fDgeSeaAnalysis.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fdge-ffsea-analysis-module.R
\name{fDgeSeaAnalysis}
\alias{fDgeSeaAnalysis}
\title{An analysis module that combines differential expression with GSEA.}
\usage{
fDgeSeaAnalysis(input, output, session, rfds, gdb = NULL, ..., debug = FALSE)
}
\description{
This module enables the interactive configuration and execution of
"paired" differential expression and feature set enrichment.
}
\details{
Although the focus of the analysis workflow in this package is based on
singular units of analysis, there are times like this where we almost always
want to perform two analyses together, differential expression and GSEA
on the same contrast. This shiny-module presents an interface to both.
}
\section{Development Thoughts}{
This also gives us the opportunity to exercise different methods of
interaction between independent analysis results. In the DGE and GSEA
scenario, for instance, we might want the linked brushing that happens within
the \code{fdge} volcano to do a "significant gene set search" in the fgsea result.
More specifically, maybe the volcano plot in the \code{dge_view} module should
be able to broadcast the feature brushing in such a way that the "listener"
in the \code{ffsea_vew} module can react to it. Similarly for the \code{ffsea_vew}
module should be able to broadcast the features that are brushed within
it to the \code{dge_vew} volcano and statistics tables ... or not?
}
|
a934c906d0c569e3d6fa13cc304daa40be95d712 | a8bb8fb4c24d7284297cb25827f779453b78cbc7 | /man/oldest_player.Rd | a62dae024ec276411d9dce9f1c5579c3423be2bc | [] | no_license | falhashe/nba.hw5 | 8f091eb3feebf148a9c7a93fe554abe0f5a002cb | 2b8c45b5116a5d1af8dd90d4213ecd211fe09831 | refs/heads/master | 2020-04-28T13:13:48.455970 | 2019-03-12T21:49:16 | 2019-03-12T21:49:16 | 175,294,367 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 445 | rd | oldest_player.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/oldest_player.R
\name{oldest_player}
\alias{oldest_player}
\title{Oldest Player Function}
\usage{
oldest_player(x)
}
\arguments{
\item{years}{A year of interest; valid years extend from 1950 to 2017}
}
\description{
This function allows you to find the oldest player for each year in the nba dataset
}
\examples{
oldest_player(1950) would yield 36
}
\keyword{age}
\keyword{year,}
|
021f286fc2564259577098889ad987f8ca03193f | dca44395dbf60e1743c65bced7b26838bd676781 | /KCDC/GWAS/01.KCHIP_open/KKY_7th/02-1.check.plat.PACplot.R | 5bf37ca9fce4fbe21b2bdfb96aec59b72f0a0993 | [] | no_license | ksmpooh/SungminCode | 1b550c375125ea7869917de337aa093160aa03eb | 33b266b80389664282a2d4d6eb9c2db593442a5f | refs/heads/master | 2023-08-03T16:22:35.085299 | 2023-07-31T16:12:33 | 2023-07-31T16:12:33 | 106,177,934 | 3 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,027 | r | 02-1.check.plat.PACplot.R | setwd("~/Desktop/KCDC/KKY/00.sampleInfo/")
library(stringr)
# Load the CEL-file sample sheet and the (2nd-round) PCA eigenvectors.
cel <- read.table("cel_files.txt",header = T)
#pca <- read.table("PCA.txt",header = T)
pca <- read.table("PCA.2nd.txt",header = T)
# CEL-file name prefixes identify the genotyping vendor:
# 043140 : DANlink
# 5507... : Teragen
DNAlink <- read.table("DANlink.2020.cel.list.txt")
tera <- read.table("2020.7th.tera.cel.list.txt")
# Quick sanity checks on the inputs (interactive inspection).
head(cel)
tail(cel)
head(pca)
head(DNAlink)
head(tera)
# Tag each CEL file with its vendor and stack the two lists.
DNAlink$company <- "DNAlink"
tera$company <- "Teragen"
df <- rbind(DNAlink,tera)
# File names are "_"-delimited; field 4 is the plate, field 6 is "<ID>.CEL".
df$plate <- str_split_fixed(df$V1,"_",6)[,4]
df$ID <- str_replace_all(str_split_fixed(df$V1,"_",6)[,6],".CEL","")
head(str_split_fixed(df$V1,"_",6)[,6])
head(str_replace_all(str_split_fixed(df$V1,"_",6)[,6],".CEL",""))
head(df)
tail(df)
dim(table(df$plate))
head(pca)
# Attach PC coordinates to every sample (sample ID matches the PCA FID column).
df <- merge(df,pca,by.x = 'ID',by.y ='FID')
# First-pass PCA scatter (PC1 vs PC2); red dashed lines mark two candidate sets
# of outlier thresholds, and vendor-coloured points are overplotted on top.
#pdf("1stQC/plotDATA/JG.1st.PCA.pdf", height = 10, width = 10)
plot(df$PC1, df$PC2, col=rgb(0,0,0,0.3)
#     ,xlim=c(-0.25, 0.25), ylim=c(-0.15,0.15)
     #,xlim=c(-0.2, 0.5), ylim=c(-0.2,1)
     , xlab="PC1", ylab="PC2", main="KKY 7th 1st PCA", cex=1.5, pch=20)
abline(v=-0.1, col=rgb(1,0,0,0.5), lty=3, lwd=2)
abline(v=0.1, col=rgb(1,0,0,0.5), lty=3, lwd=2)
abline(h=0.1, col=rgb(1,0,0,0.5), lty=3, lwd=2)
abline(h=-0.1, col=rgb(1,0,0,0.5), lty=3, lwd=2)
abline(v=-0.06, col=rgb(1,0,0,0.5), lty=3, lwd=2)
abline(v=0.05, col=rgb(1,0,0,0.5), lty=3, lwd=2)
abline(h=0.05, col=rgb(1,0,0,0.5), lty=3, lwd=2)
abline(h=-0.07, col=rgb(1,0,0,0.5), lty=3, lwd=2)
# Overlay per-vendor points: Teragen in red, DNAlink in blue.
points(df[df$company == "Teragen",]$PC1,
       df[df$company == "Teragen",]$PC2,
       col = rgb(1,0,0,0.3), cex = 1 , pch = 20)
points(df[df$company == "DNAlink",]$PC1,
       df[df$company == "DNAlink",]$PC2,
       col = rgb(0,0,1,0.3), cex = 1 , pch = 20)
color <- c(
  rgb(1,0,0,1),
  rgb(0,0,1,1))
list <- c("Teragen","DNAlink")
#legend(x = -0.25 ,y = 0.15,list,col = color,cex = 0.7,pch = 16)
legend(x = 0.3 ,y = 0.1,list,col = color,cex = 1,pch = 16)
legend(x = 0 ,y = 0.48,list,col = color,cex = 1,pch = 16)
# NOTE(review): the pdf() above is commented out, so this dev.off() closes the
# interactive graphics device -- confirm that is intended.
dev.off()
# Samples outside the tighter box: PC1 in [-0.07, 0.05], PC2 in [-0.06, 0.05].
rmlist <- df[0.05<df$PC1 | -0.07 > df$PC1 | df$PC2 < -0.06 | 0.05 < df$PC2,]
dim(rmlist)
# Flag samples far to the left on PC1 (< -0.05) and summarise, per plate, how
# many outliers each plate contributes relative to its total sample count.
pdf("plate_check/leftside_plate_check.txt.pdf", height = 10, width = 10)
plot(df$PC1, df$PC2, col=rgb(0,0,0,0.3)
#     ,xlim=c(-0.25, 0.25), ylim=c(-0.15,0.15)
     #,xlim=c(-0.2, 0.5), ylim=c(-0.2,1)
     , xlab="PC1", ylab="PC2", main="KKY 7th 1st.PCA", cex=1.5, pch=20)
abline(v=-0.05, col=rgb(1,0,0,0.5), lty=3, lwd=2)
#abline(v=0.06, col=rgb(1,0,0,0.5), lty=3, lwd=2)
#abline(h=0.06, col=rgb(1,0,0,0.5), lty=3, lwd=2)
abline(h=-0.05, col=rgb(1,0,0,0.5), lty=3, lwd=2)
# Highlight the left-side samples in solid blue.
points(df[df$PC1 < -0.05,]$PC1,
       df[df$PC1 < -0.05,]$PC2,
       col = rgb(0,0,1,1), cex = 1 , pch = 20)
dev.off()
rmlist <- df[-0.05 > df$PC1 ,]
table(rmlist$plate)
as.data.frame(t(table(rmlist$plate)))
# Outlier counts per plate (plate_check) vs total samples per plate (ori_plate_check).
plate_check <- as.data.frame(t(table(rmlist$plate)))
ori_plate_check <- as.data.frame(t(table(df$plate)))
head(ori_plate_check)
plate_check$ori_freq <- 0
# Look up each affected plate's total sample count.
for (i in plate_check$Var2) {
  print(i)
  plate_check[plate_check$Var2 == i,"ori_freq"] <- ori_plate_check[ori_plate_check$Var2 == i,]$Freq
}
# Interactive left-overs from developing the loop above (use the last `i`).
ori_plate_check[ori_plate_check$Var2 == i,]$Freq
head(plate_check)
plate_check[plate_check$Var2 == i,"Freq"]
# Keep plate / outlier count / total count, rename, and write out the summary.
plate_check <- plate_check[,c("Var2","Freq","ori_freq")]
colnames(plate_check) <- c("plate","outliner_freq","number.of.sample")
write.table(plate_check,"plate_check/leftside_plate_check.txt",col.names = T,row.names = F,quote = F,sep = "\t")
# Same scatter with symmetric +/-0.06 thresholds; outliers drawn in solid red.
# NOTE(review): this pdf() re-uses the same output file name as the previous
# section, so it overwrites plate_check/leftside_plate_check.txt.pdf -- confirm intended.
pdf("plate_check/leftside_plate_check.txt.pdf", height = 10, width = 10)
plot(df$PC1, df$PC2, col=rgb(0,0,0,0.3)
#     ,xlim=c(-0.25, 0.25), ylim=c(-0.15,0.15)
     #,xlim=c(-0.2, 0.5), ylim=c(-0.2,1)
     , xlab="PC1", ylab="PC2", main="KKY 7th 1st.PCA", cex=1.5, pch=20)
abline(v=-0.06, col=rgb(1,0,0,0.5), lty=3, lwd=2)
abline(v=0.06, col=rgb(1,0,0,0.5), lty=3, lwd=2)
abline(h=0.06, col=rgb(1,0,0,0.5), lty=3, lwd=2)
abline(h=-0.06, col=rgb(1,0,0,0.5), lty=3, lwd=2)
points(df[df$PC1 < -0.06 | df$PC1 > 0.06| df$PC2 < -0.06| df$PC2 > 0.06,]$PC1,
       df[df$PC1 < -0.06 | df$PC1 > 0.06| df$PC2 < -0.06| df$PC2 > 0.06,]$PC2,
       col = rgb(1,0,0,1), cex = 1 , pch = 20)
dev.off()
head(df)
rmlist <- df[0.06<df$PC1 | -0.06 > df$PC1 | df$PC2 < -0.06 | 0.06 < df$PC2,]
# NOTE(review): df has no "type" column in this script (V1/company/plate/ID/PCs),
# so this tabulates NULL -- was plate or company meant?
table(rmlist$type)
head(rmlist)
########## missing-het PCA all KKY 7th
# Per-sample QC: missingness vs heterozygosity, presumably from PLINK
# --missing (.imiss) and --het (.het) output -- confirm upstream pipeline.
setwd("~/Desktop/KCDC/KKY/QC/")
miss <-read.table("missing.imiss",header = T)
het <- read.table("het.het", header = T)
# Derive call rate (%) and heterozygosity rate (%) per sample.
miss <- cbind(miss, CR=((1 - miss$F_MISS)*100))
het <- cbind(het, HET=((het$N.NM. - het$O.HOM.)/het$N.NM.)*100)
lowSample <- merge(miss, het, by="FID")
lowSample <- merge(lowSample,df,by.x="FID",by.y = "ID")
head(lowSample)
head(df)
# Missingness vs heterozygosity; cut-offs HET in [15.5, 17], F_MISS <= 0.03.
#pdf("../PDF/KKY.1st_missing-het.pdf", height = 7, width = 10)
plot(lowSample$HET, lowSample$F_MISS,
     #xlim=c(13,22), ylim=c(0,0.1),
     xlab="heterozygosity rate",
     ylab="missing rate", main="Missing vs heterozygosity", col=rgb(0,0,0,0.3), cex=1, pch=16)
abline(v=15.5, col=rgb(1,0,0,1), lty=3, lwd=2)
abline(v=17, col=rgb(1,0,0,1), lty=3, lwd=2)
abline(h=0.03, col=rgb(1,0,0,1), lty=3, lwd=2)
# Failing samples first, then vendor overlays (DNAlink blue, Teragen red).
points(lowSample[lowSample$HET < 15.5 | 17 < lowSample$HET | 0.03 < lowSample$F_MISS,]$HET,
       lowSample[lowSample$HET < 15.5 | 17 < lowSample$HET | 0.03 < lowSample$F_MISS,]$F_MISS,
       col=rgb(1,0,0,0.3), cex=1.5, pch=16)
points(lowSample[lowSample$company == "DNAlink",]$HET,
       lowSample[lowSample$company == "DNAlink",]$F_MISS,
       col = rgb(0,0,1,0.3), cex = 1 , pch = 16)
points(lowSample[lowSample$company == "Teragen",]$HET,
       lowSample[lowSample$company == "Teragen",]$F_MISS,
       col = rgb(1,0,0,0.3), cex = 1 , pch = 16)
color <- c(
  rgb(1,0,0,1),
  rgb(0,0,1,1))
list <- c("Teragen","DNAlink")
#legend(x = -0.25 ,y = 0.15,list,col = color,cex = 0.7,pch = 16)
legend(x = 20 ,y = 0.17,list,col = color,cex = 1,pch = 16)
# NOTE(review): no pdf() is open here (it is commented out above), so this
# closes the interactive device -- confirm intended.
dev.off()
# Samples failing the missingness/heterozygosity cut-offs, counted per vendor.
rmList <- lowSample[0.03 < lowSample$F_MISS | lowSample$HET < 15.5 | 17 < lowSample$HET,]
table(rmList$company)
#dim(rmList)
# Repeat the PCA scatter using the 1st-round PCA file, coloured by vendor.
pca <- read.table("PCA.txt",header = T)
pca <- merge(pca,df,by.x = "FID",by.y = "ID")
head(pca)
plot(pca$PC1, pca$PC2, col=rgb(0,0,0,0.3)
#     ,xlim=c(-0.25, 0.25), ylim=c(-0.15,0.15)
     #,xlim=c(-0.2, 0.5), ylim=c(-0.2,1)
     , xlab="PC1", ylab="PC2", main="KKY 7th 1st.PCA", cex=1, pch=16)
abline(v=-0.07, col=rgb(1,0,0,0.5), lty=3, lwd=2)
abline(v=0.05, col=rgb(1,0,0,0.5), lty=3, lwd=2)
abline(h=0.05, col=rgb(1,0,0,0.5), lty=3, lwd=2)
abline(h=-0.06, col=rgb(1,0,0,0.5), lty=3, lwd=2)
# Vendor overlays: DNAlink blue, Teragen red.
points(pca[pca$company == "DNAlink",]$PC1,
       pca[pca$company == "DNAlink",]$PC2,
       col = rgb(0,0,1,0.3), cex = 1 , pch = 16)
points(pca[pca$company == "Teragen",]$PC1,
       pca[pca$company == "Teragen",]$PC2,
       col = rgb(1,0,0,0.3), cex = 1 , pch = 16)
color <- c(
  rgb(1,0,0,1),
  rgb(0,0,1,1))
list <- c("Teragen","DNAlink")
#legend(x = -0.25 ,y = 0.15,list,col = color,cex = 0.7,pch = 16)
legend(x = 0 ,y = 0.4,list,col = color,cex = 1,pch = 16)
# NOTE(review): no pdf() was opened for this plot, so this closes the
# interactive device -- confirm intended.
dev.off()
head(df)
# Outliers outside PC1 in [-0.07, 0.05] or PC2 in [-0.06, 0.05], per vendor.
rmlist <- pca[0.05<pca$PC1 | -0.07 > pca$PC1 | pca$PC2 < -0.06 | 0.05 < pca$PC2,]
table(rmlist$company)
|
e23085dcfc2389de5a66eae7b2e7ebc64962efd4 | b19024ee9cb6192a960488819176ad08aa5bf38d | /2์ฐจ ํํ๋ก์ ํธR/rdata 1์ฐจ ๋ถ์/code.R | 25c1d59dddaf5fb6492c2acdbf9651854cf88b45 | [] | no_license | jihyeyun00/workR2 | 43e7387640e2fe34b35bbff2b364a16f9d06da67 | 1c9600c81718cbc499df7bdc904378910f64dafb | refs/heads/master | 2020-09-19T23:48:01.423697 | 2019-12-26T10:21:22 | 2019-12-26T10:21:22 | 224,325,705 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,471 | r | code.R | #1.
# 2016 monthly domestic visitor arrivals to Jeju (source: public data portal).
# NOTE(review): the Korean comments and labels in this file are encoding-damaged
# (mojibake). Bare continuation lines produced by the corruption broke R
# parsing; they have been merged back into comments and rewritten in English.
# Garbled characters inside runtime strings are preserved unchanged.
## 1. Load and clean the 2016 file.
setwd("C:/jeju")
visit2016 <- read.csv("์ ์ฃผํน๋ณ์์น๋_๋ด๊ตญ์ธ๊ด๊ด๊ฐํํฉ_2016.csv",header=T)
visit2016 <- visit2016[1:3,]
visit2016
# Transpose so rows are months; keep rows 4-15 (the 12 monthly rows).
visit2016 <- as.data.frame(t(visit2016))
visit2016
visit2016 <- visit2016[4:15,]
colnames(visit2016) <- c("๊ฐ๋ณ์ฌํ","๋ถ๋ถํจํค์ง","ํจํค์ง")
visit2016
str(visit2016)
for(i in 1:ncol(visit2016)){ # factor -> character vector
  visit2016[,i] <- as.vector(visit2016[,i])
}
str(visit2016)
for(i in 1:ncol(visit2016)){ # character -> numeric
  visit2016[,i] <- as.numeric(visit2016[,i])
}
str(visit2016)
visit2016 <- data.frame(visit2016,sum=rowSums(visit2016)) # monthly total over the three travel types
visit2016
# 2017 monthly domestic visitor arrivals (same cleaning steps as 2016).
# Corrupted comment-continuation lines were merged back into comments.
## 1. Load and clean the file.
visit2017 <- read.csv("์ ์ฃผํน๋ณ์์น๋_๋ด๊ตญ์ธ๊ด๊ด๊ฐํํฉ_20171231.csv",header=T)
# NOTE(review): column 15 is dropped -- presumably a totals column; confirm
# against the CSV layout.
visit2017 <- visit2017[1:3,-15]
visit2017
visit2017 <- as.data.frame(t(visit2017))
visit2017
visit2017 <- visit2017[3:14,]
colnames(visit2017) <- c("๊ฐ๋ณ์ฌํ","๋ถ๋ถํจํค์ง","ํจํค์ง")
visit2017
str(visit2017)
for(i in 1:ncol(visit2017)){ # factor -> character vector
  visit2017[,i] <- as.vector(visit2017[,i])
}
str(visit2017)
for(i in 1:ncol(visit2017)){ # character -> numeric
  visit2017[,i] <- as.numeric(visit2017[,i])
}
str(visit2017)
visit2017 <- data.frame(visit2017,sum=rowSums(visit2017)) # monthly total over the three travel types
visit2017
# 2018 monthly domestic visitor arrivals (same cleaning steps as 2017).
# Corrupted comment-continuation lines were merged back into comments.
## 1. Load and clean the file.
visit2018 <- read.csv("์ ์ฃผํน๋ณ์์น๋_๋ด๊ตญ์ธ๊ด๊ด๊ฐํํฉ_20181231.csv",header=T)
visit2018 <- visit2018[1:3,-15]
visit2018
visit2018 <- as.data.frame(t(visit2018))
visit2018
visit2018 <- visit2018[3:14,]
colnames(visit2018) <- c("๊ฐ๋ณ์ฌํ","๋ถ๋ถํจํค์ง","ํจํค์ง")
visit2018
str(visit2018) # BUG FIX: original inspected visit2016 here (copy-paste slip)
for(i in 1:ncol(visit2018)){ # factor -> character vector
  visit2018[,i] <- as.vector(visit2018[,i])
}
str(visit2018)
for(i in 1:ncol(visit2018)){ # character -> numeric
  visit2018[,i] <- as.numeric(visit2018[,i])
}
str(visit2018)
visit2018 <- data.frame(visit2018,sum=rowSums(visit2018)) # monthly total over the three travel types
visit2018
# Line chart of the monthly totals by year: 2017 drawn first (black),
# then 2016 in blue and 2018 in red on the same axes.
# NOTE(review): the Korean title is mojibake from the original file; a line
# break the corruption introduced inside the string literal was removed.
month <- 1:12
late1 <- visit2016$sum
late2 <- visit2017$sum
late3 <- visit2018$sum
plot(month,late2,type="l",main="์๋ณ ๊ตญ๋ด ๊ด๊ด๊ฐ ์๋ ํํฉ") # 2017 series
lines(month,late1,type="l",col="blue") # 2016 series
lines(month,late3,type="l",col="red") # 2018 series
#------------------------------------------------------------------------------------------------------
# Yearly domestic visitor arrivals to Jeju, 2013-2018 (public data portal).
# NOTE(review): corruption-introduced line breaks inside the file name and
# title strings were removed; the garbled Korean characters are kept as-is.
visit <- read.csv("์ ์ฃผํน๋ณ์์น๋_์ฐ๋๋ณ๊ด๊ด๊ฐ์๋ํํฉ_2013~2018.csv",header=F);visit
visit <- visit[2,2:7];visit
visit <- t(visit);visit
visit <- as.numeric(visit)
visit <- visit/1000 # rescale; the y-axis label claims millions -- confirm raw unit
year <- 2013:2018
visit <- data.frame(year,visit)
visit
library(ggplot2) # bar chart of yearly arrivals with value labels
ggplot(visit,aes(x=year,y=visit))+
  geom_bar(stat="identity",width=0.75,fill="steelblue")+
  ggtitle("๋๋๋ณ ๊ตญ๋ด ๊ด๊ด๊ฐ ์๋ ํํฉ") +
  theme(plot.title=element_text(size=30,face="bold",colour="steelblue"))+
  labs(x="์ฐ๋",y="๊ด๊ด๊ฐ ์ (๋จ์: ๋ฐฑ๋ง)")+
  geom_text(aes(label=visit,vjust=-1,hjust=0,),colour="red")
#------------------------------------------------------------------------------------------------------
# Survey: dissatisfaction of domestic visitors to Jeju (Jeju Tourism Organization).
# NOTE(review): the original Korean commentary here was encoding-damaged; it
# cited 2018 survey findings (e.g. a 63.6% figure for visitors reporting no
# inconvenience, with high prices the most common complaint) -- verify against
# the source survey.
dislike1 <- read.csv("๊ตญ๋ด์ฌํ๊ฐ๋ถ๋ง์กฑ1.csv")
head(dislike1)
dislike1 <- dislike1[1,]
dislike1 <- dislike1[,-c(1:3)]
rownames(dislike1) <- "percentage"
# NOTE(review): corruption-introduced line breaks inside label strings removed.
colnames(dislike1) <- c("์ฌํ์ ๋ณด์ต๋ ์ด๋ ค์","๊ด๊ด์ ๋ณด ๋ถ์ ํ","์๋น์ค ๋ถ์น์ ","์๋ดํ์งํ ๋ถ์ ํ","์๋น ๋ถ์ฒญ๊ฒฐ","์์ ๋ง์ง ์๋ ์์","๋น์ผ ๋ฌผ๊ฐ")
dislike1
dislike2 <- read.csv("๊ตญ๋ด์ฌํ๊ฐ๋ถ๋ง์กฑ2.csv")
head(dislike2)
dislike2 <- dislike2[1,]
dislike2 <- dislike2[,-c(1:3)]
dislike2
rownames(dislike2) <- "percentage" # fixed typo "percetage" in the original
colnames(dislike2) <- c("๋์ค๊ตํต ๋ถํธ์ฑ","ํ์ ์๋น์ค ๋ถํธ","๊ด๊ด ๊ฐ์ด๋ ์๋น์ค","๋ค์ํ์ง ๋ชปํ ์ผํ","์ํ ๊ตฌ์๊ฐ์","๊ธฐํ","๋ง์กฑ")
dislike2
# Combine both halves, transpose to one row per reason, sort, keep the top six.
dislike <- data.frame(dislike1,dislike2);dislike
dislike <- t(dislike);dislike
names <- rownames(dislike)
dislike <- data.frame(reason=names,dislike)
dislike
order(dislike$percentage)
dislike <- dislike[order(dislike$percentage,decreasing=T),]
dislike <- head(dislike,6)
library(ggplot2) # bar chart of the top dissatisfaction reasons
ggplot(dislike,aes(x=reason,y=percentage))+
  geom_bar(stat="identity",width=0.51,fill="steelblue") +
  ggtitle("์ ์ฃผ ๊ตญ๋ด๋ฐฉ๋ฌธ๊ฐ ๋ถ๋ง์กฑ ์ฌํญ")+
  theme(plot.title=element_text(size=30,face="bold",colour="steelblue"))+
  labs(x="์ฐ๋",y="ํผ์ผํธ")
# NOTE(review): the Korean notes below are encoding-damaged (mojibake); line
# breaks introduced by the corruption left bare non-comment lines that broke
# parsing. They have been merged back onto their comment lines unchanged.
#์ฐ๋ฆฌ๋๋ผ ๊ด๊ด๊ฐ์ ๋ณํ ๋ฏธ๋ฏธ -> ์ธ๊ตญ์ธ ๊ด๊ด๊ฐ์ ์ค์์ผ๋ ๋๋ถ๋ถ์ ์ํฅ์ ์ค๊ตญ์ ์ฌ๋ ๋๋ฌธ -> ์ฐ๋ฆฌ๊ฐ ํด๊ฒฐํ ์ ์๋๊ฒ์ ์๋ค.
#-> ๋ค๋ฅธ ๋๋ผ ์ฌ๋ก
# http://www.consumerdata.co.kr/news/articleView.html?idxno=3842
#๊ตญ๋ด๋ฐฉ๋ฌธ๊ฐ์ ๊ตญ๋ด์ฌํ ์ค ์ ์ฃผ๋ ๋ ๋ช์์ธ๊ฐ?
#ํธ์ผ : ๊ณ์ ์ฆ๊ฐ, ์๋๋ถํฐ๋ ์ค๊ตญ๊ด๊ด๊ฐ ์์ ๊ฐ์๋ก ํ๋ฝ
# ์ ๋ฐ ์ ๋ณต ์ฌ๊ณ ๋ก ์ค๊ตญ์ธ ๊ด๊ด๊ฐ ๊ธ๊ฐ
# ์ค๊ตญ์ธ ๊ด๊ด๊ฐ ํ๋ณต์ ์ํด ์ฌ์ฆ์ ์ฒญ์์๋ฃ+๋ฉด์ ์ ๋ ์ฌ์ํ ์ ๋ถ์ ์์ฒญ ์ค
#1. ๊ฒฝ์ ์ฒ ๊ฐ๋ฐ ( 2023๋ ์๊ณต ) - > ์ด๋ ์๊ฐ ๋จ์ถ
#2. ์ ๊ณตํญ ๊ฑด์ค (2023๋ ์๊ณต )
a8e1d1046d22dcf1a345ca118abdc29a33050edc | 08d71b55cabd0d7a04bfe49ae9f04a9da3482103 | /R/R_profile.R | 6ee3afcdd89c073d6c66318d2893ada85ef0d4c4 | [] | no_license | MalteThodberg/ThodbergMisc | 533129f735c0b407063b38caf272f9625a44b208 | d1218db1057c266f577187b228a0b8ae93bc8dea | refs/heads/master | 2022-08-14T10:27:51.181090 | 2022-08-02T11:25:59 | 2022-08-02T11:25:59 | 87,921,885 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 597 | r | R_profile.R | #' Create .Rprofile
#'
#' Copy a template .Rprofile to the home directory
#'
#' @details ThodbergMisc/extdata contains a template for an .Rprofile file for an effective R-setup that does not influence code execution. This file will try to adapt startup settings to whether R is run from a terminal or RStudio.
#' @export
create_Rprofile <- function(){
  # Locate the packaged template and the destination dotfile.
  source_path <- file.path(find.package("ThodbergMisc"), "extdata/template.R")
  dest_path <- file.path("~/.Rprofile")
  # Copy without overwriting so an existing .Rprofile is never clobbered.
  # Returns the logical from file.copy() indicating whether the copy happened.
  file.copy(from = source_path, to = dest_path, overwrite = FALSE, recursive = FALSE)
}
|
69f2ffc625343bb48241ebdfa776c59864d21d1f | bc6fc11685a58ac09ae8cfc286ec3eee68729324 | /060-ggplot/output/purl/02-1-scales.R | 735b3cc97b2b6040f556808f3d37eecd40c4f0ed | [] | no_license | quantide/qtraining | 2803fe93446931c43b70ecd7ed644f01c79ece85 | 846043fcc8207da7b5dbf5bd9a7d4b94111d5132 | refs/heads/master | 2021-09-15T10:57:47.797783 | 2018-05-30T09:13:43 | 2018-05-30T09:13:43 | 56,151,061 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,990 | r | 02-1-scales.R | ## ----first, include=TRUE, purl=TRUE, message=FALSE-----------------------
# NOTE(review): this file is purl() output generated from a course Rmd; edits
# made here will be lost on regeneration -- prefer editing the source Rmd.
require(dplyr)
require(ggplot2)
require(qdata) # provides the `bands` example data set
data(bands)
## ----scales_structure_1--------------------------------------------------
# Implicit scales: ggplot adds a default scale for every mapped aesthetic.
ggplot(data=bands, mapping=aes(x=humidity, y=viscosity)) +
  geom_point(mapping = aes(colour=band_type))
## ----scales_structure_2--------------------------------------------------
# The same plot with its default scales written out explicitly.
ggplot(data=bands, mapping=aes(x=humidity, y=viscosity)) +
  geom_point(mapping = aes(colour=band_type)) +
  scale_x_continuous() +
  scale_y_continuous() +
  scale_color_discrete()
## ----scale_colour_discrete-----------------------------------------------
# Tune the default discrete colour scale (luminance l = 45).
ggplot(data=bands, mapping=aes(x=humidity, y=viscosity)) +
  geom_point(mapping = aes(colour=band_type)) +
  scale_color_discrete(l = 45)
## ----colour_aes_discrete-------------------------------------------------
# Colour mapped to a discrete variable (press_type).
ggplot(data = bands, mapping = aes(x = humidity, y = viscosity, colour = press_type)) +
  geom_point()
## ----scale_colour_hue_1--------------------------------------------------
# scale_colour_hue() is the default scale for discrete colours.
ggplot(data = bands, mapping = aes(x = humidity, y = viscosity, colour = press_type)) +
  geom_point() +
  scale_color_hue()
## ----scale_colour_hue_2--------------------------------------------------
ggplot(data = bands, mapping = aes(x = humidity, y = viscosity, colour = press_type)) +
  geom_point() +
  scale_colour_hue(l=45)
## ----scale_colour_brewer-------------------------------------------------
# ColorBrewer palette.
ggplot(data = bands, mapping = aes(x = humidity, y = viscosity, colour = press_type)) +
  geom_point() +
  scale_colour_brewer(palette="Set1")
## ----scale_colour_grey_1-------------------------------------------------
# Greyscale palette (and below, with custom start/end greys).
ggplot(data = bands, mapping = aes(x = humidity, y = viscosity, colour = press_type)) +
  geom_point() +
  scale_colour_grey()
## ----scale_colour_grey_2-------------------------------------------------
ggplot(data = bands, mapping = aes(x = humidity, y = viscosity, colour = press_type)) +
  geom_point() +
  scale_colour_grey(start=0.7, end=0)
## ----scale_colour_manual_1-----------------------------------------------
# Hand-picked colours, assigned to the factor levels in order.
# named colours
ggplot(data = bands, mapping = aes(x = humidity, y = viscosity, colour = press_type)) +
  geom_point() +
  scale_colour_manual(values = c("magenta", "dark turquoise", "dodger blue", "lime green"))
# RGB colour code
ggplot(data = bands, mapping = aes(x = humidity, y = viscosity, colour = press_type)) +
  geom_point() +
  scale_colour_manual(values = c("#FF00FF", "#00CED1", "#1E90FF", "#32CD32"))
## ----press_type_levels---------------------------------------------------
levels(bands$press_type)
## ----scale_colour_manual_2-----------------------------------------------
# Colours can also be matched to specific levels by name.
ggplot(data = bands, mapping = aes(x = humidity, y = viscosity, colour = press_type)) +
  geom_point() +
  scale_colour_manual(values = c(ALBERT70 = "lime green", MOTTER70 = "dark turquoise", MOTTER94 = "magenta", WOODHOE70 = "dodger blue"))
## ----colour_aes_continuous-----------------------------------------------
# Mapping colour to a continuous variable gives a gradient scale by default.
ggplot(data = bands, mapping = aes(x = humidity, y = viscosity, colour = solvent_pct)) +
  geom_point()
## ----scale_colour_gradient-----------------------------------------------
# Two-colour gradient (low -> high).
# scale_colour_gradient()
ggplot(data = bands, mapping = aes(x = humidity, y = viscosity, colour = solvent_pct)) +
  geom_point() +
  scale_colour_gradient(low="black", high="white")
## ----scale_colour_gradient2----------------------------------------------
# Diverging gradient with a midpoint colour.
# scale_colour_gradient2()
ggplot(data = bands, mapping = aes(x = humidity, y = viscosity, colour = solvent_pct)) +
  geom_point() +
  scale_colour_gradient2(low="red", mid="white", high="blue")
## ----scale_colour_gradientn_1--------------------------------------------
# n-colour gradient built from an arbitrary colour vector.
# scale_colour_gradientn()
ggplot(data = bands, mapping = aes(x = humidity, y = viscosity, colour = solvent_pct)) +
  geom_point() +
  scale_colour_gradientn(colours = c("darkred", "orange", "yellow", "white"))
## ----scale_colour_gradientn_2--------------------------------------------
ggplot(data = bands, mapping = aes(x = humidity, y = viscosity, colour = solvent_pct)) +
  geom_point() +
  scale_colour_gradientn(colours = terrain.colors(n=8))
## ----ChickWeightMean-----------------------------------------------------
# Mean chick weight per time point and diet (diets relabelled A-D).
# data
ChickWeightMean <- ChickWeight %>%
  group_by(Time, Diet) %>%
  summarize(weight=mean(weight)) %>%
  mutate(Diet = factor(Diet, levels=1:4, labels=c("A","B","C","D")))
## ----linetype_aes--------------------------------------------------------
# Line type mapped to the Diet factor.
ggplot(data=ChickWeightMean, mapping=aes(x=Time, y=weight)) +
  geom_line(mapping=aes(linetype=Diet))
## ----scale_linetype_manual-----------------------------------------------
# Choose line types explicitly by number.
ggplot(data=ChickWeightMean, mapping=aes(x=Time, y=weight)) +
  geom_line(mapping=aes(linetype=Diet)) +
  scale_linetype_manual(values=c(3,4,5,6))
## ----scale_shape_manual--------------------------------------------------
# Choose point shapes explicitly by number.
ggplot(data=bands, mapping=aes(x=humidity, y=viscosity, shape=press_type)) +
  geom_point() +
  scale_shape_manual(values = c(12, 13, 8, 3))
## ----size_aes------------------------------------------------------------
# Point size mapped to a continuous variable.
ggplot(data=bands, mapping=aes(x=humidity, y=viscosity, size=ink_pct)) +
  geom_point()
## ----scale_size----------------------------------------------------------
# scale_size() scales by area over the given size range.
ggplot(data=bands, mapping=aes(x=humidity, y=viscosity, size=ink_pct)) +
  geom_point() +
  scale_size(range=c(2, 4))
## ----scale_size_area_1---------------------------------------------------
# scale_size_area() ensures the value 0 maps to zero area.
ggplot(data=bands, mapping=aes(x=humidity, y=viscosity, size=ink_pct)) +
  geom_point() +
  scale_size_area()
## ----scale_size_area_2---------------------------------------------------
ggplot(data=bands, mapping=aes(x=humidity, y=viscosity, size=ink_pct)) +
  geom_point() +
  scale_size_area(max_size = 3)
## ----scale_radius--------------------------------------------------------
# scale_radius() maps the variable to point radius rather than area.
ggplot(data=bands, mapping=aes(x=humidity, y=viscosity, size=ink_pct)) +
  geom_point() +
  scale_radius()
|
f8f4d1d1dc5b86eff0c8ef5a4ea10f806eb2dcf0 | 3b9eedc54756c672d406daf55884c56a21479c10 | /CacheSolve.R | 374e86caee769ada7f5aca5ce80c28b3cdea56a9 | [] | no_license | adityakiranm/ProgrammingAssignment2 | 783392ea612b8f98919d673a30cfcee3ce9886f7 | 706dc9e103874d5dd6ee9454a296cb048f7af47c | refs/heads/master | 2020-05-22T02:36:48.091036 | 2017-03-12T05:22:02 | 2017-03-12T05:22:02 | 84,664,255 | 0 | 0 | null | 2017-03-11T16:34:39 | 2017-03-11T16:34:39 | null | UTF-8 | R | false | false | 387 | r | CacheSolve.R | # CacheSolve function will return the inverse of a matrix. If the Inverse for the
# matrix is already available in Cache, the function will return the same and NOT
# re-compute the same again
## Return the inverse of the special "matrix" object created by
## makeCacheMatrix(). On the first call the inverse is computed with solve()
## and stored in the object's cache; subsequent calls return the cached copy
## instead of recomputing it.
##
## x   : a list exposing getMatrix()/getInverse()/setInverse() accessors
## ... : extra arguments forwarded to solve() (e.g. tol)
cacheSolve <- function(x, ...) {
  m <- x$getInverse()
  if(!is.null(m)) {
    message("getting cached data")
    return(m)
  }
  data <- x$getMatrix()
  # BUG FIX: `...` was accepted but silently ignored; forward it to solve().
  m <- solve(data, ...)
  x$setInverse(m)
  m
}
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.