blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
6534f9570e23c9e042378ee079d2608f8be4b6d2
|
66fcd73c639c308b030f834ab3ece0725cfd712c
|
/SRC/Consolidado/Comparacion_TCGA.R
|
5b17700c7dbaaecd40c22ea19ab0c6e16ae6ea55
|
[] |
no_license
|
chrismazzeo/Tesis_Marcadores_Glicoinmunologicos
|
ae871350a259afb7c08cde70d76e262bddd49e4e
|
2626a75ceedb2c59531fe39f02f5590ac8ebea85
|
refs/heads/master
| 2023-06-30T13:54:01.223662
| 2021-08-03T03:28:47
| 2021-08-03T03:28:47
| 286,357,178
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,792
|
r
|
Comparacion_TCGA.R
|
# This script draws the consolidated TCGA comparison heatmaps ("pretty heatmaps").
# NOTE(review): `filter`, `settings.graphics.colors` and `HELPER_SAVE_DATA_FRAME`
# are not defined anywhere in this file; presumably a companion script defines
# them before this one runs -- confirm. If `filter` is not bound to a logical
# flag, `if (filter)` below resolves to the stats::filter function and errors.
library(readr)
library(writexl)
library(RColorBrewer)
library(pheatmap)
# Alternative input tables, kept commented out for reference:
#dataset <- read_delim("/Volumes/Externo/Google Drive/Bioinformática/PFI/Tesina/Tesis Final/Resultados Finales/Consolidación/Tablas/FinalMergeLimpia_comparativa_ratones2.csv", ";", escape_double = FALSE, trim_ws = TRUE)
#dataset <- read_delim("Results/Tablas/FinalMergeLimpia_humanos.csv", ";", escape_double = FALSE, trim_ws = TRUE)
dataset <- read_delim("~/Desktop/shares CCRAC/Todos los modelos-Tabla 1.csv", ";", escape_double = FALSE, trim_ws = TRUE)
View(dataset)
# Output file names (written one directory above the working directory).
pdfOutput = "ratones_All_modelos.pdf"
dataFrameOutput= "resultRatonesModelos.csv"
# Minimum absolute log2 fold change a row must reach to be kept when filtering.
minLog2FC = 2
dataset = as.data.frame(dataset)
# Colour scale: `heatmap.breakUp` red bins for up-regulation,
# `heatmap.breakDown` blue bins for down-regulation, white for zero and NA.
heatmap.breakUp = 10
heatmap.breakDown = 7
heatmap.colorUp = colorRampPalette((brewer.pal(n = 8, name = "Reds")))(heatmap.breakUp)
heatmap.colorDown = colorRampPalette(rev(brewer.pal(n = 8, name = "Blues")))(heatmap.breakDown)
heatmap.colorZero = "white"
heatmap.colorNa = "white"
heatmap.colors=c( heatmap.colorDown,heatmap.colorZero,heatmap.colorUp)
a = dataset
if (filter){ # used to filter out rows that do not meet the log2FC threshold
  # remove rows without any |log2FC| > minLog2FC
  a = dataset[,3:dim(dataset)[2]]
  # NOTE(review): heatmap.breaks is computed here but never passed to the
  # pheatmap() call below -- confirm whether it should be supplied as `breaks =`.
  heatmap.breaks = unique(c(
    seq(min(a, na.rm = TRUE),-1, length=heatmap.breakDown),
    0,
    rev(seq(max(a, na.rm = TRUE),1, length=heatmap.breakUp))
  ))
  a[abs(a) < minLog2FC] = NA
  a = dataset[rowSums(is.na(a)) != ncol(a), ] # drop rows that are all NA
  a[a==0] = NA
}
# Row annotation: one colour per enzyme family.
family.colors = settings.graphics.colors[3:(length(levels(as.factor(a$Family)))+2)]
names(family.colors) = levels(as.factor(a$Family))
annotationColor = list(
  Family = family.colors)
annotationRow =data.frame(Family = a$Family)
rownames(annotationRow) = a$Genes
matrix = a[3:dim(a)[2]]
rownames(matrix) = a$Genes
# Separation by enzyme family: cumulative row counts give the gap positions.
rowGaps.freq = as.data.frame(table(a$Family))
rowGaps.freq = rowGaps.freq$Freq
rowGaps = 0
for (i in 1:length(rowGaps.freq)){
  rowGaps = rowGaps+ rowGaps.freq[i]
  rowGaps.freq[i] = rowGaps
}
rowGaps.freq = rowGaps.freq[1:(length(rowGaps.freq)-1)]
# Render the heatmap straight to PDF (clustering disabled: rows keep the
# family order from the input table).
pheatmap(matrix,
  cellheight = 10,cellwidth = 30,
  treeheight_row = 45, treeheight_col = 15,
  annotation_row = annotationRow,
  annotation_colors = annotationColor,
  na_col ="white",
  display_numbers = FALSE,
  # gaps_row = rowGaps.freq,
  fontsize_row = 8,
  fontsize_number = 4,
  cluster_rows = FALSE, cluster_cols = FALSE, show_rownames = TRUE, show_colnames = TRUE,
  filename = paste0("./../",pdfOutput)
)
# Save the (possibly filtered) table next to the PDF.
HELPER_SAVE_DATA_FRAME(a, paste0("./../",dataFrameOutput))
|
ef26f529adecff7f73ef2d1fd7e29de315dc7d92
|
b76f9a09a87ba50a6d71a55e3c45abfe5850b5c7
|
/man/add_waypoints.Rd
|
0175e8e241d908cc2d3e6f7e799f0a944d0f8c64
|
[] |
no_license
|
dweemx/dynwrap
|
963fa4029a103bfefa6a5de5cdb24df5754bbc3f
|
dcbe65a6e661ae3ba46e571fbeabc3b52699b6ef
|
refs/heads/master
| 2020-04-15T18:01:27.853035
| 2018-12-17T15:59:49
| 2018-12-17T15:59:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 648
|
rd
|
add_waypoints.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/wrap_add_waypoints.R
\name{add_waypoints}
\alias{add_waypoints}
\title{Add or create waypoints to a trajectory}
\usage{
add_waypoints(trajectory, n_waypoints = 100,
resolution = sum(trajectory$milestone_network$length)/n_waypoints)
}
\arguments{
\item{trajectory}{Wrapper with trajectory}
\item{n_waypoints}{The number of waypoints}
\item{resolution}{The resolution of the waypoints, measured in the same units as the lengths of the milestone network edges, will be automatically computed using n_waypoints}
}
\description{
Add or create waypoints to a trajectory
}
|
a4f7f2df3d4a055d7ecc253c19369cc6f4739de0
|
2c2ebb391be90b61b86cd7c8182a88c3bbeddb20
|
/get_data.R
|
970a026c464224d4c09aa2f4b25a29e0220c510a
|
[] |
no_license
|
langcomp/bicknell_levy_rayner
|
46110777d4713d4966b2b31f523129ed40436170
|
b883ef03378897d85e4cdb934b09b4440bd6f7f8
|
refs/heads/master
| 2021-05-09T15:51:07.954036
| 2020-02-07T20:56:29
| 2020-02-07T20:56:29
| 119,101,675
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,563
|
r
|
get_data.R
|
## combined analysis of Yosemite, YosDos, and YosDosReplication
library(Hmisc)
library(dplyr)
source("et-admin/et-admin.R")
source("etAnalyze/R/etAnalyze.R")
## Pre-computed exclusion lists and pretarget word lengths saved by earlier
## pipeline steps (see the sourced scripts above for how they are built).
subj_excl <- readRDS("subj_excl.rds")
df_exclusions <- readRDS("df_exclusions.rds")
df.pretarget.length <- readRDS("df.pretarget.length.rds")
get.all.data <- function(path,
                         has.sub, # has subgaze info?
                         expt) {
  ## Read every per-measure subject-by-item table produced by eyedry for one
  ## experiment and combine them into a single wide data frame.
  ##
  ## path:    directory containing the eyedry *.TXT output files
  ## has.sub: does this experiment have subgaze (".sb") files? If not,
  ##          zero-filled copies of the gzd/nfix tables are substituted so
  ##          downstream code can treat all experiments uniformly.
  ## expt:    short experiment label recorded in the `expt` column
  ##
  ## Returns one row per subj x item x cond, with one column per
  ## measure x region (regions 2-4, optionally ".pre"), plus the subgaze
  ## gzd3.sb / nfix3.sb columns.
  ## get all the data
  df.ffix <- read.subj.by.item('FFIX.TXT', path)
  df.single <- read.subj.by.item('SINGLE.TXT', path)
  df.gzd <- read.subj.by.item('GZD.TXT', path)
  df.ro <- read.subj.by.item('REGOUT.TXT', path)
  df.nfix <- read.subj.by.item('NFIX.TXT', path)
  df.lpos <- read.subj.by.item('LPOS.TXT', path)
  df.launch <- read.subj.by.item('LAUNCH.TXT', path)
  df.ffix.pre <- read.subj.by.item('FFIXPRE.TXT', path)
  df.single.pre <- read.subj.by.item('SINGPRE.TXT', path)
  df.gzd.pre <- read.subj.by.item('GZDPRE.TXT', path)
  df.ro.pre <- read.subj.by.item('REGOPRE.TXT', path)
  df.nfix.pre <- read.subj.by.item('NFIXPRE.TXT', path)
  df.lpos.pre <- read.subj.by.item('LPOSPRE.TXT', path)
  df.launch.pre <- read.subj.by.item('LNCHPRE.TXT', path)
  if (has.sub) {
    df.gzd.sb <- read.subj.by.item('GZDSB.TXT', path)
    df.nfix.sb <- read.subj.by.item('NFIXSB.TXT', path)
  } else {
    ## no subgaze info: substitute zero-filled placeholders with the same layout
    df.gzd.sb <- df.gzd
    df.gzd.sb[, 4:ncol(df.gzd.sb)] <- 0
    df.nfix.sb <- df.nfix
    df.nfix.sb[, 4:ncol(df.nfix.sb)] <- 0
  }
  ## make sure every table describes the same subj/item/cond rows in the
  ## same order, since they are combined positionally below
  check.alignment <- function(df) {
    identical(df.ffix[, c("subj", "item", "cond")],
              df[, c("subj", "item", "cond")])
  }
  stopifnot(check.alignment(df.single)
            & check.alignment(df.gzd)
            & check.alignment(df.ro)
            & check.alignment(df.nfix)
            & check.alignment(df.lpos)
            & check.alignment(df.launch)
            & check.alignment(df.ffix.pre)
            & check.alignment(df.single.pre)
            & check.alignment(df.gzd.pre)
            & check.alignment(df.ro.pre)
            & check.alignment(df.nfix.pre)
            & check.alignment(df.lpos.pre)
            & check.alignment(df.launch.pre)
            & check.alignment(df.gzd.sb)
            & check.alignment(df.nfix.sb))
  ## name -> data frame lookup; replaces the previous eval(parse(text = ...))
  ## construction, which was fragile and hard to audit
  measure.dfs <- list(
    ffix = df.ffix, single = df.single, gzd = df.gzd,
    ro = df.ro, nfix = df.nfix, lpos = df.lpos, launch = df.launch,
    ffix.pre = df.ffix.pre, single.pre = df.single.pre,
    gzd.pre = df.gzd.pre, ro.pre = df.ro.pre, nfix.pre = df.nfix.pre,
    lpos.pre = df.lpos.pre, launch.pre = df.launch.pre
  )
  ## combine: regions 2-4 of each measure become columns like "gzd3.pre"
  df.all <- df.ffix[, c("subj", "item", "cond")] %>%
    mutate(expt = expt)
  for (pre.label in c("", ".pre")) {
    for (measure.name in c("ffix", "single", "gzd", "ro",
                           "nfix", "lpos", "launch")) {
      for (region.num in 2:4) {
        source.df <- measure.dfs[[paste0(measure.name, pre.label)]]
        ## region columns start at position 4, so region k is column k + 3
        df.all[, paste0(measure.name, region.num, pre.label)] <-
          source.df[, region.num + 3]
      }
    }
  }
  ## NOTE(review): `region2` here appears to hold the region-3 subgaze values
  ## (column naming offset in the .sb files) -- confirm against the raw files.
  df.all$gzd3.sb <- df.gzd.sb$region2
  df.all$nfix3.sb <- df.nfix.sb$region2
  return(df.all)
}
## Read and combine all three experiments. Yosemite has no subgaze files
## (has.sub = FALSE); get.all.data substitutes zero-filled placeholders there.
df.yos <- get.all.data('Yosemite/Analysis/', F, 'yos')
df.yd <- get.all.data('YosDos/Analysis/', T, 'yd')
df.ydr <- get.all.data('YosDosReplication/Analysis/', T,
                       'ydr')
df.lmer <- bind_rows(df.yos, df.yd, df.ydr)
## add condition names (conditions 1 and 3 both map to "None"; cond 0 dropped)
conds <- c("None", "Right", "None", "Left") # correspondence to numbers 1-4
df.lmer <- df.lmer %>%
  filter(cond != 0) %>%
  mutate(cond = factor(conds[cond],
                       levels = c("None", "Right", "Left")))
## create new variables; the "*c" ("corrected") measures subtract the subgaze
## (post-display-change) component from the raw measure
df.lmer <- df.lmer %>%
  mutate(gzd3c = as.integer(gzd3 - gzd3.sb),
         nfix3c = nfix3 - nfix3.sb,
         single3c = ifelse(nfix3c == 1, gzd3c, NA),
         ffix3c = ifelse(nfix3c == 0, NA, ffix3.pre), # only true for the data we analyze
         skip3c = (nfix3c == 0),
         skip2.pre = (nfix2.pre == 0),
         skip3.pre = (nfix3.pre == 0),
         refix3 = (nfix3 > 1),
         refix3.pre = (nfix3.pre > 1),
         refix3c = (nfix3c > 1),
         # lpos3c: landing position corrected for the 3-character shift
         lpos3c = ifelse(cond == "None", lpos3.pre, NA),
         lpos3c = ifelse(cond == "Right", lpos3.pre - 3, lpos3c),
         lpos3c = ifelse(cond == "Left", lpos3.pre + 3, lpos3c),
         # zero durations on an unskipped pretarget are invalid -> NA
         single2.pre = ifelse(skip2.pre | single2.pre == 0, NA, single2.pre),
         ffix2.pre = ifelse(skip2.pre | ffix2.pre == 0, NA, ffix2.pre),
         gzd2.pre = ifelse(skip2.pre | gzd2.pre == 0, NA, gzd2.pre))
## sanity check: outside the Left condition, correction should be a no-op
stopifnot(with(df.lmer %>% filter(cond != "Left"),
               identical(gzd3c, gzd3) &
               identical(nfix3c, nfix3)))
## When eyedry excludes trials for having too long of a duration,
## it just makes them a zero. This causes a problem for
## calculating gzd3c, so we exclude these cases across the board.
df.lmer <- df.lmer %>%
  filter(!(gzd3==0 & nfix3 > 0) &
         !(gzd3.sb==0 & nfix3.sb > 0))
## remove extraneous dependent measures that aren't meaningful anyway
df.lmer <- df.lmer %>%
  select(subj, item, cond, expt,
         ffix2.pre, single2.pre, gzd2.pre, ro2.pre, nfix2.pre, lpos2.pre, launch2.pre, launch3.pre,
         gzd3c, nfix3c, single3c, ffix3c, skip3c, skip2.pre, skip3.pre, refix3c, lpos3.pre, lpos3c,
         nfix3.sb, lpos3) # keep these two just for a sanity check later
df.lmer <- df.lmer %>%
  ## exclude subjects
  left_join(subj_excl, by=c("expt", "subj")) %>%
  filter(to_kick == F) %>%
  select(-c(to_kick)) %>%
  ## we're only analyzing cases where target is fixated both pre- and post-shift
  filter(!skip3c) %>%
  filter(skip3.pre == F) %>% # would have skipped target
  filter(launch3.pre != -1) %>% # b/c we exclude skips, these are problems (e.g., short fixations)
  filter(lpos3.pre != -1) %>% # would-be skips according to lpos
  mutate(throwoff = (((cond == "Right") & (lpos3.pre < 4)) | ((cond == "Left") & (lpos3.pre > 4)))) %>%
  filter(throwoff == F) %>%
  filter(lpos3.pre != 0) %>% # display change triggered without landing on word
  ## get rid of problematic cases
  left_join(df_exclusions, by=c("expt", "subj", "item"))
## proportion of rows flagged bad (printed for inspection, not stored)
nrow(df.lmer %>% filter(!good)) / nrow(df.lmer)
df.lmer <- df.lmer %>%
  filter(good == T) # exclude blinks, long fixations, and display change problems
## sanity checks: all remaining durations should be at least 80 ms
stopifnot(with(df.lmer,
               (min(gzd2.pre, na.rm = TRUE) >= 80) &
               (min(single3c, na.rm = TRUE) >= 80) &
               (min(ffix2.pre, na.rm = TRUE) >= 80) &
               (min(single2.pre, na.rm = TRUE) >= 80) &
               (min(gzd3c) >= 80)
               ))
## sanity check: corrected landing position is consistent with the raw one
stopifnot(with(df.lmer,
               (cond=="None" & lpos3c == lpos3) |
               (cond=="Right" & lpos3c == lpos3) |
               (cond=="Left" & nfix3.sb == 0 & lpos3c == lpos3) |
               (cond=="Left" & nfix3.sb > 0 & lpos3c > lpos3)))
## outlier removal: zeros and values beyond `width` SDs (per subject) become NA
remove.outliers <- function(x, width=3) {
  x[x == 0] <- NA
  z <- scale(x)
  x[abs(z)>width] <- NA
  return(x)
}
df.lmer <- df.lmer %>%
  mutate(subj = paste(expt, subj)) %>% # make subject IDs unique across experiments
  group_by(subj) %>%
  mutate(gzd3c = remove.outliers(gzd3c),
         single3c = remove.outliers(single3c),
         ffix2.pre = remove.outliers(ffix2.pre),
         ffix3c = remove.outliers(ffix3c),
         single2.pre = remove.outliers(single2.pre),
         gzd2.pre = remove.outliers(gzd2.pre)) %>%
  ungroup()
# how many gzd3c datapoints were excluded?
sum(is.na(df.lmer$gzd3c)) / nrow(df.lmer)
# add pretarget length and flag fixations on the front half of the pretarget
df.lmer <- df.lmer %>%
  left_join(df.pretarget.length, by=c("item")) %>%
  mutate(front.half.pretarget = ifelse(pretarget.length == 3, lpos2.pre > 1, NA),
         front.half.pretarget = ifelse(pretarget.length == 4, lpos2.pre > 2, front.half.pretarget))
# exclude very far launch sites with little data (this only affects the full dataset analysis. the subset already restricts launch site tightly)
old_size <- nrow(df.lmer)
table(df.lmer$launch3.pre) # exclude launch sites with fewer than 20 instances -> keeping 1-11
df.lmer <- df.lmer %>%
  filter(launch3.pre > 0, # this is a single case
         launch3.pre < 12)
1-nrow(df.lmer)/old_size # excluding 0.8%
# (optionally) analyze tight dataset to rule out mislocated refixations, etc.
# change full_dataset to TRUE this out to generate analyses / figures for the full dataset without these restrictions
full_dataset <- TRUE
if (!full_dataset) {
  cat("Dataset size before excluding multiple fixations of pretarget and fixations on back half of pretarget: ")
  cat(nrow(df.lmer), "\n")
  df.lmer <- df.lmer %>%
    filter(!skip2.pre,
           launch2.pre != -1, # b/c we exclude skips, these are problems (e.g., short fixations)
           ro2.pre == 0,
           nfix2.pre == 1,
           front.half.pretarget)
  cat("Dataset size after those exclusions: ")
  cat(nrow(df.lmer), '\n')
}
## persist the analysis-ready data frame for the modeling scripts
saveRDS(df.lmer, file = "df.lmer.rds")
|
af829690870b3469f1f4e4cbf74dc1d5b01bed5d
|
30662119ab3ef017ec94458f58ed9ee03bceb169
|
/Plot3.R
|
45d7ce47cd9b73fd1e133dc5c073aa8763b34606
|
[] |
no_license
|
abinashi-prakash/Git
|
52ae0ed2d079e1cb46656b844d340ef939175308
|
30a56224123680b2e988141bf3e589c9156b4fb7
|
refs/heads/master
| 2020-04-27T09:12:29.739020
| 2019-05-30T05:16:17
| 2019-05-30T05:16:17
| 174,205,016
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 934
|
r
|
Plot3.R
|
# Read the file; "?" marks missing values in this dataset
powerdata <- read.table("household_power_consumption.txt", header = TRUE, sep = ";", na.strings = "?")
# Convert Date from character to Date class
powerdata$Date <- as.Date(powerdata$Date, "%d/%m/%Y")
# Select the data in the date range (2007-02-01 and 2007-02-02 only)
powerdata2 <- subset(powerdata, Date >= as.Date("2007-2-1") & Date <= as.Date("2007-2-2"))
# Combine Date and Time in a single variable
powerdata2$DateTime <- paste(powerdata2$Date, powerdata2$Time)
# Convert the combined string to a POSIXct timestamp
powerdata2$DateTime <- as.POSIXct(powerdata2$DateTime)
## Create Plot 3: the three sub-metering series over time
with(powerdata2, {
  # Fix: y-axis label was "Global Active Power (kilowatts)" (copied from
  # Plot 2) but this plot shows the energy sub-metering series.
  plot(Sub_metering_1 ~ DateTime, type = "l",
       ylab = "Energy sub metering", xlab = "")
  lines(Sub_metering_2 ~ DateTime, col = "red")
  lines(Sub_metering_3 ~ DateTime, col = "blue")
})
legend("topright", col = c("black", "red", "blue"), lwd = c(1, 1, 1),
       c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
## Saving to file: copy the screen device to a 480x480 PNG
dev.copy(png, file = "Plot3.png", height = 480, width = 480)
dev.off()
|
55a008e720c3151652bb299662b51b8624d46273
|
47248e6d22eb023dbfa7be13025e7b5697676d3b
|
/R/two_class_sim.R
|
6441ddb79e6d7b747ffee24c1f46955ce859636f
|
[] |
no_license
|
markhwhiteii/stat-comp
|
9b096676beac874146c81a8e0f7c4b37286a8cbe
|
a76ab0eb1a472f0f3dcc7e9a5e2c5d271626af2d
|
refs/heads/master
| 2021-08-23T06:42:49.525748
| 2017-12-03T23:58:47
| 2017-12-03T23:58:47
| 105,834,980
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,645
|
r
|
two_class_sim.R
|
# Simulate a two-class classification data set (in the style of
# caret::twoClassSim): two correlated informative factors, `linearVars`
# linear predictors, three non-linear predictors, and `noiseVars` pure-noise
# predictors, with class labels drawn from a logistic model whose intercept
# shifts the minority-class proportion toward `minoritySize`.
# NOTE(review): `corrVars` is accepted for interface compatibility but is not
# used anywhere in this body -- confirm against the upstream implementation.
two_class_sim <- function(n, intercept, linearVars, noiseVars,
                          corrVars, minoritySize) {
  # Two informative factors with covariance 1.3.
  cov_mat <- matrix(c(2, 1.3, 1.3, 2), 2, 2)
  sim <- data.frame(MASS::mvrnorm(n = n, c(0, 0), cov_mat))
  names(sim) <- paste0("TwoFactor", 1:2)
  # Linearly informative predictors: Linear01, Linear02, ...
  sim <- cbind(sim, matrix(rnorm(n * linearVars), ncol = linearVars))
  colnames(sim)[(1:linearVars) + 2] <-
    paste0("Linear", gsub(" ", "0", format(1:linearVars)))
  # Three non-linear predictors.
  sim$Nonlinear1 <- runif(n, min = -1)
  sim <- cbind(sim, matrix(runif(n * 2), ncol = 2))
  colnames(sim)[(ncol(sim) - 1):ncol(sim)] <- paste0("Nonlinear", 2:3)
  sim <- as.data.frame(sim)
  # Pure-noise predictors appended after everything else.
  n_before_noise <- ncol(sim)
  sim <- cbind(sim, matrix(rnorm(n * noiseVars), ncol = noiseVars))
  colnames(sim)[(n_before_noise + 1):ncol(sim)] <-
    paste0("Noise", gsub(" ", "0", format(1:noiseVars)))
  # Linear predictor of the logistic model.
  lp <- intercept - 4 * sim$TwoFactor1 + 4 * sim$TwoFactor2 +
    2 * sim$TwoFactor1 * sim$TwoFactor2 + (sim$Nonlinear1^3) +
    2 * exp(-6 * (sim$Nonlinear1 - 0.3)^2) +
    2 * sin(pi * sim$Nonlinear2 * sim$Nonlinear3)
  # Alternating-sign coefficients, decreasing in magnitude.
  coefs <- seq(10, 1, length = linearVars) / 4
  coefs <- coefs * rep(c(-1, 1), floor(linearVars) + 1)[1:linearVars]
  for (j in seq(along = coefs)) {
    lp <- lp + sim[, j + 3] * coefs[j]
  }
  # Convert to probabilities and shift so the minority class has roughly
  # the requested proportion, then sample labels.
  prob <- binomial()$linkinv(lp)
  prob <- prob +
    (minoritySize - sort(prob)[(1 - minoritySize) * length(sort(prob))])
  sim$Class <- ifelse(prob <= runif(n), "Class1", "Class2")
  sim$Class <- factor(sim$Class, levels = c("Class1", "Class2"))
  sim
}
|
c916508abe0e51a2fea892dc5df43f012f8f38f5
|
4bb8fd8247d242a6ef4d1f71c05b5a31d10c860b
|
/man/install_packages.Rd
|
d0fd950571bcd496a276f246285f1f1017836339
|
[
"MIT"
] |
permissive
|
isabella232/crancache
|
95827afe1eb9e58b782b0cc591e6e38d23b1334b
|
7ea4e479bdf780adadd1bd421a5ca23e5f951697
|
refs/heads/master
| 2022-12-20T18:44:31.170172
| 2020-03-07T09:57:11
| 2020-03-07T09:57:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 4,392
|
rd
|
install_packages.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/install-packages.R
\name{install_packages}
\alias{install_packages}
\title{Install Packages from Repositories or Local Files, with Caching}
\usage{
install_packages(pkgs, lib, repos = getOption("repos"),
contriburl = contrib.url(repos, type), method, available = NULL,
destdir = NULL, dependencies = NA, type = getOption("pkgType"),
...)
}
\arguments{
\item{pkgs}{character vector of the names of packages whose
current versions should be downloaded from the repositories.
If \code{repos = NULL}, a character vector of file paths.
These can be source directories or archives
or binary package archive files (as created by \command{R CMD build
--binary}). (\code{http://} and \code{file://} URLs are also
accepted and the files will be downloaded and installed from local
copies.) On a CRAN build of \R for macOS these can be \file{.tgz}
files containing binary package archives.
Tilde-expansion will be done on file paths.
If this is missing or a zero-length character vector, a listbox of
available packages is presented where possible in an interactive \R
session.}
\item{lib}{
character vector giving the library directories where to
install the packages. Recycled as needed. If missing, defaults to
the first element of \code{\link{.libPaths}()}.
}
\item{repos}{
character vector, the base URL(s) of the repositories
to use, e.g., the URL of a CRAN mirror such as
\code{"https://cloud.r-project.org"}. For more details on
supported URL schemes see \code{\link{url}}.
Can be \code{NULL} to install from local files, directories or URLs:
this will be inferred by extension from \code{pkgs} if of length one.
}
\item{contriburl}{
URL(s) of the contrib sections of the repositories. Use this
argument if your repository mirror is incomplete, e.g., because
you burned only the \file{contrib} section on a CD, or only have
binary packages. Overrides argument \code{repos}.
Incompatible with \code{type = "both"}.
}
\item{method}{
download method, see \code{\link{download.file}}. Unused if
a non-\code{NULL} \code{available} is supplied.
}
\item{available}{
a matrix as returned by \code{\link{available.packages}}
listing packages available at the repositories, or \code{NULL} when
the function makes an internal call to \code{available.packages}.
Incompatible with \code{type = "both"}.
}
\item{destdir}{
directory where downloaded packages are stored. If it is
\code{NULL} (the default) a subdirectory
\code{downloaded_packages} of the session temporary
directory will be used (and the files will be deleted
at the end of the session).
}
\item{dependencies}{logical indicating whether to also install
uninstalled packages which these packages depend on/link
to/import/suggest (and so on recursively). Not used if \code{repos
= NULL}. Can also be a character vector, a subset of
\code{c("Depends", "Imports", "LinkingTo", "Suggests", "Enhances")}.
Only supported if \code{lib} is of length one (or missing),
so it is unambiguous where to install the dependent packages. If
this is not the case it is ignored, with a warning.
The default, \code{NA}, means
\code{c("Depends", "Imports", "LinkingTo")}.
\code{TRUE} means to use
\code{c("Depends", "Imports", "LinkingTo", "Suggests")} for
\code{pkgs} and
\code{c("Depends", "Imports", "LinkingTo")} for added dependencies:
this installs all the packages needed to run \code{pkgs}, their
examples, tests and vignettes (if the package author specified them
correctly).
In all of these, \code{"LinkingTo"} is omitted for binary packages.
}
\item{type}{character, indicating the type of package to download and
install. Will be \code{"source"} except on Windows and some macOS
builds: see the section on \sQuote{Binary packages} for those.
}
\item{...}{additional arguments are passed to
\code{\link[utils:install.packages]{utils::install.packages()}}.}
}
\description{
Install Packages from Repositories or Local Files, with Caching
}
\seealso{
Other caching package management functions: \code{\link{available_packages}},
\code{\link{download_packages}},
\code{\link{update_packages}}
}
\concept{caching package management functions}
|
90fe77c456c0f31f4fc05f76ebf4e03307d548e6
|
8d8d1d24986dce6b8a56ed8bcb71ada4b4eeb2bd
|
/man/starwars.Rd
|
0fe5c06f6c2d33b1f53c2855dee68e4249292ca1
|
[
"MIT"
] |
permissive
|
schochastics/networkdata
|
edaed94b788dcd925f55ae07f8a2d8b58d45ae8e
|
535987d074d35206b6804e9c90dbfa4b50768632
|
refs/heads/master
| 2023-01-07T07:20:41.475574
| 2023-01-05T18:54:17
| 2023-01-05T18:54:17
| 226,346,857
| 142
| 17
| null | null | null | null |
UTF-8
|
R
| false
| true
| 398
|
rd
|
starwars.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data-misc.R
\docType{data}
\name{starwars}
\alias{starwars}
\title{Star Wars Episode 1-7}
\format{
list of igraph objects
}
\source{
Data downloaded from https://github.com/evelinag/StarWars-social-network
}
\usage{
starwars
}
\description{
Scene Co-occurrence of Star Wars Characters (Episode 1-7)
}
\keyword{datasets}
|
46f08a6f667c02eba1256854d36b806fbecad86f
|
19cd3e2856b30c6e8d4fb6b2608122367551cd8f
|
/man/taxonomy.Rd
|
33c79c69f96dc0b02854df55d72e9b62bb2c7781
|
[] |
no_license
|
brendanf/phylotax
|
3b03469a9dadba6c6d8e4e44830e8a22989703aa
|
77fd100e26b9320a04955b32e59d7c24fa1e60e3
|
refs/heads/master
| 2021-08-07T09:03:47.573616
| 2021-03-03T07:15:19
| 2021-03-03T07:15:19
| 245,027,006
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 3,098
|
rd
|
taxonomy.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/taxonomy.R
\name{taxonomy}
\alias{taxonomy}
\alias{taxonomy_dada2}
\alias{taxonomy_sintax}
\alias{taxonomy_idtaxa}
\title{Assign taxonomy to nucleotide sequences}
\usage{
taxonomy(seq, reference, method, min_confidence = 50, multithread = FALSE, ...)
taxonomy_dada2(
seq,
reference,
multithread = FALSE,
min_confidence,
tryRC = FALSE,
outputBootstraps = TRUE,
verbose = TRUE,
...
)
taxonomy_sintax(
seq,
reference,
min_confidence = NULL,
multithread = FALSE,
exec = NULL,
...
)
taxonomy_idtaxa(
seq,
reference,
multithread = FALSE,
strand = "top",
min_confidence = 40,
...
)
}
\arguments{
\item{seq}{(`character` vector or something that can be coerced to
one, or a matrix with sequences as the column names) Sequences to
assign taxonomy}
\item{reference}{(`character` string giving a path to a file or the
result from \code{\link[DECIPHER:LearnTaxa]{DECIPHER::LearnTaxa()}}/\code{\link[=train_idtaxa]{train_idtaxa()}}) An appropriately
formatted reference database (see Details).}
\item{method}{(`character` string) taxonomy assignment method.
Currently accepted values are "dada2", "sintax", and "idtaxa".}
\item{min_confidence}{(`integer` between 0 and 100) The minimum
confidence to report results.}
\item{multithread}{(\code{integer} scalar) the number of processors to use
for assignment.}
\item{...}{additional arguments for methods}
\item{tryRC}{(\code{logical} scalar) passed on to \code{dada2::assignTaxonomy()}}
\item{outputBootstraps}{(\code{logical} scalar) passed on to \code{dada2::assignTaxonomy()}}
\item{verbose}{(\code{logical} scalar) passed on to \code{dada2::assignTaxonomy()}}
\item{exec}{(\code{character} string) name of the executable to use for
SINTAX search. The default is "vsearch", but "usearch" should also
work. In either case, the executable should be installed and on the
system path.}
\item{strand}{(\code{character} string) passed on to \code{DECIPHER::IdTaxa()}}
}
\value{
raw results of the taxonomy assignment, of various types depending on
\code{method}.
}
\description{
This method uses a common interface to call primary taxonomic assignment
algorithms (i.e., those which assign taxonomy based on a taxonomically
classified reference sequence database, but not based on the results of other
algorithms) from other R packages or external programs.
}
\section{Return types}{
\itemize{
\item \code{taxonomy_dada2} and \code{taxonomy(..., method = "dada2")} return a \code{list}
with elements "tax" and "boot", as \link[dada2:assignTaxonomy]{dada2::assignTaxonomy}.
\item \code{taxonomy_sintax} and \code{taxonomy(..., method = "sintax")} return a
\code{data.frame} with columns "label", "hit", "strand", and, if
\code{min_confidence} is not given, "c12n" (short for "classification").
\item \code{taxonomy_idtaxa} and \code{taxonomy(..., method = "idtaxa")} gives
an S4 object of classes "Taxa" and "Train".
Any of these can be passed to \link{taxtable} to get a uniform format suitable
for use in \link{phylotax}.
}
}
|
5d737f047b3eef11753d2da9297a6ebb7f36ffeb
|
09c8994f16ce0065502d36d64eb78c2c8a7e8875
|
/asd-predictors-results.R
|
e7c30f69c8bdc24edd9f8d45abead93f2dad1fe1
|
[] |
no_license
|
hyeyeon-hwang/machine-learning-asd-predictors
|
bd7f32e9506ff16020699d5f4b2e997f646cedfa
|
c244ecf016e44f5e6c00fe15e6ac4b2d186a8f47
|
refs/heads/master
| 2020-03-29T20:18:35.823797
| 2019-12-05T17:23:50
| 2019-12-05T17:23:50
| 150,305,496
| 2
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 18,686
|
r
|
asd-predictors-results.R
|
# Result tables for the ASD-predictor models. The data objects (rDmr, dDmr,
# aDmr, pDmr) and helpers (runFunctions, removeHighCor) are presumably
# defined by the sourced script below -- they are not defined in this file.
source("asd-predictors.R")
library(knitr)
library(kableExtra)
# Random Forest Model Results ---------------------------------------------
# One model per disorder; p = 0.8 -> 80/20 train/test split, pos = the
# positive ("disease") class label.
rDmrResult <- runFunctions(rDmr, p = 0.8, pos = "Rett")
dDmrResult <- runFunctions(dDmr, p = 0.8, pos = "Dup15q")
aDmrResult <- runFunctions(aDmr, p = 0.8, pos = "idiopathic_autism" != "x" && "ASD")
# Render the confusion matrix of a model result as a styled HTML table
# (striped/hover/condensed/responsive Bootstrap look, 15 pt font) with a
# "Confusion Matrix" header spanning all three columns.
cmTable <- function(dmrResult) {
  tbl <- kable(dmrResult$confMat$table)
  tbl <- kable_styling(
    tbl,
    bootstrap_options = c("striped", "hover", "condensed", "responsive"),
    font_size = 15
  )
  add_header_above(tbl, header = c("Confusion Matrix" = 3), align = "c")
}
# Render confusion matrices for each disorder model.
cmTable(rDmrResult)
cmTable(dDmrResult)
cmTable(aDmrResult)
cmTable(pDmrResult)
# predicted-probability tables using kable
# Render the per-sample predicted class probabilities of a model result as a
# styled HTML table, with a "Sample" index column prepended.
cmProb <- function(dmrResult) {
  dmrResult$probPreds %>%
    as_tibble() %>% # as.tibble() was deprecated in tibble 2.0.0; as_tibble() is the supported spelling
    # seq_len() is safe for zero-row inputs, unlike c(1:nrow(...))
    add_column("Sample" = seq_len(nrow(dmrResult$probPreds)), .before = 1) %>%
    kable() %>%
    kable_styling(bootstrap_options = c("striped", "hover", "condensed", "responsive"), font_size = 15) %>%
    add_header_above(header = c("Predicted Probabilities" = 3), align = "c")
}
# Render predicted-probability tables for each disorder model.
cmProb(rDmrResult)
cmProb(dDmrResult)
cmProb(aDmrResult)
cmProb(pDmrResult)
# summary-of-results helper
# Build a two-column (Measure, Value) summary table for one random-forest
# classifier result: sample sizes, predictor count, and the main performance
# metrics from the caret confusion matrix. `caption` is appended (after
# " - ") to the auto-generated "Model: ..., Disorder: ..." caption.
sumRes <- function(dmrResult, caption) {
  model.lab <- paste("Model", dmrResult$rfModel$modelInfo$label, sep = ": ")
  disorder.lab <- paste("Disorder", dmrResult$confMat$positive, sep = ": ")
  title <- paste(model.lab, disorder.lab, sep = ", ")
  n.train <- dim(dmrResult$rfModel$trainingData)[1]
  n.test <- nrow(dmrResult$probPreds)
  n.pred <- dim(dmrResult$rfModel$trainingData)[2] - 1 # minus outcome column
  tibble(Measure = as.character(), Value = as.numeric()) %>%
    add_row(Measure = "Number of Samples in Training Data", Value = round(n.train)) %>%
    add_row(Measure = "Number of Samples in Testing Data", Value = n.test) %>%
    add_row(Measure = "Number of Predictors", Value = n.pred) %>%
    add_row(Measure = "Accuracy", Value = dmrResult$confMat$overall["Accuracy"]) %>%
    add_row(Measure = "Kappa", Value = dmrResult$confMat$overall["Kappa"]) %>%
    add_row(Measure = "Accuracy P Value (Acc > NIR)", Value = dmrResult$confMat$overall["AccuracyPValue"]) %>%
    add_row(Measure = "Sensitivity", Value = dmrResult$confMat$byClass["Sensitivity"]) %>%
    add_row(Measure = "Specificity", Value = dmrResult$confMat$byClass["Specificity"]) %>%
    add_row(Measure = "Positive Predictive Values", Value = dmrResult$confMat$byClass["Pos Pred Value"]) %>%
    add_row(Measure = "Negative Predictive Values", Value = dmrResult$confMat$byClass["Neg Pred Value"]) %>%
    kable(caption = paste(title, caption, sep = " - ")) %>%
    kable_styling() %>%
    column_spec(1:2, color = "black") %>%
    add_header_above(header = c("Summarized results from classification algorithm" = 2),
                     align = "c")
}
# Summary tables for the four baseline random-forest models.
rf_rett_table <- sumRes(rDmrResult, caption = "")
rf_rett_table
rf_dup_table <- sumRes(dDmrResult, caption = "")
rf_dup_table
rf_asd_table <- sumRes(aDmrResult, caption = "")
rf_asd_table
rf_plac_table <- sumRes(pDmrResult, caption = "")
rf_plac_table
# Feature Selection - Remove highly correlated variables ------------------
# run after removing highly correlated variables
# Drop predictors whose pairwise correlation exceeds the cutoff, then refit.
# Trailing comments record sample/predictor counts and accuracies observed
# on earlier runs.
rDmr_noHC_90 <- removeHighCor(rDmr, cutoffValue = 0.90) #10 samples, 2884 predictors, acc = 0.925, 1
rDmr_noHC_80 <- removeHighCor(rDmr, cutoffValue = 0.80) #10 samples, 460 predictors, acc = 0.2027, 0
rDmr_noHC_70 <- removeHighCor(rDmr, cutoffValue = 0.70)
dDmr_noHC_90 <- removeHighCor(dDmr, cutoffValue = 0.90) #8 samples, 1196 predictors, acc = 0.769, 0.5
dDmr_noHC_80 <- removeHighCor(dDmr, cutoffValue = 0.80) #8 samples, 116 predictors, acc = 0.0694, 0
dDmr_noHC_70 <- removeHighCor(dDmr, cutoffValue = 0.70)
aDmr_noHC_90 <- removeHighCor(aDmr, cutoffValue = 0.90) #22 samples, 470 predictors, acc = 0.849, 1
aDmr_noHC_80 <- removeHighCor(aDmr, cutoffValue = 0.80) #22 samples, 443 predictors, acc = 0.87, 1
aDmr_noHC_70 <- removeHighCor(aDmr, cutoffValue = 0.70)
pDmr_noHC_90 <- removeHighCor(pDmr, cutoffValue = 0.90)
pDmr_noHC_80 <- removeHighCor(pDmr, cutoffValue = 0.80)
pDmr_noHC_70 <- removeHighCor(pDmr, cutoffValue = 0.70)
pDmr_noHC_75 <- removeHighCor(pDmr, cutoffValue = 0.75)
pDmr_noHC_79 <- removeHighCor(pDmr, cutoffValue = 0.79)
pDmr_noHC_60 <- removeHighCor(pDmr, cutoffValue = 0.60)
# random forest models on data after removing highly correlated variables
rDmrResult_noHC_90 <- runFunctions(rDmr_noHC_90, p = 0.8, pos = "Rett")
rDmrResult_noHC_80 <- runFunctions(rDmr_noHC_80, p = 0.8, pos = "Rett")
rDmrResult_noHC_70 <- runFunctions(rDmr_noHC_70, p = 0.8, pos = "Rett")
dDmrResult_noHC_90 <- runFunctions(dDmr_noHC_90, p = 0.8, pos = "Dup15q")
dDmrResult_noHC_80 <- runFunctions(dDmr_noHC_80, p = 0.8, pos = "Dup15q")
dDmrResult_noHC_70 <- runFunctions(dDmr_noHC_70, p = 0.8, pos = "Dup15q")
aDmrResult_noHC_90 <- runFunctions(aDmr_noHC_90, p = 0.8, pos = "ASD")
aDmrResult_noHC_80 <- runFunctions(aDmr_noHC_80, p = 0.8, pos = "ASD")
aDmrResult_noHC_70 <- runFunctions(aDmr_noHC_70, p = 0.8, pos = "ASD")
# error, no predictors for 0.90, 0.80 cutoffs
pDmrResult_noHC_90 <- runFunctions(pDmr_noHC_90, p = 0.8, pos = "idiopathic_autism") #no predictors
pDmrResult_noHC_80 <- runFunctions(pDmr_noHC_80, p = 0.8, pos = "idiopathic_autism") #no predictors
pDmrResult_noHC_70 <- runFunctions(pDmr_noHC_70, p = 0.8, pos = "idiopathic_autism") # less 1, 286 predictors
pDmrResult_noHC_75 <- runFunctions(pDmr_noHC_75, p = 0.8, pos = "idiopathic_autism") # less 1, 286 predictors
pDmrResult_noHC_79 <- runFunctions(pDmr_noHC_79, p = 0.8, pos = "idiopathic_autism") # less 1, 286 predictors
pDmrResult_noHC_60 <- runFunctions(pDmr_noHC_60, p = 0.8, pos = "idiopathic_autism") # 270 predictors
# generate results tables for each disorder x correlation cutoff
rf_rett_hc90 <- sumRes(rDmrResult_noHC_90, caption = "0.90 highly correlated variables removed")
rf_rett_hc80 <- sumRes(rDmrResult_noHC_80, caption = "0.80 highly correlated variables removed")
rf_rett_hc70 <- sumRes(rDmrResult_noHC_70, caption = "0.70 highly correlated variables removed")
rf_rett_hc90
rf_rett_hc80
rf_rett_hc70
rf_dup_hc90 <- sumRes(dDmrResult_noHC_90, caption = "0.90 highly correlated variables removed")
rf_dup_hc80 <- sumRes(dDmrResult_noHC_80, caption = "0.80 highly correlated variables removed")
rf_dup_hc70 <- sumRes(dDmrResult_noHC_70, caption = "0.70 highly correlated variables removed")
rf_dup_hc90
rf_dup_hc80
rf_dup_hc70
rf_asd_hc90 <- sumRes(aDmrResult_noHC_90, caption = "0.90 highly correlated variables removed")
rf_asd_hc80 <- sumRes(aDmrResult_noHC_80, caption = "0.80 highly correlated variables removed")
rf_asd_hc70 <- sumRes(aDmrResult_noHC_70, caption = "0.70 highly correlated variables removed")
rf_asd_hc90
rf_asd_hc80
rf_asd_hc70
rf_plac_hc70 <- sumRes(pDmrResult_noHC_70, caption = "0.70 to 0.79 highly correlated variables removed")
rf_plac_hc60 <- sumRes(pDmrResult_noHC_60, caption = "0.60 highly correlated variables removed")
rf_plac_hc70
rf_plac_hc60
# Neural Network Model Results --------------------------------------------
# Summarize neural-network classification results as a styled kable table.
#
# Args:
#   dmrResult: list with elements $nnModel (a caret-trained model whose
#     trainingData holds the outcome plus one column per predictor),
#     $confMat (a caret confusionMatrix for the held-out test set) and
#     $probPreds (per-sample predictions for the test set).
#   caption: extra text appended to the table caption.
# Returns: a kableExtra-styled HTML summary table.
NNsumRes <- function(dmrResult, caption) {
  # Build the caption pieces under descriptive names.  The original bound
  # the combined label to `c`, shadowing base::c() inside this function --
  # it only worked because R skips non-functions in call position.
  model_label <- paste("Model", dmrResult$nnModel$modelInfo$label, sep = ": ")
  disorder_label <- paste("Disorder", dmrResult$confMat$positive, sep = ": ")
  run_label <- paste(model_label, disorder_label, sep = ", ")
  # trainingData: one row per training sample; columns = outcome + predictors.
  ntrain <- dim(dmrResult$nnModel$trainingData)[1]
  ntest <- nrow(dmrResult$probPreds)
  npred <- dim(dmrResult$nnModel$trainingData)[2] - 1
  sumTable <- tibble(Measure = as.character(), Value = as.numeric()) %>%
    add_row(Measure = "Number of Samples in Training Data", Value = round(ntrain)) %>%
    add_row(Measure = "Number of Samples in Testing Data", Value = ntest) %>%
    add_row(Measure = "Number of Predictors", Value = npred) %>%
    add_row(Measure = "Accuracy", Value = dmrResult$confMat$overall["Accuracy"]) %>%
    add_row(Measure = "Kappa", Value = dmrResult$confMat$overall["Kappa"]) %>%
    add_row(Measure = "Accuracy P Value (Acc > NIR)", Value = dmrResult$confMat$overall["AccuracyPValue"]) %>%
    add_row(Measure = "Sensitivity", Value = dmrResult$confMat$byClass["Sensitivity"]) %>%
    add_row(Measure = "Specificity", Value = dmrResult$confMat$byClass["Specificity"]) %>%
    add_row(Measure = "Positive Predictive Values", Value = dmrResult$confMat$byClass["Pos Pred Value"]) %>%
    add_row(Measure = "Negative Predictive Values", Value = dmrResult$confMat$byClass["Neg Pred Value"]) %>%
    kable(caption = paste(run_label, caption, sep = " - ")) %>%
    kable_styling() %>%
    column_spec(1:2, color = "black") %>%
    add_header_above(header = c("Summarized results from classification algorithm" = 2),
                     align = "c")
  return(sumTable)
}
# -- Neural network models on the reduced data sets -------------------------
# NNrunFunctions() is defined earlier in this file; the inline comments
# record the convergence behaviour observed for each run.
NNrunFunctions(rDmr_noHC_90, p = 0.8, pos = "Rett") # didn't converge, too many weights warnings
nn_rDmrResult_noHC_80 <- NNrunFunctions(rDmr_noHC_80, p = 0.8, pos = "Rett") # converged, too many weights warnings
nn_rDmrResult_noHC_70 <- NNrunFunctions(rDmr_noHC_70, p = 0.8, pos = "Rett") # worked
nn_rett_hc80 <- NNsumRes(nn_rDmrResult_noHC_80, caption = "0.80 highly correlated variables removed, too many weights warnings" )
nn_rett_hc70 <- NNsumRes(nn_rDmrResult_noHC_70, caption = "0.70 highly correlated variables removed" )
nn_rett_hc80
nn_rett_hc70
nn_dDmrResult_noHC_90 <- NNrunFunctions(dDmr_noHC_90, p = 0.8, pos = "Dup15q") # didn't converge, too many weights warnings
nn_dDmrResult_noHC_80 <- NNrunFunctions(dDmr_noHC_80, p = 0.8, pos = "Dup15q") # converged, error, subscript out of bounds
nn_dDmrResult_noHC_70 <- NNrunFunctions(dDmr_noHC_70, p = 0.8, pos = "Dup15q") # converged, error, subscript out of bounds
nn_aDmrResult_noHC_90 <- NNrunFunctions(aDmr_noHC_90, p = 0.8, pos = "ASD") # converged, error, subscript, weights warnings
nn_aDmrResult_noHC_80 <- NNrunFunctions(aDmr_noHC_80, p = 0.8, pos = "ASD") # converged, error, subscript, weights warnings
nn_aDmrResult_noHC_70 <- NNrunFunctions(aDmr_noHC_70, p = 0.8, pos = "ASD") # converged, error, subscript, weights warnings
nn_pDmrResult_noHC_70 <- NNrunFunctions(pDmr_noHC_70, p = 0.8, pos = "idiopathic_autism") # converged, error, subscript, weights warnings
nn_pDmrResult_noHC_60 <- NNrunFunctions(pDmr_noHC_60, p = 0.8, pos = "idiopathic_autism") # converged, error, subscript, weights warnings
# Summary tables for the runs above (repeats the Rett summaries built earlier).
nn_rett_hc80 <- NNsumRes(nn_rDmrResult_noHC_80, caption = "0.80 highly correlated variables removed, too many weights warnings")
nn_rett_hc70 <- NNsumRes(nn_rDmrResult_noHC_70, caption = "0.70 highly correlated variables removed")
nn_rett_hc80
nn_rett_hc70
nn_dup_hc80 <- NNsumRes(nn_dDmrResult_noHC_80, caption = "0.80 highly correlated variables removed")
nn_dup_hc70 <- NNsumRes(nn_dDmrResult_noHC_70, caption = "0.70 highly correlated variables removed")
nn_dup_hc80
nn_dup_hc70
# NOTE(review): the hc90 summary below is built from the _noHC_80 result --
# probably should be nn_aDmrResult_noHC_90 (which errored); confirm intent.
nn_asd_hc90 <- NNsumRes(nn_aDmrResult_noHC_80, caption = "0.90 highly correlated variables removed, too many weights warnings")
nn_asd_hc80 <- NNsumRes(nn_aDmrResult_noHC_80, caption = "0.80 highly correlated variables removed, too many weights warnings")
nn_asd_hc70 <- NNsumRes(nn_aDmrResult_noHC_70, caption = "0.70 highly correlated variables removed, too many weights warnings")
nn_asd_hc90
nn_asd_hc80
nn_asd_hc70
nn_plac_hc70 <- NNsumRes(nn_pDmrResult_noHC_70, caption = "0.70 highly correlated variables removed, too many weights warnings")
nn_plac_hc60 <- NNsumRes(nn_pDmrResult_noHC_60, caption = "0.60 highly correlated variables removed, too many weights warnings")
nn_plac_hc70
nn_plac_hc60
# Observed failure when fitting on the full (unreduced) data set:
# ran for more than 10 min
# Error: Stopping
# In addition: There were 50 or more warnings (use warnings() to see the first 50)
# > warnings()
# Warning messages:
# 1: model fit failed for Fold1.Rep01: size=1, decay=0e+00 Error in nnet.default(x, y, w, entropy = TRUE, ...) :
#   too many (4644) weights
NNrunFunctions(rDmr, p = 0.8, pos = "Rett") # warnings, too many weights, data set too large
NNrunFunctions(rDmr_vi$sixty) # 10 samples, 28 predictors, size = 1, decay = 0.1, acc = 1, 1
NNrunFunctions(rDmr_vi$seventy) # 10 samples, 7 predictors, size = 1, decay = 0.1, acc = 1, 1
NNrunFunctions(rDmr_vi$eighty) # 10 samples, 7 predictors, size = 1, decay = 0.1, acc = 1, 1
NNrunFunctions(rDmr_vi$ninety) # 10 samples, 4 predictors, size = 1, decay = 0.1, acc = 1, 1
NNrunFunctions(dDmr_vi$sixty) # 8 samples, 32 predictors, size = 1, decay = 0.1, acc = 1, 1
NNrunFunctions(dDmr_vi$seventy) # 8 samples, 9 predictors, size = 1, decay = 0.1, acc = 1, 0.5
NNrunFunctions(dDmr_vi$eighty) # 8 samples, 9 predictors, size = 1, decay = 0.1, acc = 1, 0.5
NNrunFunctions(dDmr_vi$ninety) # 8 samples, 6 predictors, size = 1, decay = 0.1, acc = 1, 0.5
NNrunFunctions(aDmr_vi$sixty) # 22 samples, 4 predictors, size = 3, decay = 0.1, acc = 0.9083, 0.6
NNrunFunctions(aDmr_vi$seventy) # 22 samples, 1 predictors, size = 5, decay = 0.85059, acc = 1, 1
NNrunFunctions(aDmr_vi$eighty) # 22 samples, 1 predictors, size = 5, decay = 0.8505952, acc = 1, 1
NNrunFunctions(aDmr_vi$ninety) # 22 samples, 1 predictors, size = 1, decay = 0.8505952, acc = 1, 1
# Feature Selection - Variable Importance ---------------------------------
# run after selecting important variables
# selectImpVar() keeps predictors whose caret variable importance exceeds
# the cutoff; the $sixty/$seventy/... list slots mirror these cutoffs.
# NOTE(review): cutoff_vi does not appear to be referenced below -- confirm
# it is still needed.
cutoff_vi <- c(60, 70, 80, 90)
rDmr_vi <- list()
rDmr_vi$sixty <- selectImpVar(rDmr, rDmrResult$rfModel, cutoffValue = 60)
rDmr_vi$seventy <- selectImpVar(rDmr, rDmrResult$rfModel, cutoffValue = 70) # accuracy: 1 -> 0.5
rDmr_vi$eighty <- selectImpVar(rDmr, rDmrResult$rfModel, cutoffValue = 80) # accuracy: 1 -> 0.5
rDmr_vi$ninety <- selectImpVar(rDmr, rDmrResult$rfModel, cutoffValue = 90)
dDmr_vi <- list()
dDmr_vi$sixty <- selectImpVar(dDmr, dDmrResult$rfModel, cutoffValue = 60)
dDmr_vi$seventy <- selectImpVar(dDmr,dDmrResult$rfModel, cutoffValue = 70) # accuracy: 1 -> 1
dDmr_vi$eighty <- selectImpVar(dDmr, dDmrResult$rfModel, cutoffValue = 80) # accuracy: 1 -> 1
dDmr_vi$ninety <- selectImpVar(dDmr, dDmrResult$rfModel, cutoffValue = 90)
aDmr_vi <- list()
aDmr_vi$sixty <- selectImpVar(aDmr, aDmrResult$rfModel, cutoffValue = 60)
aDmr_vi$seventy <- selectImpVar(aDmr,aDmrResult$rfModel, cutoffValue = 70) # accuracy: 0.8 -> 1
aDmr_vi$eighty <- selectImpVar(aDmr, aDmrResult$rfModel, cutoffValue = 80) # accuracy: 0.8 -> 1
aDmr_vi$ninety <- selectImpVar(aDmr, aDmrResult$rfModel, cutoffValue = 90)
pDmr_vi <- list()
pDmr_vi$sixty <- selectImpVar(pDmr, pDmrResult$rfModel, cutoffValue = 60)
pDmr_vi$seventy <- selectImpVar(pDmr,pDmrResult$rfModel, cutoffValue = 70) # accuracy: 0.8 -> 1
pDmr_vi$eighty <- selectImpVar(pDmr, pDmrResult$rfModel, cutoffValue = 80) # accuracy: 0.8 -> 1
pDmr_vi$ninety <- selectImpVar(pDmr, pDmrResult$rfModel, cutoffValue = 90)
# Refit random forests on the importance-filtered predictor sets.
rf_rDmrResult_vi_60 <- runFunctions(rDmr_vi$sixty, p = 0.8, pos = "Rett")
rf_rDmrResult_vi_70 <- runFunctions(rDmr_vi$seventy, p = 0.8, pos = "Rett")
rf_rDmrResult_vi_80 <- runFunctions(rDmr_vi$eighty, p = 0.8, pos = "Rett") # 1 predictor
rf_rDmrResult_vi_90 <- runFunctions(rDmr_vi$ninety, p = 0.8, pos = "Rett") # 1 predictor
rf_dDmrResult_vi_60 <- runFunctions(dDmr_vi$sixty, p = 0.8, pos = "Dup15q")
rf_dDmrResult_vi_70 <- runFunctions(dDmr_vi$seventy, p = 0.8, pos = "Dup15q")
rf_dDmrResult_vi_80 <- runFunctions(dDmr_vi$eighty, p = 0.8, pos = "Dup15q")
rf_dDmrResult_vi_90 <- runFunctions(dDmr_vi$ninety, p = 0.8, pos = "Dup15q") # 1 predictor
rf_aDmrResult_vi_60 <- runFunctions(aDmr_vi$sixty, p = 0.8, pos = "ASD") # 1 predictor
rf_aDmrResult_vi_70 <- runFunctions(aDmr_vi$seventy, p = 0.8, pos = "ASD") # 1 predictor
rf_aDmrResult_vi_80 <- runFunctions(aDmr_vi$eighty, p = 0.8, pos = "ASD") # 1 predictor
rf_aDmrResult_vi_90 <- runFunctions(aDmr_vi$ninety, p = 0.8, pos = "ASD") # 1 predictor
rf_pDmrResult_vi_60 <- runFunctions(pDmr_vi$sixty, p = 0.8, pos = "idiopathic_autism")
rf_pDmrResult_vi_70 <- runFunctions(pDmr_vi$seventy, p = 0.8, pos = "idiopathic_autism")
rf_pDmrResult_vi_80 <- runFunctions(pDmr_vi$eighty, p = 0.8, pos = "idiopathic_autism")
rf_pDmrResult_vi_90 <- runFunctions(pDmr_vi$ninety, p = 0.8, pos = "idiopathic_autism") # 1 predictor
# Predictor names kept at the 60 cutoff ([-1] drops the first column --
# presumably the outcome; confirm against the data layout).
vi60pred <- colnames(rDmr_vi$sixty[-1])
rf_rett_vi60 <- sumRes(rf_rDmrResult_vi_60, caption = "variable importance 60")
rf_rett_vi70 <- sumRes(rf_rDmrResult_vi_70, caption = "variable importance 70")
rf_rett_vi80 <- sumRes(rf_rDmrResult_vi_80, caption = "variable importance 80")
rf_rett_vi90 <- sumRes(rf_rDmrResult_vi_90, caption = "variable importance 90")
rf_rett_vi60
rf_rett_vi70
rf_rett_vi80
rf_rett_vi90
rf_dup_vi60 <- sumRes(rf_dDmrResult_vi_60, caption = "variable importance 60")
rf_dup_vi70 <- sumRes(rf_dDmrResult_vi_70, caption = "variable importance 70")
rf_dup_vi80 <- sumRes(rf_dDmrResult_vi_80, caption = "variable importance 80")
rf_dup_vi90 <- sumRes(rf_dDmrResult_vi_90, caption = "variable importance 90")
rf_dup_vi60
rf_dup_vi70
rf_dup_vi80
rf_dup_vi90
rf_asd_vi60 <- sumRes(rf_aDmrResult_vi_60, caption = "variable importance 60")
rf_asd_vi70 <- sumRes(rf_aDmrResult_vi_70, caption = "variable importance 70")
rf_asd_vi80 <- sumRes(rf_aDmrResult_vi_80, caption = "variable importance 80")
rf_asd_vi90 <- sumRes(rf_aDmrResult_vi_90, caption = "variable importance 90")
rf_asd_vi60
rf_asd_vi70
rf_asd_vi80
rf_asd_vi90
rf_plac_vi60 <- sumRes(rf_pDmrResult_vi_60, caption = "variable importance 60")
rf_plac_vi70 <- sumRes(rf_pDmrResult_vi_70, caption = "variable importance 70")
rf_plac_vi80 <- sumRes(rf_pDmrResult_vi_80, caption = "variable importance 80")
rf_plac_vi90 <- sumRes(rf_pDmrResult_vi_90, caption = "variable importance 90")
rf_plac_vi60
rf_plac_vi70
rf_plac_vi80
rf_plac_vi90
# Batch versions of the refits above (one runFunctions() call per cutoff).
rDmr_vi_Result <- lapply(rDmr_vi, runFunctions)
dDmr_vi_Result <- lapply(dDmr_vi, runFunctions)
# warnings() showed: 50: In randomForest.default(x, y, mtry = param$mtry, ...) : invalid mtry: reset to within valid range
aDmr_vi_Result <- lapply(aDmr_vi, runFunctions)
# aDmrResult_vi <- runFunctions(aDmr_vi$sixty), no error
# aDmrResult_vi <- runFunctions(aDmr_vi$seventy), eighty, ninety -> only 1 predictor
# mtry warning: Tuning parameter 'mtry' was held constant at a value of 2
# Compare models and accuracy before and after variable importance
rDmrResult$rfModel$results$Accuracy
rDmrResult$confMat$overall["Accuracy"]
# 28 predictors, accuracy = 1
rDmr_vi_Result$sixty$rfModel$results$Accuracy
rDmr_vi_Result$sixty$confMat$overall["Accuracy"]
# 7 predictors, accuracy = 1
rDmr_vi_Result$seventy$rfModel$results$Accuracy
rDmr_vi_Result$seventy$confMat$overall["Accuracy"]
# 7 predictors, accuracy = 1
rDmr_vi_Result$eighty$rfModel$results$Accuracy
rDmr_vi_Result$eighty$confMat$overall["Accuracy"]
# 4 predictors, accuracy = 1
rDmr_vi_Result$ninety$rfModel$results$Accuracy
rDmr_vi_Result$ninety$confMat$overall["Accuracy"]
# ROC curve, variable importance
|
73d2317de75fddefc4a6c6e983b9aa34f3af3889
|
12e3d5f8618bbc113e6f039b7346fc5d723015c9
|
/Stats_II/Class2/Class 2 - In Class Project.R
|
ae4a59e4efa5745a872635a75346306fbb6d6eec
|
[] |
no_license
|
raschroeder/R-Coursework
|
4af2ded6e9af2c0c64697dcc796a12e508f38ae4
|
1e9800b00f84cb4092c956d9910a710729b9aff3
|
refs/heads/master
| 2020-04-05T12:44:28.824912
| 2019-02-06T15:59:07
| 2019-02-06T15:59:07
| 156,878,511
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,098
|
r
|
Class 2 - In Class Project.R
|
##########################################################
######## Class 2 - In class Assignment ###################
##########################################################
#Load Libraries you will need
library(car) #graph data
library(ppcor) #part corr
#Set working directory (you need to change this)
setwd('/Users/rachel/Box\ Sync/R\ Coursework/Stats_II/Class2')
# Import this data file
#### Note: Dataset given to me by Amanda Roy
Class2.Data<-read.csv('Class2InClassData.csv')
head(Class2.Data)
################ Variables of the day
# idnum = ID of subject
# gender = 1 = male, 2 = female [nominal]
# age = age of person
# Health = from 0 to 10 (most healthy today) [we are going to pretend its interval]
# Yrsmar = Number of years Married [interval]
###############################################################
# Question: Is the longer you are married the healthier you are, but we need to control for age!!
################################################################
################################
########### Task 1: ###########
################################
## calculate the semipartial and partial correlation
#zero-order correlation between Health and Yrsmar
Corr.Result.1<-cor.test(Class2.Data$Health, Class2.Data$Yrsmar,
method = c("pearson"))
Corr.Result.1
# Control for Age (SP) -- semipartial: age removed from Yrsmar only
CorrSP.1<-spcor.test(Class2.Data$Health, Class2.Data$Yrsmar, Class2.Data$age)
CorrSP.1
# Control for Age (P) -- partial: age removed from both variables
CorrP.1<-pcor.test(Class2.Data$Health, Class2.Data$Yrsmar, Class2.Data$age)
CorrP.1
#overall relationship didn't change much - there is no relationship between years and health when you've controlled for age
################################
########### Task 2: ###########
################################
# Run the regression, Health~Yrsmar and a second regression controlling for age.
M.Model.1<-lm(Health~Yrsmar, data = Class2.Data)
M.Model.2<-lm(Health~Yrsmar+age, data = Class2.Data)
M.Model.1
M.Model.2
## Now control for age
# remember to control, you can residualize age from Yrsmar.
# Then rerun your regression using the residualized Yrsmar
# Also plot health by residualized Yrsmar
Class2.Data$Age.from.Yrsmar<-residuals(lm(Yrsmar~age, Class2.Data))
M.Model.3<-lm(Health~Age.from.Yrsmar, data = Class2.Data)
summary(M.Model.3)
scatterplot(Health~Age.from.Yrsmar,Class2.Data, smoother=FALSE)
#The relationship is gone now - the estimate is now only 0.008
################################
########### Task 3: ###########
################################
# you can just enter both Yrsmar and age into one lm model!
M.Model.2<-lm(Health~Yrsmar+age, data = Class2.Data)
summary(M.Model.2)
# Squared semipartial correlations = unique variance explained by each predictor
CorrSP.1<-spcor.test(Class2.Data$Health, Class2.Data$Yrsmar, Class2.Data$age)
CorrSP.1$estimate^2
CorrSP.2<-spcor.test(Class2.Data$Health, Class2.Data$age, Class2.Data$Yrsmar)
CorrSP.2$estimate^2
################################
########### Task 4: ###########
################################
#Does years of marriage have anything to do with health once we control for age? NO
# Why?
################################
########### Task 5: ###########
################################
# Let's do the whole process again but separately for males and females
Class2.Males<-subset(Class2.Data, gender==1)
Class2.Females<-subset(Class2.Data, gender==2)
#zero-order correlation between Health and Yrsmar
# Males
Corr.Result.2<-cor.test(Class2.Males$Health, Class2.Males$Yrsmar,
method = c("pearson"))
Corr.Result.2
# Females
Corr.Result.3<-cor.test(Class2.Females$Health, Class2.Females$Yrsmar,
method = c("pearson"))
Corr.Result.3
# Control for Age (SP)
# Males
# Females
# Run 2 LM Models: Yrsmar+age
# Males
# Females
# Bootstrap the .95BCa CI for each gender
library(boot)
# Males
# Females
################################
########### Task 6: ###########
################################
# 1. Does the bootstrapping match the conclusions from the p values for each gender model?
# 2. Compare the coef from each gender and explain in plain English what the results suggest
|
2f1748a5042e4f172daf89df16dc1b177e073300
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/magic/examples/latin.Rd.R
|
4e869296f0dd4fab300221505244116ceaf3868d
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 700
|
r
|
latin.Rd.R
|
library(magic)
### Name: latin
### Title: Random latin squares
### Aliases: latin incidence is.incidence is.incidence.improper unincidence
### inc_to_inc another_latin another_incidence rlatin
### Keywords: array
### ** Examples
rlatin(5)
rlatin(n=2, size=4, burnin=10)
# An example that allows one to optimize an objective function
# [here f()] over latin squares:
# Proposal function for optim's "SANN" method: jump to a neighbouring
# 7x7 latin square (another_latin() is from the magic package).
gr <- function(x){ another_latin(matrix(x,7,7)) }
set.seed(0)
# Objective: sum of the square's entries at 20 fixed random positions.
index <- sample(49,20)
f <- function(x){ sum(x[index])}
# Simulated annealing over latin squares: f() is minimised, gr() proposes moves.
jj <- optim(par=as.vector(latin(7)), fn=f, gr=gr, method="SANN", control=list(maxit=10))
best_latin <- matrix(jj$par,7,7)
print(best_latin)
print(f(best_latin))
#compare starting value:
f(circulant(7))
|
f65de342aca34f96706e2f7beb8b4401612833ca
|
7ecbd1e903a5ac6781fd401f1ce6ffc2ef6323a5
|
/demo/example2.R
|
42f43288078f9b1267be728b04a1cb96d7cdbff0
|
[] |
no_license
|
jtilly/knitroR
|
84db064b0053d3bd8c8ecfdc47f82c83bcb63d3c
|
3a1bdb8bff7aab287b22f7af23b1053b01c89ab8
|
refs/heads/master
| 2021-01-18T22:48:12.248719
| 2016-11-09T17:07:37
| 2016-11-09T17:07:37
| 28,303,235
| 3
| 2
| null | 2017-11-17T19:00:49
| 2014-12-21T15:09:11
|
R
|
UTF-8
|
R
| false
| false
| 1,426
|
r
|
example2.R
|
# Example with two inequality constraints and one upper bound
#
# min 100 (x2 - x1^2)^2 + (1 - x1)^2
# s.t. x1 x2 >= 1
# x1 + x2^2 >= 0
# x1 <= 0.5
#
# The standard start point (-2, 1) usually converges to the standard
# minimum at (0.5, 2.0), with final objective = 306.5.
# Sometimes the solver converges to another local minimum
# at (-0.79212, -1.26243), with final objective = 360.4.
#
# The problem comes from Hock and Schittkowski, HS15
library(knitroR)
# Objective function (HS15): f(x) = 100 (x2 - x1^2)^2 + (1 - x1)^2
objFun <- function(x) {
  residual <- x[2] - x[1]^2
  100 * residual^2 + (1 - x[1])^2
}
# Inequality constraints written as g(x) <= 0:
#   g1 = 1 - x1*x2   (i.e. x1 * x2 >= 1)
#   g2 = -x1 - x2^2  (i.e. x1 + x2^2 >= 0)
c_inequality <- function(x) {
  g1 <- 1 - x[1] * x[2]
  g2 <- -x[1] - x[2]^2
  c(g1, g2)
}
# Upper bounds: x1 <= 0.5, x2 effectively unbounded.
# (Use `<-` for assignment and drop the stray trailing semicolon.)
ub <- c(0.5, 1e20)
# define starting values (standard HS15 start point)
x0 <- c(-2, 1)
# First solve: no derivatives supplied, so KNITRO approximates them.
results1 <- knitro(objFun = objFun, c_inequality = c_inequality, ub = ub, x0 = x0, options = "options.opt")
# Analytic gradient of objFun:
#   d f / d x1 = -400 (x2 - x1^2) x1 - 2 (1 - x1)
#   d f / d x2 =  200 (x2 - x1^2)
# (Fixed `=` assignments to `<-`; dropped the redundant explicit return().)
objGrad <- function(x) {
  grad <- vector(mode = "numeric", length = 2)
  grad[1] <- (-400.0 * (x[2] - x[1]^2) * x[1]) - (2.0 * (1.0 - x[1]))
  grad[2] <- 200.0 * (x[2] - x[1]^2)
  grad
}
# Jacobian of c_inequality: rows = constraints, columns = variables.
# (The original reused the function's own name `jac` for the local matrix,
# shadowing the function inside its body -- renamed to J for clarity;
# `=` assignments fixed to `<-`.)
jac <- function(x) {
  J <- matrix(0, nrow = 2, ncol = 2)
  J[1, 1] <- -x[2]          # d g1 / d x1
  J[1, 2] <- -x[1]          # d g1 / d x2
  J[2, 1] <- -1.0           # d g2 / d x1
  J[2, 2] <- -2.0 * x[2]    # d g2 / d x2
  J
}
# Second solve: exact first derivatives supplied (gradient + Jacobian).
# (`=` assignment fixed to `<-`.)
results2 <- knitro(objFun = objFun, c_inequality = c_inequality,
                   objGrad = objGrad, jac = jac, ub = ub, x0 = x0, options = "options.opt")
|
494288944fca2df9cb3943763aeb4aa47e2efbc1
|
034d0b59cc9a5c36c47d28bf8e3adb0c523bb8a2
|
/StitchingDays.R
|
dbc8e1fd6a10a484aae3e8bf5ee5c351ba8f916e
|
[] |
no_license
|
abhie19/IN-IU
|
7c1ddb07803e1e3b4b18f03d652955464e651220
|
79562b016f8c01e1be657135589a860316670005
|
refs/heads/master
| 2021-01-19T04:14:54.047206
| 2016-07-16T02:39:29
| 2016-07-16T02:39:29
| 63,461,790
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,071
|
r
|
StitchingDays.R
|
# Stitch flows across the midnight boundary between two daily flow dumps:
# a flow that ends in the last minutes of day 1 and a flow that starts in
# the first minutes of day 2 with the same 5-tuple (src IP, dst IP,
# src port, dst port, protocol) is reported as a stitchable pair.

# Both dumps share the same layout, so define the column names once
# (the original repeated the 19-element vector for each file).
flow_cols <- c("start_time", "end_time", "some1", "s_ip", "d_ip", "s_port",
               "d_port", "protocol", "flag", "some2", "some3", "flows",
               "size", "some4", "some5", "some6", "some7", "some8", "some9")

day1 <- read.csv("/Users/Abhishek/Downloads/01.csv", header = FALSE, stringsAsFactors = FALSE)
colnames(day1) <- flow_cols
sapply(day1, class)  # inspect column types

day2 <- read.csv("/Users/Abhishek/Downloads/02.csv", header = FALSE, stringsAsFactors = FALSE)
colnames(day2) <- flow_cols

# Parse the timestamp strings into POSIXlt date-times.
day1$start_time <- strptime(x = as.character(day1$start_time), format = "%Y-%m-%d %H:%M:%S")
day1$end_time <- strptime(x = as.character(day1$end_time), format = "%Y-%m-%d %H:%M:%S")
day2$start_time <- strptime(x = as.character(day2$start_time), format = "%Y-%m-%d %H:%M:%S")
day2$end_time <- strptime(x = as.character(day2$end_time), format = "%Y-%m-%d %H:%M:%S")

# Keep only flows near the day boundary.
# NOTE(review): comparing POSIXlt values against character timestamps relies
# on implicit coercion in the session's time zone -- confirm "EST" is intended.
day1_filtered <- day1[day1$end_time > "2015-12-01 23:55:00 EST", ]
day2_filtered <- day2[(day2$start_time > "2015-12-01 23:55:00 EST" & day2$start_time < "2015-12-01 23:59:59 EST"), ]

# Pairwise scan for matching 5-tuples.  seq_len() is safe when a filtered
# frame has zero rows (the original 1:nrow(...) would iterate over c(1, 0)).
count <- 0
for (i in seq_len(nrow(day1_filtered))) {
  day1_s_ip <- day1_filtered[i, "s_ip"]
  day1_d_ip <- day1_filtered[i, "d_ip"]
  day1_s_port <- day1_filtered[i, "s_port"]
  day1_d_port <- day1_filtered[i, "d_port"]
  day1_protocol <- day1_filtered[i, "protocol"]
  for (j in seq_len(nrow(day2_filtered))) {
    day2_s_ip <- day2_filtered[j, "s_ip"]
    day2_d_ip <- day2_filtered[j, "d_ip"]
    day2_s_port <- day2_filtered[j, "s_port"]
    day2_d_port <- day2_filtered[j, "d_port"]
    day2_protocol <- day2_filtered[j, "protocol"]
    if ((day1_s_ip == day2_s_ip) & (day1_d_ip == day2_d_ip) & (day1_s_port == day2_s_port) & (day1_d_port == day2_d_port) & (day1_protocol == day2_protocol)) {
      print(count)  # 0-based index of this stitchable pair
      count <- count + 1
      print("stitchable flow found")  # message typo fixed ("stichable")
      print(day1_filtered[i, ])
      print(day2_filtered[j, ])
    }
  }
}
|
2db8232e28a99c110442ebafa251971cf23022b0
|
47b6c88ef300b4c1dc3298bbdefd5b98637fd1d2
|
/man/dfp_createCustomFieldOptions.Rd
|
234e356dbecc069313e3b59676327a0ce36e4889
|
[] |
no_license
|
StevenMMortimer/rdfp
|
cff34ff3a2b078a03c75e43d943b226b6ca72bba
|
e967f137d7605b754b53a07d41069f4e5fd209dc
|
refs/heads/main
| 2022-11-18T11:40:26.848090
| 2019-06-05T23:54:54
| 2019-06-05T23:54:54
| 46,183,233
| 6
| 2
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,458
|
rd
|
dfp_createCustomFieldOptions.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/CustomFieldService.R
\name{dfp_createCustomFieldOptions}
\alias{dfp_createCustomFieldOptions}
\title{CustomFieldService}
\usage{
dfp_createCustomFieldOptions(request_data, as_df = TRUE,
verbose = FALSE)
}
\arguments{
\item{request_data}{a \code{list} or \code{data.frame} of data elements
to be formatted for a SOAP
request (XML format, but passed as character string)}
\item{as_df}{a boolean indicating whether to attempt to parse the result into
a \code{data.frame}}
\item{verbose}{a boolean indicating whether to print the service URL and POSTed XML}
}
\value{
a \code{data.frame} or \code{list} containing all the elements of a createCustomFieldOptionsResponse
}
\description{
Provides methods for the creation and management of CustomField objects.
}
\details{
createCustomFieldOptions
Creates new CustomFieldOption objects. The following fields are required:
\itemize{
\item{CustomFieldOption displayName}
\item{CustomFieldOption customFieldId}
}
}
\examples{
\dontrun{
request_data <- data.frame(customFieldId=rep(dfp_createCustomFields_result$id, 3),
displayName=c('Morning', 'Afternoon', 'Evening'))
result <- dfp_createCustomFieldOptions(request_data)
}
}
\seealso{
\href{https://developers.google.com/ad-manager/api/reference/v201905/CustomFieldService#createCustomFieldOptions}{Google Documentation for createCustomFieldOptions}
}
|
3d1bcb1076c2e88b3f1902451fd81c90d78025e1
|
4a5a1cb13d1e7a780e4eebe75398133542a916f4
|
/man/kmerFractions.Rd
|
b66f27195017f3b4750bfea891d2923ea1564f75
|
[] |
no_license
|
Malarkey73/fastqc
|
fc596e6b9d2625252cd39e7cc4d79437c60bfa57
|
126b2726fbec7d8104e6e46258899b261e9f2815
|
refs/heads/master
| 2016-08-06T04:28:26.422108
| 2013-06-12T14:44:31
| 2013-06-12T14:44:31
| 10,216,956
| 2
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 978
|
rd
|
kmerFractions.Rd
|
\name{kmerFractions}
\alias{kmerFractions}
\title{Over Represented kmers(5)}
\description{
Sometimes short read data can contain biased or artefactual repetitive DNA sequence - sometimes also called low complexity. You can spot such repetitive artefacts by screening for over-represented kmers (k=5).
}
\details{
The expected fraction of a kmer of 5 bases can be estimated from the overall base (A/T/G/C) composition. Ignoring N there are 1024 possible kmers. The fastqc function calculates the actual and expected base fraction for each. Then for this function there is a second argument \code{frac}. If the ratio of actual kmers to expected kmers > \code{frac} then it is returned in the results table. The results table is sorted lexicographically i.e. AAAAA to TTTTT.
\preformatted{%
}
}
\author{Stephen Henderson
s.henderson@ucl.ac.uk}
\examples{
\dontrun{
test2=fastqc("test2.fastq")
kmerFractions(test2)
}
}
\keyword{fastq}
\keyword{genomics}
|
87ac6dcf7c4dd474ce3057ba9118772c19b41c49
|
9079a7b85bc2f35002d0b7b9b526c41cd184cf28
|
/dv_finalproject_rabinbhattarai/01 Data/map.R
|
1ca9ab10dfb596a4e6ddf9a37c46782cd6ae69d0
|
[] |
no_license
|
JN9765/JPN
|
5e4f26984fbc0345902e6b5c90ca0de9d66512e3
|
f054ef563733619d43f640e0a71e6c8ad52a9c3c
|
refs/heads/master
| 2021-04-29T09:51:18.771368
| 2016-12-30T03:07:59
| 2016-12-30T03:07:59
| 77,654,892
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 855
|
r
|
map.R
|
# Build a leaflet map with markers at nine Austin, TX area coordinates.
marker_lng <- c(-97.7243, -97.7037, -97.6949, -97.733, -97.6958,
                -97.7585, -97.6949, -97.662, -97.6943)
marker_lat <- c(30.2257, 30.2764, 30.3583, 30.2382, 30.3588,
                30.2325, 30.3583, 30.2851, 30.3282)
m <- leaflet()
m <- addTiles(m)
# Same two popup strings as before; leaflet recycles them across the markers.
m <- addMarkers(m, lng = marker_lng, lat = marker_lat,
                popup = c("The birthplace of R", "another coordinate"))
m
|
222c6bd68854704c629c9f985ab0dd253e37e785
|
f61064bb7d0013f111123206b230482514141d9e
|
/man/sis_csmc_tp.Rd
|
1ef5d5cde93b2201fc20adee1b98dd54fdf8a6ad
|
[] |
no_license
|
nianqiaoju/agents
|
6e6cd331d36f0603b9442994e08797effae43fcc
|
bcdab14b85122a7a0d63838bf38f77666ce882d1
|
refs/heads/main
| 2023-08-17T05:10:49.800553
| 2021-02-18T23:01:47
| 2021-02-18T23:01:47
| 332,890,396
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,787
|
rd
|
sis_csmc_tp.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sis_csmc_tp.R
\name{sis_csmc_tp}
\alias{sis_csmc_tp}
\title{controlled SMC sampler for SIS model with population-level observations}
\usage{
sis_csmc_tp(y, model_config, particle_config)
}
\arguments{
\item{y}{a vector of length (T+1)}
\item{model_config}{a list containing:
\itemize{
\item 'N': size of the population
\item 'alpha0' : initial infection probability
\item 'lambda': infection rate
\item 'gamma': recovery rate
\item 'adjacency_matrix_b': network structure
\item 'rho': reporting rate
\item 'policy': a policy resulting from approximate dynamic programming
}}
\item{particle_config}{a list containing:
\itemize{
\item 'num_particles' : number of particles for the bootstrap particle filter. MUST HAVE!
\item 'ess_threshold' : if effective sample size drops below the threshold, then perform a resample step. ess_threshold = 1 means resampling at every step. MUST HAVE!
\item 'save_particles': binary
\item 'clock' : binary, default to FALSE. If clock = TRUE, then we will use a stopwatch to document its Sys.time()
\item 'save_genealogy': binary
\item 'verbose':
\item 'exact': binary, if FALSE, use translated Poisson and MCMC. MUST HAVE!
}}
}
\value{
A list containing
\itemize{
\item 'log_final_likelihood' :
\item 'particles': [N, (T+1), num_particles] array storing the particles:
\item 'eves' : the unique ancesters of each particle in the genealogy structure, if save_genealogy = TRUE;
\item 'ess' : effective sample size at each step
\item 'runtime' : elapse time at each step, if clock = TRUE;
\item 'totaltime' : elapse time of running the whole particle filter, if clock = TRUE;
}
}
\description{
controlled SMC sampler for SIS model with population-level observations
}
|
ebb736e6c8c99080f023aa2d8c5f1a034a70c89b
|
7755d1332586784e58b9c67e2029bad94b93fd00
|
/clhs1.R
|
f4ef87a72dbcf0b2905cb55493b95b0978f531f5
|
[] |
no_license
|
brendo1001/clhs_addition
|
9c45d9217075448e80705629ec6ad88d1fa2bd99
|
5f1b638536ddd3a81be03a87e14bb663c61f7f88
|
refs/heads/master
| 2020-05-01T05:14:20.744262
| 2019-03-24T10:59:49
| 2019-03-24T10:59:49
| 177,296,490
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,412
|
r
|
clhs1.R
|
# Conditioned Latin Hypercube sampling
# Checking how the clhs R function handles existing sample data
# What does 'include' do
# Checking outputs with coobs map to detemine whether new samples go to areas of low environmental coverage
# created: 15.3.2019
setwd("Z:/Dropbox/2019/rmuddles/clhs_addtion")
# data frame of covariate data
tempD<- readRDS("tempD.rds")
# Libraries
library(raster);library(sp); library(rgdal); library(clhs)
#rasterise covariate data
s1<- stack()
for (i in 4:ncol(tempD)){
r1<- rasterFromXYZ(tempD[,c(1,2,i)])
names(r1)<- names(tempD[i])
s1<- stack(s1,r1)
}
s1
#tabulate (as i want the cell number)
tempD <- data.frame(cellNos = seq(1:ncell(s1)))
vals <- as.data.frame(getValues(s1))
tempD<- cbind(tempD, vals)
tempD <- tempD[complete.cases(tempD), ]
cellNos <- c(tempD$cellNos)
gXY <- data.frame(xyFromCell(s1, cellNos, spatial = FALSE))
tempD<- cbind(gXY, tempD)
str(tempD)
#rasterise again
s1<- stack()
for (i in 3:ncol(tempD)){
r1<- rasterFromXYZ(tempD[,c(1,2,i)])
names(r1)<- names(tempD[i])
s1<- stack(s1,r1)
}
s1
#Point data (the exisitng sample point data)
dat<- read.table("HunterValley_SiteObsAll.txt", header = T,sep = ",") # existing soil point data
#extract covariate data at points
coordinates(dat)<- ~ X +Y
DSM_data<- raster::extract(s1,dat, sp= 1, method = "simple") #extract
dat<- as.data.frame(DSM_data)
dat<- dat[complete.cases(dat),]
str(dat)
#remove the grid points where there is point data
tempD<- tempD[-which(tempD$cellNos %in% dat$cellNos),]
str(tempD)
## combine grid data with the observed data
str(dat)
dat.sub<- dat[,c(2,3,6:13)]
names(dat.sub)[1:2]<- c("x", "y")
tempD.new<- rbind(dat.sub, tempD)
tempD.new$type<- NA
tempD.new$type[1:nrow(dat.sub)]<- "orig"
tempD.new$type[(nrow(dat.sub)+1):nrow(tempD.new)]<- "possibles"
## clhs sampling with fixed obs and add an extra 100 sites
# note usage of the include parameter
names(tempD.new)
res <- clhs(tempD.new[,c(4:10)], size = nrow(dat.sub) + 100,
iter = 10000, progress = TRUE, simple = TRUE,
include = c(1:nrow(dat.sub)))
res
saveRDS(res, file = "clhs_res.rds")
# get the selected data
dat.sel<- tempD.new[res,]
## coobs raster surface
r1<- raster("sampleNos.tif")
plot(r1)
# extract coobs dat
coordinates(dat.sel)<- ~ x + y
DSM_data2<- extract(r1,dat.sel, sp= 1, method = "simple")
# Doing some summary statistics between the raster grid values and the sample sites for both original and addtional data
# original sample sites
dat1<- DSM_data2[101:432, ]
sum(dat1$sampleNos >= 0 & dat1$sampleNos <= 5) / nrow(dat1) # very low coobs
sum(dat1$sampleNos > 5 & dat1$sampleNos <= 10) / nrow(dat1) # low coobs
sum(dat1$sampleNos > 10 & dat1$sampleNos <= 20) / nrow(dat1) # moderate coobs
sum(dat1$sampleNos > 20 & dat1$sampleNos <= 40) / nrow(dat1) # high coobs
sum(dat1$sampleNos > 40) / nrow(dat1) # quite high coobs
# additional data as selected by clhs
dat2<- DSM_data2[1:100, ]
sum(dat2$sampleNos >= 0 & dat2$sampleNos <= 5) / nrow(dat2)
sum(dat2$sampleNos > 5 & dat2$sampleNos <= 10) / nrow(dat2)
sum(dat2$sampleNos > 10 & dat2$sampleNos <= 20) / nrow(dat2)
sum(dat2$sampleNos > 20 & dat2$sampleNos <= 40) / nrow(dat2)
sum(dat2$sampleNos > 40) / nrow(dat2)
save.image("clhs_samp.RData") #save R session
|
4aa0c086fd2655473766aa71df91f21aede220ed
|
6e5b5fb1a944818e7cd9171037ee507247c25ba0
|
/pulse3d.R
|
6734163a140fd8e7087f618d83bb3f6f7d127e64
|
[] |
no_license
|
adrienne-marshall/solute_transport
|
7b7f811c73a69281daedb0549f6a217d24246cf2
|
f812edcc97782c12210e2313d54589a158d6e758
|
refs/heads/master
| 2021-08-10T16:08:14.104577
| 2017-11-12T19:33:48
| 2017-11-12T19:33:48
| 110,461,495
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,799
|
r
|
pulse3d.R
|
#Function to model concentration of a solute at L and time t based on a pulse input.
#Should maybe output a vector of concentrations at different times.
pulse3d <- function(pulse_conc,
pulse_time,
max_val = 0.1,
time_limits = c(0, 100),
L_limits = c(0, 100),
v = 2, R = 1, D = 0.5,
value_limit = TRUE){
#L is 30 cm, v is 2 cm/day, R is 1 (dimensionless).
#For testing:
# pulse_conc = 0.5; D = 0.5; pulse_time = 2
require(tidyverse)
require(viridis)
require(ggthemes)
time_range <- seq(time_limits[1], time_limits[2], length.out = 100)
L_range <- seq(L_limits[1], L_limits[2], length.out = 100)
output <- expand.grid(time_range, L_range)
names(output) <- c("time", "length")
#Jury et al's solution to solute pulse transport (eq 196):
#Should boundaries be adjusted?
A <- pulse_conc*pulse_time #think about units: (g/g)*sec?
output <- output %>%
mutate(p1 = A*length*sqrt(R)/(2*sqrt(pi*D*(time^3)))) %>%
mutate(p2 = exp(-((R*length - v*time)^2)/(4*R*D*time))) %>%
mutate(c_l_t = p1*p2) %>%
dplyr::select(-p1, -p2)
output <- output %>%
mutate(exceeded_max = ifelse(c_l_t > max_val, 1, 0))
p <- ggplot(output, aes(x = length, y = time, fill = c_l_t)) +
geom_raster() +
scale_fill_viridis() +
theme_few() +
scale_x_continuous(expand = c(0,0)) +
scale_y_continuous(expand = c(0,0)) +
labs(x = "Distance (cm)", y = "Time (sec)", fill = "Solute \nconcentration") +
theme(panel.border = element_blank())
if(value_limit == TRUE){
p <- p + geom_contour(aes(x = length, y= time, z = exceeded_max),
color = "white", size = 0.5)
}
return(p)
}
|
09f839a1ac73ebcc845bcb94971d6e3bfe2d03f7
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/nullabor/examples/reg_dist.Rd.R
|
1af1f064074f715cc14b4bbc612fcbcf10e90715
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 210
|
r
|
reg_dist.Rd.R
|
library(nullabor)
### Name: reg_dist
### Title: Distance based on the regression parameters
### Aliases: reg_dist
### ** Examples
with(mtcars, reg_dist(data.frame(wt, mpg), data.frame(sample(wt), mpg)))
|
73bb3ddde3838685d5fe61d1a2f34436395a4875
|
033f1b856609297a46fc60b57ea69b5ec2cad818
|
/man/bestbeforemaxdd.Rd
|
a474e345eb134b76489a8aaf489d858d1fd0c4ba
|
[
"MIT"
] |
permissive
|
pluspku/bestbeforemaxdd
|
d57f8eb7e8c21f2d51be10b28ba4d03ffbd2d5bb
|
5901cd6c18896619f8afd67c9740e3312e248158
|
refs/heads/master
| 2021-01-10T04:37:19.008390
| 2015-12-11T22:16:12
| 2015-12-11T22:16:12
| 47,852,937
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 559
|
rd
|
bestbeforemaxdd.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bestbeforemaxdd.R
\name{bestbeforemaxdd}
\alias{bestbeforemaxdd}
\title{Function to calculate the difference before meet the first max drawdown}
\usage{
bestbeforemaxdd(x, threshold, long = TRUE)
}
\arguments{
\item{x}{data series, should be a vector}
\item{threshold}{the threshold to determine max drawdown}
\item{long}{is long or short?}
}
\description{
Function to calculate the difference before meet the first max drawdown
}
\examples{
bestbeforemaxdd()
}
\keyword{maxdd}
|
3856057e7092366c596a82a0a382a2475168d4fe
|
b9c73533135d8a3350cff8c38e3604b02683ea40
|
/ECT2_hyperTRIBE_model_data.R
|
6234a6bc5cbd04ddf804276f7f8b74dcd4a02cf9
|
[] |
no_license
|
sarah-ku/targets_arabidopsis
|
415f4d08e6d308826a03133293362be9e1c5b6a5
|
ad524fd57073b569320998bb79ecc66d433b37c7
|
refs/heads/master
| 2023-08-15T06:07:57.404890
| 2021-10-28T19:03:41
| 2021-10-28T19:03:41
| 312,227,154
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,612
|
r
|
ECT2_hyperTRIBE_model_data.R
|
setwd("/binf-isilon/alab/projects/ECT2_TC/hyperTRIBE")
library(RNAeditR)
################################### for modelling
dat_shoots <- read.table("./pipeline/output/baseCounts_shoots_hyperTRIBE.txt",header=F)
dat_roots <- read.table("./pipeline/output/baseCounts_roots_hyperTRIBE.txt",header=F)
dim(dat_shoots)
dim(dat_roots)
locsGR_shoots <- GRanges(Rle(dat_shoots$V1),IRanges(dat_shoots$V2,width=1),ref=dat_shoots$V3,names=paste(dat_shoots$V1,dat_shoots$V2,sep="_"))
locsGR_roots <- GRanges(Rle(dat_roots$V1),IRanges(dat_roots$V2,width=1),ref=dat_roots$V3,names=paste(dat_roots$V1,dat_roots$V2,sep="_"))
samp.names_roots <- c("E2T_Rc11","E2T_Rc12","E2T_Rc13","E2T_Rc14","E2T_Rc15",
"E2T_Re6","E2T_Re7","E2T_Re8","E2T_Re9","E2T_Re10",
"E2T_Rt1", "E2T_Rt2", "E2T_Rt3" ,"E2T_Rt4" ,"E2T_Rt5")
samp.names_shoots <- c("E2T_Sc11","E2T_Sc12","E2T_Sc13","E2T_Sc14","E2T_Sc15",
"E2T_Se6", "E2T_Se7", "E2T_Se8" , "E2T_Se9", "E2T_Se10",
"E2T_St1", "E2T_St2", "E2T_St3" ,"E2T_St4" ,"E2T_Se5")
data_list_roots <- extractCountData(dat_roots,samp.names_roots,strand=F)
data_list_shoots <- extractCountData(dat_shoots,samp.names_shoots,strand=F)
#check data is populated for all samples
lapply(data_list_roots,nrow)
lapply(data_list_shoots,nrow)
#for the roots we remove Re8 since it's got very low coverage (indicating a problem with the sample)
data_list_roots <- data_list_roots[-which(names(data_list_roots)=="E2T_Re8")]
#now produce one design vector per experiment (total of 4 for the roots and the shoots, single and triple mutants combinations)
design_vector_roots_single <- c(E2T_Rc11 = "control", E2T_Rc12 = "control", E2T_Rc13 = "control",
E2T_Rc14 = "control", E2T_Rc15 = "control", E2T_Re6 = "treat",
E2T_Re7 = "treat", E2T_Re9 = "treat", E2T_Re10 = "treat")
table(design_vector_roots_single)
design_vector_roots_triple <- c(E2T_Rc11 = "control", E2T_Rc12 = "control", E2T_Rc13 = "control",
E2T_Rc14 = "control", E2T_Rc15 = "control", E2T_Rt1 = "treat",
E2T_Rt2 = "treat", E2T_Rt3 = "treat", E2T_Rt4 = "treat", E2T_Rt5 = "treat")
table(design_vector_roots_triple)
design_vector_shoots_single <- c(E2T_Sc11 = "control", E2T_Sc12 = "control", E2T_Sc13 = "control",
E2T_Sc14 = "control", E2T_Sc15 = "control", E2T_Se6 = "treat",
E2T_Se7 = "treat", E2T_Se8 = "treat", E2T_Se9 = "treat",E2T_Se10 = "treat")
table(design_vector_shoots_single)
design_vector_shoots_triple <- c(E2T_Sc11 = "control", E2T_Sc12 = "control", E2T_Sc13 = "control",
E2T_Sc14 = "control", E2T_Sc15 = "control", E2T_Se5 = "treat", E2T_St1 = "treat", E2T_St2 = "treat",
E2T_St4 = "treat", E2T_St3 = "treat")
table(design_vector_shoots_triple)
design_vector_roots_single_triple <- c(E2T_Re6 = "control", E2T_Re7 = "control", E2T_Re9 = "control", E2T_Re10 = "control",
E2T_Rt1 = "treat", E2T_Rt2 = "treat", E2T_Rt3 = "treat", E2T_Rt4 = "treat", E2T_Rt5 = "treat")
table(design_vector_roots_single_triple)
design_vector_shoots_single_triple <- c( E2T_Se6 = "control", E2T_Se7 = "control", E2T_Se8 = "control", E2T_Se9 = "control",E2T_Se10 = "control",
E2T_Se5 = "treat",E2T_St1 = "treat", E2T_St2 = "treat", E2T_St4 = "treat", E2T_St3 = "treat")
table(design_vector_shoots_single_triple)
|
bfaa5c6d99ca08d366bff94c057f88923a64cbd0
|
dd256026b874e4d4109fda14b1f1e906ada8468c
|
/man/agg_g2.Rd
|
379e9069465ca49fc1e01ae55694bbf3236ce38f
|
[] |
no_license
|
cran/MAd
|
8a90077b46752bf4b8995c231ace3f86d62f75d2
|
2ac236b36e9e0826240e936cf158326773c78e2d
|
refs/heads/master
| 2022-09-09T16:17:36.767905
| 2022-08-06T21:40:02
| 2022-08-06T21:40:02
| 17,680,460
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 181
|
rd
|
agg_g2.Rd
|
\name{agg_g2}
\alias{agg_g2}
\title{Internal anRpackage objects}
\description{Internal anRpackage objects.}
\details{These are not to be called by the user.}
\keyword{internal}
|
ea9c1f4f16259c9019768224dce6959979eef25a
|
d884a45b9b3055b668c83b5acc2936be9107769e
|
/R/ebpm_gamma_mixture2.R
|
f6531c2c6d7eab985ae4daf6b606ede01f4e52bb
|
[] |
no_license
|
stephenslab/ebpm
|
c9526a3a1a8ea4ce2ecd74cf3fef8bf6d84abd54
|
aa3a957698c153c008ea42c6c99e64e0f5682aec
|
refs/heads/master
| 2021-07-14T20:18:42.225297
| 2021-07-08T19:45:28
| 2021-07-08T19:45:28
| 210,671,798
| 1
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,074
|
r
|
ebpm_gamma_mixture2.R
|
#' @title Empirical Bayes Poisson Mean with Mixture of Gamma as Prior (still in development)
#' @description Uses Empirical Bayes to fit the model \deqn{x_j | \lambda_j ~ Poi(s_j \lambda_j)} with \deqn{lambda_j ~ g()}
#' with Mixture of Gamma: \deqn{g() = sum_k pi_k gamma(shape = a_k, rate = b_k)}
#' @import mixsqp
#' @details The model is fit in 2 stages: i) estimate \eqn{g} by maximum likelihood (over pi_k)
#' ii) Compute posterior distributions for \eqn{\lambda_j} given \eqn{x_j,\hat{g}}.
#' @param x A vector of Poisson observations.
#' @param s A vector of scaling factors for Poisson observations: the model is \eqn{y[j]~Pois(s[j]*lambda[j])}.
#' @param shape A vector specifying the shapes used in gamma mixtures
#' @param scale A vector specifying the scales used in gamma mixtures
#' @param g_init The prior distribution \eqn{g}, of the class \code{gammamix}. Usually this is left
#' unspecified (\code{NULL}) and estimated from the data. However, it can be
#' used in conjuction with \code{fix_g = TRUE} to fix the prior (useful, for
#' example, to do computations with the "true" \eqn{g} in simulations). If
#' \code{g_init} is specified but \code{fix_g = FALSE}, \code{g_init}
#' specifies the initial value of \eqn{g} used during optimization.
#'
#' @param fix_g If \code{TRUE}, fix the prior \eqn{g} at \code{g_init} instead
#' of estimating it.
#'
#' @param m multiple coefficient when selectig grid, so the b_k is of the form {low*m^{k-1}}; must be greater than 1; default is 2
#' @param control A list of control parameters to be passed to the optimization function. `mixsqp` is used.
#'
#' @return A list containing elements:
#' \describe{
#' \item{\code{posterior}}{A data frame of summary results (posterior
#' means, and posterior log mean).}
#' \item{\code{fitted_g}}{The fitted prior \eqn{\hat{g}}, of class \code{gammamix}}
#' \item{\code{log_likelihood}}{The optimal log likelihood attained
#' \eqn{L(\hat{g})}.}
#' }
#' @examples
#' beta = c(rep(0,50),rexp(50))
#' x = rpois(100,beta) # simulate Poisson observations
#' s = replicate(100,1)
#' m = 2
#' out = ebpm::ebpm_gamma_mixture(x,s)
#'
#' @export
## compute ebpm_gamma_mixture problem
ebpm_gamma_mixture2 <- function(x,s, grid = NULL, g_init = NULL, fix_g = FALSE, control_select_grid = NULL, control = NULL){
## a quick fix when all `x` are 0
if(max(x) == 0){
stop("all x are 0") ## TODO replace with sth like gamma(0, ..)
}
if(length(s) == 1){s = replicate(length(x),s)}
if(is.null(control)){control = mixsqp_control_defaults()}
if(is.null(control_select_grid)){
## TODO
}
if(is.null(g_init)){
fix_g = FALSE ## then automatically unfix g if specified so
if(is.null(grid)){
params_ = c(list(x = x, s = s), control_select_grid)
grid = do.call(select_grid2, params_)
}
g_init = grid2gammamix(grid, pi = NULL)
}
grid = gammamix2grid(g_init) ## make sure g_init and grid are consistent
tmp <- compute_L_from_grid(x,s,grid)
L = tmp$L
l_rowmax = tmp$l_rowmax
## compute weight pi
if(!fix_g){ ## need to estimate g_hat
fit <- mixsqp(L, x0 = g_init$pi, control = control)
w = fit$x
w = w/sum(w) ## seems that some times pi does not sum to one
}
else{w = g_init$pi}
fitted_g = grid2gammamix(grid, w)
log_likelihood = sum(log(exp(l_rowmax) * L %*% w))
posterior = compute.posterior.gammamix(x,s,fitted_g, L)
return(list(fitted_g = fitted_g, posterior = posterior,log_likelihood = log_likelihood))
}
select_grid2 <- function(x, s, mus = NULL , vars = NULL, k = 2){
if(is.null(mus)){
if(is.null(k)){ stop("need to provide k in select_grid2") }
mus = as.vector(kmeans(x, centers = k, nstart = 100, iter.max = 100)$centers)
}
if(is.null(vars)){vars = 10^seq(-5,5,1)}
grid = construct_grid(mus, vars)
return(grid)
}
construct_grid <- function(mus, vars){
M = length(mus)
D = length(vars)
a = c()
b = c()
for(m in 1:M){
for(d in 1:D){
b_ = mus[m]/vars[d]
a = c(a, b_ * mus[m])
b = c(b, b_)
}
}
return(list(a = a, b = b))
}
compute_L_from_grid <- function(x,s,grid){
## TODO: need to consider numerical issue later
return( compute_L(x = x,s = s,a = grid$a, b = grid$b) )
}
grid2gammamix <- function(grid, pi = NULL){
n = length(grid$a)
if(is.null(pi)){pi = replicate(n, 1/n)}
return( gammamix(pi = pi, shape = grid$a, scale = 1/grid$b) )
}
gammamix2grid <- function(g){
return(list(a = g$shape, b = 1/g$scale))
}
#' @export compute.posterior.gammamix
compute.posterior.gammamix <- function(x,s,g, L){
a = g$shape
b = 1/g$scale
if(is.null(L)){L = compute_L(x = x,s = s,a = g$shape, b = 1/g$scale)$L}
cpm = outer(x,a, "+")/outer(s, b, "+")
Pi_tilde = t(t(L) * g$pi)
Pi_tilde = Pi_tilde/rowSums(Pi_tilde)
lam_pm = rowSums(Pi_tilde * cpm)
c_log_pm = digamma(outer(x,a, "+")) - log(outer(s, b, "+"))
lam_log_pm = rowSums(Pi_tilde * c_log_pm)
posterior = data.frame(mean = lam_pm, mean_log = lam_log_pm)
return(posterior)
}
|
c22ab48b75f3ffec650e10d655f9f2481f220b69
|
e131207354f2565b45b6c70d438cea8d5c26bce5
|
/cachematrix.R
|
d8a9ee0a5f07852b9ed6426e96978732885be96f
|
[] |
no_license
|
danoot/ProgrammingAssignment2
|
54bbbe35d903231b9ddc3f830b4707590dfb9a57
|
aeb194d382e6346a1fb684ee6c0c2a2cfadbc0b1
|
refs/heads/master
| 2020-12-14T08:50:01.990012
| 2014-10-27T00:45:20
| 2014-10-27T00:45:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,447
|
r
|
cachematrix.R
|
## Put comments here that give an overall description of what your
## functions do
## This function will take a matrix, and return a list, which will contain:
## A function to set the matrix value
## A function to get the matrix values
## A function to set the inverse of the matrix
## A function to get the inverse of the matrix
makeCacheMatrix <- function(x = matrix()) {
inverse <- NULL
set <- function(y) {
x <<- y #this should set x, in the outer context, to y.
inverse <<- NULL #set inverse to NULL because we might not need to bother.
}
get <- function() x #returns x (original matrix)
setInverse <- function(aMatrix) inverse <<- aMatrix #push aMatrix into inverse
getInverse <- function() inverse # returns inverse of x, calculated on set()
list(set = set, get = get, getInverse = getInverse, setInverse = setInverse) #return the list of functions
}
## The function below takes the output of makeCacheMatrix, and then:
## Checks if there is an inverse already
## if there is, returns it
## else, calculates it, then sets it, then returns it.
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
inverse <- x$getInverse()
if(!is.null(inverse)){
message("getting cached inverse")
return(inverse)
}
matrix <- x$get() #load the matrix in
inverse <- solve(matrix) #invert it
x$setInverse(inverse) #save it for future use
inverse #return it
}
|
955f4805e4e1bbcef0000a6c95a0b3bb56d1e217
|
b9e15be28c0915e70a4798a0acb34ebf9bb71b8d
|
/5.1.4.R
|
1351ff74b92350522f5d2105e90b9f4068fd1a35
|
[] |
no_license
|
ItsRRM97/Suicides-in-India
|
8c9e1f006e4eeee5170d507c41287bb0d7558140
|
cafde4ea3613d0b7d428ff7f79ea8e284783bba7
|
refs/heads/master
| 2021-09-10T17:36:47.887438
| 2018-03-30T06:45:39
| 2018-03-30T06:45:39
| 103,838,259
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,106
|
r
|
5.1.4.R
|
# age wise social of suicide
ageSocial <- function(Dataset) {
social <- vector() # Null Vector
Dataset$Type_code <- as.character(Dataset$Type_code)
Dataset$Type <- as.character(Dataset$Type)
for(i in 1:236583) {
if(Dataset$Type_code[i] == 'Social_Status') {
social<-c(social,Dataset$Type[i])
}
}
social <- unique(social)
social <- sort(social)
age <- c(sort(unique(as.character((Dataset$Age_group)))))
mat <- matrix(0, nrow = length(age), ncol = length(social))
dimnames(mat) <- list(age,social)
#making a legend for readability
#legend <- data.frame(list(1:length(social)),social)
for(i in 1:236583) {
if(Dataset$Total[i] != 0 && Dataset$Type_code[i] == 'Social_Status') {
for(j in 1:length(age)) {
if(Dataset$Age_group[i] == age[j]) {
for(k in 1:length(social)) {
if(Dataset$Type[i] == social[k]) {
mat[j,k] = mat[j,k] + Dataset$Total[i]
}
}
}
}
}
}
write.table(mat,"output/5.1.4.csv", row.names = TRUE, col.name = TRUE, sep = ",")
#print(legend)
}
|
14b3b1032f0ab34ad1b08bf3f5c11875295b3209
|
7d61a07208b1425ba7e54a8ad6143cfa4f827d35
|
/Week 7/ggplot2 facets and Panel Layout Designs/Post Confident Class Maps/NP_Confident Class Comparison map.r
|
2c2ec5a5712f4d64f5c5735681e961dc65704462
|
[] |
no_license
|
ceharvs/Statistical-Graphics
|
27557777f4c55cdf20142ac6c93879f322239875
|
ff6812147bbda0ee7137700a4b935de7b1a724ed
|
refs/heads/master
| 2016-09-06T13:28:26.241644
| 2014-12-10T04:56:14
| 2014-12-10T04:56:14
| 23,636,565
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,956
|
r
|
NP_Confident Class Comparison map.r
|
File NP_Confident Class Comparison Map.r
By Daniel B. Carr
Copyright 2010, 2011, 2012, 2013
Due The pdf file.
Prototype script
Labels on left rather the right
Color variants: gray states
white borders
Reference variant: compared to VA
Uses NP_NAEP_MergedDat.csv or
VA_NAEP_MergedDat.csv
stateVBorders.csv
nationVBorders.csv
This will work for Virginia if you
change the prefix below and
hand edit the top title in Section 5
to say Virginia instead of National
Public. (With a little more time
this could be automated)
If other states are of interest
you can get the data. 2011
data is available.
## Run from here to end of file
# 1.read the data, select variables, set output
prefix <- "NP_" # "VA_"
fin <- paste(prefix,"NAEP_MergedDat.csv",sep='')
tmpDat <- read.csv(fin, row.names=1)
colnames(tmpDat)
pdfFile <- paste(prefix,
"Confident Comparison Class Map.pdf",sep='')
# pdfFile <- NULL
mapDat <- tmpDat[, c(1, 3, 7)]
refRegion = "NP"
if(prefix!="NP_"){
stId <- substring(prefix,1,2)
refRegion <- stId
i <- which(row.names(mapDat)==stId)
mapDat[i,] <- c(2,2,2)
}
# 2. read Region boundary files set map xy limits
stateVBorders <- read.csv('stateVBorders.csv',
row.names=NULL, header=T, as.is=TRUE)
statePolyId <- stateVBorders$st[is.na(stateVBorders$x)]
nationVBorders <- read.csv('nationVBorders.csv',
blank.lines.skip=F, row.names=NULL, header=T)
names(stateVBorders) # st= state ids, x, y polygon coordinates
names(nationVBorders) #
# note na.rm removes missing values
rx <- range(stateVBorders$x, na.rm=T)
ry <- range(stateVBorders$y, na.rm=T)
rx <- mean(rx)+ diff(rx)*1.06*c(-.5, .5)
ry <- mean(ry)+ diff(ry)*1.06*c(-.5, .5)
# 3. Define panel Layout_____________________________________
width=9.7
height=7.1
if(!is.null(pdfFile)){
pdf(width=width, height=height, file=pdfFile)
} else {
windows(width=width, height=height)
}
topMar= 1
bottomMar <- .6
leftMar <- .7
rightMar <- 0
borders=rep(.2, 4)
nc <- 3
nr <- 3
panels <- panelLayout(nrow=nr, ncol=nc,
borders=borders,
topMar=topMar, bottomMar=bottomMar,
leftMar=leftMar, rightMar=rightMar)
# 4. Draw Maps
stateName <- row.names(mapDat)
myGray <- rgb(.5, .5, .5)
myColor <- c("#AF5FFF", "#D0D0D0", "#40D040")
for(i in 1:nr){
for(j in 1:nc){
panelSelect(panels, i, j)
panelScale(rx, ry)
panelFill(col="#D0FFFF") # panel fill
panelOutline(col="gray")
polygon(stateVBorders$x, stateVBorders$y, col="white",
border="gray", lwd=1) # fill and outline
panCol <- myColor[mapDat[, 3]]
fore <- 4-i==mapDat[, 2] & j==mapDat[, 1]
if(any(fore)){
foreNam <- stateName[fore]
goodBnd <- !is.na(match(stateVBorders$st, foreNam))
goodPolyId <- !is.na(match(statePolyId, foreNam))
subs <- match(statePolyId[goodPolyId], stateName)
polyCol <- myColor[mapDat[subs, 3]]
# plot states in gray with white outlines
polygon(stateVBorders$x[goodBnd], stateVBorders$y[goodBnd],
col=polyCol, border="black", lwd=1) # fill and outline
}
}
}
# highlight reference state if any
goodBnd = stateVBorders$st==refRegion
if(any(goodBnd){
panelSelect(panels,2,2)
panelScale(rx,ry)
polygon(stateVBorders$x[goodBnd],
stateVBorders$y[goodBnd],
col=polyCol,border="black",lwd=2) # fill and outline
}
5. Labeling
# top___________________________________________________
panelSelect(panels, mar="top")
panelScale(inches=TRUE)
# Coordinate experiments (could get from panel Layout)
xl <- 2.88 + leftMar
xr <- 5.73 + leftMar
xm <- mean(c(xl, xr))
topCenX <- c(xl, xm, xr)
#points(topCenX, rep(.08, 3), pch=21, bg='red', cex=1.25)
text(xm, .59,
"Reading Average Scores: Grade 8, 2009", cex=1.12)
text(xm, .9,
"States Compared to National Public", adj=.5, cex=1.2)
sep <- .67
topRectX <- c(xl+sep, xm, xr-sep)
text(topRectX, rep(.38, 3),
c("Below", "Similar To", "Above"), adj=.5, cex=1.12)
wid <- c(.27, .40, .27)
rect(topRectX-wid, rep(.08, 3),
topRectX+wid, rep(.27, 3), col=myColor)
# bot__________________________________________________
panelSelect(panels, mar="bottom")
panelScale(inches=TRUE)
botX <- apply(panels$coltabs[2:4, ], 1, mean)
text(botX, rep(.48, 3),
c("Below", "Similar To", "Above"), adj=.5, cex=1.12)
text(botX[2], .18,
"Mathematics Average Scores: Grade 4, 2009", adj=.5,
cex=1.12)
# left______________________________________________
panelSelect(panels, mar="left")
panelScale(inches=TRUE)
leftY <- rev(apply(panels$rowtabs[2:4, ], 1, mean))
text(rep(.53, 3), leftY, srt=90, c("Below", "Similar To", "Above"),
adj=.5, cex=1.12)
text(rep(.23, 3), leftY[2], srt=90,
"Mathematics Average Scores: Grade 8, 2009", adj=.5, cex=1.12)
if(!is.null(pdfFile))dev.off()
|
338234315825c1d8575bbb56d0b82669abb9f56b
|
690c3c3e583094011d339d20a819b0fbe11a2bf8
|
/output_analysis.R
|
b50edcf2b8eb86902be8e9d49683a35e4ee9ead5
|
[] |
no_license
|
AllisonVincent/StarFM-code
|
a0f907e2931460b7867600bd1566cb39a600338b
|
eac755b6ef61af5d1925b3b65d02269c846e79e1
|
refs/heads/master
| 2021-06-17T15:02:43.013841
| 2021-04-20T17:19:42
| 2021-04-20T17:19:42
| 194,706,294
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,864
|
r
|
output_analysis.R
|
### This script is for viewing and acquiring basic information about individual layers of Landsat, MODIS, and STARFM data
library(raster)
landsat<- brick('./landsat.tif')
modis<- brick('./mod_.tif')
data<- brick('./starfm_East_fusion.tif') ## starfm data
## To find the fraction of data available, first set pixels with data to 1, then convert all non-data values for each raster layer to NA
## First, do this for STARFM results
for (i in 1:nlayers(data)) {
# Use the raster Which() function for speed:
masked <- Which(data[[i]] > -11111 , cells=TRUE)
data[[i]][masked] <- 1
}
for (i in 1:nlayers(data)) {
# Use the raster Which() function for speed:
masked <- Which(data[[i]] == -11111, cells=TRUE)
data[[i]][masked] <- NA
}
## Compute the data fraction for each layer
## total number of pixels in raster
area<- ncol(data) * nrow(data)
n <- nlayers(data)
data_frac<- rep(NA, n) ## create an empty raster with the required number of layers to fill in via the loop below:
for (i in 1:nlayers(data)) {
layer<- as.vector(data[[i]], mode = 'numeric')
good_data<- sum(layer, na.rm = TRUE) ## find the sum, or the number of pixels with data, for each layer
frac<- good_data/area ## calculate the fraction of pixels with data for each layer
data_frac[[i]]<- frac
}
starfm_df<- data.frame("STARFM" = data_frac) ## convert above results into a data frame to write to a .csv table file
#write.table(starfm_df, "./starfm_data.csv", row.names = FALSE)
################## Repeat the above, but for Landsat
for (i in 1:nlayers(landsat)) {
# Use the raster Which() function for speed:
masked <- Which(landsat[[i]] == 0 , cells=TRUE)
landsat[[i]][masked] <- NA
}
for (i in 1:nlayers(landsat)) {
# Use the raster Which() function for speed:
masked <- Which(landsat[[i]] != 0 , cells=TRUE)
landsat[[i]][masked] <- 1
}
## Compute the data fraction for each landsat layer
area<- ncol(landsat) * nrow(landsat)
n <- nlayers(landsat)
land_frac<- rep(NA, n)
for (i in 1:nlayers(landsat)) {
layer<- as.vector(landsat[[i]], mode = 'numeric')
good_data<- sum(layer, na.rm = TRUE)
frac<- good_data/area
land_frac[[i]]<- frac
}
################## Now do the same for modis data
for (i in 1:nlayers(modis)) {
# Use the raster Which() function for speed:
masked <- Which(modis[[i]] != 0 , cells=TRUE) ## no data values for MODIS area already set to NA
modis[[i]][masked] <- 1
}
## Compute the data fraction for each layer
area<- ncol(modis) * nrow(modis)
n <- nlayers(modis)
modis_frac<- rep(NA, n)
for (i in 1:nlayers(modis)) {
layer<- as.vector(modis[[i]], mode = 'numeric')
good_data<- sum(layer, na.rm = TRUE)
frac<- good_data/area
modis_frac[[i]]<- frac
}
## Put all the above data into a single dataframe
df<- data.frame("Landsat" = land_frac, "Modis" = modis_frac, "STARFM" = data_frac)
|
12df01a2706afe67a50eb7dc59a807826fadd925
|
46fd3e7df135ee7f9c939bf48481beafb3f08abf
|
/inst/apps/brapi/mw_studytypes.R
|
2a0026d35eb2f2c8d80ac2bd0dc94fd7c487ef61
|
[] |
no_license
|
CIP-RIU/brapiTS
|
40f4106727e4bde4baddc13c34c1ae99c2b2bfdd
|
e4c7074d9226941f4187426db0f582e65660eed6
|
refs/heads/master
| 2020-07-23T03:16:54.359229
| 2017-09-15T16:56:10
| 2017-09-15T16:56:10
| 78,644,303
| 1
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,003
|
r
|
mw_studytypes.R
|
studyTypes_data = tryCatch({
res <- read.csv(system.file("apps/brapi/data/studyTypes.csv", package = "brapiTS"),
stringsAsFactors = FALSE)
}, error = function(e) {
NULL
}
)
studyTypes_list = function(page = 0, pageSize = 100){
if(is.null(studyTypes_data)) return(NULL)
# paging here after filtering
pg = paging(studyTypes_data, page, pageSize)
studyTypes_data <- studyTypes_data[pg$recStart:pg$recEnd, ]
n = nrow(studyTypes_data)
out = list(n)
for(i in 1:n){
out[[i]] <- as.list(studyTypes_data[i, ])
#out[[i]]$datatypes = list(safe_split(out[[i]]$datatypes, ";"))
#out[[i]]$methods = list(safe_split(out[[i]]$methods, ";"))
}
attr(out, "pagination") = pg$pagination
out
}
studyTypes = list(
metadata = list(
pagination = list(
pageSize = 10,
currentPage = 0,
totalCount = nrow(studyTypes_data),
totalPages = 1
),
status = list(),
datafiles = list()
),
result = list(data = studyTypes_list())
)
process_studyTypes <- function(req, res, err){
prms <- names(req$params)
page = ifelse('page' %in% prms, as.integer(req$params$page), 0)
pageSize = ifelse('pageSize' %in% prms, as.integer(req$params$pageSize), 100)
studyTypes$result$data = studyTypes_list(page, pageSize)
studyTypes$metadata$pagination = attr(studyTypes$result$data, "pagination")
if(is.null(studyTypes$result$data)){
res$set_status(404)
studyTypes$metadata <- brapi_status(100, "No matching results!")
}
res$set_header("Access-Control-Allow-Methods", "GET")
res$json(studyTypes)
}
mw_studytypes <<-
collector() %>%
get("/brapi/v1/studyTypes[/]?", function(req, res, err){
process_studyTypes(req, res, err)
}) %>%
put("/brapi/v1/studyTypes[/]?", function(req, res, err){
res$set_status(405)
}) %>%
post("/brapi/v1/studyTypes[/]?", function(req, res, err){
res$set_status(405)
}) %>%
delete("/brapi/v1/studyTypes[/]?", function(req, res, err){
res$set_status(405)
})
|
3db96bf567f9b16ab42c6a4061f43e0b442cd466
|
1cbdfc9dae2fb81522cfad64ce4bc10f7db63b4a
|
/plot3.R
|
4c6b9f634535fbb243d3923ea2d28f88ef79aa23
|
[] |
no_license
|
cstaats32/ExData_Plotting1
|
00f097de1b0022a6a0b1592fa0651127b611285e
|
493dac165dccf36dbe97dfe180ceae4eddb60c41
|
refs/heads/master
| 2021-01-09T06:42:32.320212
| 2017-02-06T02:30:50
| 2017-02-06T02:30:50
| 81,039,344
| 0
| 0
| null | 2017-02-06T02:21:05
| 2017-02-06T02:21:04
| null |
UTF-8
|
R
| false
| false
| 1,963
|
r
|
plot3.R
|
setwd("~/Cathy/Coursera R Programming/Exploratory Data Analysis")
## read in the dataset
poweruse <- read.table("household_power_consumption.txt", sep=";", header = TRUE,
colClasses = c("character","character",
"numeric","numeric","numeric",
"numeric","numeric","numeric","numeric"),
na.strings=c("?"))
## Install dplyr library
library(dplyr)
## Convert Date to a Date class
datev <- as.Date(poweruse$Date, "%d/%m/%Y")
## Combine time and date characters and convert to a POSIXct variable
datetime <- as.POSIXct(paste(poweruse$Date, poweruse$Time), "%d/%m/%Y %H:%M:%S", tz="GMT")
## Construct a dataframe consisting of Date, timestamp, Sub_metering_1, Sub_metering_2,
## , Sub_metering_3
poweruse2 <- data.frame("Date" = datev,
"timestamp" = datetime, "Sub_metering_1" = poweruse$Sub_metering_1,
"Sub_metering_2" = poweruse$Sub_metering_2,
"Sub_metering_3" = poweruse$Sub_metering_3)
## Filter based on desired dates
poweruse2day <- filter(poweruse2,
Date >= as.Date("2007-02-01") & Date <= as.Date("2007-02-02"))
## Open png device and create file in working directory
png(filename = "plot3.png", width = 480, height = 480)
## Set up plot area
plot(poweruse2day$timestamp, poweruse2day$Sub_metering_1, pch = NA,
xlab="", ylab="Energy sub metering")
## Add line2 to chart for each of the variables
lines(x=poweruse2day$timestamp, y=poweruse2day$Sub_metering_1)
lines(x=poweruse2day$timestamp, y=poweruse2day$Sub_metering_2, col="red")
lines(x=poweruse2day$timestamp, y=poweruse2day$Sub_metering_3, col="blue")
legend("topright", lty = 1, col = c("black","red","blue"), legend = c("Sub_metering_1","Sub_metering_2","Sub_metering_3"))
## Close graphics device
dev.off()
|
083e6678301797ef5858b11afeadb172b8e93389
|
d2945a5842efe71d476535cfdf3b32fb07a378c8
|
/man/get_genes.Rd
|
8a754bf44b24f1f9af588b3234036ef1f59f805d
|
[] |
no_license
|
TheJacksonLaboratory/mousegwas
|
5f41001d57360b15eb1d197b6c356db34cfb7b0d
|
da23e1b918e1fd88252e305e821cc4545c069de7
|
refs/heads/master
| 2021-11-27T00:08:12.226207
| 2021-09-23T12:01:12
| 2021-09-23T12:01:12
| 236,527,353
| 1
| 2
| null | 2021-09-23T11:58:39
| 2020-01-27T15:49:58
|
R
|
UTF-8
|
R
| false
| true
| 552
|
rd
|
get_genes.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_genes.R
\name{get_genes}
\alias{get_genes}
\title{Retrieve genes from biomaRt and return the intersecting genes}
\usage{
get_genes(snps = NULL, dist = 1e+06, attempts = 5, annot = NULL)
}
\arguments{
\item{snps}{A list of SNPs with chr, ps columns. if null returns genes only}
\item{dist}{Distance of genes from SNPs}
\item{attempts}{Maximal number of attempts to try biomaRt}
}
\value{
}
\description{
Retrieve genes from biomaRt and return the intersecting genes
}
|
fd16785050dc4aad72db83c067101cfd753e6402
|
7bd6b1d50f19113cce4a62cfcfa7d7d88d21c93e
|
/Assignment-6/3.R
|
9dfd6907ef4df0e15b6985902c5a8ad9ca15c4f4
|
[] |
no_license
|
PedroINCA/Brasil_2019
|
e5ce80aab69cecc3cfbdbae409887b32463ee683
|
fbe00f6fdb19a8f3deac46349a5295048e50710e
|
refs/heads/master
| 2020-07-07T00:25:39.358351
| 2019-08-23T01:40:23
| 2019-08-23T01:40:23
| 203,184,836
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 610
|
r
|
3.R
|
#Please write a function called containsAnyOfTheseKeys that accepts two arguments: 1) a list variable and 2) a vector variable.
#Your function should return a Boolean (logical) value that indicates whether the list contains any key in the specified vector.
#Hint: one way to do this is with the intersect function.

# Example data: a named list of animals and the vector of keys to look for.
x <- list(dog="puppy",cat="kitten",horse="foal",pig="piglet",bear="cub",alligator="hatchling")
y <- c("dog","cat","horse","pig","bear","alligator")
names(x)
intersect(names(x),y)

# Returns TRUE when any name of `x` appears in `y`, FALSE otherwise.
# Bug fix: the original called any() on the character vector returned by
# intersect(), which raises "invalid 'type' (character) of argument".
# Testing whether the intersection is non-empty gives the intended logical.
containsAnyOfTheseKeys <- function(x, y) {
  length(intersect(names(x), y)) > 0
}
containsAnyOfTheseKeys(x, y)
|
efbc381b4c249bc0886649cada2735d42639ff36
|
5137f6f49055a6d75b96f1b1b0c30b055636e44e
|
/man/run_test_applications.Rd
|
c6d8ec779309c9f72ffe6bae0eefd56696b969a8
|
[] |
no_license
|
cran/rODE
|
d23abb178718e5d79aa6eba3591f398f485ca201
|
42190459c2b840012a6277018668051f2987ef43
|
refs/heads/master
| 2021-01-22T10:28:29.418155
| 2017-11-10T03:17:51
| 2017-11-10T03:17:51
| 92,644,502
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 262
|
rd
|
run_test_applications.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{run_test_applications}
\alias{run_test_applications}
\title{run_test_applications}
\usage{
run_test_applications()
}
\description{
Run tests for all of the examples
}
|
aac12ac005931f1c18d5b82997191a171e0b8203
|
335a31a8cd4afb48fb29867b560b0084b8f31142
|
/server.R
|
6779e15931bb15442986e435b78aecd29d822d1b
|
[] |
no_license
|
sangtani/DataProducts
|
a3eec4c83d5802a837bae2078a5e2d29055a2076
|
f28e0a8f4240e1be9b3b8c149f9ba0207758e049
|
refs/heads/master
| 2021-01-10T03:57:52.707239
| 2015-11-22T16:50:09
| 2015-11-22T16:50:09
| 46,667,447
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 727
|
r
|
server.R
|
# Shiny server that predicts the iris species from four measurements entered
# in the UI, using a CART model fitted once at startup.
library(shiny)
library(caret)
library(rpart)
library(lattice)
library(ggplot2)
library(e1071)
# Split iris 70/30 and fit a decision tree on the training portion.
# Note: no set.seed(), so the partition differs between app restarts.
data(iris)
inTrain <- createDataPartition(y=iris$Species, p=0.7,list=FALSE)
training <- iris[inTrain,]
testing <- iris[-inTrain,]
modFit <- train(Species ~ ., data=training, method="rpart")
shinyServer(
function(input,output)
{
output$PredictionValue <- renderText({
# Collect the four inputs into a one-row data frame with the exact
# column names the model was trained on.
inputData <- data.frame(input$SLength, input$SWidth, input$PLength, input$PWidth)
names(inputData) <- c("Sepal.Length","Sepal.Width","Petal.Length","Petal.Width")
# NOTE(review): assigning output$inputValues inside renderText couples the
# two outputs (the table only updates when the prediction re-renders);
# confirm this coupling is intentional.
output$inputValues <- renderTable(inputData)
pred <- predict(modFit, newdata=inputData)
outputMessage <- as.character(pred)
outputMessage})
}
)
|
ea68d5558578fa2d45383447ca3a1515eedb9606
|
40cc7a64fc13bba3f193b2257350c70c7a62952b
|
/Ex(8).R
|
15c54353e39484e26142a450779e7d62ed768cde
|
[] |
no_license
|
NorthCL/-R-
|
0c021d613adfe3d05007d6a55d9b37dd53ac6b5d
|
59c42e2de6d81552b847e8e19a2d6c267731ecfc
|
refs/heads/main
| 2023-04-19T11:31:57.051577
| 2021-05-12T20:38:31
| 2021-05-12T20:38:31
| 304,287,802
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 611
|
r
|
Ex(8).R
|
# a - expected value (mean) of x
# b - standard deviation of x
# c - expected value (mean) of the noise e
# d - standard deviation of the noise e
# n - number of simulated observations
# Simulates y = 100 - 6x + e and draws three linked plots of the sample.
Function.NC <- function(a, b, c, d, n)
{
  # Fixed seed keeps the simulated sample reproducible across calls.
  set.seed(30)
  x <- rnorm(n , mean = a, sd = b)
  e <- rnorm(n, c, d)
  y <- 100 - 6*x + e
  # 3x3 layout: cell 1 = vertical boxplot of y, cell 2 = scatterplot of (x, y),
  # cell 3 = horizontal boxplot of x.
  # NOTE(review): the layout matrix references figure 4, which is never
  # plotted, so that cell stays empty — confirm this is intended.
  layout(matrix(c(1,2,2,1,2,2,4,3,3),nrow = 3,byrow = T))
  boxplot(y,pch = 20,cex = 1,col = "red")
  plot(x,y,pch = 20,cex = 2,col = "red")
  boxplot(x,pch = 20,cex = 1,horizontal = T,col = "red")
}
Function.NC (8, 2, 0, 15, 300)
|
55c2a7613f54b842a09df7c129127a38c88e6b1b
|
4d20e5e7a209d0c77cd598259010bee26980c50f
|
/r_code.R
|
f387b2ac17bfba50f4a1b29b3f5939ede1c52966
|
[] |
no_license
|
luehkenecology/extract_e_obs_gridded_dataset
|
d13df0baea4cd31e12c5505cb0692437105fa663
|
4a91257feb909bfcd63ecf282edb2bbde91d907a
|
refs/heads/master
| 2021-01-17T17:28:42.636569
| 2016-06-28T11:31:48
| 2016-06-28T11:31:48
| 62,133,864
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,531
|
r
|
r_code.R
|
###################################################
# clear memory
###################################################
# NOTE(review): rm(list = ls()) wipes the caller's entire workspace and is
# generally discouraged in scripts; kept as-is to preserve behaviour.
rm(list = ls())
#============================================================
# set working directory
#============================================================
# Resolve the project root from the current working directory, expose it as
# PROJHOME on the search path (via attach), then make it the working directory.
RPROJ <- list(PROJHOME = normalizePath(getwd()))
attach(RPROJ)
rm(RPROJ)
setwd(PROJHOME)
###################################################
# load libraries
###################################################
library(plyr)
library(ggplot2)
library(scales) # to access breaks/formatting functions
require(lubridate)
library(zoo)
require(fields)
library(raster)
library(maptools)
library(spatstat)
library(raster)
library(ncdf)
library(RNetCDF)
library(ncdf.tools)
library(fields)
library(colorRamps)
library(rworldmap)
# function to convert *.nc to raster
#
# Extracts a date range from an open E-OBS netCDF handle and returns it as a
# RasterBrick with one layer per day, optionally cropped to `extent_v`.
#
# Args:
#   nc            - open netCDF handle (ncdf::open.ncdf)
#   start_dataset - origin date of the file's time axis (values are days since this date)
#   year_start    - first year to extract
#   day_start     - month-day suffix for the first day (default Jan 1st)
#   year_end      - last year to extract (defaults to year_start)
#   day_end       - month-day suffix for the last day (default Dec 31st)
#   extent_v      - crop extent; the default 0 means "no cropping"
#   var           - netCDF variable name ("tg" is the E-OBS daily mean temperature)
temp_func <- function(nc, start_dataset = '1950-01-01',
                      year_start, day_start = "-01-01",
                      year_end = year_start, day_end = "-12-31",
                      extent_v = 0,
                      var = "tg"){
  # Time: build the requested start/end timestamps and locate them on the
  # file's time axis (stored as days since start_dataset).
  A1<-paste(year_start, day_start, sep = "")
  A2<-paste(year_end, day_end, sep = "")
  time.s=as.POSIXct(A1,tz='UTC')
  time.e=as.POSIXct(A2,tz='UTC')
  tseq=seq(time.s, time.e, by='24 hours')
  times=as.POSIXct(nc$dim$time$vals*86400, origin=start_dataset, tz='UTC')
  t1=which(times==time.s)
  tfull1=which(times==time.s)
  t2=which(times==time.e)
  tfull2=which(times==time.e)
  # NOTE(review): tseq, tfull1 and tfull2 are computed but never used below.
  dt = t2-t1+1
  # Read all longitudes/latitudes for the dt days starting at t1.
  afi<-get.var.ncdf(nc, var,start=c(1, 1, t1), count=c(-1, -1, dt))
  lon <- nc$dim$longitude$vals
  lat <- nc$dim$latitude$vals
  # Transpose each daily slice and flip the rows so north ends up on top,
  # then wrap each slice in a raster with the file's lon/lat extent.
  TEST <- lapply(1:dt, function(x) m <- t((afi[,,x])))
  TEST1 <- lapply(TEST, function(x) x[nrow(x):1,])
  TEST2 <- lapply(TEST1, function(x) raster(x,xmn=min(lon),xmx=max(lon),ymn=min(lat),ymx=max(lat)))
  # crop the raster if an extent is present
  if(sum(extent_v) > 0 | sum(extent_v) < 0){
    TEST3<-lapply(TEST2, function(x) crop(x, extent_v))
    brick(unlist(TEST3))
  } else{
    brick(unlist(TEST2))
  }
}
#============================================================
# loop through years to convert *.nc to raster
#============================================================
# Writes one raster file per year.
# NOTE(review): the netCDF file is re-opened on every iteration; opening it
# once before the loop would be cheaper — confirm before changing.
for(i in 1950:2015){
  # convert *.nc to raster
  data <- temp_func(open.ncdf("data/tg_0.25deg_reg_v13.0.nc"),
                    year_start = i)
  # save raster
  writeRaster(data, paste("output/mean_temperature_europe_",
                          i, ".grd", sep = ""), overwrite = T)
}
|
8c58c00ae2a387cc08c69a0e294895c5ebf2286b
|
d467300a1edd4f18630caba9653acd54e11651bf
|
/Class_Code/Class_1/makestereo.r
|
f2056206b35abfd5a083b4563df887e0f90d204c
|
[] |
no_license
|
ZixinNie/Introduction_to_Data_Science
|
63e4645b77dfd9b04570761a7933ca412694d445
|
5163ed6f9c2c613425fe3ce1b10133b2c8f1310a
|
refs/heads/master
| 2020-04-18T03:24:16.431261
| 2019-02-06T13:50:17
| 2019-02-06T13:50:17
| 167,197,283
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,038
|
r
|
makestereo.r
|
require(lattice)

# make.Stereo: draw two lattice cloud() plots of the same three-column matrix /
# data frame side by side, from two slightly different view angles (Screen1 and
# Screen2), so that the pair can be viewed as a stereo image.
#
# Args:
#   Z                - matrix/data frame with three columns (renamed Z1, Z2, Z3)
#   Groups           - grouping variable used to colour the points
#   Screen1, Screen2 - view-angle lists for the left and right panel
#   Main             - title used for both panels
#   asp              - "Equal" forces identical ranges on all three axes
#   Xlab, Ylab, Zlab - axis labels
#   pch              - plotting symbol
make.Stereo <- function(Z, Groups, Screen1=list(z = 20, x = -70, y = 3), Screen2=list(z = 20, x = -70, y = 0), Main="Stereo", asp="Equal", Xlab="X", Ylab="Y",Zlab="Z", pch=16) {
  Z <- data.frame(Z)
  dimnames(Z) <- list(1:dim(Z)[1], c("Z1", "Z2", "Z3"))
  if (asp == "Equal") {
    # Use one common range across all three columns so axes are comparable.
    X.range <- c(min(apply(Z,2,range)[1,]), max(apply(Z,2,range)[2,]))
    Y.range <- X.range
    Z.range <- X.range
  }
  else {
    X.range <- range(Z[,1])
    Y.range <- range(Z[,2])
    Z.range <- range(Z[,3])
  }
  # Left panel; more = TRUE keeps the device open for the second print().
  print(cloud(Z3 ~ Z1 * Z2, data = Z, pch=pch, cex = .8, perspective = FALSE, groups=Groups, subpanel = panel.superpose, main = Main, screen = Screen1, xlim=X.range,ylim=Y.range,zlim=Z.range, xlab=Xlab, ylab=Ylab, zlab=Zlab), split = c(1,1,2,1), more = TRUE)
  # Right panel at the second view angle.
  print(cloud(Z3 ~ Z1 * Z2, data = Z, pch=pch, cex = .8, perspective = FALSE, groups=Groups, subpanel = panel.superpose, main = Main, screen = Screen2, xlim=X.range,ylim=Y.range,zlim=Z.range, xlab=Xlab, ylab=Ylab, zlab=Zlab), split = c(2,1,2,1))
}
|
a50c26d9ac214a4b75d69f21a966a886ccdd0cc1
|
0e949f187763332b63439b811f7870c0cb959e67
|
/Plot4.R
|
ca4a0e889e8201c711012fdbe63e04f11f3a1bad
|
[] |
no_license
|
MariaJose97-22/ExData_Plotting1
|
b1110e0c3fe9a26ae8403f0b9e8cc09f29502f0f
|
6aac1a411fea4ff1407e76e33c1a1a78b07af94f
|
refs/heads/master
| 2022-11-23T16:42:10.169661
| 2020-08-03T03:28:01
| 2020-08-03T03:28:01
| 284,590,822
| 0
| 1
| null | 2020-08-03T09:49:49
| 2020-08-03T03:06:57
|
R
|
UTF-8
|
R
| false
| false
| 1,572
|
r
|
Plot4.R
|
# Plot 4: 2x2 panel of household power usage for 2007-02-01 and 2007-02-02.
setwd("C:/Users/Maria Jose Figueroa/Desktop")
# The file's own header row is skipped and the column names set explicitly.
power_consumption<-read.table("./ExData_Plotting1/household_power_consumption.txt", skip=1,sep=";")
names(power_consumption)<- c("Date","Time","Global_active_power","Global_reactive_power","Voltage","Global_intensity","Sub_metering_1","Sub_metering_2","Sub_metering_3")
# Keep only the two days of interest (dates stored as d/m/yyyy strings).
subset_data<-subset(power_consumption,power_consumption$Date=="1/2/2007"|power_consumption$Date=="2/2/2007")
# Bug fix: dates look like "1/2/2007", so the format needs the "/" separators;
# the original "%d%m%Y" silently converted every date to NA.
subset_data$Date<-as.Date(subset_data$Date,format = "%d/%m/%Y")
subset_data$Time<-strptime(subset_data$Time,format = "%H:%M:%S")
# Re-attach the correct calendar date to each day's 1440 minute rows.
# NOTE(review): the hard-coded row ranges assume exactly two complete days.
subset_data[1:1440,"Time"] <- format(subset_data[1:1440,"Time"],"2007-02-01 %H:%M:%S")
subset_data[1441:2880,"Time"] <- format(subset_data[1441:2880,"Time"],"2007-02-02 %H:%M:%S")
png(filename = "./ExData_Plotting1/Plot4.png")
# Fill the 2x2 grid column-wise: active power, sub metering | voltage, reactive power.
layout(matrix(c(1:4), nrow=2, byrow=FALSE))
plot(subset_data$Time,as.numeric(subset_data$Global_active_power),type="l",xlab="",ylab="Global Active Power (kilowatts)")
plot(subset_data$Time,subset_data$Sub_metering_1,type="l", xlab="", ylab="Energy sub metering")
with (subset_data, lines(subset_data$Time,subset_data$Sub_metering_2,col="red"))
with (subset_data, lines(subset_data$Time,subset_data$Sub_metering_3,col="blue"))
legend("topright",lty = 1, col=c("black","red","blue"),legend = c("Sub_metering_1","Sub_metering_2","Sub_metering_3"))
plot(subset_data$Time,as.numeric(subset_data$Voltage),type="l",xlab="Date time",ylab="Voltage")
plot(subset_data$Time,as.numeric(subset_data$Global_reactive_power),type="l",xlab="Date time",ylab="Global Reactive Power")
dev.off()
|
c318cd738f8b83e5a02692d79c2960898f5c160b
|
ab67cc16f40aeb69c1551c8e907d97abfd144212
|
/R/scrape.R
|
754c5ed75e135c1e7c8d1c335aff0eaa3b2c73c4
|
[
"MIT"
] |
permissive
|
news-r/papers
|
6c315f423b923eb14e0d67ea7f37555e2f449678
|
2e94fea7cacd1f511e4ed30072b5957b7e6fb771
|
refs/heads/master
| 2020-05-31T18:01:57.481746
| 2020-02-23T19:42:56
| 2020-02-23T19:42:56
| 190,424,523
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,390
|
r
|
scrape.R
|
#' Regions
#'
#' Get the list of regions available.
#'
#' @import rvest
#' @import polite
#'
#' @return Invisibly, a \code{regions} object built from a tibble of region
#'   names and their absolute links.
#'
#' @examples regions <- get_regions()
#'
#' @export
get_regions <- function() {
  # Politely open a session on the index page and collect its <li> entries.
  session <- bow(BASE_URL, force = TRUE, user_agent = get_user_agent())
  result <- scrape(session) %>%
    html_node(".cList") %>%
    html_nodes("li")
  region <- html_text(result)
  # The hrefs are relative, so prefix each with the site base URL.
  link <- result %>%
    html_nodes("a") %>%
    html_attr("href") %>%
    paste0(BASE_URL, .)
  df <- tibble::tibble(
    region = region,
    link = link
  )
  # Status message for interactive use.
  cat(crayon::blue(cli::symbol$info), nrow(df), "Countries\n")
  regions <- .construct_countries(df)
  invisible(regions)
}
#' Print method for `regions` objects
#'
#' Displays only the region names; the link column stays internal.
#'
#' @param x A `regions` object as returned by `get_regions()`.
#' @param ... Ignored; present for S3 method consistency.
#'
#' @export
print.regions <- function(x, ...){
  print(x[["region"]])
  # S3 print methods conventionally return their argument invisibly so the
  # object keeps flowing through pipes and assignments.
  invisible(x)
}
#' User Agent
#'
#' Get and set \code{user-agent} for subsequent calls.
#'
#' @param agent Agent name.
#'
#' @name user-agent
#' @export
get_user_agent <- function() {
  # Read the configured agent; fall back to the package default when the
  # environment variable is unset or empty.
  agent <- Sys.getenv("PAPERS_USER_AGENT")
  if (!nzchar(agent)) {
    agent <- "papers-r-package"
  }
  agent
}
#' @rdname user-agent
#' @export
set_user_agent <- function(agent) {
  # Fail loudly rather than silently leaving the agent unchanged.
  if (missing(agent)) {
    stop("Missing agent", call. = FALSE)
  }
  Sys.setenv("PAPERS_USER_AGENT" = agent)
}
#' Papers
#'
#' Get papers from a certain region.
#'
#' @param data Dataset as returned by \code{\link{get_regions}}.
#' @param region Name of the region \emph{or} index of region in \code{data}.
#'
#' @return A tibble of newspapers with their links and region.
#'
#' @examples
#' regions <- get_regions()
#' get_papers(regions, "Belgium")
#' get_papers(regions, 15) # 15th country in the regions data.frame
#'
#' @name get_papers
#' @export
get_papers <- function(data, region) UseMethod("get_papers") # S3 generic; see get_papers.regions
#' @rdname get_papers
#' @method get_papers regions
#' @export
get_papers.regions <- function(data, region){
  if(missing(region))
    stop("Missing region", call. = FALSE)
  # `region` may be a region name (character) or a row index (numeric).
  if(inherits(region, "character"))
    subset <- data[data[["region"]] == region,]
  else
    subset <- data[region, ]
  url <- subset[["link"]]
  region <- subset[["region"]]
  # Politely fetch the region page and collect its newspaper list items.
  session <- bow(url, force = TRUE, user_agent = get_user_agent())
  result <- scrape(session) %>%
    html_node(".cList") %>%
    html_nodes("li")
  # Drop the parenthesised suffix from each entry and trim whitespace.
  newspaper <- html_text(result) %>%
    gsub("\\(.+\\)", "", .) %>%
    trimws()
  link <- result %>%
    html_nodes("a") %>%
    html_attr("href")
  df <- tibble::tibble(
    newspaper = newspaper,
    link = link
  )
  df[["region"]] <- region
  return(df)
}
|
1ac4b023a1f3593005f26a8ad0370eaea9d6955d
|
c045d3a278f9e394cfe812e02cba316c9aeae0fb
|
/man/twn.Rd
|
a0bbb0fb452a949fa612ba3d961fbf8d976b3001
|
[
"MIT"
] |
permissive
|
RedTent/twn
|
67198ef20852342b681a51000e20811ebdcabb8a
|
ddcedabf99124d72ad2a963b7fbf7678880fcdf9
|
refs/heads/master
| 2023-06-23T06:00:57.839342
| 2023-06-20T09:33:12
| 2023-06-20T09:33:12
| 242,074,205
| 0
| 0
|
NOASSERTION
| 2021-03-29T19:33:26
| 2020-02-21T06:54:10
|
R
|
UTF-8
|
R
| false
| true
| 633
|
rd
|
twn.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ZZ_package_description.R
\docType{package}
\name{twn}
\alias{twn}
\title{Een package voor de TWN-lijst}
\description{
De bedoeling van 'twn' is om de TWN-lijst beschikbaar te maken in R en om er makkelijk mee te kunnen werken.
De package biedt diverse functies die het eenvoudig maken om informatie van een taxon op te vragen.
Zie ook:
\itemize{
\item \link{twn_lijst} en \link{twn_statuscodes}
\item \link{twn_info}
\item \code{\link[=match_parent]{match_parent()}}
}
Voor meer informatie zie de package website: \url{https://redtent.github.io/twn/}
}
|
09d8c39dcfe02e739f8b1e9ca8e815b9a3ad9db9
|
32725711b519cdbd3cfa57faea2d21e4de10a92a
|
/R/test-script.R
|
98633ccb5489e17016cf49d77f528d4c377bf2f7
|
[] |
no_license
|
koopmans-michaela/mcs-qsar
|
74e02d58801a0c6f555615043b9d2e4defd5b377
|
ad0005404af8bc1c946929deb45efc96fb949722
|
refs/heads/master
| 2021-05-14T08:42:09.881356
| 2018-02-05T18:40:21
| 2018-02-05T18:40:21
| 116,306,688
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 120
|
r
|
test-script.R
|
library(acepack)
library(testthat)
library(jsonlite)
#here's a comment
#testing for john and jay
#learn commit and push
|
c283085e03888182859f7fcdfe1bcc659410ff15
|
3d9f5d23f0c4b933d433acdb7aebf801ae2aa653
|
/Hospitals_per_capita_by_Province.R
|
834e2b2a8f31e9bb742e620762dcb378a4a6f110
|
[] |
no_license
|
bzhang1945/vizathonsubmission
|
c29c2cae65b6f18b9a73f77e99222309270ef079
|
f17043a74c98dbe90e87491f0ff24c10d74be411
|
refs/heads/main
| 2023-06-26T09:57:32.522352
| 2021-08-01T15:53:29
| 2021-08-01T15:53:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,029
|
r
|
Hospitals_per_capita_by_Province.R
|
library(ggplot2)
library(tidyverse)

# Bar chart of hospitals per capita for the 13 Canadian provinces/territories.
# Two-letter province/territory abbreviations.
Canadian_province <- c("AB", "BC", "MB", "NB", "NL", "NS", "NT","NU", "ON","PE", "QC","SK","YT")
# Hospitals per capita, in the same order as the abbreviations above.
Hospitals_per_capita <- c(0.00007461243237, 0.0002895378824, 0.0001549674677, 0.00007927598015, 0.0001844600125, 0.00019909153, 0.0001550868486, 0.00007612860659, 0.0002529953655, 0.0002189977412, 0.0002750717589, 0.0004012446218, 0.0009243458476)

# tibble() replaces the deprecated data_frame(); also renamed the result so it
# no longer shadows base::data().
hospital_data <- tibble(Canadian_province, Hospitals_per_capita)

ggplot(hospital_data, aes(x = Canadian_province, y = Hospitals_per_capita)) + geom_bar(fill = "steelblue", stat = 'identity') + ggtitle("Hospitals Per Capita By Province") + xlab("Canadian Provinces") + ylab("Hospitals Per Capita")+
  theme_minimal() + theme(plot.background = element_rect(fill = "lightblue")) + theme(plot.title = element_text(face = "bold", colour = "steelblue")) +
  theme(axis.text = element_text(color = "steelblue", size = 12)) +
  theme(axis.title = element_text(face = "bold", color = "steelblue")) +
  theme(panel.background = element_rect(fill = "linen"))
|
8faf0442d0f05d026b7ac003d9ef71180d810e8b
|
38116111ccbbb1c4580d8e8c5ac3f9775e1fa384
|
/man/calculateDiscreteDiscreteMI_Entropy.Rd
|
2b27068c8fb942a402365ea1069d093db12de7bb
|
[
"MIT"
] |
permissive
|
terminological/tidy-info-stats
|
6c1e37684eeac8d765384b773a23f0488eb7b467
|
1b1f19a718edb44c7178943c322b45fd1e3c93b1
|
refs/heads/master
| 2022-11-30T08:16:46.311945
| 2022-11-18T20:37:21
| 2022-11-18T20:37:21
| 232,600,275
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,137
|
rd
|
calculateDiscreteDiscreteMI_Entropy.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tidyDiscreteDiscreteMI.R
\name{calculateDiscreteDiscreteMI_Entropy}
\alias{calculateDiscreteDiscreteMI_Entropy}
\title{calculate mutual information between a discrete value (X) and a discrete value (Y) using estimates of entropy}
\usage{
calculateDiscreteDiscreteMI_Entropy(
df,
groupXVars,
groupYVars,
entropyMethod = "Grassberger",
...
)
}
\arguments{
\item{df}{- may be grouped, in which case the grouping is interpreted as different types of discrete variable}
\item{groupXVars}{- the column of the discrete value (X) quoted by vars(...)}
\item{groupYVars}{- the column of the discrete value (Y)}
\item{entropyMethod}{- the method used to calculate the entropy (see ?tidyinfostats::calculateDiscreteEntropy) - defaults to "Grassberger"}
}
\value{
a dataframe containing the disctinct values of the groups of df, and for each group a mutual information column (I). If df was not grouped this will be a single entry
}
\description{
calculate mutual information between a discrete value (X) and a discrete value (Y) using estimates of entropy
}
|
ec54e653dcc9bf40aea688a0c48c424fb394112e
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/sjmisc/examples/merge_imputations.Rd.R
|
a620f78ba39d3218a15c754461a7ca97e75e7923
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 495
|
r
|
merge_imputations.Rd.R
|
# Auto-extracted example code for sjmisc::merge_imputations.
library(sjmisc)
### Name: merge_imputations
### Title: Merges multiple imputed data frames into a single data frame
### Aliases: merge_imputations
### ** Examples
# Impute the missing values in nhanes with mice.
library(mice)
imp <- mice(nhanes)
# return data frame with imputed variables
merge_imputations(nhanes, imp)
# append imputed variables to original data frame
merge_imputations(nhanes, imp, nhanes)
# show summary of quality of merging imputations
merge_imputations(nhanes, imp, summary = "dens", filter = c("chl", "hyp"))
|
357472be7ca58c06914d8d647191c5cc5d65deab
|
8e4353a1dc52d42267cd6e8c1d39d5e94c0b1b99
|
/r/registration_gui.R
|
9bed7c8650a5841bae13a0d7ce91b77c91eb4268
|
[
"Apache-2.0"
] |
permissive
|
SimpleITK/ISBI2018_TUTORIAL
|
f6b7ff1dadb736274c6987627fdae7574f6e2e58
|
7e8f255bb9d0aca9bebd8f4091d7d867b791d1ac
|
refs/heads/master
| 2022-11-08T13:19:44.807531
| 2022-11-01T13:53:10
| 2022-11-01T13:53:10
| 125,414,481
| 27
| 18
|
Apache-2.0
| 2018-03-27T14:16:44
| 2018-03-15T19:08:16
|
Jupyter Notebook
|
UTF-8
|
R
| false
| false
| 6,119
|
r
|
registration_gui.R
|
library(ggplot2)
# Labels used in the POPI dataset
# (segmentation label values used when extracting mask contours below)
popi_body_label <- 0
popi_air_label <- 1
popi_lung_label <- 2
# Callback invoked when the StartEvent happens: resets the accumulators for a
# new registration run. Functions ending in _jn are meant for Jupyter
# notebooks, where display behaviour differs slightly.
# The metric trace is kept in global variables (rather than a dedicated
# plotting environment) so the values stay easy to inspect afterwards.
start_plot <- function() {
  metric_values <<- c()
  multires_iterations <<- c()
}
# Final plot for notebook use: draws the whole metric trace with the
# multi-resolution change points highlighted, then removes the globals.
end_plot_jn <- function()
{
  # Mark the iterations at which a resolution change happened ("M").
  Multi <- rep(NA, length(metric_values))
  Multi[multires_iterations] <- "M"
  DDF <- data.frame(IterationNumber=1:length(metric_values),
                    MetricValue=metric_values,
                    MultiresIteration=Multi)
  DDFM <- subset(DDF, !is.na(MultiresIteration))
  pl <- ggplot(DDF, aes(x=IterationNumber, y=MetricValue)) +
    geom_line() +
    geom_point(data=DDFM, aes(colour=MultiresIteration)) +
    theme(legend.position="none")
  print(pl)
  # Tidy up the globals created by start_plot().
  rm(metric_values, pos = ".GlobalEnv")
  rm(multires_iterations, pos = ".GlobalEnv")
}
# Callback invoked when the IterationEvent happens, update our data and display new figure.
# Note that this won't appear as an animation in R studio, but you can use the arrows to cycle
# through plots
plot_values <- function(registration_method)
{
  # Append the newest metric value to the global trace.
  metric_values <<- c(metric_values, registration_method$GetMetricValue())
  Multi <- rep(NA, length(metric_values))
  Multi[multires_iterations] <- "M"
  DDF <- data.frame(IterationNumber=1:length(metric_values),
                    MetricValue=metric_values,
                    MultiresIteration=Multi)
  DDFM <- subset(DDF, !is.na(MultiresIteration))
  pl <- ggplot(DDF, aes(x=IterationNumber, y=MetricValue)) +
    geom_line() +
    theme(legend.position="none")
  # Only add the resolution-change markers once more than one exists.
  if(nrow(DDFM) > 1) {
    pl <- pl + geom_point(data=DDFM, aes(colour=MultiresIteration))
  }
  print(pl)
  # Flush the device so the plot appears immediately during registration.
  dev.flush()
  Sys.sleep(0)
}
# Notebook variant of plot_values(): only accumulate the metric value —
# re-plotting on every iteration inside a notebook is not worthwhile.
plot_values_jn <- function(registration_method)
{
  latest <- registration_method$GetMetricValue()
  metric_values <<- append(metric_values, latest)
}
# Callback for the sitkMultiResolutionIterationEvent: remember the index
# (into metric_values) at which the next resolution level will begin.
update_multires_iterations <- function()
{
  level_start <- length(metric_values) + 1
  multires_iterations <<- append(multires_iterations, level_start)
}
#
# Get a coronal slice with overlaid contour of the mask for the specific slice index in all temporal images.
#
# Args:
#   coronal_slice_index - index of the coronal slice to extract from each volume
#   images, masks       - parallel lists of SimpleITK 3D images and label masks
#   label               - mask label value whose contour is drawn (e.g. popi_lung_label)
#   window_min/max      - intensity window applied before converting to 8-bit
# Returns a 3D image whose third axis is time (one overlaid 2D slice per input).
temporal_coronal_with_overlay <- function(coronal_slice_index, images, masks, label, window_min, window_max)
{
  # Extract the 2D images and masks.
  slices <- lapply(images, function(img, slc) img[,slc,], slc=coronal_slice_index)
  slice_masks <- lapply(masks, function(msk, slc, lbl) msk[,slc,]==lbl , slc=coronal_slice_index, lbl=label)
  # Resample the image (linear interpolation) and mask (nearest neighbor interpolation) into an isotropic grid,
  # required for display.
  original_spacing <- slices[[1]]$GetSpacing()
  original_size <- slices[[1]]$GetSize()
  min_spacing <- min(original_spacing)
  new_spacing <- c(min_spacing, min_spacing)
  # Scale the pixel counts so physical extent is preserved at the finer spacing.
  new_size <- c(as.integer(round(original_size[1]*(original_spacing[1]/min_spacing))),
                as.integer(round(original_size[2]*(original_spacing[2]/min_spacing))))
  resampled_slices <- lapply(slices, function(slc, sz, spc) Resample(slc, sz, Transform(),
                                                                     "sitkLinear", slc$GetOrigin(),
                                                                     spc, slc$GetDirection(), 0.0,
                                                                     slc$GetPixelID()), sz=new_size, spc=new_spacing)
  resampled_slice_masks <- lapply(slice_masks, function(msk, sz, spc) Resample(msk, sz, Transform(),
                                                                     "sitkNearestNeighbor", msk$GetOrigin(),
                                                                     spc, msk$GetDirection(), 0.0,
                                                                     msk$GetPixelID()), sz=new_size, spc=new_spacing)
  # Create the overlay: cast the mask to expected label pixel type, and do the same for the image after
  # window-level, accounting for the high dynamic range of the CT.
  overlaid_slices <- mapply( function(slc, msk, win_min, win_max) LabelMapContourOverlay(Cast(msk, "sitkLabelUInt8"),
                                                                                         Cast(IntensityWindowing(slc,
                                                                                                                 windowMinimum=win_min,
                                                                                                                 windowMaximum=win_max),
                                                                                              "sitkUInt8"),
                                                                                         opacity = 1,
                                                                                         c(0,0), c(2,2)),
                             resampled_slices,
                             resampled_slice_masks, win_min=window_min, win_max=window_max)
  # Create the temporal slice, 3D volume representing 2D coronal+time
  temporal_image <- Image(c(overlaid_slices[[1]]$GetSize(), length(overlaid_slices)), overlaid_slices[[1]]$GetPixelID())
  # Two subtle points: (1) to paste the 2D slice into the 3D volume we need to make it a 3D slice (JoinSeries),
  # (2) the Paste function uses SimpleITK indexing, requiring the seq()-1.
  invisible(mapply(function(slice, index) temporal_image<<- Paste(temporal_image, JoinSeries(slice), c(slice$GetSize(),1), c(0,0,0), c(0,0,index)),
                   overlaid_slices, seq(length(overlaid_slices))-1))
  return(temporal_image)
}
|
367063fd58baaf0e39fde6c99af7db7c9499b9da
|
ba49eb475d4fcd6d61655270ec34fe829657494e
|
/man/elsc.Rd
|
63f22f19170cdc4187546ff723a263831db042e1
|
[] |
no_license
|
cran/Bios2cor
|
f3b630684862d4f48e7137361e39094358336f63
|
50b2948bfdd3888e6593015247419ce1f5b58d8a
|
refs/heads/master
| 2022-07-31T18:21:28.244917
| 2022-07-08T08:25:23
| 2022-07-08T08:25:23
| 101,306,648
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,688
|
rd
|
elsc.Rd
|
\name{elsc}
\Rdversion{1.1}
\alias{elsc}
\title{
Explicit Likelihood of Subset Covariation (ELSC) function
}
\description{
Calculates a score based on rigorous statistics of correlation/covariation in a perturbation-based algorithm. It measures how many possible subsets of size n would have the composition found in column j in the subset alignment defined by the perturbation in column i, and in the ideal subset
(i.e., in a subset with the amino acid distribution equal to the total alignment).
}
\usage{
elsc(align, gap_ratio = 0.2)
}
\arguments{
\item{align}{
An object of class 'align' created by the \code{\link{import.msf}} or the \code{\link{import.fasta}} function from a sequence alignment
}
\item{gap_ratio}{
Numeric value between 0 and 1 indicating the maximal gap ratio at a given position in the MSA for this position to be taken into account. Default is 0.2, positions with more than 20 percent of gaps will not be taken into account in the analysis. When gap_ratio is 1 or close to 1, only positions with at least 1 aa are taken into account (positions with only gaps are excluded).
}
}
\details{
The ELSC score at position [i,j] has been computed with the following formula :
\deqn{ELSC(i,j) = -ln\prod_{y}^{ } \frac{{{N_{y(j)}}\choose{n_{y(j)}}}}{{{N_{y(j)}}\choose{m_{y(j)}}}}}
As a reminder, a binomial coefficient \eqn{{N}\choose{k}} is computed as follow :
\deqn{{{N}\choose{k}} = \frac{N!}{k!(N-k)!}}
where :
\itemize{
\item {\eqn{N_{y(j)}}} {is the number of residues y at position j in the total (unperturbed) sequence alignment}
\item {\eqn{n_{y(j)}}} {is the number of residues y at position j in the subset alignment defined by the perturbation in column i}
\item {\eqn{m_{y(j)}}} {is the number of residues y at position j in the ideal subset (i.e., in a subset with the amino acid distribution equal to the total alignment)}
}
}
\value{
A list of two elements which are numeric matrices containing the ELSC scores and Z-scores for each pair of elements.
}
\author{
Madeline DENIAUD and Marie CHABBERT
}
\references{
Dekker JP, Fodor A, Aldrich RW, Yellen G. A perturbation-based method for calculating explicit likelihood of evolutionary covariance in multiple sequence alignments. Bioinformatics 2004;20:1565-1572.
}
\examples{
#Importing MSA file
align <- import.msf(system.file("msa/toy_align.msf", package = "Bios2cor"))
#Creating correlation object with ELSC method for positions with gap ratio < 0.1
elsc <- elsc(align, gap_ratio = 0.1)
#Creating correlation object with ELSC method for positions with gap_ratio < 0.2 (Default)
#elsc <- elsc(align)
}
|
7de17c63a02f19eb43fa13e3f0b8b904d331d13c
|
1a826358019cd1bc4082a44099bbc9fb7034aa86
|
/OCRPractice.R
|
7a208e81601c1b897b693d408fdc55ea86b5cf2a
|
[] |
no_license
|
sheharyarakhtar/Digit_Recognition
|
86dc367c85c75017886e5e8b7fbc3a96c512a578
|
3a36c0f12f8bdb05b45919fe75bcf69f21391ed5
|
refs/heads/main
| 2023-04-10T23:34:04.284972
| 2021-04-14T21:37:38
| 2021-04-14T21:37:38
| 357,990,667
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,837
|
r
|
OCRPractice.R
|
# Train a small fully-connected network on MNIST and inspect its predictions.
library(keras)
#Import dataset mnist for training and testing
mnist <- dataset_mnist()
#Two lists of two: train holds the training images and labels, test the test set
str(mnist)
#Separate them into variables
trainx <- mnist$train$x
trainy <- mnist$train$y
testx <- mnist$test$x
testy <- mnist$test$y
# Class balance of the digit labels
table(trainy)
table(testy)
#plot images (first nine training digits in a 3x3 grid)
par(mfrow = c(3,3))
for(i in 1:9) plot(as.raster(trainx[i,,], max=255))
par(mfrow = c(1,1))
# Inspect one raw digit: its pixel matrix and intensity histogram
trainx[5,,]
hist(trainx[5,,])
trainy
#FIVE — indices of training images, presumably all showing the digit 5
# (TODO confirm against trainy)
a <- c(1,12,36,48,66,101,133,139,146)
par(mfrow = c(3,3))
for(i in a) plot(as.raster(trainx[i,,], max = 255))
par(mfrow = c(1,1))
#reshape and rescale
##reshape linearises each 28x28 image into one row of 784 variables (784 = 28x28 pixels)
trainx <- array_reshape(trainx, c(nrow(trainx), 784))
str(trainx)
testx <- array_reshape(testx, c(nrow(testx), 784))
str(testx)
#Normalise the values by dividing by 255, the maximum pixel intensity
trainx <- trainx/255
testx <- testx/255
#One hot encoding
##Turns the integer labels 0-9 into a 10-column indicator matrix
trainy <- to_categorical(trainy, 10)
testy <- to_categorical(testy, 10)
head(trainy)
#Model: three hidden relu layers with dropout, softmax output over the 10 digits
model <- keras_model_sequential()
model %>%
  layer_dense(units = 512, activation = 'relu', input_shape = c(784)) %>%
  layer_dropout(rate = 0.4) %>%
  layer_dense(units = 256, activation = 'relu') %>%
  layer_dropout(rate = 0.3) %>%
  layer_dense(units = 128, activation = 'relu') %>%
  layer_dropout(rate = 0.2) %>%
  layer_dense(units = 10, activation = 'softmax')
summary(model)
#Compile
model %>%
  compile(loss = 'categorical_crossentropy',
          optimizer = optimizer_rmsprop(),
          metrics = 'accuracy')
#Fit model (20% of the training data held out for validation)
history <- model %>%
  fit(trainx,
      trainy,
      epochs = 12,
      batch_size = 32,
      validation_split = 0.2)
plot(history)
##Evaluation of model on the held-out test set
model %>% evaluate(testx, testy)
pred <- model %>% predict_classes(testx)
# Confusion matrix: predicted vs actual digits
table(Predicted = pred, Actual = mnist$test$y)
prob <- model %>% predict_proba(testx)
# Class probabilities plus predicted/actual labels for the first five test digits
cbind(prob, Predicted_class = pred, Actual = mnist$test$y)[1:5,]
library(EBImage)
# Load every PNG in the working directory as a custom test digit.
temp = list.files(pattern = '*.png')
mypic <- list()
for(i in seq_along(temp)) mypic[[i]] <- readImage(temp[[i]])
par(mfrow = c(3,2))
for(i in seq_along(temp)) plot(mypic[[i]])
par(mfrow = c(1,1))
#527680
#convert to greyscale
# Bug fix: the original loop indexed mypic[[1]] on every iteration, so only
# the first image was ever converted; use the loop index instead.
for(i in seq_along(temp)) colorMode(mypic[[i]]) <- Grayscale
#make them white/blackbackground (invert so digits match MNIST polarity)
for(i in seq_along(temp)) mypic[[i]] <- 1-mypic[[i]]
#Resize the image to the 28x28 input the model expects
for(i in seq_along(temp)) mypic[[i]] <- resize(mypic[[i]], 28, 28)
#Reshape the pictures into flat arrays
for(i in seq_along(temp)) mypic[[i]] <- array_reshape(mypic[[i]], c(28,28,3))
str(mypic)
# Stack the flattened images row-wise and keep the first 784 columns
# (NOTE(review): 28*28*3 = 2352 values per image; taking 1:784 keeps only the
# first channel — confirm this is intended).
new <- NULL
for (i in seq_along(temp)) new <- rbind(new, mypic[[i]])
newx <- new[,1:784]
# Ground-truth digits for the custom images, in file order.
newy <- c(5,2,7,6,8,0)
##model prediction on the custom digits
pred <- model %>% predict_classes(newx)
pred
|
5ce70df38c101f1830d625c919507672b45df5c9
|
df3b3e2cff3f789a3e91e561d7e3121603d51546
|
/man/superbData.Rd
|
6d63f85545f8ea0811c72d9f3f046de1c074b309
|
[] |
no_license
|
humanfactors/superb
|
39218b3458d8d8d834b844412a22b9a4bf5a8746
|
cdb7a903d84c2a83d4a4c7c94a97a2d4bc2221a4
|
refs/heads/master
| 2023-07-16T21:47:00.530298
| 2021-09-04T16:52:48
| 2021-09-04T16:52:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 3,453
|
rd
|
superbData.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/superbData.R
\name{superbData}
\alias{superbData}
\title{Obtain summary statistics with correct error bars.}
\usage{
superbData(
data,
BSFactors = NULL,
WSFactors = NULL,
WSDesign = "fullfactorial",
factorOrder = NULL,
variables,
statistic = "mean",
errorbar = "CI",
gamma = 0.95,
adjustments = list(purpose = "single", popSize = Inf, decorrelation = "none",
samplingDesign = "SRS"),
preprocessfct = NULL,
postprocessfct = NULL,
clusterColumn = ""
)
}
\arguments{
\item{data}{Dataframe in wide format}
\item{BSFactors}{The name of the columns containing the between-subject factor(s)}
\item{WSFactors}{The name of the within-subject factor(s)}
\item{WSDesign}{the within-subject design if not a full factorial design (default "fullfactorial")}
\item{factorOrder}{Order of factors as shown in the graph (x axis, groups, horizontal
panels, vertical panels)}
\item{variables}{The dependent variable(s)}
\item{statistic}{The summary statistic function to use}
\item{errorbar}{The function that computes the error bar. Should be "CI" or "SE" or
any function name. Defaults to "CI"}
\item{gamma}{The coverage factor; necessary when errorbar == "CI". Default is 0.95.}
\item{adjustments}{List of adjustments as described below.
Default is \code{adjustments = list(purpose = "single", popSize = Inf, decorrelation = "none", samplingDesign = "SRS")}}
\item{preprocessfct}{is a transform (or vector of) to be performed first on data matrix of each group}
\item{postprocessfct}{is a transform (or vector of)}
\item{clusterColumn}{used in conjunction with samplingDesign = "CRS", indicates which column contains the cluster membership}
}
\value{
a list with (1) the summary statistics in summaryStatistics
(2) the raw data in long format in rawData (using numeric levels for
repeated-measure variables).
}
\description{
The function \code{superbData()} computes standard errors or confidence intervals for various descriptive
statistics under various designs, sampling schemes, population sizes and purposes,
according to the \code{superb} framework. See \insertCite{cgh21}{superb} for more.
}
\details{
The possible adjustments are the following
\itemize{
\item popSize: Size of the population under study. Defaults to Inf
\item purpose: The purpose of the comparisons. Defaults to "single".
Can be "single", "difference", or "tryon".
\item decorrelation: Decorrelation method for repeated measure designs.
Chooses among the methods "CM", "LM", "CA" or "none". Defaults to "none".
\item samplingDesign: Sampling method used to obtain the sample. Implemented
sampling methods are "SRS" (Simple Random Sampling) and "CRS" (Cluster-Randomized Sampling).
}
}
\examples{
# Basic example using a built-in dataframe as data;
# by default, the mean is computed and the error bar are 95\% confidence intervals
# (it also produces a $rawData dataframe, not shown here)
res <- superbData(ToothGrowth, BSFactors = c("dose", "supp"),
variables = "len")
res$summaryStatistics
# Example introducing adjustments for pairwise comparisons
# and assuming that the whole population is limited to 200 persons
res <- superbData(ToothGrowth, BSFactors = c("dose", "supp"),
variables = "len",
statistic = "median", errorbar = "CI", gamma = .80,
adjustments = list( purpose = "difference", popSize = 200) )
res$summaryStatistics
}
\references{
\insertAllCited{}
}
|
3c209b647b775b7d0353fdf7e8f6e75ee5acb445
|
66955ee1c32bd7cc6fff3fbbd893558c19d7358c
|
/Data Prep Classification and Prediction Analyis v6.r
|
11227594e40e02d2ed5e31a7d2a269e16d13b36d
|
[
"MIT"
] |
permissive
|
ericsgagnon/psustat897groupproject
|
84445c6187fd6e50164d435c2013060189055226
|
6d3e6fc5ce028a616f97469a8c4b5b6e94dd18a7
|
refs/heads/master
| 2021-01-01T20:21:52.704474
| 2017-08-07T02:45:55
| 2017-08-07T02:45:55
| 98,821,979
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 22,444
|
r
|
Data Prep Classification and Prediction Analyis v6.r
|
# DATA SETUP
# Read the charity mailing data and log-transform the right-skewed
# dollar/count predictors (transform choice is motivated by the EDA
# density plots later in this script).
charity = read.csv("E:/STAT 897/Group Project/charity.csv")
charity.t = charity
charity.t$avhv = log(charity.t$avhv)
charity.t$agif = log(charity.t$agif)
charity.t$inca = log(charity.t$inca)
charity.t$incm = log(charity.t$incm)
charity.t$lgif = log(charity.t$lgif)
charity.t$rgif = log(charity.t$rgif)
charity.t$tgif = log(charity.t$tgif)
charity.t$tlag = log(charity.t$tlag)
# Split into train / validation / test using the pre-assigned `part` column.
# Columns 2:21 are predictors, 22 is donr (donor yes/no), 23 is damt (gift amount).
data.train = charity.t[charity$part=="train",]
x.train = data.train[,2:21]
c.train = data.train[,22] # donr
n.train.c = length(c.train) # 3984
y.train = data.train[c.train==1,23] # damt for observations with donr=1
# x.train.mat = model.matrix(damt~., data.train[c.train==1,2:23])[,-22]
n.train.y = length(y.train) # 1995
data.valid = charity.t[charity$part=="valid",]
x.valid = data.valid[,2:21]
c.valid = data.valid[,22] # donr
n.valid.c = length(c.valid) # 2018
y.valid = data.valid[c.valid==1,23] # damt for observations with donr=1
#x.valid.mat = model.matrix(damt~., data.valid[c.valid==1,2:23])[,-22]
n.valid.y = length(y.valid) # 999
data.test = charity.t[charity$part=="test",]
n.test = dim(data.test)[1] # 2007
x.test = data.test[,2:21]
# Standardize all predictors using the TRAINING mean/sd only, so the
# validation and test sets see no information leakage.
x.train.mean = apply(x.train, 2, mean)
x.train.sd = apply(x.train, 2, sd)
x.train.std = t((t(x.train)-x.train.mean)/x.train.sd) # standardize to have zero mean and unit sd
# x.train.mat.mean = apply(x.train.mat, 2, mean)
# x.train.mat.sd = apply(x.train.mat, 2, sd)
# x.train.mat.std = t((t(x.train.mat)-x.train.mat.mean)/x.train.mat.sd) # standardize to have zero mean and unit sd
apply(x.train.std, 2, mean) # check zero mean
apply(x.train.std, 2, sd) # check unit sd
data.train.std.c = data.frame(x.train.std, donr=c.train) # to classify donr
data.train.std.y = data.frame(x.train.std[c.train==1,], damt=y.train) # to predict damt when donr=1
x.valid.std = t((t(x.valid)-x.train.mean)/x.train.sd) # standardize using training mean and sd
# x.valid.mat.std = t((t(x.valid.mat)-x.train.mat.mean)/x.train.mat.sd) # standardize to have zero mean and unit sd
data.valid.std.c = data.frame(x.valid.std, donr=c.valid) # to classify donr
data.valid.std.y = data.frame(x.valid.std[c.valid==1,], damt=y.valid) # to predict damt when donr=1
x.test.std = t((t(x.test)-x.train.mean)/x.train.sd) # standardize using training mean and sd
data.test.std = data.frame(x.test.std)
# Factor-response versions (column names prefixed "x.") used by the
# classification models below.
train.dat = data.frame(x = x.train.std, y = as.factor(data.train.std.c$donr))
valid.dat = data.frame(x = x.valid.std, y = as.factor(data.valid.std.c$donr))
#LIBRARIES
library(MASS)
library(tree)
library(randomForest)
library(gbm)
library(e1071)
library(lars)
library(leaps)
library(glmnet)
library(pls)
library(splines)
library(gam)
library(ISLR)
library(magrittr)
library(stringr)
library(lubridate)
library(tidyr)      # FIX: gather() is called in the EDA section but tidyr was never loaded
library(dplyr)      # loaded after MASS so dplyr::select() masks MASS::select()
library(ggplot2)
library(ggthemes)
library(ggcorrplot) # FIX: ggcorrplot() is called in the EDA section but was never loaded
#EXPLORATORY DATA ANALYSIS
# Helper Functions

# density_plots: draw one free-scaled density panel per column of `df`.
#   df   - data frame whose (numeric) columns each become a facet
#   main - overall plot title
# Returns a ggplot object.
# NOTE(review): gather() comes from tidyr -- confirm tidyr is attached
# before this helper is called.
density_plots <- function( df , main = 'Density Plots') {
  df %>%
    gather(
      key = measure ,
      value = value
    ) %>%
    ggplot(
      aes(
        value ,
        color = measure ,
        group = measure
      )) +
    geom_density() +
    facet_wrap( ~measure , scales = 'free' ) +
    ggtitle( label = main ) +
    theme_gdocs() +
    theme( plot.title = element_text( face = 'bold' , size = rel(1.5) , hjust = .5 ) )
}
## EDA Visuals ################################################################
# Builds a named list `p` of ggplot objects, then writes each one to
# ./output/plot-<name>.png at the end of this section.
p <- list()
data <- charity %>% tbl_df
# Predictor columns of the charity data used throughout the EDA.
cfg.predictors <-
  c( "reg1" , "reg2" , "reg3" , "reg4" , "home" , "chld" , "hinc" ,
     "genf" , "wrat" , "avhv" , "incm" , "inca" , "plow" , "npro" ,
     "tgif" , "lgif" , "rgif" , "tdon" , "tlag" , "agif" )
# raw data density plots of the skewed dollar/count predictors
p$density_raw <- {
  data %>%
    filter( part == 'train' ) %>%
    dplyr::select( avhv , npro , tgif , lgif , rgif , agif , tdon , tlag , incm , inca ) %>%
    density_plots( 'Density Plots of Raw Data' )
}
# log-transformed density plots (motivates the log transforms in DATA SETUP)
p$density_logtransformed <- {
  data %>%
    filter( part == 'train' ) %>%
    dplyr::select( avhv , npro , tgif , lgif , rgif , agif , tdon , tlag , incm , inca ) %>%
    mutate_all( log ) %>%
    density_plots( 'Density Plots of Log-Transformed Data' )
}
# correlation plot of the non-region predictors, hierarchically ordered
# NOTE(review): ggcorrplot() requires the ggcorrplot package -- confirm it is
# attached before this section runs.
p$correlations <- {
  data %>%
    filter( part == 'train' ) %>%
    select( cfg.predictors ) %>%
    select( -matches('reg') ) %>%
    cor() %>%
    round( 3 ) %>%
    ggcorrplot(
      ggtheme = theme_gdocs ,
      legend.title = 'r' ,
      hc.order = T ,
      colors = c('red' , 'white' , 'green') ,
      show.diag = F ,
      outline.color = 'black' ,
      lab = T ,
      lab_col = 'black' ,
      lab_size = rel(2.5) ,
      tl.cex = 13
    ) +
    ggtitle( 'Predictor Correlations' ) +
    theme(
      axis.ticks = element_blank() ,
      panel.grid.major = element_blank() ,
      panel.grid.minor = element_blank() ,
      axis.text = element_text(face = 'bold', size = rel(3.5) , hjust = 0 ) ,
      plot.title = element_text( face = 'bold' , size = rel(1.5) , hjust = .5 )
    )
}
# predictors to damt (donors only) - previously identified predictors are
# log-transformed first; loess smooth and 2-d density contours are overlaid
p$predstodamt <- {
  data %>%
    filter( part == 'train' ) %>%
    filter( damt > 0 ) %>%
    mutate_at(
      vars( avhv , tgif , lgif , rgif , agif , tlag , incm , inca ) ,
      log ) %>%
    select( cfg.predictors , damt , -matches('reg') , -home , -genf ) %>%
    gather( predictor , value , -damt ) %>%
    ggplot( aes( value , damt , color = predictor , group = predictor ) ) +
    geom_point() +
    geom_density2d( color = 'gray' ) +
    geom_smooth( method = 'loess' , color = 'black') +
    facet_wrap( ~ predictor , scales = 'free' ) +
    theme_gdocs() +
    theme(
      legend.position = 'none'
    )
}
# save plots to png under ./output (directory must already exist)
p %>%
  names %>%
  lapply( function(x){
    ggsave(
      filename = paste0( 'plot-' , x , '.png' ) ,
      plot = p[[x]] ,
      path = './output'
    )
  })
#CLASSIFICATION MODELS: LOGIT, LDA, TREES-BASED, AND SVM
# Each model is fit on train.dat and scored on valid.dat by mailing profit:
# each mailing costs $2 and a responding donor is worth $14.50, so profit is
# maximized by mailing to everyone above a posterior-probability cutoff.

#LOGISTIC REGRESSION
model.logit <- glm(y ~ x.reg1 + x.reg2 + x.reg3 + x.reg4 + x.home + x.chld + x.hinc + x.genf + x.wrat +
                     x.avhv + x.incm + x.inca + x.plow + x.npro + x.tgif + x.lgif + x.rgif + x.tdon +
                     x.tlag + x.agif,
                   train.dat, family=binomial("logit"))
post.valid.logit = predict(model.logit, valid.dat, type="response") # n.valid.c post probs
# Cumulative profit when mailing in decreasing order of posterior probability.
profit.logit = cumsum(14.5*c.valid[order(post.valid.logit, decreasing=T)]-2)
plot(profit.logit) # see how profits change as more mailings are made
n.mail.valid = which.max(profit.logit) # number of mailings that maximizes profits
c(n.mail.valid, max(profit.logit)) # report number of mailings and maximum profit
cutoff.logit = sort(post.valid.logit, decreasing=T)[n.mail.valid+1] # set cutoff based on n.mail.valid
chat.valid.logit = ifelse(post.valid.logit>cutoff.logit, 1, 0) # mail to everyone above the cutoff
logit.table = table(chat.valid.logit, c.valid) # classification table
logit.mail = sum(logit.table[2,])
logit.mail.tp = logit.table[2,2]
logit.error = (logit.table[2,1]+logit.table[1,2])/2018
logit.profit = 14.5*logit.mail.tp - 2*logit.mail
logit.error #validation error rate: 0.2215
logit.mail #total mailings: 1,406
logit.profit #total profit: $11,383.50
#LDA Model Base (linear terms only)
model.ldaB = lda(y ~ x.reg1 + x.reg2 + x.reg3 + x.reg4 + x.home + x.chld + x.hinc + x.genf + x.wrat +
                   x.avhv + x.incm + x.inca + x.plow + x.npro + x.tgif + x.lgif + x.rgif + x.tdon +
                   x.tlag + x.agif,
                 train.dat) # include additional terms on the fly using I()
post.valid.ldaB = predict(model.ldaB, valid.dat)$posterior[,2] # n.valid.c post probs
profit.ldaB = cumsum(14.5*c.valid[order(post.valid.ldaB, decreasing=T)]-2)
plot(profit.ldaB) # see how profits change as more mailings are made
n.mail.valid = which.max(profit.ldaB) # number of mailings that maximizes profits
c(n.mail.valid, max(profit.ldaB)) # report number of mailings and maximum profit
cutoff.ldaB = sort(post.valid.ldaB, decreasing=T)[n.mail.valid+1] # set cutoff based on n.mail.valid
chat.valid.ldaB = ifelse(post.valid.ldaB>cutoff.ldaB, 1, 0) # mail to everyone above the cutoff
ldaB.table = table(chat.valid.ldaB, c.valid) # classification table
ldaB.mail = sum(ldaB.table[2,])
ldaB.mail.tp = ldaB.table[2,2]
ldaB.error = (ldaB.table[2,1]+ldaB.table[1,2])/2018
ldaB.profit = 14.5*ldaB.mail.tp - 2*ldaB.mail
ldaB.error #validation error rate: 0.2235
ldaB.mail #total mailings: 1,406
ldaB.profit #total profit: $11,354.50
#LDA Model Base + Quad (hinc, chld, tdon, tlag, wrat, inca, npro, tgif)
# Quadratic terms added; incm/inca/lgif/rgif dropped (correlated predictors).
model.ldaF = lda(y ~ x.reg1 + x.reg2 + x.reg3 + x.reg4 + x.home + x.chld + x.hinc + x.genf + x.wrat +
                   x.avhv + x.plow + x.npro + x.tgif + x.tdon +
                   x.tlag + x.agif + I(x.hinc^2) + I(x.chld^2) + I(x.tdon^2) + I(x.tlag^2) + I(x.wrat^2) +
                   I(x.npro^2) + I(x.tgif^2),
                 train.dat) # include additional terms on the fly using I()
post.valid.ldaF = predict(model.ldaF, valid.dat)$posterior[,2] # n.valid.c post probs
profit.ldaF = cumsum(14.5*c.valid[order(post.valid.ldaF, decreasing=T)]-2)
plot(profit.ldaF) # see how profits change as more mailings are made
n.mail.valid = which.max(profit.ldaF) # number of mailings that maximizes profits
c(n.mail.valid, max(profit.ldaF)) # report number of mailings and maximum profit
cutoff.ldaF = sort(post.valid.ldaF, decreasing=T)[n.mail.valid+1] # set cutoff based on n.mail.valid
chat.valid.ldaF = ifelse(post.valid.ldaF>cutoff.ldaF, 1, 0) # mail to everyone above the cutoff
ldaF.table = table(chat.valid.ldaF, c.valid) # classification table
ldaF.mail = sum(ldaF.table[2,])
ldaF.mail.tp = ldaF.table[2,2]
ldaF.error = (ldaF.table[2,1]+ldaF.table[1,2])/2018
ldaF.profit = 14.5*ldaF.mail.tp - 2*ldaF.mail
ldaF.error #validation error rate: 0.1457
ldaF.mail #total mailings: 1,267
ldaF.profit #total profit: $11,763.00
#CLASSIFICATION TREE, BASE MODEL
model.treeB=tree(y~.,train.dat)
summary(model.treeB)
model.treeB
tree.pred=predict(model.treeB,valid.dat,type="class")
treeB.table = table(tree.pred,valid.dat$y)
treeB.mail = sum(treeB.table[2,])
treeB.mail.tp = treeB.table[2,2]
treeB.error = (treeB.table[2,1]+treeB.table[1,2])/2018
treeB.profit = 14.5*treeB.mail.tp - 2*treeB.mail
treeB.error #validation error rate: 0.1516
treeB.mail #total mailings: 1,165
treeB.profit #total profit: $11,140.50
#CLASSIFICATION TREE, RANDOM FORESTS MODEL
#THE BELOW LOOP TO DISCOVER WHICH SEED RETURNS MAXIMUM PROFIT (SET.SEED(53) RETURNED MAXIMUM)
# mat = matrix(, ncol=4)
# for(i in 1:100){
#   iter = i
#   set.seed(i)
#   model.rf = randomForest(y~., train.dat, mtry = 4, importance = TRUE)
#   summary(model.rf)
#   model.rf
#   tree.pred=predict(model.rf, valid.dat, type = "class")
#   rf.table = table(tree.pred, valid.dat$y)
#   rf.mail = sum(rf.table[2,])
#   rf.mail.tp = rf.table[2,2]
#   rf.error = (rf.table[2,1]+rf.table[1,2])/2018
#   rf.profit = 14.5*rf.mail.tp - 2*rf.mail
#   vec = c(iter, rf.error, rf.mail, rf.profit)
#   mat = rbind(mat, vec)
#   # print(i)
#   # print(rf.error) #validation error rate:
#   # print(rf.mail) #total mailings:
#   # print(rf.profit) #total profit: $
# }
set.seed(53)
model.rf = randomForest(y~., train.dat, mtry = 4, importance = TRUE)
summary(model.rf)
model.rf
tree.pred=predict(model.rf,valid.dat,type="class")
rf.table = table(tree.pred,valid.dat$y)
rf.mail = sum(rf.table[2,])
rf.mail.tp = rf.table[2,2]
rf.error = (rf.table[2,1]+rf.table[1,2])/2018
rf.profit = 14.5*rf.mail.tp - 2*rf.mail
rf.error #validation error rate: 0.1070
rf.mail #total mailings: 1,063
rf.profit #total profit: $11,257.50
#SVM CLASSIFICATION MODEL (same quadratic feature set as the ldaF model)
svm.train = svm(y ~ x.reg1 + x.reg2 + x.reg3 + x.reg4 + x.home + x.chld + x.hinc + x.genf + x.wrat +
                  x.avhv + x.plow + x.npro + x.tgif + x.tdon +
                  x.tlag + x.agif + I(x.hinc^2) + I(x.chld^2) + I(x.tdon^2) + I(x.tlag^2) + I(x.wrat^2)
                + I(x.npro^2) + I(x.tgif^2),
                data = train.dat, kernel = "linear", cost=10,scale = FALSE)
summary(svm.train)
set.seed(53)
# Tune the linear-kernel cost parameter by cross-validation.
tune.charity = tune(svm, y ~ x.reg1 + x.reg2 + x.reg3 + x.reg4 + x.home + x.chld + x.hinc + x.genf + x.wrat +
                      x.avhv + x.plow + x.npro + x.tgif + x.tdon +
                      x.tlag + x.agif + I(x.hinc^2) + I(x.chld^2) + I(x.tdon^2) + I(x.tlag^2) + I(x.wrat^2)
                    + I(x.npro^2) + I(x.tgif^2),
                    data = train.dat, kernel = "linear", ranges = list(cost = c(0.001, 0.005, 0.01, 0.05, 0.1, 1, 5, 10)))
summary(tune.charity)
bestmod = tune.charity$best.model
summary(bestmod)
svm.pred = predict(bestmod, valid.dat)
svm.table = table(predict = svm.pred, truth = valid.dat$y)
svm.mail = sum(svm.table[2,])
svm.mail.tp = svm.table[2,2]
svm.error = (svm.table[2,1]+svm.table[1,2])/2018
svm.profit = 14.5*svm.mail.tp - 2*svm.mail
svm.error #validation error rate: 0.1115
svm.mail #total mailings: 1,072
svm.profit #total profit: $11,239.50
# PREDICTION MODELING
# Models below predict damt (gift amount) for donors only, trained on
# data.train.std.y and evaluated by MSE on data.valid.std.y.
# LEAST SQUARES REGRESSION
#LEAST SQUARES MODEL (LM) with polynomial terms in wrat and tlag
model.ls3 = lm(damt ~ reg1 + reg2 + reg3 + reg4 + chld + hinc +
                 wrat + I(wrat^2) + I(wrat^3) + I(wrat^4) + avhv + plow + npro +
                 tgif + tdon + tlag + I(tlag^2) + agif,
               data.train.std.y)
pred.valid.ls3 = predict(model.ls3, newdata = data.valid.std.y) # validation predictions
ls3.mse = mean((y.valid - pred.valid.ls3)^2) # mean prediction error
ls3.se = sd((y.valid - pred.valid.ls3)^2)/sqrt(n.valid.y) # std error
ls3.mse
ls3.se
#BEST SUBSETS (BSS) over the same 18-term design, model chosen by BIC
bss.train.fit = regsubsets(damt ~ reg1 + reg2 + reg3 + reg4 + chld + hinc +
                             wrat + I(wrat^2) + I(wrat^3) + I(wrat^4) + avhv + plow + npro +
                             tgif + tdon + tlag + I(tlag^2) + agif,
                           data=data.train.std.y, nvmax=17)
which.min(summary(bss.train.fit)$bic)
coef(bss.train.fit,which.min(summary(bss.train.fit)$bic))
# Prediction helper for regsubsets fits (leaps ships no predict() method).
# Rebuilds the design matrix from the formula stored in the fitted call,
# then multiplies the columns named by the id-variable model's coefficients.
predict.regsubsets <- function(object, newdata, id, ...) {
  model_formula <- as.formula(object$call[[2]])
  design <- model.matrix(model_formula, newdata)
  beta <- coef(object, id = id)
  design[, names(beta)] %*% beta
}
# Score the BIC-selected best-subsets model on the validation set.
bss.pred = predict.regsubsets(bss.train.fit, data.valid.std.y, which.min(summary(bss.train.fit)$bic))
bss.mse = mean((y.valid - bss.pred)^2) # mean prediction error
bss.se = sd((y.valid - bss.pred)^2)/sqrt(n.valid.y) # std error
bss.mse
bss.se
#BEST SUBSETS 10-FOLD VALIDATION (BSSF)
# Choose the subset size by 10-fold CV instead of BIC.
k=10
set.seed(53)
bssf.folds = sample(1:k,nrow(data.train.std.y),replace=TRUE)
bssf.cv.errors = matrix(NA, k, 10, dimnames=list(NULL, paste(1:10)))
for (j in 1:k){
  bssf.train.fit=regsubsets(damt ~ reg1 + reg2 + reg3 + reg4 + chld + hinc +
                              wrat + I(wrat^2) + I(wrat^3) + I(wrat^4) + avhv + plow + npro +
                              tgif + tdon + tlag + I(tlag^2) + agif,
                            data=data.train.std.y[bssf.folds!=j,], nvmax=17)
  # predict() dispatches to predict.regsubsets defined above
  for (i in 1:10){
    bssf.pred = predict(bssf.train.fit, data.train.std.y[bssf.folds==j,], id=i)
    bssf.cv.errors[j, i] = mean((bssf.pred - data.train.std.y$damt[bssf.folds==j])^2)
  }
}
bssf.mcv.errors = apply(bssf.cv.errors, 2, mean)
which.min(bssf.mcv.errors) #Returns the number of variables in the Cross-Validation Selected model
coef(bssf.train.fit, which.min(bssf.mcv.errors)) #Returns the coefficients for the CV selected model
bssf.pred = predict.regsubsets(bssf.train.fit, data.valid.std.y, which.min(bssf.mcv.errors)) #Predicts y using the CV Selected model
bssf.mse = mean((y.valid - bssf.pred)^2) #Calculates test MSE for the CV Selected model
bssf.se = sd((y.valid - bssf.pred)^2)/sqrt(n.valid.y) # std error
bssf.mse
bssf.se
#RIDGE REGRESSION (RR)
#make x matrix and y from data.train.std.y
# NOTE(review): the design matrix has fewer than 21 columns, so [,-21] drops
# nothing here -- confirm whether [,-1] (drop the intercept column) was intended.
rr.train.x = model.matrix(damt ~ reg1 + reg2 + reg3 + reg4 + chld + hinc +
                            wrat + I(wrat^2) + I(wrat^3) + I(wrat^4) + avhv + plow + npro +
                            tgif + tdon + tlag + I(tlag^2) + agif, data.train.std.y)[,-21]
rr.train.y = data.train.std.y$damt
rr.valid.x = model.matrix(damt ~ reg1 + reg2 + reg3 + reg4 + chld + hinc +
                            wrat + I(wrat^2) + I(wrat^3) + I(wrat^4) + avhv + plow + npro +
                            tgif + tdon + tlag + I(tlag^2) + agif, data.valid.std.y)[,-21]
rr.valid.y = data.valid.std.y$damt
set.seed(53)
# lambda chosen by cv.glmnet's default 10-fold cross-validation
rr.cv.out = cv.glmnet(rr.train.x, rr.train.y, alpha=0)
rr.bestlam = rr.cv.out$lambda.min
rr.bestlam
rr.train.fit = glmnet(rr.train.x, rr.train.y, alpha=0, lambda=rr.bestlam)
coef(rr.train.fit)[, 1]
rr.pred = predict(rr.train.fit, s=rr.bestlam, newx=rr.valid.x)
rr.mse = mean((y.valid - rr.pred)^2) # mean prediction error
rr.se = sd((y.valid - rr.pred)^2)/sqrt(n.valid.y) # std error
rr.mse
rr.se
#LASSO (same design matrices as ridge; alpha=1)
set.seed(53)
lasso.cv.out = cv.glmnet(rr.train.x, rr.train.y, alpha=1)
lasso.bestlam = lasso.cv.out$lambda.min
lasso.bestlam
lasso.train.fit = glmnet(rr.train.x, rr.train.y, alpha=1, lambda=lasso.bestlam)
coef(lasso.train.fit)[, 1]
lasso.pred = predict(lasso.train.fit, s=lasso.bestlam, newx=rr.valid.x)
lasso.mse = mean((y.valid - lasso.pred)^2)
lasso.se = sd((y.valid - lasso.pred)^2)/sqrt(n.valid.y) # std error
lasso.mse
lasso.se
#PRINCIPAL COMPONANTS
set.seed(53)
pcr.train.fit = pcr(damt ~ reg1 + reg2 + reg3 + reg4 + chld + hinc +
                      wrat + I(wrat^2) + I(wrat^3) + I(wrat^4) + avhv + plow + npro +
                      tgif + tdon + tlag + I(tlag^2) + agif,
                    data=data.train.std.y, scale=TRUE, validation="CV")
validationplot(pcr.train.fit,val.type="MSEP")
# Evaluate three candidate component counts read off the validation plot.
#PCR (M=10)
pcr.pred = predict(pcr.train.fit, data.valid.std.y, ncomp=10)
pcr.mse = mean((y.valid - pcr.pred)^2) # mean prediction error
pcr.se = sd((y.valid - pcr.pred)^2)/sqrt(n.valid.y) # std error
pcr.mse
pcr.se
#PCR (M=13)
pcr.pred = predict(pcr.train.fit, data.valid.std.y, ncomp=13)
pcr.mse = mean((y.valid - pcr.pred)^2) # mean prediction error
pcr.se = sd((y.valid - pcr.pred)^2)/sqrt(n.valid.y) # std error
pcr.mse
pcr.se
#PCR (M=16)
pcr.pred = predict(pcr.train.fit, data.valid.std.y, ncomp=16)
pcr.mse = mean((y.valid - pcr.pred)^2) # mean prediction error
pcr.se = sd((y.valid - pcr.pred)^2)/sqrt(n.valid.y) # std error
pcr.mse
pcr.se
#PARTIAL LEAST SQUARES
set.seed(53)
pls.train.fit = plsr(damt ~ reg1 + reg2 + reg3 + reg4 + chld + hinc +
                       wrat + I(wrat^2) + I(wrat^3) + I(wrat^4) + avhv + plow + npro +
                       tgif + tdon + tlag + I(tlag^2) + agif,
                     data=data.train.std.y, scale=TRUE, validation="CV")
validationplot(pls.train.fit,val.type="MSEP")
#PLS (M=3)
pls.pred = predict(pls.train.fit, data.valid.std.y, ncomp=3)
pls.mse = mean((y.valid - pls.pred)^2) # mean prediction error
pls.se = sd((y.valid - pls.pred)^2)/sqrt(n.valid.y) # std error
pls.mse
pls.se
#PLS (M=7)
pls.pred = predict(pls.train.fit, data.valid.std.y, ncomp=7)
pls.mse = mean((y.valid - pls.pred)^2) # mean prediction error
pls.se = sd((y.valid - pls.pred)^2)/sqrt(n.valid.y) # std error
pls.mse
pls.se
#PLS (M=14)
pls.pred = predict(pls.train.fit, data.valid.std.y, ncomp=14)
pls.mse = mean((y.valid - pls.pred)^2) # mean prediction error
pls.se = sd((y.valid - pls.pred)^2)/sqrt(n.valid.y) # std error
pls.mse
pls.se
#GAM
#GAM - removed correlated variables; smoothing splines s() and polynomials
gam.train.fit3 = gam(damt ~ reg1 + reg2 + reg3 + reg4 + s(chld,3) + s(hinc, 3) +
                       poly(wrat, 3) + s(avhv, 3) + s(plow, 4) + s(npro, 3) +
                       s(tgif, 4) + s(tdon, 3) + poly(tlag, 2) + s(agif, 4),
                     data=data.train.std.y)
gam.pred = predict(gam.train.fit3, data.valid.std.y)
gam.mse = mean((y.valid - gam.pred)^2) # mean prediction error
gam.se = sd((y.valid - gam.pred)^2)/sqrt(n.valid.y) # std error
gam.mse
gam.se
#MODEL LDAF FINAL CLASSIFICATION MODEL
# Refit the winning LDA specification on the standardized training frame
# (original column names) to score the test set.
model.ldaF = lda(donr ~ reg1 + reg2 + reg3 + reg4 + home + chld + hinc + genf + wrat +
                   avhv + plow + npro + tgif + tdon +
                   tlag + agif + I(hinc^2) + I(chld^2) + I(tdon^2) + I(tlag^2) + I(wrat^2) +
                   I(npro^2) + I(tgif^2),
                 data.train.std.c) # include additional terms on the fly using I()
n.mail.valid = which.max(profit.ldaF)
# Rescale the validation mailing rate to the test set, whose response rate
# (10%) differs from the oversampled validation set (50%).
tr.rate = .1 # typical response rate is .1
vr.rate = .5 # whereas validation response rate is .5
adj.test.1 = (n.mail.valid/n.valid.c)/(vr.rate/tr.rate) # adjustment for mail yes
adj.test.0 = ((n.valid.c - n.mail.valid)/n.valid.c)/((1 - vr.rate)/(1 - tr.rate)) # adjustment for mail no
adj.test = adj.test.1/(adj.test.1+adj.test.0) # scale into a proportion
n.mail.test = round(n.test*adj.test, 0) # calculate number of mailings for test set
post.test = predict(model.ldaF, data.test.std)$posterior[,2] # post probs for test data
cutoff.test = sort(post.test, decreasing=T)[n.mail.test+1] # set cutoff based on n.mail.test
chat.test = ifelse(post.test>cutoff.test, 1, 0) # mail to everyone above the cutoff
table(chat.test)
#GAM1 FINAL PREDICTION MODEL
yhat.test = predict(gam.train.fit3, newdata = data.test.std) # test predictions
#OUTPUT
# Final submission: predicted mail decision (chat) and gift amount (yhat).
ip = data.frame(chat=chat.test, yhat=yhat.test) # data frame with two variables: chat and yhat
write.csv(ip, file="E:/STAT 897/Group Project/Report/Report Elements/ip.csv",
          row.names=FALSE) # use group member initials for file name
|
d7b5bc79cab5aba56dec4e658055a5d0dd8900f6
|
13694ccdbebfa834e371d38baf6c649b48b92c35
|
/man/baltimore_map.Rd
|
b50520c6e9f5c243ebaf0c2325e0e3c053c0bc3b
|
[] |
no_license
|
heike/cityshapes
|
56c1d16696af1bc0e47fdeb1bb67cc15e9c7a05d
|
75b1c327dd4067ec68c86b0ae736c68eef019db1
|
refs/heads/master
| 2020-03-09T16:11:46.264511
| 2018-04-10T05:39:29
| 2018-04-10T05:39:29
| 128,878,324
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 870
|
rd
|
baltimore_map.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{baltimore_map}
\alias{baltimore_map}
\title{Shape files for the neighborhoods of Baltimore}
\format{A data frame with 4732 rows and 6 variables:
\describe{
\item{OBJECTID}{identifier for each region}
\item{long}{geographic longitude}
\item{lat}{geographic latitude}
\item{group}{grouping variable for the polygon}
\item{order}{order of the rows}
\item{region}{name of the region}
}}
\usage{
baltimore_map
}
\description{
A dataset consisting of shapefiles describing the neighborhoods of Baltimore.
The shapefiles of the neighborhoods is available from the Baltimore City Open GIS Data \url{http://gis-baltimore.opendata.arcgis.com/}
}
\examples{
data(baltimore_map)
library(ggplot2)
ggplot(baltimore_map, aes(x = long, y = lat, group = group)) +
  geom_path()
}
\keyword{datasets}
|
5c07dc429fd1896b4c8e082d9c8daed77dc00c58
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/cna/R/cna.r
|
0486c507742007cc0b4de8f64018181640ddb2b7
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,424
|
r
|
cna.r
|
# cna: Coincidence Analysis.
# Identifies minimally sufficient conditions (msc) and atomic solution
# formulas (asf) for each factor of a truth table, subject to consistency
# (`con`) and coverage (`cov`) thresholds.
#   x         - a truthTab, or an object coercible via truthTab()
#   ordering  - causal ordering (list of factor-name vectors); checked by
#               check.ordering()
#   strict    - passed to potential.effects(); strict ordering if TRUE
#   con, cov  - consistency / coverage thresholds in [0, 1]
#   notcols   - factor names to negate ("all" negates every factor);
#               negation is encoded by swapping the name's case and
#               flipping 0/1 values
#   maxstep   - maximum number of msc disjuncts tried for an asf
#   suff.only - if TRUE, stop after the sufficiency analysis
#   what      - default display selection stored for print.cna()
# Returns an object of class "cna".
# Relies on package-internal helpers not shown here: truthTab,
# check.ordering, potential.effects, sufficient, Rbind, combine,
# verify.conditions, label.conditions, coverage, consistency, necessary,
# contains.
cna <- function(x, ordering = NULL, strict = FALSE, con = 1, cov = 1, notcols = NULL, maxstep = 5,
                suff.only = FALSE, what="mac") {
  if (inherits(x, "truthTab")) {tt <- x}
  else {tt <- truthTab(x)}
  # Negation handling: swap upper/lower case in the column name(s) and flip
  # the 0/1 entries, either for every column ("all") or the listed ones.
  if ((! is.null(notcols)) && notcols == "all")
  {
    colnames(tt)<-chartr("qwertzuiopasdfghjklyxcvbnmQWERTZUIOPASDFGHJKLYXCVBNM","QWERTZUIOPASDFGHJKLYXCVBNMqwertzuiopasdfghjklyxcvbnm",colnames(tt))
    for (i in 1:nrow(tt))
    {
      for (j in 1:ncol(tt))
      {
        if (tt[i,j]==0) {tt[i,j]<-1}
        else if (tt[i,j]==1) {tt[i,j]<-0}
      }
    }
  }
  else if (! is.null(notcols))
  {
    for (i in 1:length(notcols))
    {
      colnumber<-which( colnames(tt)==notcols[i])
      colnames(tt)[colnumber]<-chartr("qwertzuiopasdfghjklyxcvbnmQWERTZUIOPASDFGHJKLYXCVBNM","QWERTZUIOPASDFGHJKLYXCVBNMqwertzuiopasdfghjklyxcvbnm",colnames(tt)[colnumber])
      for (j in 1:nrow(tt))
      {
        if (tt[j,colnumber]==0) {tt[j,colnumber]<-1}
        else if (tt[j,colnumber]==1) {tt[j,colnumber]<-0}
      }
    }
  }
  # if (inherits(x, "truthTab")) {x <- tt}
  x <-tt  # keep the (possibly negated) table as the stored input
  cl <- match.call()
  if (nrow(tt) <= 1)
    stop("Truth table must have at least two rows.")
  if (ncol(tt) < 2 || ncol(tt) > 26)
    stop("Truth table must have between 2 and 26 columns.")
  check.ordering(ordering, tt)
  if (maxstep < 1) suff.only <- FALSE
  f <- as.vector(attr(tt, "n"))  # case frequencies per truth-table row
  sol <- vector("list", length(tt))
  effect.names <- colnames(tt)
  names(sol) <- effect.names
  tt.logi <- as.data.frame(lapply(tt, "mode<-", "logical"))
  # Treat each factor in turn as the outcome z.
  for (zname in effect.names){
    z <- tt.logi[[zname]]
    xz <- tt.logi[potential.effects(tt, zname, ordering, strict = strict)]
    if (ncol(xz) == 0) next
    # Identify and minimize sufficient conditions
    # -------------------------------------------
    xsuff <- sufficient(xz, z, f, con = con)
    # initiate list of minimally sufficient conditions
    min.suff.conditions <- vector("list", ncol(xz))
    consist <- vector("list", ncol(xz))
    step <- 1L
    # Iteratively drop one component at a time; a condition is minimally
    # sufficient when no reduced version of it is still sufficient.
    if (nrow(xsuff) > 0) repeat{
      if (step >= ncol(xz)){
        min.suff.conditions[[step]] <- xsuff
        consist[[step]] <- attr(xsuff, "consistency")
        break
      }
      # reduce sufficient conditions from previous step by removing one component
      reduced <- lapply(seq_along(xsuff),
                        function(i) xsuff[!is.na(xsuff[i]), -i, drop = FALSE])
      reduced.df <- do.call(Rbind, reduced)
      reduced.unique <- unique(reduced.df) # Eliminate duplicates
      # determine the reduced conditions that are sufficient
      xsuff.new <- sufficient(xz, z, f, cond = reduced.unique, con = con, nms = names(xz))
      # identify mimally suffcient conditions among the sufficient conditions from the previous step
      row.origin <- unlist(lapply(xsuff, function(x) which(!is.na(x))),
                           use.names = FALSE)
      combinations.grouped <- split(as.integer(combine(reduced.df)), row.origin)
      is.min.suff <- sapply(combinations.grouped,
                            function(x) !any(x %in% attr(xsuff.new, "which.sufficient")))
      min.suff <- xsuff[is.min.suff, , drop = FALSE]
      # add to list
      min.suff.conditions[[step]] <- min.suff
      consist[[step]] <- attr(xsuff, "consistency")[is.min.suff]
      if (nrow(xsuff.new) == 0) break
      step <- step + 1L
      xsuff <- xsuff.new
    }
    minSuff <- do.call(rbind, min.suff.conditions)
    if (length(minSuff) == 0) next
    vc <- verify.conditions(minSuff, xz)
    # msc table: one row per minimally sufficient condition, sorted by
    # decreasing consistency*coverage, then alphabetically.
    msc <- data.frame(condition = label.conditions(minSuff),
                      consistency = unlist(consist),
                      coverage = apply(vc, 2, coverage, z, f),
                      stringsAsFactors = FALSE)
    msc <- msc[order(-with(msc, consistency * coverage), msc$condition), ]
    sol[[zname]] <- list(msc = msc)
    # Identify and minimize necessary conditions
    # -------------------------------------------
    # initiate list of atomic solution formulas
    if (!necessary(minSuff, xz, z, f, cov = cov) || suff.only) next
    cc <- vc[z, , drop = FALSE]  # condition truth values on rows where z holds
    fz <- f[z]
    sum.fz <- sum(fz)
    min.nec.conditions <- cov.min.nec <- cons.min.nec <- list()
    step <- 1L
    # Search disjunctions of 1, 2, ... maxstep msc's for necessity.
    repeat{
      CondInd <- t(combn(seq_len(ncol(cc)), step))
      # eliminate conditions that can not be minimally necessary
      for (i in seq_along(min.nec.conditions)){
        elimCond <- contains(CondInd, min.nec.conditions[[i]])
        CondInd <- CondInd[!elimCond, , drop = FALSE]
        if (nrow(CondInd) == 0) break
      }
      # coverage of each candidate disjunction among the z-rows
      cover <- apply(CondInd, 1,
                     function(x) sum(fz[apply(cc[, x, drop = FALSE], 1, any)]) / sum.fz)
      nec <- which(cover >= cov)
      # consistency of necessary conditions
      necConds <- apply(CondInd[nec, , drop = FALSE], 1, function(nci) minSuff[nci, , drop = FALSE])
      cons.necConds <- sapply(necConds,
                              function(nc){
                                logvect <- apply(verify.conditions(nc, xz), 1, any)
                                consistency(logvect, z, f)
                              })
      # check whether necessary conditions are also sufficient
      if (length(nec) && con < 1){
        also.consistent <- cons.necConds >= con
        nec <- nec[also.consistent]
        cons.necConds <- cons.necConds[also.consistent]
      }
      if (length(nec)){
        min.nec.conditions <- c(min.nec.conditions, list(CondInd[nec, , drop = FALSE]))
        cov.min.nec <- c(cov.min.nec, list(cover[nec]))
        cons.min.nec <- c(cons.min.nec, list(cons.necConds))
      }
      if (step >= maxstep || step >= ncol(cc)) break
      step <- step + 1L
    }
    # Build the asf table: each formula is the " + "-joined disjunction of
    # its msc labels, sorted like the msc table above.
    if (length(min.nec.conditions) > 0){
      lbls <- label.conditions(minSuff)
      asf0 <- lapply(min.nec.conditions,
                     function(x) apply(x, 1, function(r) paste(sort(lbls[r]), collapse = " + ")))
      sol.frame <- data.frame(condition = unlist(asf0),
                              consistency = unlist(cons.min.nec),
                              coverage = unlist(cov.min.nec),
                              stringsAsFactors = FALSE)
      sol.frame <-
        sol.frame[order(-with(sol.frame, consistency * coverage), sol.frame$condition), ]
      rownames(sol.frame) <- NULL
      sol[[c(zname, "asf")]] <- sol.frame
    }
  }
  # Assemble the classed result consumed by print.cna() / msc() / asf() / csf().
  out <- structure(list(), class = "cna")
  out$call <- cl
  out$x <- x
  out$ordering <- ordering
  out$truthTab <- tt
  # names(sol) <- toupper(names(sol))
  out$solution <- sol
  out$what <- what
  return(out)
}
# print method for class cna
# print method for class cna
# Pretty-prints a "cna" object. 'what' selects the sections to show: any
# combination of the letters "t" (truth table), "m" (minimally sufficient
# conditions), "a" (atomic solution formulas), "c" (complex solution
# formulas), or "all". Each outcome prints at most 'nsolutions' rows; when
# truncated, the total count is reported. Returns 'x' invisibly.
print.cna <- function(x, what=x$what , digits = 3, nsolutions = 5,
row.names = FALSE, show.cases=FALSE, ...){
cat("--- Coincidence Analysis (CNA) ---\n")
# cat("\nFunction call:\n", deparse(x$call), "\n", sep = "")
what <- tolower(what)
# Translate 'what' into a named logical vector over the four sections.
if (what == "all") whatl <- rep(TRUE, 4)
else whatl <- !is.na(match(c("t", "m", "a", "c"), unlist(strsplit(what, ""))))
names(whatl) <- c("t", "m", "a", "c")
# Section "t": the truth table, optionally with case labels.
if (whatl["t"]){
cat("\nTruth table:\n")
if (show.cases==TRUE)
{print(x$truthTab,show.cases=TRUE)}
else
{print(x$truthTab)}
}
# Show the causal ordering (marked "(strict)" when the call asked for a
# strict ordering); otherwise just list the factor names.
if (!is.null(x$ordering))
cat("\nCausal ordering",
if(!is.null(x$call$strict) && eval(x$call$strict)) " (strict)", ":\n",
do.call(paste, c(lapply(x$ordering, paste, collapse = ", "),
sep = " < ")),
"\n", sep = "")
else cat("\nFactors:", paste(names(x$truthTab), collapse = ", "), "\n")
# Section "m": minimally sufficient conditions, grouped by outcome.
if (whatl["m"]){
msc.df <- msc(x)
cat("\nMinimally sufficient conditions:\n",
"--------------------------------", sep = "")
if (nrow(msc.df) == 0) cat("\n*none*\n")
else for (msc1 in split(msc.df, msc.df$outcome)){
cat("\nOutcome ", msc1$outcome[1], ":\n", sep = "")
# Truncate to 'nsolutions' rows; remember whether we did ('short').
if (short <- ((nsol <- nrow(msc1)) > nsolutions))
msc1 <- msc1[seq_len(nsolutions), , drop = FALSE]
print(msc1[c("condition", "consistency", "coverage")],
digits = digits, row.names = row.names, ...)
if (short)
cat(" ... (total no. of conditions: ", nsol, ")\n", sep = "")
}
}
# asf.df is needed by both the "a" and "c" sections; compute it once.
if (any(whatl[c("a", "c")]))
asf.df <- asf(x)
# Section "a": atomic solution formulas, grouped by outcome.
if (whatl["a"]){
cat("\nAtomic solution formulas:\n",
"-------------------------", sep = "")
if (nrow(asf.df) == 0) cat("\n*none*\n")
else for (asf1 in split(asf.df, asf.df$outcome)){
cat("\nOutcome ", asf1$outcome[1], ":\n", sep = "")
if (short <- ((nsol <- nrow(asf1)) > nsolutions))
asf1 <- asf1[seq_len(nsolutions), , drop = FALSE]
print(asf1[c("condition", "consistency", "coverage")],
digits = digits, row.names = row.names, ...)
if (short)
cat(" ... (total no. of formulas: ", nsol, ")\n", sep = "")
}
}
# Section "c": complex solution formulas, built from the atomic ones.
if (whatl["c"]){
csf1 <- csf(asfx = asf.df)
cat("\nComplex solution formulas:\n",
"--------------------------\n", sep = "")
if (nrow(csf1) == 0) cat("*none*\n")
else {
if (short <- ((nsol <- nrow(csf1)) > nsolutions))
csf1 <- csf1[seq_len(nsolutions), , drop = FALSE]
print(csf1, digits = digits, row.names = row.names, ...)
if (short)
cat(" ... (total no. of formulas: ", nsol, ")\n", sep = "")
}
}
invisible(x)
}
|
fa59d7b8697ccb5e08543160c4e733e8fcaadfa8
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/docopulae/examples/nint_space.Rd.R
|
efc7f4273f35d36bfc4c1d7446accad0c708f3c0
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 407
|
r
|
nint_space.Rd.R
|
library(docopulae)
### Name: nint_space
### Title: Space
### Aliases: nint_space
### ** Examples
# Build an integration space from several dimension specifications:
# a grid, a scattered grid, an unbounded interval, an interval whose upper
# bound depends on the first coordinate, and a list of alternatives
# (grid or nested interval).
s = nint_space(nint_gridDim(seq(1, 3, 0.9)),
nint_scatDim(seq(2, 5, 0.8)),
nint_intvDim(-Inf, Inf),
nint_funcDim(function(x) nint_intvDim(0, x[1])),
list(nint_gridDim(c(0, 10)),
list(nint_intvDim(1, 7)))
)
s
|
ab68ca5282c4f02160ecbc9327aacb490acb7f72
|
a1da88a19d3025b77df3edc4b3bcb55a90925ac7
|
/content/post/old/corona.R
|
a95dde01ae5f74bbb0f7e5f3d910f1a33771e928
|
[] |
no_license
|
tormodb/academic_blog
|
d43e0b0d522d82ecd2ae7c71f9e36ffe6e40c07f
|
bf7a40f638a75af78a6e7bfac323d20effa0d1c4
|
refs/heads/master
| 2023-06-22T23:04:25.086287
| 2023-06-14T13:09:21
| 2023-06-14T13:09:21
| 229,732,379
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,065
|
r
|
corona.R
|
# Scrape daily Norwegian covid-19 figures (hospitalisations, ICU counts,
# infections) from fhi.no and helsedirektoratet.no and append them to
# local CSV logs.
# NOTE(review): page layouts, CSS selectors and URL patterns are from
# spring 2020 and may no longer match the live sites; output paths are
# machine-specific.
library(tidyverse)
library(rvest)
library(xml2)

today <- format(Sys.time(), "%a %b %d %X %Y")

# Translate the abbreviated English weekday from %a (e.g. "Thu") into the
# Norwegian weekday used in the FHI news URL.
day <- unlist(strsplit(today, " "))[1]
day <- recode(day,
              Mon = "mandag",
              Tue = "tirsdag",
              Wed = "onsdag",
              Thu = "torsdag",  # FIX: %a yields "Thu"; the old "Thur" key never matched
              Fri = "fredag",
              Sat = "lordag",
              Sun = "sondag")
mth <- unlist(strsplit(today, " "))[2]
mth <- recode(mth,
              Mar = "mars",
              Apr = "april")
date <- unlist(strsplit(today, " "))[3]
year <- unlist(strsplit(today, " "))[5]

# --- FHI daily status page -------------------------------------------------
url3 <- paste0("https://www.fhi.no/nyheter/2020/status-koronasmitte-", day, "-", date, ".-", mth, "-", year)
url_res3 <- xml2::read_html(url3)
frafhi <- url_res3 %>% html_nodes(".textual-block") %>%
  map_df(~{
    tibble(
      # postal = html_node(.x, "span") %>% html_text(trim=TRUE),
      fhi = html_nodes(.x, "ul > li") %>% html_text(trim = TRUE)
    )
  })
# First bullet holds hospitalisation numbers, fourth holds ICU numbers
# -- TODO confirm against the current page structure.
innlagt_fhi <- frafhi$fhi[1] %>% str_match_all("[0-9]+") %>% unlist %>% as.numeric
intensiv_fhi <- frafhi$fhi[4] %>% str_match_all("[0-9]+") %>% unlist %>% as.numeric
innl_num <- word(innlagt_fhi)[1]
# innl_num <- as.numeric(paste0(innl_num[1], innl_num[2]))
intens_num <- as.numeric(word(intensiv_fhi)[3])
fulldate <- format(Sys.time(), "%d.%m.%Y")
fullmonth <- format(Sys.time(), "%m")
fullyear <- format(Sys.time(), "%Y")
infint_fhi <- as.data.frame(cbind(date, fullmonth, fullyear, innl_num, intens_num, fulldate))
write.table(infint_fhi, "/Users/st06810/Dropbox/UiB/blog/content/post/infint_fhi.csv", dec=".", quote=FALSE, append=TRUE, sep="\t", col.names=FALSE)

# --- Helsedirektoratet daily admissions page -------------------------------
url2 <- paste0("https://www.helsedirektoratet.no/nyheter/oversikt-over-innlagte-pasienter-med-covid-19-per-", format(Sys.Date(), "%d"), ".mars")
url_res2 <- xml2::read_html(url2)
url_res2 %>% html_text()
hdir <- url_res2 %>%
  html_nodes(".b-article-intro__intro") %>% html_text()
day_hdir <- unlist(strsplit(hdir, " "))[3]
day_hdir <- gsub('[[:punct:] ]+', '', day_hdir)
mth_hdir <- unlist(strsplit(hdir, " "))[4]
mth_hdir <- ifelse(mth_hdir == "mars", "03",
            ifelse(mth_hdir == "april", "04",
            ifelse(mth_hdir == "mai", "05",
            ifelse(mth_hdir == "juni", "06", NA))))  # FIX: "ifels" typo would error
n_innl_hdir <- unlist(strsplit(hdir, " "))[6]
n_int_hdir <- unlist(strsplit(hdir, " "))[15]
# Guard against the page not being updated yet: replace NULL scrape
# results with NA so the row can still be logged.
# FIX: the original wrapped top-level return() calls in an if block, which
# is an error outside a function; simple assignments do what was intended.
if (is.null(day_hdir)) day_hdir <- NA
if (is.null(mth_hdir)) mth_hdir <- NA
if (is.null(n_innl_hdir)) n_innl_hdir <- NA
if (is.null(n_int_hdir)) n_int_hdir <- NA
year_hdir <- 2020
date <- (paste(day_hdir, mth_hdir, year_hdir, sep = "."))
innl_date <- as.data.frame(cbind(day_hdir, mth_hdir, year_hdir, n_innl_hdir, n_int_hdir, date))
write.table(innl_date, "/Users/st06810/Dropbox/UiB/blog/content/post/innl_date.csv", dec=".", quote=FALSE, append=TRUE, sep="\t", col.names=FALSE)

# --- FHI cumulative infection counter --------------------------------------
url <- "https://www.fhi.no/sv/smittsomme-sykdommer/corona/"
url_res <- xml2::read_html(url)
n_inf <- url_res %>%
  html_nodes(".fhi-key-figure-number") %>% .[[2]] %>% html_text()
n_inf <- as.numeric(n_inf)
date_txt <- url_res %>%
  html_nodes(".fhi-key-figure-desc") %>% .[[2]] %>% html_text()
day_tmp <- unlist(strsplit(date_txt, " "))[7]
mth_tmp <- unlist(strsplit(date_txt, " "))[8]
day <- gsub('[[:punct:] ]+', '', day_tmp)
day <- as.numeric(day)
mth <- ifelse(mth_tmp == "mars", "03",
       ifelse(mth_tmp == "april", "04",
       ifelse(mth_tmp == "mai", "05",
       ifelse(mth_tmp == "juni", "06", NA))))  # FIX: "ifels" typo would error
year <- 2020
date <- (paste(day, mth, year, sep = "."))
mth <- as.numeric(mth)
inf_date <- as.data.frame(cbind(day, mth, year, n_inf, date, day_tmp, mth_tmp))
write.table(inf_date, "/Users/st06810/Dropbox/UiB/blog/content/post/inf_date.csv", dec=".", quote=FALSE, append=TRUE, sep="\t", col.names=FALSE)
|
b9c90eeebd3b012fa865f124214901f98f34fccb
|
bc12a6667b3d98685b5e3cc4abbfb186b969d968
|
/R/utils_update_grants_db.R
|
67267613ffa95f671e6a195c2d8e8c65176c2ece
|
[
"MIT"
] |
permissive
|
include-dcc/pub-include-r
|
16d28c5c5931f6f4427d44191d7b1a59cd3c2f31
|
8074939a074ccbb32f277c092120ce7303e882f3
|
refs/heads/main
| 2023-06-17T08:55:19.614567
| 2021-07-22T00:59:01
| 2021-07-22T00:59:01
| 384,605,785
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,484
|
r
|
utils_update_grants_db.R
|
#' update_grants_db
#'
#' @description Fetch the grants sheet, keep only the rows flagged for
#'   inclusion, and format the result for the database.
#' @importFrom magrittr %>%
#' @return The formatted table of included grants.
#'
#' @noRd
update_grants_db <- function() {
  raw_grants <- .fetch_grants_table()
  included_grants <- .filter_grants(raw_grants)
  .format_grants(included_grants)
}
# Read the full grants table from the project's Google Sheet.
# The col_types string pins every column's parse type (c = character,
# i = integer, D = date, d = double, ? = guess) so sheet edits cannot
# silently change parsing.
.fetch_grants_table <- function() {
sheets_id <- "17dWU0Vh_fR0Kg_tBsCAYfGBt5UdacnzZr3mlUzPay3g"
googlesheets4::read_sheet(
sheets_id,
col_types = "cccccciDccicciiccDDc?iccicicddicccccccDDiciiiciiicc"
)
}
# Keep only rows explicitly flagged for inclusion in the INCLUDE projects.
# FIX: namespace the filter() call with dplyr:: for consistency with the
# other helpers in this file (and so the package does not rely on dplyr
# being attached).
.filter_grants <- function(grants_tbl) {
  grants_tbl %>% dplyr::filter(`INCLUDE Project?` == "Yes")
}
# Derive the grant number from the project number and select/clean the
# columns kept in the database.
.format_grants <- function(grants_tbl) {
grants_tbl %>%
dplyr::mutate(
# Per-row regex built with glue: a lookbehind on the Activity code so
# str_extract() captures the word characters following it in the
# project number -- that trailing part is the grant number.
grant_regex = stringr::str_glue("(?<={Activity})\\w+"),
`Grant Number` = stringr::str_extract(`Project Number`, grant_regex)
) %>%
dplyr::select(`Project Title`,
`Administering IC`,
`Application ID`,
`Award Notice Date`,
FOA,
`Project Number`,
`Grant Number`,
Type,
Activity,
IC,
`Serial Number`,
`Project Start Date`,
`Project End Date`,
`Contact PI Person ID`,
`Contact PI / Project Leader`,
`Organization Name`,
`Organization ID (IPF)`) %>%
dplyr::distinct() %>%
# normalize column names to snake_case for database use
janitor::clean_names()
}
|
d6779eea7f6cd3acef6f5e9e9c05bd2cb29b0434
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/SimInf/examples/plot.Rd.R
|
7904e17f4422dcf38e070dec2f29ff4f04ce6364
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,440
|
r
|
plot.Rd.R
|
# Extracted Rd example for the plot,SimInf_model-method. The example body
# is wrapped in "Not run" comment lines, so sourcing this file only loads
# the package and performs no computation.
library(SimInf)
### Name: plot,SimInf_model-method
### Title: Display the outcome from a simulated trajectory
### Aliases: plot,SimInf_model-method
### ** Examples
## Not run:
##D ## Create an 'SIR' model with 100 nodes and initialise
##D ## it with 990 susceptible individuals and 10 infected
##D ## individuals in each node. Run the model over 100 days.
##D model <- SIR(u0 = data.frame(S = rep(990, 100),
##D I = rep(10, 100),
##D R = rep(0, 100)),
##D tspan = 1:100,
##D beta = 0.16,
##D gamma = 0.077)
##D
##D ## Run the model and save the result.
##D result <- run(model)
##D
##D ## Plot the median and interquartile range of the number
##D ## of susceptible, infected and recovered individuals.
##D plot(result)
##D
##D ## Plot the median and the middle 95% quantile range of the
##D ## number of susceptible, infected and recovered individuals.
##D plot(result, range = 0.95)
##D
##D ## Plot the median and interquartile range of the number
##D ## of infected individuals.
##D plot(result, compartments = "I")
##D
##D ## Plot the number of susceptible, infected
##D ## and recovered individuals in the first
##D ## three nodes.
##D plot(result, node = 1:3, range = FALSE)
##D
##D ## Plot the number of infected individuals in the first node.
##D plot(result, compartments = "I", node = 1, range = FALSE)
## End(Not run)
|
9f951474bdce4d39dbc8021330fa87884d2fa2b4
|
1827dfb1a8868ec2cfb71fa68e15e2c0f301407d
|
/R/internals.R
|
d8e5e57b9525e34e2f7516e3145bad118934f3f9
|
[] |
no_license
|
pssguy/rwunderground
|
4541546bcc38f93deca6b90880d67c8b10237248
|
d86429c111e948bd5a97dbb302e90a59c04f75a8
|
refs/heads/master
| 2021-07-13T01:56:52.887387
| 2017-10-19T16:41:48
| 2017-10-19T16:41:48
| 107,566,640
| 0
| 0
| null | 2017-10-19T15:44:39
| 2017-10-19T15:44:39
| null |
UTF-8
|
R
| false
| false
| 3,627
|
r
|
internals.R
|
#####
# Internal pacakage functions for URL handling and data.frame formatting
#####
#' Base URL for wunderground API
#'
#' @return the scheme/host/path prefix shared by every API request
#'
base_url <- function() {
  "http://api.wunderground.com/api"
}
#' Build wunderground request URL
#'
#' Assembles the full request URL from the API key, request type and
#' location slug. History-style requests ("history", "planner") embed the
#' date in the path segment; for any other request type a supplied date is
#' ignored with a warning.
#'
#' @param key wunderground API key
#' @param request_type request type TODO::list all request_types
#' @param date Date, only applicable for history requests
#' @param location location set by set_location
#'
build_url <- function(key = get_api_key(),
                      request_type,
                      date,
                      location) {
  endpoint <- paste0(location, ".json")
  dated_requests <- c("history", "planner")
  if (!is.null(date)) {
    if (request_type %in% dated_requests) {
      request_type <- paste(request_type, date, sep = "_")
    } else {
      warning("Ignoring date as it is not used in this request.")
    }
  }
  paste(base_url(), key, request_type, "q", endpoint, sep = "/")
}
#' Detect and stop for any wunderground request errors
#'
#' Raises an R error when the parsed response is missing or carries a
#' server-reported error; otherwise returns TRUE invisibly.
#'
#' @param httr_parsed_req httr request object
#'
stop_for_error <- function(httr_parsed_req) {
  response <- httr_parsed_req$response
  if (is.null(response)) {
    stop("Unknown error: Server failed to provide response status")
  }
  err <- response$error
  if (is.null(err)) {
    return(invisible(TRUE))
  }
  stop(paste0("Error from server:: ", err$type, " - ", err$description))
}
#' wunderground api requests
#'
#' Builds the request URL, performs the HTTP GET, raises on an HTTP error
#' status, and returns the parsed JSON body.
#'
#' @param request_type Request type TODO::list all types
#' @param location locations set of set_location
#' @param date Date, only applicable for history requests
#' @param key wunderground API key
#' @param message if TRUE print out requested
#' @return the parsed JSON response content (a list), not the raw httr
#'   response object
#'
wunderground_request <- function(request_type,
location,
date = NULL,
key = get_api_key(),
message = TRUE) {
URL <- build_url(
key = key,
request_type = request_type,
date = date,
location = location
)
# currenthurricane endpoints do not use the "/q" path segment
if (request_type == "currenthurricane") URL <- gsub("/q", "", URL)
req <- httr::GET(URL)
# turn any HTTP error status into an R error
httr::stop_for_status(req)
parsed_req <- httr::content(req, type = "application/json")
if (message) {
print(paste0("Requesting: ", URL))
}
parsed_req
}
#' Processes data.frames and replaces wunderground's -9999/-999 to NAs
#'
#' Wunderground encodes missing measurements with the sentinels -9999,
#' -999 and -99; each one is rewritten to NA.
#'
#' @param df the data.frame to process
#' @return data.frame with correctly encoded NAs
#'
encode_NA <- function(df) {
  for (sentinel in c(-9999, -999, -99)) {
    df[df == sentinel] <- NA
  }
  df
}
#' as.numeric with special handling for length 0 (NULL) objects
#'
#' Returns NA_real_ for zero-length input; otherwise converts the whole
#' input with as.numeric().
#'
#' NOTE(review): the dotted name reads like an S3 method of as.numeric for
#' class "nonempty"; renaming would change the package interface, so the
#' name is kept.
#'
#' @param x the object to cast as numeric
#' @return a numeric vector (same length as `x`, or length 1 when empty)
#'
as.numeric.nonempty <- function(x) {
  # FIX: ifelse() with a scalar condition returns only the first element of
  # its "yes" branch, silently truncating inputs of length > 1.
  if (length(x) > 0) as.numeric(x) else NA_real_
}
#' return object, or NA for length 0 (NULL) objects
#'
#' @param x the object to check
#' @return `x` unchanged when it has at least one element, otherwise NA
#'
nonempty <- function(x) {
  # FIX: ifelse() with a scalar condition returns only the first element of
  # "x" and strips attributes; a plain if/else preserves the full value.
  if (length(x) > 0) x else NA
}
#' Check if a variable exists for a PWS. If not set the value to -9999
#'
#' @param x the value to check
#' @param class a character given the desired class for the variable
#' @return `x` coerced via the `as.<class>()` function, or the coerced
#'   sentinel -9999 when `x` is NULL
measurement_exists <- function(x, class = "numeric") {
  # FIX: the former ifelse() on a scalar condition kept only the first
  # element of a non-NULL input; if/else preserves the whole value.
  val <- if (is.null(x)) -9999 else x
  do.call(paste0("as.", class), list(val))
}
|
524d8b9a3f329d4d2b0b08901744055a89e55526
|
2a0d1fc07d673b8c7cf07c6596dac2630ae3fd7c
|
/scripts/individualmodelTMB.R
|
f3098a9965fdd5ece6bdc7c0682fa48dd246ee07
|
[] |
no_license
|
stelmacm/WNS
|
aef0320ac63d789590cc229e7f24e2fe248c036d
|
a455ce91c547c8174444d8fb811b4236c2ae38f3
|
refs/heads/master
| 2022-04-29T11:23:05.577810
| 2022-03-04T14:31:02
| 2022-03-04T14:31:02
| 241,282,675
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,604
|
r
|
individualmodelTMB.R
|
#This will be the R portion of the model TMB
#Staring off with individual level because I dont know how to carry lists into TMB
# Loads the data objects used below (bigsharedusers, orderedmat, countylist,
# uninfectedcountylist, years, counties, incidence, random_vec, random_vec2);
# TMB itself is presumably attached by this sourced file -- verify.
source("modeltesting/creatingfunctiondf.R")
#This is what would be needed to run in
# NOTE(review): three different template names are used below --
# "fullmodelTMB2.cpp" is compiled, dynlib("scripts/fullmodelTMB") is loaded,
# and MakeADFun() requests DLL "individualmodelTMB". At most one of these
# can be consistent; confirm which compiled template this script should run.
compile("scripts/fullmodelTMB2.cpp")
dyn.load(dynlib("scripts/fullmodelTMB"))
set.seed(123)
#SM = list of matrices that are shared users per year
#dist = matrix distances between 2 counties (counties in alphabetical order)
#countyincidence= list of matrices(technically vectors) that contain county incidence per year
#Dim = number of counties in matrices
#number of years = years including 2006 which gets removed in program
#unifectedcounty = uninfected counties (2006 removed)
#yearindicator = vector of factored years for the mixed model(2006 and years a county is infected are removed)
#countyindicator = vector of factored counties for the mixed model
data <- list(SM = bigsharedusers, dist = orderedmat, countyincidence = countylist, dim = 548, numberofyears = 13,
uninfectedcounty = uninfectedcountylist, yearindicator = years, countyindicator = counties, incidence = incidence)
#d = scaling param
#theta = azzalini power
#rho = proportion of shared users compared to distance matrix
#a = offset parameter (so that log 0 doesn't happen)
parameters <- list(d = 10, theta = 1.5, rho = .5, a = 0.001, Random_vectorone= random_vec, Random_vectortwo = random_vec2)
obj <- MakeADFun(data, parameters, DLL="individualmodelTMB")
obj$hessian <- TRUE
# obj supplies par/fn/gr, so optim() can be driven directly via do.call
opt <- do.call("optim", obj)
opt
opt$hessian ## <-- FD hessian from optim
obj$he() ## <-- Analytical hessian
sdreport(obj)
|
1cd42db6770567d852b1547833f9a84208b7de91
|
191d276ade533e816e9db403e766f01cf0894315
|
/man/wilcox_test.Rd
|
ed8582b0e039b990f1b978aafd2c957303a01223
|
[] |
no_license
|
gitronald/htester
|
b5132778c4b7aa6f09bb351782e08206bcad222a
|
7aac3653461ec2d1928dbad3cee4f6daa389fd73
|
refs/heads/master
| 2021-01-16T21:46:08.742779
| 2016-07-23T01:36:10
| 2016-07-23T01:36:10
| 62,968,156
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 792
|
rd
|
wilcox_test.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/wilcox_test.R
\name{wilcox_test}
\alias{wilcox_test}
\title{Wilcoxon Rank Sum and Signed Rank Tests}
\usage{
wilcox_test(var1, var2, round = TRUE, ...)
}
\arguments{
\item{var1}{a numeric vector to compare against var2}
\item{var2}{a numeric vector to compare against var1}
\item{round}{logical, whether or not to round the results}
\item{...}{additional arguments passed on to \code{\link[stats]{wilcox.test}}}
}
\value{
Returns a data.frame version of the standard htest output. See
\code{wilcox.test} for additional arguments and further detail
}
\description{
A wrapper for \code{\link[stats]{wilcox.test}} that returns htest data in a
data.frame rather than a list
}
\examples{
wilcox_test(rnorm(10, 5), rnorm(10, 5))
}
|
81a0906d7cbd2c441b7614798edc4535690b585e
|
ff708723a712c8e3cba19dc783031be098cd0c3a
|
/04_Model.Testing/02_Jackknife_hake_all_stages_ROMS_PPC_for_GitHub.R
|
115b1223e95327c3511efed41d5f909eb7886eaf
|
[] |
no_license
|
pacific-hake/recruitment-index
|
430f66e8e4b55175421ed5b9c896cfecf25e73c4
|
ac05ffe31b7e51124b2846c30e9b2dde7c27fcf3
|
refs/heads/main
| 2023-04-27T05:01:45.670796
| 2023-04-20T04:23:11
| 2023-04-20T04:25:27
| 366,157,268
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,464
|
r
|
02_Jackknife_hake_all_stages_ROMS_PPC_for_GitHub.R
|
# jackknife the best fit model. drop one year of data and re-fit best-fit model
# source(paste0(main,'/00_BestModelInfo.R'))
# NOTE(review): this script depends on df0, parms2 and form_best being
# defined by previously sourced files -- it cannot run standalone.
MainFile = "/Users/cdvestfals/Desktop/Hake_Analysis_for_GitHub"
setwd(paste0(MainFile,'/04_Model.Testing/02_jackknife'))
library(MuMIn)
######### need to set parms here if I in parms list ###################
# rename data
data_1 = df0
# current = getwd()
# FileDir = paste0(current,'/Standardized')
# dir.create(FileDir)
# setwd(FileDir)
# Two passes: z = 1 uses the raw data, z = 2 standardizes the predictor
# columns (parms2) before refitting.
for(z in 1:2){
if(z==1){data_1 = df0; suffix = 'raw'}
if(z==2){ # set to run standardized results
std = apply(data_1[,parms2],2, FUN=scale)
data_1[,parms2] = std
suffix = "standardized"
}
# get fitted model for comparison ####
# add to file below ###
# Full-data fit: its R^2, F statistic and coefficients form the first row
# of the summary table written at the end.
fm = lm(form_best, data=data_1)
sfm = summary(fm)
fmcoef = t(data.frame(sfm$coefficients[,1]))
fitted.model = data.frame(cbind( sfm$r.squared, sfm$adj.r.squared, sfm$fstatistic[1], sfm$fstatistic[2], sfm$fstatistic[3], fmcoef))
# Leave-one-out refits; results grows by rbind each pass (fine for the
# small number of rows here, but O(n^2) in general).
for(k in 1:nrow(data_1)) {# refit model 30 times dropping one year each time
# drop one datum and run
data_2 = data_1[-k,]
print(k)
fit = lm(form_best, data=data_2, na.action = na.fail)
s1 = summary(fit)
Coeffs = t(data.frame(s1$coefficients[,1]))
r1 = data.frame(cbind(s1$r.squared, s1$adj.r.squared,s1$fstatistic[1],s1$fstatistic[2],s1$fstatistic[3], Coeffs))
if(k == 1){results = r1}else{results = rbind(results,r1)}
} # end k loop
colnames(results)[1:6] = c('r2','adjr2','F','df1','df2', 'Intercept')
# overall model p-value from the F statistic
results$p = 1-pf(results$F,results$df1, results$df2)
write.table(results,paste0('R_jackknife_hake_all_stages_ROMS_PPC_',suffix,'.csv'), sep=',',col.names = TRUE, row.names = FALSE)
# uses same code as bootstrap version, but not bootstrping
# get mean and 95% CLs
mn = apply(results,2,mean)
md = apply(results,2,median)
# quantile function
# NOTE(review): this local 'qt' masks stats::qt for the rest of the loop body
qt = function(x){quantile(x,c(0.025,0.975))}
ci = apply(results,2, qt)
boot.stats = rbind(mn,md, ci)
fitted.model$p = NA
colnames(fitted.model) <- colnames(boot.stats)
fitted.model$p = 1-pf(fitted.model$F,fitted.model$df1, fitted.model$df2)
# first row = full-data fit, remaining rows = jackknife mean/median/CIs
boot.stats2 = data.frame(rbind(fitted.model,boot.stats))
x = rownames(boot.stats2)
x[1] <- "fitted model"
boot.results <- cbind(x,boot.stats2)
boot.final = boot.results #rbind(boot.results,CL2)
write.table(boot.final,paste0('R_jackknife_stats_all_stages_ROMS_PPC_',suffix,'.csv'), sep=',',col.names = TRUE, row.names = FALSE)
boot.final
}
setwd(MainFile)
|
0e72cab68593c59444e8312cf65db2d24faedeea
|
992a8fd483f1b800f3ccac44692a3dd3cef1217c
|
/Project_bioinformatics/Bra.WGT.paper/SweeD.annotation/2unique.SweeD.R
|
b1e70acac25a890acc32ed40ccd516152b255f72
|
[] |
no_license
|
xinshuaiqi/My_Scripts
|
c776444db3c1f083824edd7cc9a3fd732764b869
|
ff9d5e38d1c2a96d116e2026a88639df0f8298d2
|
refs/heads/master
| 2020-03-17T02:44:40.183425
| 2018-10-29T16:07:29
| 2018-10-29T16:07:29
| 133,203,411
| 3
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,891
|
r
|
2unique.SweeD.R
|
getwd()
setwd("C:/Users/qxs/Desktop")

# Read one SweeD outlier-gene annotation file (fields separated by "_"),
# extract the gene identifier in the second field, de-duplicate it, and
# write the unique IDs to "<file>.unique". Returns the unique IDs.
unique_outlier_genes <- function(anno_file) {
  anno <- read.table(anno_file, sep = "_")
  ids <- unique(anno[, 2])
  write.table(ids, paste0(anno_file, ".unique"),
              quote = FALSE, row.names = FALSE, col.names = FALSE)
  ids
}

# One call per population replaces the five near-identical copy/paste
# blocks of the original script.
Uch   <- unique_outlier_genes("145ch.chr1-10.outlier_genes.anno")
Upk   <- unique_outlier_genes("145pk.chr1-10.outlier_genes.anno")
Utril <- unique_outlier_genes("145tril.chr1-10.outlier_genes.anno")
Usylv <- unique_outlier_genes("145sylv.chr1-10.outlier_genes.anno")
UEUCA <- unique_outlier_genes("EUCA.chr1-10.outlier_genes.anno")

# Venn diagram of the five unique outlier-gene sets
library(gplots)
venn(list(Upk, Uch, Utril, Usylv, UEUCA))  # FIX: was called twice in a row
# REF: https://cran.r-project.org/web/packages/gplots/vignettes/venn.pdf

# Union of the four Brassica sets (5669 IDs with duplicates, 4674 unique)
U4 <- unique(c(as.character(Uch), as.character(Upk),
               as.character(Utril), as.character(Usylv)))
write.table(U4,
            "U4.chr1-10.outlier_genes.anno.unique",
            quote = FALSE, row.names = FALSE, col.names = FALSE)
UEUCA <- as.character(UEUCA)
# 2058 unique EUCA IDs
venn(list(Upk, Uch, Utril, Usylv))
|
5de4062e4f790df58b5937628744d59066736c10
|
f37b0e4bd5854edd93c0acea3d99761469d52247
|
/analysis/data_infrastructures/use-bigrquery/example-select.r
|
267b3145896df9252bcdc9037c4ae9f3c558c4be
|
[] |
no_license
|
metacommunities/metacommunities
|
a80cbba48025c7f5dbbd7519771352cf2845d31e
|
242de413ea47224f1abf372d794d61aa40f582bf
|
refs/heads/master
| 2020-04-05T02:04:52.294053
| 2018-01-17T15:26:18
| 2018-01-17T15:26:18
| 10,381,192
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 329
|
r
|
example-select.r
|
# Query BigQuery for GitHub repos linked to from Stack Overflow, together
# with their distinct-question and answer counts.
library(bigrquery)
library(Rook)
library(tools)
library(brew)
library(rjson)
# NOTE(review): the billing project id is hard-coded, and query_exec() is
# the legacy bigrquery interface -- confirm the installed package version
# still provides it.
billing_project <- "237471208995"
sql <-
"
SELECT repo, repolinkDistinctQuestions AS question, repolinkAnswers AS answers
FROM repos_linked_to_from_SO
"
dat <- query_exec("metacommunities", "github_explore", sql,
billing=billing_project)
|
018f5f46900a6f9994db5fd8626d6db8e58ce1f2
|
157b2dc0d0b7c98ece7fd04759ca43c8638707c0
|
/_fnc/tidy_summary.R
|
2216a9348e67151905e077d4d9535b6bf4532606
|
[] |
no_license
|
eugejoh/PAHO_dengue
|
14d9c5d9b273c5975fc9bd4a6ec551ee3858a512
|
c4016f828fcd81edfd4b341c881d86caf4c69f97
|
refs/heads/master
| 2020-03-09T08:19:43.096503
| 2018-09-19T23:17:40
| 2018-09-19T23:17:40
| 128,686,621
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,146
|
r
|
tidy_summary.R
|
#' Tidy Summary Statistics
#'
#' This function outputs a tidy data frame of summary statistics, utilizing
#' the base summary functions `mean`, `median`, `min`, `max`, `quantile`,
#' `sd`. This function selects only columns that `is.numeric = TRUE`.
#' This function ignores missing values `na.rm = TRUE`.
#'
#' @param input a `data.frame` object
#'
#' @return a `data.frame` object with input variables as the first column
#'   and respective summary statistics as columns
#' @export
#'
#' @examples
#'
#' data(airquality)
#' tidy_summary(airquality)
#'
tidy_summary <- function(input) {
  input %>% select_if(is.numeric) %>%
    # FIX: a list of lambdas replaces funs(), which is deprecated since
    # dplyr 0.8 and removed in dplyr 1.x
    summarise_all(list(
      min = ~min(., na.rm = TRUE),
      q25 = ~quantile(., 0.25, na.rm = TRUE),
      mean = ~mean(., na.rm = TRUE),
      median = ~median(., na.rm = TRUE),
      q75 = ~quantile(., 0.75, na.rm = TRUE),
      max = ~max(., na.rm = TRUE),
      sd = ~sd(., na.rm = TRUE),
      missing = ~sum(is.na(.))
    )) %>%
    gather(stat, val) %>%
    # FIX: split on the LAST underscore so variable names that themselves
    # contain "_" (e.g. "air_temp") are not broken apart; the appended stat
    # suffixes never contain "_".
    # NOTE(review): with exactly one numeric column summarise_all omits the
    # variable prefix, so this separate() step fails -- present in the
    # original as well.
    separate(stat, into = c("var", "stat"), sep = "_(?=[^_]+$)") %>%
    spread(stat, val) %>%
    select(variable = var, min, q25, mean, median, q75, max, sd, missing)
}
|
18fbbf9fc4afca9d95e04484c90a0d92b962de0c
|
fa703db3c7f0621c2c2656d47aec2364eb51a6d8
|
/4_ss21/r/P01-1.R
|
ecda29a3821195dbcf50dde6dfc5c371531d339c
|
[] |
no_license
|
JosuaKugler/uni
|
f49b8e0d246d031c0feb81705f763859446f9b0f
|
7f6ae93a5ef180554c463b624ea79e5fbc485d31
|
refs/heads/master
| 2023-08-15T01:57:53.927548
| 2023-07-22T10:42:37
| 2023-07-22T10:42:37
| 247,952,933
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 132
|
r
|
P01-1.R
|
# Christian Merten, Josua Kugler
# Reverse a small numeric vector, clamp entries below 1 to -2, and report
# the sum of squares; then zero out every even number in 1..100.
x <- c(1.3, 0.5, 42, -8e-5)
x <- rev(x)
x[x < 1] <- -2
sum(x^2)
q <- seq_len(100)
q[q %% 2 == 0] <- 0
|
766abd13a2d7ca121f1bb04b225ab82d62acc700
|
40c196ea90dbab156db44d9053688d58a90b2800
|
/01_source/03_facebook.R
|
e04aceec47e87f136df89d8dcdd771b410a44265
|
[] |
no_license
|
robcortesl/social_media
|
678b04ffb3e8678adc0ccdfaddc53e83fb5aca4b
|
945dde13ba21d026cf3682b313e06a2d06d3cf5e
|
refs/heads/master
| 2021-07-24T07:00:51.228101
| 2017-10-31T21:37:53
| 2017-10-31T21:37:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 780
|
r
|
03_facebook.R
|
# Facebook
# Exploratory script for the Rfacebook package: look up users, list
# friends and build the friendship adjacency matrix.
install.packages("Rfacebook")
library(Rfacebook)
# via a temporary token (register which services it may access)
# SECURITY NOTE(review): a real OAuth access token is committed in source;
# it should be revoked and loaded from an environment variable instead.
token <- "EAACEdEose0cBAB5NouC14dtF13Dv93oWG3RX3eEyZCHslrPGnB5Ly9ZA6LzZB07req3gaU0nZCnsqNIR2xKrA14NpfOaZBUz6kdcbTlcy0byyseMZCR1BiDMFuZCvKMMjnXM4wXZC417tYki8Xl8JroPVQvv1qgljFo1KZBJmtLZCrLV8VlrjxP8tVGLHVSIL91HUiPJltlFueZCwZDZD"
me <- getUsers("me", token, private_info=TRUE)
me
getUsers("barackobama", token, private_info=TRUE)
getUsers(c("barackobama", "donaldtrump"), token)
myfriends <- getFriends(token, simplify = TRUE)
myfriends
my_friends <- getFriends(token, simplify = FALSE)
my_friends
my_friends_info <- getUsers(my_friends$id, token, private_info = TRUE)
my_friends_info
# adjacency matrix of the user's friend network
mat <- getNetwork(token, format = "adj.matrix")
mat
58df2b5b449c7d24bf52fc4082482000c7feec19
|
3e9f1994f0c73173fc9f7123d4c0096cae76cb15
|
/Generate numbers which follow normal distribution.R
|
760da700ba77a9398f3211bb4d1ec960767cf104
|
[] |
no_license
|
SAUVIK/Start-R-1
|
093a13651e379c0b105a1a83327ab4f253fd54a3
|
b9cce6bf85e3b1149731bfe2ba143553cba09f9a
|
refs/heads/master
| 2021-01-23T17:37:56.736073
| 2017-09-07T18:15:15
| 2017-09-07T18:15:15
| 102,770,413
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 224
|
r
|
Generate numbers which follow normal distribution.R
|
#Generate numbers which follow a normal distribution
# rnorm() defaults to the standard normal: mean = 0, sd = 1.
normal_distribution_numbers <- rnorm(1000)  # FIX: variable name typo ("noraml")
mean(normal_distribution_numbers)
sd(normal_distribution_numbers)
|
96a380cdda18347c333208edc76f9cd5cdd14af4
|
9a430b05c1e8cd124be0d0323b796d5527bc605c
|
/wsim.io/man/read_dimension_values.Rd
|
9042272236c50efe8eff519bac634aaed1537ae1
|
[
"Apache-2.0"
] |
permissive
|
isciences/wsim
|
20bd8c83c588624f5ebd8f61ee5d9d8b5c1261e6
|
a690138d84872dcd853d2248aebe5c05987487c2
|
refs/heads/master
| 2023-08-22T15:56:46.936967
| 2023-06-07T16:35:16
| 2023-06-07T16:35:16
| 135,628,518
| 8
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 741
|
rd
|
read_dimension_values.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/read_dimension_values.R
\name{read_dimension_values}
\alias{read_dimension_values}
\title{Get the values of the dimensions associated with a variable in a netCDF file}
\usage{
read_dimension_values(vardef, exclude.dims = NULL, exclude.degenerate = FALSE)
}
\arguments{
\item{vardef}{filename/variable to check}
\item{exclude.dims}{dimensions to ignore in output}
\item{exclude.degenerate}{if \code{TRUE}, do not return dimensions having
length <= 1 in output}
}
\value{
a list with names representing dimension names and values
representing dimension values
}
\description{
Get the values of the dimensions associated with a variable in a netCDF file
}
|
11e8c249ed3336686b9fd756a3112e62a50a06de
|
3ef4654c876937535cd5a19f3a3b6b9796e1a7c5
|
/global.r
|
877da28f81f852883e7e6940907c2921b1776b17
|
[] |
no_license
|
yifanStat/metrics_app
|
3c6a60b9432c18f6878d11fd52c09de38eb93514
|
847b6e423ead7d0151db78ad3e55cb729d1b4a4d
|
refs/heads/master
| 2021-07-17T00:15:30.290567
| 2017-10-23T04:19:35
| 2017-10-23T04:19:35
| 107,930,504
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 150
|
r
|
global.r
|
# Shiny's global.r: runs once before ui/server and sources the app's
# helper scripts into the global environment.
library(shiny)
# echo = FALSE keeps the sourced code out of the console; the large
# max.deparse.length only matters when echoing and is kept as-is.
source("src/shine_d.r", echo = FALSE, max.deparse.length = 1000L)
source("src/shine_h.r", echo = FALSE, max.deparse.length = 1000L)
|
3fa091d28f9c037a094ccab4a5d4a9a43e057e3f
|
cff4412c81ece904c62c68df4b081583a8f17270
|
/Linear Model Selection and Prediction.R
|
bc9c5ae77860d54329bd3563b8d8950345999dbe
|
[] |
no_license
|
ArvindPawar08/Intermediate-Analytics
|
a78b20d341d20604c9da6bcc21d2e53bc9787f82
|
227ddb375d7c7fdc20b4b8cd20d679564118e3b5
|
refs/heads/master
| 2020-05-05T10:04:27.940109
| 2019-04-07T07:00:55
| 2019-04-07T07:00:55
| 179,929,240
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,791
|
r
|
Linear Model Selection and Prediction.R
|
# part 1 -- train/test split and manual backward elimination ----------------
setwd("C:/Users/Arvind/Desktop/Intermediate Analytics/Week 3")
a3 <- read.csv("assignment3.csv")
a3

set.seed(12345)
# 75/25 train/test split
train <- floor(0.75 * nrow(a3))
train_ind <- sample(seq_len(nrow(a3)), size = train)
trainset <- a3[train_ind, ]
testset <- a3[-train_ind, ]
dim(trainset)
dim(testset)
names(a3)

# Backward elimination: start from the full model for EP and drop one
# predictor per step.
Model <- lm(EP~wind+pressure+humidity+visability+FFMC+DMC+DC+ISI, data = trainset)
summary(Model)
Model <- lm(EP~wind+pressure+humidity+visability+FFMC+DMC+ISI, data = trainset) # removed DC
summary(Model)
Model <- lm(EP~wind+pressure+humidity+visability+FFMC+DMC, data = trainset) # removed ISI
summary(Model)
Model <- lm(EP~wind+pressure+visability+FFMC+DMC, data = trainset) # removed humidity
summary(Model)
Model <- lm(EP~wind+pressure+FFMC+DMC, data = trainset) # removed visability
summary(Model)
Model <- lm(EP~wind+pressure+DMC, data = trainset) # removed FFMC
summary(Model)
Model <- lm(EP~pressure+DMC, data = trainset) # removed wind
summary(Model)
Reduced_Model <- lm(EP~pressure, data = trainset) # removed DMC
summary(Reduced_Model) # FIX: was summary(Model), which re-printed the previous fit
anova(Reduced_Model, Model)

install.packages("leaps")
library(leaps)

# Predictions from the two-predictor model on the held-out test set
predict_model <- predict(Model, newdata = testset)
head(predict_model)
Observed_Values <- testset$EP
head(Observed_Values)
# R-squared, RMSE, RSS and MAE for the larger model
SSE <- sum((Observed_Values - predict_model)^2)
SST <- sum((Observed_Values - mean(Observed_Values))^2)
r_squared <- 1 - SSE/SST
r_squared
RMSE <- sqrt(mean((Observed_Values - predict_model)^2))
RMSE
RSS <- sum((predict_model - Observed_Values)^2)
RSS
MAE <- mean(abs(Observed_Values - predict_model))
MAE

# Predictions from the reduced (single-predictor) model
predict_reduced <- predict(Reduced_Model, newdata = testset)
head(predict_reduced)
SSE1 <- sum((Observed_Values - predict_reduced)^2)
SST1 <- sum((Observed_Values - mean(Observed_Values))^2)
r_squared1 <- 1 - SSE1/SST1
r_squared1
RMSE1 <- sqrt(mean((Observed_Values - predict_reduced)^2))
RMSE1
RSS1 <- sum((predict_reduced - Observed_Values)^2)
RSS1
MAE1 <- mean(abs(Observed_Values - predict_reduced))
MAE1

# Stepwise selection by AIC on the full data set
install.packages("ISLR")
library(ISLR)
fit_model <- lm(EP~., data = a3)
Step_AIC <- step(fit_model, direction = "backward")

# part 2 -- penalised regression with glmnet --------------------------------
# a) model matrices for train and test (drop the intercept column)
x_train <- model.matrix(EP~., trainset)[, -1]
y_train <- trainset$EP
x_test <- model.matrix(EP~., testset)[, -1]
y_test <- testset$EP
head(x_train)
head(y_train)
head(x_test)
head(y_test)

# b) fit the lasso path and cross-validate the penalty
install.packages("glmnet")
library(glmnet)
Lasso_model <- glmnet(x_train, y_train, alpha = 1, nlambda = 100, lambda.min.ratio = 0.001)
plot(Lasso_model, xvar = "lambda")
# NOTE(review): the lasso fit uses alpha = 1 but the cross-validation uses
# alpha = 0.5 (elastic net) -- confirm which penalty was intended.
Cross_Validation <- cv.glmnet(x_train, y_train, nlambda = 100, alpha = 0.5, lambda.min.ratio = 0.001)
plot(Cross_Validation)
best_lambda <- Cross_Validation$lambda.min
predict_y <- predict(Lasso_model, s = best_lambda, newx = x_test)

# c) test-set R-squared for the penalised model
Observe_y <- y_test
rss <- sum((predict_y - Observe_y) ^ 2) ## residual sum of squares
tss <- sum((Observe_y - mean(Observe_y)) ^ 2) ## total sum of squares
rsq <- 1 - rss/tss
rsq

# part 3 -- principal component regression ----------------------------------
install.packages("pls")
library(pls)
set.seed (1000)
pcr_model <- pcr(EP~., data = a3, scale = TRUE, validation = "CV")
summary(pcr_model)
## validation plots against the number of components
validationplot(pcr_model, val.type = c("RMSEP"))
validationplot(pcr_model, val.type = c("MSEP"))
validationplot(pcr_model, val.type = c("R2"))
## PCR with 5 components, fitted on the training set only
pcr_model_train <- pcr(EP~., data = trainset, scale = TRUE, validation = 'CV')
# FIX: predict from the train-only fit; pcr_model was fitted on all of a3,
# which leaks test data into the test-set MSE.
pcr_pred <- predict(pcr_model_train, testset, ncomp = 5)
mean((pcr_pred - testset$EP)^2)
summary(pcr_pred)
# Baseline: ordinary least squares on the same split
lrm <- lm(EP~., data = trainset)
lrm_pred <- predict(lrm, testset)
mean((lrm_pred - testset$EP)^2)
|
8e27b0455382e3b79f6dd601997ce479eb2c2ea2
|
d9d213281d875a47089f1a0114919eeceedf4a60
|
/Figure S1/Figure_S1_plot_gray_value_all_time_points.R
|
e03525f4716590f4c8416c62b781e32895aa6852
|
[] |
no_license
|
hibberd-lab/Xiong_High-light-response-of-the-rice-bundle-sheath
|
6845c4eac2300e8621cd7cd738cda95b2d91906a
|
221b6b7a9dfd87b67d6325ea77a3efb3db1d8d96
|
refs/heads/main
| 2023-03-31T08:04:41.543999
| 2021-04-15T08:20:10
| 2021-04-15T08:20:10
| 306,381,513
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,772
|
r
|
Figure_S1_plot_gray_value_all_time_points.R
|
# Gray-value profiles along a transect, one row per Distance/time/Treatment
# combination with a mean and standard error column.
# (read_csv is from readr; readr and ggplot2 must be loaded by the caller.)
all1 <- read_csv("Gray value.csv")
head(all1)
all1$Distance<-as.numeric(all1$Distance)
# Fix the ordering of the time points so the facets appear chronologically
# (note: "50min" has no space in the raw data, so the level must match exactly)
all1$time<-factor(all1$time,levels = c("0 min","5 min","10 min","15 min","20 min","30 min","40 min","50min","60 min"))
all1$Treatment<-factor(all1$Treatment,levels = c("high light","0 min"))
str(all1)
# Cell-type annotation: start/end positions of each cell region along the
# transect, used to shade the background of every facet
celltype_position <- read_csv("celltype_position.csv")
head(celltype_position)
celltype_position$time <- factor(celltype_position$time,levels = c("0 min","5 min","10 min","15 min","20 min","30 min","40 min","50min","60 min"))
celltype_position$Treatment<-factor(celltype_position$Treatment,levels = c("high light","0 min"))
# M = mesophyll, BSS = bundle sheath strand -- assumed from the repo topic; TODO confirm
celltype_position$'Cell type' <- factor(celltype_position$'Cell type',levels=c("M","BSS"))
str(celltype_position)
head(celltype_position)
pd <- position_dodge(0.05) # dodge overlapping points/error bars 0.05 left and right
# Line plot of mean gray value versus distance, one panel per time point:
# shaded rectangles mark the cell-type regions, error bars show mean +/- SE
ggplot(all1, aes(x=Distance, y=mean)) +
  geom_rect(aes(x=NULL, y=NULL, xmin = start, xmax = end,
                fill = celltype_position$'Cell type'), ymin = -Inf, ymax =Inf,
            data = celltype_position) +
  # translucent green/brown backgrounds for the two cell types
  scale_fill_manual(values=alpha(c("green","brown"),0.1))+
  geom_errorbar(aes(ymin=mean-se, ymax=mean + se),colour="black", width=.1, position=pd) +
  geom_line(aes(colour=Treatment),position=pd,alpha=1, size=0.5) +
  facet_wrap(~time, ncol=4)+
  # NOTE(review): "??m" is most likely a mis-encoded micrometre symbol (µm) -- confirm
  xlab("Distance (??m)")+
  ylab("Gray value")+
  xlim(0,140)+
  ylim(0,160)+
  theme_bw() +
  theme(strip.background = element_rect(colour = "black", fill = "white"),
        panel.grid.major = element_blank(),
        panel.grid.minor = element_blank(),
        axis.text.x =element_text(size = 9,colour = "black"),
        axis.text.y =element_text(size = 9,colour = "black"),
        aspect.ratio = 1,
        strip.text.x = element_text(size = 9,colour = "black")
  )
|
a784b73d2cb2bf22311b41f1e1567a97ad5e1872
|
7021962b10e1fa20e48a99807b85e422bee02cec
|
/Plot1.R
|
542619889e876d0d4bfa743a0970f1ecfe6853b2
|
[] |
no_license
|
pbarry129/ExData_Plotting1-1
|
271090b688105ce53066377e48c53327e0ce6632
|
47d203e5742bb7b46347d5fa392088a4bd49f5db
|
refs/heads/master
| 2020-06-13T17:22:35.808014
| 2016-12-05T01:36:28
| 2016-12-05T01:36:28
| 75,575,966
| 0
| 0
| null | 2016-12-05T01:07:49
| 2016-12-05T01:07:48
| null |
UTF-8
|
R
| false
| false
| 904
|
r
|
Plot1.R
|
# Read in the household power consumption data (semicolon-separated text file)
my_power_data<-read.csv("household_power_consumption.txt", sep=";")
# Combine the Date and Time columns into a single date-time string
my_power_dates<-paste(as(my_power_data$Date, "character"), as(my_power_data$Time, "character"))
# Parse the strings into POSIXlt date-times (day/month/year hours:minutes:seconds)
POS_dates<-strptime(my_power_dates, "%d/%m/%Y %T", tz="")
# Append the parsed date-times to the data as a new column
power_data <- cbind(my_power_data, POS_dates)
# Bounds just outside the window of interest (2007-02-01 and 2007-02-02)
# NOTE(review): the strict `<` below excludes the final second of Feb 2 -- confirm intended
start_date <-strptime("31/01/2007 23:59:59","%d/%m/%Y %T", tz="")
end_date <- strptime("02/02/2007 23:59:59","%d/%m/%Y %T", tz="")
# Keep only the rows from the two days of interest
two_days <- subset(power_data, power_data$POS_dates > start_date & power_data$POS_dates<end_date)
# Histogram of global active power; as.numeric() because the column was not
# read as numeric (presumably due to non-numeric missing-value markers -- confirm)
hist(as.numeric(two_days$Global_active_power), main="Global Active Power", xlab = "Global Active Power (kilowatt)", col="red")
# Copy the on-screen plot to a 480x480 PNG file, then close the PNG device
dev.copy(png, file="plot1.png", width=480, height=480, units="px")
dev.off()
|
625df374735a2600cc9eede5821522bf68d0cb4e
|
2631efe4c86afda92bff9141bf09ec1b359da511
|
/man/topgenes.Rd
|
3ad7449e2c39b5dd8c3b94c5fae1280b5204dd51
|
[] |
no_license
|
Oshlack/AllSorts_v1
|
d4e753805a04fc8e2d80ad8cfcdd540aa9e39c37
|
529f6f9c42266c74a3de87531e3e01847d8ca8ff
|
refs/heads/master
| 2023-02-12T07:00:28.985590
| 2021-01-18T03:35:06
| 2021-01-18T03:35:06
| 76,910,901
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 251
|
rd
|
topgenes.Rd
|
\name{topgenes}
\docType{data}
\alias{topgenes}
\title{Random Forest Classification genes}
\description{
A list of genes required by the random forest classifier.
}
\usage{topgenes}
\format{A character vector of 20 genes required for classification.}
|
8a221b7840510f7ef824040794d90a110956226f
|
73cdd09cf558fa34e0799043837afdbbea61dcbd
|
/R/exportfuns.R
|
c041f134a0036e7a7a5f000cf705a092acd6edd3
|
[] |
no_license
|
epi-chen/bioage
|
1561974f28b2b2fc48dc8c3017c2204e7df0b31b
|
c67e377984b370c82b244131f7ccd166a9ecf0b1
|
refs/heads/master
| 2021-09-16T19:16:54.732472
| 2018-06-23T19:46:58
| 2018-06-23T19:46:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 41
|
r
|
exportfuns.R
|
# Functions for exporting output to Stata.
|
a799817c5a33e18d0f2647f05d4ab8074b228657
|
b779522919291bcc1a2c05e6034394fd82858fc9
|
/skript_EDA_2017.R
|
fd41a6bd7b2f653af0d1e8dcb425012a37825f92
|
[] |
no_license
|
Kobzol/statistics
|
e63b419d01cd3e248901825169cce116e65d98b0
|
56637eb4b69d24d7268b244b88ff9c15177a39eb
|
refs/heads/master
| 2021-03-30T17:36:04.324846
| 2017-03-27T18:31:39
| 2017-03-27T18:31:39
| 86,370,452
| 0
| 0
| null | null | null | null |
WINDOWS-1250
|
R
| false
| false
| 21,539
|
r
|
skript_EDA_2017.R
|
#######################################################################################
################ Data preprocessing and exploratory analysis ##########################
############### Adela Vrtkova, Martina Litschmannova ##################################
#######################################################################################
## We have data -- what next?
# 1. Load the required packages (they provide additional statistical functions)
# 2. Set the working directory (where data is imported from and outputs are saved)
# 3. Import the data (from the working directory, from the internet)
# 4. Pre-processing -> a) inspect the data
#                      b) store the data in several formats (each function "prefers" a different format)
# 5. Analysis of qualitative (categorical) variables
# 6. Analysis of quantitative variables
# 7. Identify outliers and decide whether to exclude or keep them
#######################################################################################
## 1. How to install and load an extension package? ###################################
# Install the package (needed once per machine)
install.packages("openxlsx")
# Load the package (must be repeated in every new R session; keep it at the top of the script)
library(openxlsx)
#######################################################################################
## 2. Where generated outputs are stored -- setting the working directory #############
# Print the current working directory
getwd()
# Set the working directory -> the full path, in quotes
setwd("C:/Users/lit40/.rkward")
#######################################################################################
## 3. Loading the data file ###########################################################
# basic functions - read.table, read.csv, read.csv2, ...
# the choice depends mainly on the file format (.txt, .csv), on the value separator, and on decimal comma/point
# Load a csv2 file from the local disk into the data frame `data`
data=read.csv2(file="C:/Martina/STA1/DATA/aku.csv")
# Load a csv2 file from the internet into the data frame `data`
data=read.csv2(file="http://am-nas.vsb.cz/lit40/DATA/aku.csv")
# Load an xlsx file from the local disk into the data frame `data` - using a function from the openxlsx package loaded above
install.packages("openxlsx")
library(openxlsx)
data=readWorkbook("C:/Users/lit40/Desktop/aku.xlsx",
                  sheet=1, # sheet number (default 1)
                  colNames=TRUE, # does the first row contain column names? (default TRUE)
                  startRow = 4, # row at which reading starts (default: row 1)
                  cols = 2:9) # columns to read (default: all non-empty columns)
colnames(data)=c("A5","B5","C5","D5","A100","B100","C100","D100")
# or (via the XLConnect package)
install.packages("XLConnect")
library(XLConnect)
wb=loadWorkbook("C:/Martina/STA1/DATA/aku.xlsx")
data=readWorksheet(wb, sheet="Data", header=TRUE, startRow = 4, startCol=2)
colnames(data)=c("A5","B5","C5","D5","A100","B100","C100","D100")
# Load an xlsx file from the internet (via the XLConnect package) into the data frame `data`
# (more complicated; downloading the xlsx file to the local disk first is recommended)
tmp = tempfile(fileext = ".xlsx")
download.file(url = "http://am-nas.vsb.cz/lit40/DATA/aku.xlsx", destfile = tmp, mode="wb")
wb=loadWorkbook(tmp)
data=readWorksheet(wb,sheet="Data",header=TRUE,startRow = 4,startCol=2)
colnames(data)=c("A5","B5","C5","D5","A100","B100","C100","D100")
#######################################################################################
## 4. Data pre-processing #############################################################
# Print the whole data set
data
# Show the first six rows
head(data)
# Show the last six rows
tail(data)
# Show the 10th row
data[10,]
# Show the 3rd column
data[,3]
# or (if we know the name of the variable stored in the 3rd column)
data[["C5"]]
# or
data$C5
# Store the 1st and 5th columns of `data` in the data frame `pokus`
pokus=data[,c(1,5)]
# Store the first 4 columns of `data` in the data frame `data5`
data5=data[,c(1:4)]
#or
data5=data[,-c(5:8)]
## Note: keep names informative -- data5 holds capacities of all manufacturers' batteries after 5 cycles
## Conversion of the data to the standard (long) data format
data5S=reshape(data[,1:4], # the part of the data frame being converted to the standard format
               direction="long", # "long" or "wide" target format
               varying=c("A5","B5","C5","D5"), # variables whose values go into the single value column
               v.names="kap5", # name of the value column
               times=c("A","B","C","D"), # levels of the variable identifying each kap5 value
               timevar="vyrobce") # name of the identifier (manufacturer) variable
# convert data5S$vyrobce to type factor
data5S$vyrobce=as.factor(data5S$vyrobce)
# drop the redundant id variable from data5S
data5S=data5S[,-3]
# drop NA rows from data5S
data5S=na.omit(data5S)
## Conversion of paired data to the standard data format
dataS=reshape(data,
              direction="long",
              varying=list(c("A5","B5","C5","D5"),
                           c("A100","B100","C100","D100")),
              v.names=c("kap5","kap100"),
              times=c("A","B","C","D"),
              timevar="vyrobce")
# convert dataS$vyrobce to type factor
dataS$vyrobce=as.factor(dataS$vyrobce)
# drop the redundant id variable from dataS
dataS=dataS[,-4]
# drop NA rows from dataS
dataS=na.omit(dataS)
# Define a new variable in an existing data frame (capacity drop after 100 cycles)
dataS$pokles=dataS$kap5-dataS$kap100
## Create stand-alone variables per manufacturer
a5=dataS$kap5[dataS$vyrobce=="A"]
b5=dataS$kap5[dataS$vyrobce=="B"]
c5=dataS$kap5[dataS$vyrobce=="C"]
d5=dataS$kap5[dataS$vyrobce=="D"]
a100=dataS$kap100[dataS$vyrobce=="A"]
b100=dataS$kap100[dataS$vyrobce=="B"]
c100=dataS$kap100[dataS$vyrobce=="C"]
d100=dataS$kap100[dataS$vyrobce=="D"]
pokles.a=dataS$pokles[dataS$vyrobce=="A"]
pokles.b=dataS$pokles[dataS$vyrobce=="B"]
pokles.c=dataS$pokles[dataS$vyrobce=="C"]
pokles.d=dataS$pokles[dataS$vyrobce=="D"]
### Notes on the structure of base R graphics #########################################
# the basis are so-called high-level functions, which create a plot (i.e. open a graphics window and draw according to the given parameters)
# they are followed by so-called low-level functions, which add something to the active window and do not open a new one
# examples of low-level functions - e.g. abline, points, lines, legend, title, axis ... which add a line, points, a legend...
# i.e. before calling a "low-level" function a "high-level" one must be called (e.g. plot, boxplot, hist, barplot, pie,...)
# further graphical parameters can be found in the help
# or e.g. at http://www.statmethods.net/advgraphs/parameters.html
# or at https://flowingdata.com/2015/03/17/r-cheat-sheet-for-graphical-parameters/
# or http://bcb.dfci.harvard.edu/~aedin/courses/BiocDec2011/2.Plotting.pdf
## Colours in R
# http://www.stat.columbia.edu/~tzheng/files/Rcolor.pdf
# https://www.nceas.ucsb.edu/~frazier/RSpatialGuides/colorPaletteCheatsheet.pdf
## 5. Explorační analýza a vizualizace kategoriální proměnné ##########################
## Výpočet četostí ####################################################################
cetnosti=table(dataS$vyrobce)
cetnosti # výpis
# Výpočet relativních četností - 2 ekvivalentní způsoby
rel.cetnosti=100*cetnosti/sum(cetnosti)
rel.cetnosti # výpis
rel.cetnosti2=prop.table(cetnosti)*100
# Zaokrouhlení relativních četností (%) na 1 desetinné místo
rel.cetnosti=round(rel.cetnosti,digits=1)
rel.cetnosti # výpis
# Pozor na zaokrouhlovací chybu!!
rel.cetnosti[4]=100-sum(rel.cetnosti[1:3])
rel.cetnosti # výpis
# Sloučení četností a relativních četností do tabulky četností
tabulka=cbind(cetnosti,rel.cetnosti) # cbind() ... sloučení sloupců
tabulka # výpis
# Přejmenování názvů sloupců v tabulce četností
colnames(tabulka)=c("četnost","rel.četnost (%)")
tabulka
# Uložení tabulky do csv souboru pro export do MS Excel
write.csv2(tabulka,file="tabulka.csv")
# Kde je tabulka uložena?
getwd()
## Výsečový (koláčový) graf - angl. piechart ##########################################
cetnosti=table(dataS$vyrobce) # v tuto chvíli není nutno používat, četnosti jsme již spočetli a uložili do proměnné cetnosti výše
pie(cetnosti)
# Zabarvení grafu
pie(cetnosti,
col=c("red","green","yellow","blue"))
pie(cetnosti,
col=heat.colors(4))
# Přidání názvu grafu a popisků
pie(cetnosti,
col=heat.colors(4),
main="Zastoupení výrobců ve výběru",
labels=c("Výrobce A","Výrobce B","Výrobce C","Výrobce D"))
pie(cetnosti,
col=heat.colors(4),
main="Zastoupení výrobců ve výběru",
labels=paste("Výrobce",names(cetnosti),"\n",cetnosti)) # funkce paste() umožňuje sloučit textové řetězce a hodnoty proměnných, symbol "\n" tvoří nový řádek v textu
# rel. četnosti byly spočteny a uloženy do proměnné rel.cetnosti výše
pie(cetnosti,
col=heat.colors(4),
main="Zastoupení výrobců ve výběru",
labels=paste("Výrobce",names(cetnosti),"\n",cetnosti,";",rel.cetnosti,"%"))
# Pro zájemce - balíček plotrix a funkce pie3D vytvoří 3D koláčový graf
## Sloupcový graf - angl. barplot #####################################################
cetnosti=table(data5S$vyrobce) # v tuto chvíli není nutno používat, četnosti jsme již spočetli a uložili do proměnné cetnosti výše
barplot(cetnosti)
# Změna barev, přidání názvu
barplot(cetnosti,
col=heat.colors(4),
main="Zastoupení výrobců ve výběru",
space=0.6) # parametr space vytvoří mezeru mezi sloupci
# Přidání dalších popisků a legendy
barplot(cetnosti,
col=heat.colors(4),
horiz=TRUE, # horizontální orientace grafu
border=FALSE, # nevykresluje čáru kolem sloupečků
main="Zastoupení výrobců ve výběru",
names.arg=paste("Výrobce",names(cetnosti)))
legend("topright",
paste("Výrobce",names(cetnosti)),
col=heat.colors(4),
fill=heat.colors(4),
border=FALSE,
bty="n")
# Přidání absolutních a relativních četností k odpovídajícím sloupcům
bp = barplot(cetnosti,
col=heat.colors(4),
main="Zastoupení výrobců ve výběru",
names.arg=paste("Výrobce",names(cetnosti)))
text(bp,cetnosti,cetnosti)
bp = barplot(cetnosti,
col=heat.colors(4),
main="Zastoupení výrobců ve výběru",
names.arg=paste("Výrobce",names(cetnosti)))
text(bp,
cetnosti,paste(cetnosti,";",rel.cetnosti,"%"),
pos=1) # parametr pos udává, kde bude text uveden vzhledem k dané pozici (1 = pod, 2 = vlevo, 3 = nad, 4 = vpravo)
## Jak graf uložit? ##################################################################
# Zjištění aktivního okna, nastavení aktivního okna - tj. který graf chceme uložit?
dev.cur()
dev.set(2)
# Uložení obrázku ve formátu pdf (výška a šířka jsou uvedeny v palcích (inch), 1inch=2,54cm)
dev.print(device=pdf,file="barplot.pdf",width=6.5,height=5)
# Zavření grafického okna
dev.off()
# Kam se obrázek uložil?
getwd()
#######################################################################################
#####Pie charts are a very bad way of displaying information.##########################
##The eye is good at judging linear measures and bad at judging relative areas.########
##A bar chart or dot chart is a preferable way of displaying this type of data. #######
#######################################################################################
#######################################################################################
## 6. Exploratory analysis and visualisation of a quantitative variable ###############
## Descriptive statistics #############################################################
summary(dataS$kap5)
# Mean of a single variable
mean(dataS$kap5)
mean(a5)
# Beware of missing values
mean(data$C5)
mean(data$C5,na.rm=TRUE)
mean(na.omit(data$C5))
# Median of a single variable
quantile(dataS$kap5,probs=0.5)
quantile(a5,probs=0.5)
# Sample size (excluding missing values)
length(na.omit(dataS$kap5))
#######################################################################################
## functions that apply a chosen function to data-frame columns, vector elements, etc.
x=1:4
# - lapply - applies the chosen function to the input and returns a list of values
lapply(x,sqrt)
# - sapply - applies the chosen function to the input and returns a vector of values
sapply(x,sqrt)
# - vapply - like sapply, with an extra parameter specifying the output type
vapply(x,sqrt,numeric(1))
# - tapply - applies to a vector split into groups by a factor
#          - used below
#######################################################################################
# Means of the capacity after 5 cycles, by manufacturer
tapply(data5S$kap5, data5S$vyrobce, mean)
# Medians of the capacity after 5 cycles, by manufacturer
tapply(data5S$kap5, data5S$vyrobce, quantile, probs=0.5)
# Other characteristics can be computed analogously.
# Note! Functions for skewness and kurtosis are not part of base R; they are in the moments package
install.packages("moments")
library(moments)
# further characteristics -> var(), sd(), min(), max(), skewness(), kurtosis()
## Box plot ###########################################################################
boxplot(b5)
# Customising the plot
boxplot(b5,
        main="Kapacita po 5 cyklech (mAh)",
        xlab="Výrobce B",
        ylab="kapacita (mAh)")
# Colouring the boxplot, showing the mean
boxplot(b5,
        main="Kapacita po 5 cyklech (mAh)",
        xlab="Výrobce B",
        ylab="kapacita (mAh)",
        col="grey")
points(1, mean(b5,na.rm=TRUE), pch=3)
# Horizontal boxplot
boxplot(b5,
        main="Kapacita po 5 cyklech (mAh), výrobce B",
        horizontal=TRUE,
        xlab="kapacita (mAh)")
# Further parameters
boxplot(b5,
        main="Kapacita po 5 cyklech (mAh)",
        xlab="Výrobce B",
        ylab="kapacita (mAh)",
        col="grey",
        outline=FALSE, # do not show outliers
        boxwex=0.5) # halves the box width
# Add the horizontal line y = mean(b5)
abline(h=mean(b5,na.rm=TRUE), col="red",lty=2)
# The boxplot function is useful for more than drawing (returns the summary statistics)
boxplot(b5,plot=FALSE)
## Multiple box plot ##################################################################
boxplot(data5)
boxplot(data5,
        boxwex=0.5,
        col="grey")
boxplot(data5,
        boxwex=c(0.5,0.5,1.5,1.5),
        col=c("red","yellow","grey","blue"))
boxplot(data5,
        boxwex=c(0.5,0.5,1.5,1.5),
        col=terrain.colors(4))
abline(h=1900,col="red",lty=2)
# Gap in the boxplot - idea: the third position is left empty
boxplot(data5,
        at=c(1,2,4,5))
# Boxplot constructed from data in the standard data format - using split()
pom=split(data5S$kap5, data5S$vyrobce) # pom holds the data as a list of per-group vectors
boxplot(pom)
# Boxes in any desired order, using list()
pom=list(a5, b5, c5, d5)
boxplot(pom)
pom=list(d5, c5, b5, a5)
boxplot(pom)
## Histogram ##########################################################################
hist(a5)
hist(a5,breaks=10) # What do different values of the breaks parameter do to the plot?
hist(a5,
     main="Histogram pro kapacitu akumulátorů po 5 cyklech, výrobce A",
     xlab="kapacita (mAh)",
     ylab="f(x)",
     col="blue",
     border="grey",
     labels=TRUE) # adds the absolute frequencies of the bins as labels
hist(a5,
     main="Histogram pro kapacitu akumulátorů po 5 cyklech, výrobce A",
     xlab="kapacita (mAh)",
     ylab="f(x)",
     col="blue",
     border="grey",
     labels=TRUE, # adds the absolute frequencies of the bins as labels
     freq=FALSE) # changes the y-axis scale to density --> f(x)
lines(density(a5)) # adds the kernel density estimate
# Generate the normal density for comparison
xfit=seq(min(a5), max(a5), length=40)
yfit=dnorm(xfit, mean=mean(a5), sd=sd(a5))
lines(xfit, yfit, col="black", lwd=2)
## QQ plot - a graphical tool for assessing normality #################################
qqnorm(a5)
qqline(a5)
#######################################################################################
## Several plots in one figure -> layout() or par() ###################################
## See the options for combining plots - http://www.statmethods.net/advgraphs/layout.html
## Combination of a histogram and a boxplot ###########################################
pom=layout(mat = matrix(1:8,2,4, byrow=FALSE), height = c(2.5,1))
layout.show(pom)
par(oma=c(2,2,3,2),mar=c(2,2,3,2))
hist(a5,
     main="Výrobce A",
     xlab="kapacita (mAh) po 5 cyklech",
     ylab="četnost",
     ylim=c(0,32),
     xlim=c(1730,2040))
boxplot(a5,
        horizontal=TRUE,
        ylim=c(1700,2040),
        boxwex=1.5)
hist(b5,
     main="Výrobce B",
     xlab="kapacita (mAh) po 5 cyklech",
     ylab="četnost",
     ylim=c(0,32),
     xlim=c(1730,2040))
boxplot(b5,
        horizontal=TRUE,
        ylim=c(1700,2040),
        boxwex=1.5)
hist(c5,
     main="Výrobce C",
     xlab="kapacita (mAh) po 5 cyklech",
     ylab="četnost",
     ylim=c(0,32),
     xlim=c(1730,2040))
boxplot(c5,
        horizontal=TRUE,
        ylim=c(1700,2040),
        boxwex=1.5)
hist(d5,
     main="Výrobce D",
     xlab="kapacita (mAh) po 5 cyklech",
     ylab="četnost",
     ylim=c(0,32),
     xlim=c(1730,2040))
boxplot(d5,
        horizontal=TRUE,
        ylim=c(1700,2040),
        boxwex=1.5)
mtext("Histogramy a boxploty pro jednotlivé výrobce po 5 cyklech", cex = 1.5, outer=TRUE, side=3)
# For advanced users - the same via a for loop
pom=layout(mat = matrix(1:8,2,4, byrow=FALSE), height = c(2.5,1))
layout.show(pom)
par(oma=c(2,2,3,2), mar=c(2,2,3,2))
for (i in 1:4){
  hist(data5[,i],
       main=paste("Výrobce",colnames(data5)[i]),
       xlab="kapacita (mAh) po 5 cyklech",
       ylab="četnost",
       xlim=c(min(data5,na.rm=TRUE), max(data5,na.rm=TRUE)),
       ylim=c(0,32))
  boxplot(data5[,i],
          horizontal=TRUE,
          ylim=c(min(data5,na.rm=TRUE), max(data5,na.rm=TRUE)),
          boxwex=1.5)
}
mtext("Histogramy a boxploty pro jednotlivé výrobce po 5 cyklech", cex = 1.5, outer=TRUE, side=3)
## Combination of a histogram and a QQ plot ###########################################
pom=layout(mat = matrix(1:8,2,4, byrow=FALSE), height = c(2,1.5))
layout.show(pom)
par(oma=c(2,2,3,2), mar=c(2,2,3,2))
for (i in 1:4){
  hist(data5[,i],
       main=paste("Výrobce",colnames(data5)[i]),
       xlab="kapacita (mAh) po 5 cyklech",
       ylab="četnost",
       xlim=c(min(data5,na.rm=TRUE), max(data5,na.rm=TRUE)),
       ylim=c(0,0.037),
       freq=FALSE)
  lines(density(data5[,i], na.rm=TRUE))
  xfit=seq(min(data5[,i], na.rm=TRUE), max(data5[,i], na.rm=TRUE), length=40)
  yfit=dnorm(xfit, mean=mean(data5[,i], na.rm=TRUE), sd=sd(data5[,i], na.rm=TRUE))
  lines(xfit, yfit, col="blue", lty=2)
  qqnorm(data5[,i])
  qqline(data5[,i])
}
mtext("Histogramy a QQ-ploty pro jednotlivé výrobce po 5 cyklech", cex = 1.5, outer=TRUE, side=3)
#######################################################################################
## 7. Identification of outliers (and their removal from the data frame) ##############
## With individual judgement on how to treat the outliers #############################
# Sort the data frame by data5S$vyrobce and data5S$kap5
data5S=data5S[with(data5S, order(data5S$vyrobce,data5S$kap5)),]
# In RKWard, data can be sorted via the Data / Sort data menu
# Copy the variable to a new column in which selected outliers will be removed
data5S$kap5.bez=data5S$kap5
# Selected outliers can then be comfortably removed (or corrected) "by hand"
# More sophisticated methods -> e.g. inner fences, multivariate outlier detection...
|
c8e6348d6b28caddf95147aeb30a875483dc6edf
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/datadr/examples/convert.Rd.R
|
6825df9b4d8631b2c73986563402387192faf462
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 262
|
r
|
convert.Rd.R
|
# Example script for datadr::convert (extracted from the convert.Rd help page)
library(datadr)
### Name: convert
### Title: Convert 'ddo' / 'ddf' Objects
### Aliases: convert
### ** Examples

# split iris into one subset per species, yielding an in-memory ddf
d <- divide(iris, by = "Species")
# convert in-memory ddf to one stored on disk
dl <- convert(d, localDiskConn(tempfile(), autoYes = TRUE))
# print the disk-backed object
dl
|
73379c322f349dde606049efa7ea3f77e089ebaf
|
9bdef83f28b070321ba27709d2c7ec028474b5c3
|
/R/packages/package.creation.R
|
bcf8ffd6ab3ab0e3aa9e4e6c4f5b03f8ef145c1e
|
[] |
no_license
|
antagomir/scripts
|
8e39ce00521792aca1a8169bfda0fc744d78c285
|
c0833f15c9ae35b1fd8b215e050d51475862846f
|
refs/heads/master
| 2023-08-10T13:33:30.093782
| 2023-05-29T08:19:56
| 2023-05-29T08:19:56
| 7,307,443
| 10
| 15
| null | 2023-07-19T12:36:45
| 2012-12-24T13:17:03
|
HTML
|
UTF-8
|
R
| false
| false
| 2,981
|
r
|
package.creation.R
|
replace.description.field <- function (version.name, field, entry) {
  # Replace the value of a field in the DESCRIPTION file of a package source tree.
  #
  # version.name: path to the package directory (DESCRIPTION is expected inside it)
  # field: text identifying the field line, e.g. "Version:" (interpreted as a regex by grep)
  # entry: replacement value, appended directly after `field` with no separator
  #
  # Returns the new file contents invisibly.
  f <- paste(version.name, "/DESCRIPTION", sep = "")
  lins <- readLines(f)
  # locate the line containing the field; take the first hit so that multiple
  # matches no longer abort with a subscript error
  field.line <- grep(field, lins)
  if (length(field.line) == 0) {
    stop("Field '", field, "' not found in ", f, call. = FALSE)
  }
  lins[[field.line[1]]] <- paste(field, "", entry, sep = "")
  # Write back via the path: writeLines then opens AND closes the connection
  # itself (the original passed an unclosed file() connection, which leaked).
  writeLines(lins, f)
  invisible(lins)
}
replace.rd.fields <- function(rdfile, fills)
{
  # Replace several annotation fields in a man/*.Rd file at once.
  #
  # rdfile: path to the Rd file
  # fills: named list of replacement texts keyed by field name, e.g.
  #        fills <- list(author = "N.N", reference = "RandomJournal", ...)
  #        NULL or NA entries are skipped.
  rdlines <- readLines(rdfile)
  for (field in names(fills)) {
    replacement <- fills[[field]]
    # Guard against NULL entries: the original `if (!is.na(fills[[field]]))`
    # crashed with a zero-length condition when an element was NULL.
    if (!is.null(replacement) && !is.na(replacement)) {
      # Replace the field with the given replacement text
      rdlines <- replace.rd.field(rdlines, field, replacement)
    }
  }
  # Write back via the path so writeLines opens and closes the connection
  # itself (the original leaked an unclosed file() connection).
  writeLines(rdlines, rdfile)
}
replace.rd.field <- function(rdlines, field, replacement) {
  # Replace the contents of one annotation field in the lines of a man/*.Rd
  # file. The (possibly multi-line) old field body is collapsed into a single
  # line "\field{replacement}". If the field is absent, the lines are
  # returned unchanged.
  #
  # rdlines: character vector with the Rd file contents
  # field: field name, e.g. "author" (matched as "field{")
  # replacement: new field body
  #
  # Returns the modified character vector.
  start <- grep(paste0(field, "\\{"), rdlines)
  if (length(start) > 0) {
    # scan forward for the line that closes the field (first "}" found)
    end <- start
    while (!grepl("}", rdlines[[end]], fixed = TRUE)) {
      end <- end + 1
    }
    # rewrite the opening line as a complete single-line field
    rdlines[[start]] <- paste0("\\", field, "{", replacement, "}")
    # drop any continuation lines of the old field body
    if (end > start) {
      rdlines <- rdlines[-((start + 1):end)]
    }
  }
  rdlines
}
set.keywords <- function (rdfile, keywords) {
  # Overwrite all \keyword{} entries of an Rd file with the given keywords.
  # The new keyword lines are inserted at the position of the first existing
  # \keyword line; everything between the first and last old keyword line is
  # replaced.
  #
  # rdfile: path to the Rd file (modified in place)
  # keywords: character vector of keyword names
  rdlines <- readLines(rdfile)
  # positions of the existing keyword lines
  inds <- grep("keyword\\{", rdlines)
  # Explicit error instead of the original's cryptic failure on
  # max(integer(0)) when the file has no keyword lines.
  if (length(inds) == 0) {
    stop("No \\keyword lines found in ", rdfile, call. = FALSE)
  }
  # store the file lines before and after the keyword section
  after.keywords <- rdlines[-seq(max(inds))]
  before.keywords <- rdlines[seq(min(inds) - 1)]
  # paste0 is vectorised, so the original for loop is unnecessary
  keywordlines <- paste0("\\keyword{", keywords, "}")
  # reassemble the file
  rdlines <- c(before.keywords, keywordlines, after.keywords)
  # Write back via the path so writeLines closes the connection itself
  # (the original leaked an unclosed file() connection).
  writeLines(rdlines, rdfile)
}
remove.help.lines <- function (rdfile, helplines) {
  # Delete every line of an Rd file that matches any of the given patterns
  # (typical use: stripping prompt()/boilerplate help lines).
  #
  # rdfile: path to the Rd file (modified in place only if something matched)
  # helplines: character vector of regular expressions to match
  rdlines <- readLines(rdfile)
  remove.idx <- c()
  for (lin in helplines) {
    remove.idx <- c(remove.idx, grep(lin, rdlines))
  }
  # unique() so a line matched by several patterns is only indexed once
  remove.idx <- unique(remove.idx)
  if (length(remove.idx) > 0) {
    rdlines <- rdlines[-remove.idx]
    # Write back via the path so writeLines closes the connection itself
    # (the original leaked an unclosed file() connection).
    writeLines(rdlines, rdfile)
  }
}
|
71bc643423256db4b3ab9a2b49a35cd62cf85b31
|
0eb87c697d21c87fe6ffdeb731033ea17e92a327
|
/man/grid_spatialpoints.Rd
|
c59287814e5a3c435d8cfc95af66d9d0ea95c643
|
[] |
no_license
|
Grelot/rgeogendiv
|
e4566fcb965a07db5a655a95744eb20221f893ce
|
d261bcea4d12a70d40ce3efd058b0908744a3c27
|
refs/heads/master
| 2023-02-16T18:41:09.436973
| 2020-12-17T16:26:31
| 2020-12-17T16:26:31
| 300,564,835
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 918
|
rd
|
grid_spatialpoints.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/grid_spatialpoints.R
\name{grid_spatialpoints}
\alias{grid_spatialpoints}
\title{Build a map grid spatialpoints}
\usage{
grid_spatialpoints(
siteSize = 260000,
projectionCRS = "+proj=cea +lon_0=0 +lat_ts=30 +x_0=0 +y_0=0 +datum=WGS84 +ellps=WGS84 +units=m +no_defs"
)
}
\arguments{
\item{siteSize}{size of each cell of the grid in meter}
\item{projectionCRS}{Interface class to the PROJ projection and transformation system.
Default value is "+proj=cea +lon_0=0 +lat_ts=30 +x_0=0 +y_0=0 +datum=WGS84 +ellps=WGS84 +units=m +no_defs"}
}
\value{
a spatialpoint object of all sites of the grid into the projection `projectionCRS` given as argument
}
\description{
The function downloads a world map raster from the Natural Earth database.
A grid is built from this raster.
The list of centroids of all sites of the grid is then returned.
}
|
98348fa9e5648a988db50027e9a7742470468d7d
|
80ebdb34963c9d62e9d70bad0dae43673c40f0e8
|
/server.R
|
0c0a1cb3615f02adb2c13ec23b40f7fd7c4171a3
|
[
"MIT"
] |
permissive
|
AdamSpannbauer/shiny_groom_proposal
|
8d4d61b5b369e178d8194965eafedbd5582dcb80
|
9df9c512ccf0f0d091a99fca14f3f9882f40cb23
|
refs/heads/master
| 2020-04-12T00:22:50.920568
| 2018-12-18T21:38:10
| 2018-12-18T21:38:10
| 162,198,075
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,931
|
r
|
server.R
|
# Shiny server for the "groomposal" app: shows a personalized proposal page
# keyed by the gm_id URL query parameter and emails the yes/no response.
# Relies on `groomsman_ids`, `unknown_id_response` and `send_email()` being
# defined elsewhere in the app (e.g. global.R) -- TODO confirm.
shinyServer(function(input, output, session) {
  # Reactive groomsman id taken from the URL query string.
  # Yields FALSE when the id is missing or not one of the known ids.
  groomsman_id <- reactive({
    params <- parseQueryString(session$clientData$url_search)
    id <- params[['gm_id']]
    if (is.null(id)) {
      id <- FALSE
    }
    if (!(id %in% groomsman_ids)) {
      id <- FALSE
    }
    id
  })

  # Valid id: show the beer gif.  Invalid id: show the go-away message.
  output$beer_gif <- renderUI({
    if (groomsman_id() == FALSE) {
      return(unknown_id_response)
    }
    p(img(src = 'wide_pbr_spill.gif', width = '40%'))
  })

  # Groomposal gif, shown only until either response button is pressed.
  output$goomposal_gif <- renderUI({
    req(groomsman_id(),
        sum(input$yes_button, input$no_button) == 0)
    p(img(src = glue('groomposal_gifs/{groomsman_id()}_groomposal.gif'),
          width = '50%'))
  })

  # Yes/no buttons, hidden once a response has been given.
  output$response_buttons <- renderUI({
    req(groomsman_id(),
        sum(input$yes_button, input$no_button) == 0)
    p(actionGroupButtons(inputIds = c('yes_button', 'no_button'),
                         labels = c('yes', 'no'),
                         direction = 'horizontal',
                         status = 'danger'))
  })

  # After a button press: email the answer and show a reaction gif.
  output$button_press_response <- renderUI({
    req(groomsman_id(),
        sum(input$yes_button, input$no_button) > 0)
    subject <- glue('{groomsman_id()} groomposal response')
    if (input$yes_button > 0) {
      send_email(subject = subject,
                 message_text = 'yes',
                 stop_on_fail = FALSE)
      reaction <- img(src = 'reaction_gifs/happy_michael_scott.gif', width = '40%')
    } else {
      send_email(subject = subject,
                 message_text = 'no',
                 stop_on_fail = FALSE)
      reaction <- img(src = 'reaction_gifs/bummed_michael_scott.gif', width = '30%')
    }
    p(reaction)
  })
})
|
88c556f232a55684bc9c3de8c03460b041701572
|
1fb167f75fac5fd1438cc442d1e21296c68fe277
|
/combine_data.R
|
6e6a2edc04ed5a48b4b843f4f72a6abc9eb048be
|
[] |
no_license
|
BrianDarmitzel/INFO-201-Group-Project
|
8cf4d3f7b56f0a61d9e0a8d37ee6a90e315a084b
|
6e9eeddaab914b61a1b5b8765d8ce20e94663a6c
|
refs/heads/master
| 2020-08-31T06:42:21.936296
| 2019-12-05T06:02:30
| 2019-12-05T06:02:30
| 218,626,398
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,049
|
r
|
combine_data.R
|
library("dplyr")
library("stringr")
library("plotly")
# load in filtered data set
emissions_data <- read.csv(unz("data/filtered_datasets.zip",
"filtered_datasets/emissions_data.csv"))
fuel_economy_data <- read.csv(
unz("data/filtered_datasets.zip",
"filtered_datasets/vehicles_individual_data.csv"))
# convert values to upper case
fuel_economy_data$make <- str_to_upper(
fuel_economy_data$make)
fuel_economy_data$model <- str_to_upper(
fuel_economy_data$model)
emissions_data$Represented.Test.Vehicle.Model <- str_to_upper(
emissions_data$Represented.Test.Vehicle.Model)
# rename a few columns
emissions_data <- rename(emissions_data,
"Vehicle Manufacturer" = Represented.Test.Vehicle.Make)
fuel_economy_data <- rename(fuel_economy_data,
"Vehicle Manufacturer" = make)
emissions_data <- rename(emissions_data,
"Vehicle Model" = Represented.Test.Vehicle.Model)
fuel_economy_data <- rename(fuel_economy_data,
"Vehicle Model" = model)
brands1 <- emissions_data %>%
group_by(`Vehicle Manufacturer`) %>%
summarize(average_emission_emitted = sum(Emission_Emitted) / n())
brands2 <- fuel_economy_data %>%
group_by(`Vehicle Manufacturer`) %>%
summarize(`Average city MPG` = sum(Average.city.MPG) / n(),
`Average highway MPG` = sum(Average.highway.MPG) / n(),
`Combined MPG` = sum(Combined.MPG) / n(),
`Annual gas Consumption in Barrels` =
sum(Annual.gas.Consumption.in.Barrels) / n(),
`Tailpipe Emissions in g/mi` =
sum(Tailpipe.Emissions.in.g.mi) / n(),
`Annual Fuel Cost` = sum(Annual.Fuel.Cost) / n(),
`Cost Savings for Gas over 5 Years` =
sum(Cost.Savings.for.Gas.over.5.Years) / n())
all_brands <- merge(x = brands1, y = brands2, by = "Vehicle Manufacturer")
all_cars <- merge(x = emissions_data,
y = fuel_economy_data,
by = c("Vehicle Manufacturer", "Vehicle Model")) %>%
rename("Average Emissions Emitted" = Emission_Emitted,
"Average city MPG" = Average.city.MPG,
"Average highway MPG" = Average.highway.MPG,
"Combined MPG" = Combined.MPG,
"Annual gas Consumption in Barrels" =
Annual.gas.Consumption.in.Barrels,
"Tailpipe Emissions in g/mi" = Tailpipe.Emissions.in.g.mi,
"Annual Fuel Cost" = Annual.Fuel.Cost,
"Cost Savings for Gas over 5 Years" =
Cost.Savings.for.Gas.over.5.Years) %>%
select(-Number.of.Models.in.Data)
averages <- lapply(all_cars[3:10], mean)
# Bar chart showing how far one car model's value in `column` deviates from
# the fleet-wide average (uses the globals `all_cars` and `averages` built
# above).
#
# Colour encodes whether the deviation is desirable: for columns where a
# larger value is better (MPG and savings columns) a positive deviation is
# blue; for cost/emission columns the coding is inverted.
#
# car_model - value matched against the `Vehicle Model` column
#             (assumes exactly one matching row -- TODO confirm)
# column    - name of one of the numeric metric columns of `all_cars`
# Returns a plotly bar chart object.
graph_ranking <- function(car_model, column) {
  # Columns where a larger value is desirable.
  higher_is_better <- column %in% c("Average city MPG",
                                    "Average highway MPG",
                                    "Combined MPG",
                                    "Cost Savings for Gas over 5 Years")
  cars_avg <- all_cars %>%
    mutate(result = all_cars[[column]] - averages[[column]])
  data <- cars_avg %>%
    filter(`Vehicle Model` == car_model) %>%
    select(result)
  # Red iff the sign of the deviation disagrees with the column's direction
  # of goodness (collapses the original four-branch if/else ladder).
  is_negative <- data$result < 0
  bar_color <- if (is_negative == higher_is_better) "red" else "blue"
  plot_ly() %>%
    add_bars(
      x = column,
      y = as.vector(data$result, mode = "numeric"),
      marker = list(color = bar_color),
      text = round(as.vector(data$result, mode = "numeric"), 1),
      # was "message", which is not a valid plotly hoverinfo value;
      # "text" makes the hover show the rounded deviation set above
      hoverinfo = "text",
      textposition = "auto"
    ) %>%
    layout(
      # Fixed y-range across all models so bars are comparable.
      yaxis = list(range = c(min(cars_avg[["result"]]),
                             max(cars_avg[["result"]])))
    )
}
|
c122c438af228805083f06f5424b53a7149ffb66
|
0a5cf23a3ef5fb9f36a9051ec59d1215ee62a99c
|
/RScripts/DataVisualization.R
|
914ac74f1d30c477acd5c9f4d9c6e00ca3eb1c59
|
[] |
no_license
|
phanisrikar93/cricScoreR
|
23ec98a5ad17c6e7712ce8ecd43c6df8430c55ed
|
b2d18d8c4f4d79e18484b07e8c9e25c03d834314
|
refs/heads/master
| 2021-01-19T02:39:15.435212
| 2016-07-20T16:57:53
| 2016-07-20T16:57:53
| 63,249,022
| 0
| 1
| null | 2016-07-20T16:57:54
| 2016-07-13T13:40:13
|
R
|
UTF-8
|
R
| false
| false
| 1,462
|
r
|
DataVisualization.R
|
# Exploratory analysis of ball-by-ball IPL data: summary statistics plus
# lattice histograms of runs scored conditioned on over, team, batsman
# and bowler.
library(lattice)  # was require(): library() fails loudly when lattice is missing

# Read the combined IPL data (one row per delivery).
FinalDataFrame <- read.csv("C:\\Users\\admin\\Downloads\\Use_Case_Dhruv\\BindIPLData.csv")

# Summary and spread of runs scored per delivery.
Summary1 <- summary(FinalDataFrame$RunsScored)
Summary1
sd1 <- sd(FinalDataFrame$RunsScored)
sd1

# Summary and spread of extras.
Summary2 <- summary(FinalDataFrame$Extras)
Summary2
sd2 <- sd(FinalDataFrame$Extras)
sd2

# Pearson correlation between runs scored and extras.
# Computed here, while both columns are still numeric (below RunsScored is
# converted to a factor for plotting, which would break cov()/cor()).
# NOTE: the original cov/(var*var) formula is not the correlation
# coefficient; cor() implements cov/(sd_x * sd_y).
correalation <- cor(FinalDataFrame$RunsScored, FinalDataFrame$Extras)
correalation

# Bucket overs into whole-over factors for conditioning.
FinalDataFrame$Over <- factor(trunc(FinalDataFrame$Over))

# Histograms of runs scored conditioned on over, team, batsman and bowler.
histogram(~RunsScored | Over, data = FinalDataFrame, type = "count")
histogram(~RunsScored | Team, data = FinalDataFrame, type = "percent")
histogram(~RunsScored | StrikeBatsman, data = FinalDataFrame, type = "count")
histogram(~RunsScored | Bowler, data = FinalDataFrame, type = "count")

# Treat runs as a categorical outcome for the marginal histograms.
FinalDataFrame$RunsScored <- factor(FinalDataFrame$RunsScored)
histogram(~RunsScored, data = FinalDataFrame, type = "count")
histogram(~RunsScored, data = FinalDataFrame, type = "percent")
|
4ca0b7ee9f2b701404c8568f3a9e287031c1c05d
|
c7c5813adee3d966baced00501b4f7d15ecc3e4c
|
/man/Rboot.Rd
|
ab9b5be4a9611233c16910948cae8ab2b53cd2ec
|
[] |
no_license
|
E-Caron/slm
|
8f181ce1a03526843f1b4ea1b647186b67145edc
|
a80d9765fda9e29fa3af78c990fea3931199d0f2
|
refs/heads/master
| 2020-06-06T09:43:52.667197
| 2020-01-08T19:08:17
| 2020-01-08T19:08:17
| 192,704,653
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,414
|
rd
|
Rboot.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/auxiliary-fun.R
\name{Rboot}
\alias{Rboot}
\title{Risk estimation for a tapered covariance matrix estimator via bootstrap method}
\usage{
Rboot(epsilon, treshold, block_size, block_n, model_max, kernel_fonc)
}
\arguments{
\item{epsilon}{an univariate process.}
\item{treshold}{number of estimated autocovariance terms that we consider for the estimation of the covariance matrix.}
\item{block_size}{the size of the bootstrap blocks. \code{block_size} must be greater than \code{model_max}.}
\item{block_n}{blocks number used for the bootstrap.}
\item{model_max}{the maximal dimension, that is the maximal number of terms available to estimate the covariance matrix.}
\item{kernel_fonc}{the kernel to use. The user can define his own kernel and put it in the argument.}
}
\value{
This function returns a list with:
\item{risk}{for one treshold, the value of the estimated risk.}
\item{SE}{the standard-error due to the bootstrap.}
}
\description{
This function computes an estimate of the risk for the tapered covariance matrix estimator of a process via a bootstrap method,
for a specified threshold (the \code{treshold} argument) and a specified kernel.
}
\references{
E. Caron, J. Dedecker and B. Michel (2019). Linear regression with stationary errors: the R package slm. \emph{arXiv preprint arXiv:1906.06583}.
\url{https://arxiv.org/abs/1906.06583}.
}
|
73333ab5b9efff43160c08a9094cd68ab8e26426
|
1dd840b99146dbdd57e3b267d6243d695354bdd0
|
/DTF_Create.R
|
3d31bbcb8946136a212c30fdfad21e0491634276
|
[] |
no_license
|
diardelavega/TotalPrediction
|
654e6ca7d4d6f1df3153dd94ab31c7a0790e738b
|
464cafdc346c419ba58d7c34ac7ff82ebfddb4ff
|
refs/heads/master
| 2020-04-12T08:52:37.906304
| 2017-03-04T17:22:23
| 2017-03-04T17:22:23
| 61,332,137
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 13,382
|
r
|
DTF_Create.R
|
# Script used for loading / initiating all the data needed to create DTF objects.
os<-Sys.info()["sysname"]; # operating system name; used to pick Linux vs Windows paths below
# Drive the full DTF-object creation pipeline.
#
# For every training-data path in `trPaths`: reads the csv into the global
# `dtf`, derives the differenced dataset via diffFunc(), and builds/stores
# each requested DTF kind ("h" head, "s" score, "p1", "p2", "ht", "ft"),
# skipping any kind whose .RData file already exists on disk.  Progress and
# errors are appended to a plain-text log file.
#
# trPaths  - vector of training-data file paths, one per competition
# dtfKind  - vector of DTF kinds to create, subset of {h,s,p1,p2,ht,ft}
# fld      - forwarded to the *CrfvInit helpers (presumably fold count -- TODO confirm)
# betNoBet - 'bet'/'nobet' flag forwarded to the *CrfvInit helpers
# fff      - feature-set tag forwarded to the *CrfvInit helpers
#
# Returns "DTF_END_OK" on success, "DTF_ERR_END" if the outer tryCatch
# caught an error.
runAll<- function(trPaths,dtfKind, fld=10, betNoBet='nobet',fff='f5'){
  exit <- "DTF_END_OK";
  #trPaths is a vector with all the tr paths of the competitions
  # dtfKind is a vector with the kind of dtf that we want to create {h,s,p1,p2,ht,ft}
  print(paste("trPaths :",trPaths));
  print(paste("dtfKind :",dtfKind));
  # -0 load the dtf-crfv-estimation files (head,score,1p,2p,ht,ft)
  library(methods);
  print("methods");
  libLoader();
  print("libLoader");
  DTFLoader();
  print("DTFLoader");
  predAtt_Loader();
  print("predAtt_Loader");
  print("@ DataStructLoader");
  dataStructLoader();
  print("dataStructLoader");
  dtfExit <- tryCatch({
    #os<-Sys.info()["sysname"];
    log <- "/home/user/BastData/R_LOG";# initial log file path is for linux o-systems
    if(grepl("win",tolower(os))){
      log <- "C:/BastData/R_LOG";
    }
    print("@ log-> funcs loaded");
    write(paste0("DTF..."," ",dtfKind), file = log, ncolumns = 10, append = T, sep = ",")
    for(path in trPaths){
      write(c("\t", path), file = log, ncolumns = 10, append = T, sep = ",")
      # -1 create datasets to work with
      # Globals consumed by diffFunc() and the *CrfvInit helpers.
      dtf <<- read.csv(path);
      ndtf <<- diffFunc();
      dirNam<- dirmaker(path);# create the folder of the competiton where the dtf object files will be stored
      # -2 start the object that will hold the pred data CREATION
      # Each kind gets its own tryCatch so one failure does not abort the rest.
      tryCatch({
        if("h" %in% dtfKind){
          print("\t ------: HEAD");
          if(!ishead(dirNam)){ # if file doesnt exzist
            headCrfvInit(fld,betNoBet,fff); # ret :hDtf calculate
            headmaker(dirNam) # store
            write("\t HEAD", file = log, ncolumns = 10, append = T, sep = ",")
          }}
      })
      tryCatch({
        print("\t ------: SCORE");
        if("s" %in% dtfKind){
          if(!isscore(dirNam)){
            scoreCrfvInit(fld,betNoBet,fff); # ret :csDtf
            scoremaker(dirNam)
            write("\t SCORE", file = log, ncolumns = 10, append = T, sep = ",")
          }}
      })
      tryCatch({
        if("ft" %in% dtfKind){
          print("\t ------: FT");
          if(!isft(dirNam)){
            totFtCrfvInit(fld,betNoBet,fff); # ret :tftDtf
            ftmaker(dirNam);
            write("\t FT", file = log, ncolumns = 10, append = T, sep = ",")
          }}
      })
      # Disabled guard: was meant to skip the half-time kinds when the data
      # carries no real half-time results.
      # tryCatch({
      # if(mean(dtf$t1AvgHtScoreIn)<=0){
      # write("mean(dtf$t1AvgHtScoreIn)<=0", file = log, ncolumns = 10, append = T, sep = ",")
      # # average of ht scores is 0 or less (-1) -> no real ht results
      # # so skip the ht dtf objects
      # next;
      # }
      # })
      tryCatch({
        if("p1" %in% dtfKind){
          print("\t ------: P1");
          if(!isp1(dirNam)){
            p1CrfvInit(fld,betNoBet,fff); # ret :p1Dtf
            p1maker(dirNam);
            write("\t P1", file = log, ncolumns = 10, append = T, sep = ",")
          }}
      })
      tryCatch({
        if("p2" %in% dtfKind){
          print("\t ------: P2");
          if(!isp2(dirNam)){
            p2CrfvInit(fld,betNoBet,fff); # ret :p2Dtf
            p2maker(dirNam);
            write("\t P2", file = log, ncolumns = 10, append = T, sep = ",")
          }}
      })
      tryCatch({
        if("ht" %in% dtfKind){
          print("\t ------: HT");
          if(!isht(dirNam)){
            totHtCrfvInit(fld,betNoBet,fff); # ret :thtDtf
            htmaker(dirNam);
            write("\t HT", file = log, ncolumns = 10, append = T, sep = ",")
          }}
      })
    }
    return(exit);
  },
  error = function(err) {
    # error handler picks up where error was generated
    #print(paste("MY_ERROR: ",err));
    write(paste("\t MY_ERROR: ",err), file = log, ncolumns = 10, append = T, sep = ",")
    exit <- "DTF_ERR_END";
    return(exit);
  },
  finally = {
    # in case of error save whatever can be saved
    #fileMaker(path); # create folder/subfolders & save the dtfs
  }) # END tryCatch
  return(dtfExit);
}
# Build the "differenced" feature frame: for each match row of the global
# `dtf`, compute team1-minus-team2 differences of the strength, form,
# win/draw/loss and average-score columns, and return them together with
# the outcome/bet columns copied from `dtf`.
#
# Side effect: also writes mfd1, mfd2, odd, old and owd back into the
# global `dtf` via `<<-`.
#
# Returns a data.frame (`ndf`) with one row per row of `dtf`.
diffFunc <- function(){
  # Attack-vs-defence mismatches (defence values are negative, hence abs()).
  t1adoe <- dtf$t1AtackIn - abs(dtf$t2DefenseOut) # because defence is negative nr
  t2adoe <- abs(dtf$t1DefenseIn) - dtf$t2AtackOut
  t1e <- dtf$t1Atack - abs(dtf$t2Defense) # because defence is negative nr
  t2e <- dtf$t2Atack - abs(dtf$t1Defense)
  #---------- overall attack/defence differences
  datk <- dtf$t1Atack-dtf$t2Atack
  datkin <- dtf$t1AtackIn-dtf$t2AtackIn
  datkout <- dtf$t1AtackOut-dtf$t2AtackOut
  ddef <- dtf$t1Defense-dtf$t2Defense
  ddefin <- dtf$t1DefenseIn-dtf$t2DefenseIn
  ddefout <- dtf$t1DefenseOut-dtf$t2DefenseOut
  doav_ht <- dtf$t1AvgHtScoreIn-dtf$t2AvgHtScoreOut
  doav_ft <- dtf$t1AvgFtScoreIn-dtf$t2AvgFtScoreOut
  #---------- average-score differences (home in / away out splits)
  dav_htin <- dtf$t1AvgHtScoreIn-dtf$t2AvgHtScoreIn
  dav_htout <- dtf$t1AvgHtScoreOut-dtf$t2AvgHtScoreOut
  dav_ftin <- dtf$t1AvgFtScoreIn-dtf$t2AvgFtScoreIn
  dav_ftout <- dtf$t1AvgFtScoreOut-dtf$t2AvgFtScoreOut
  owd <- dtf$t1WinsIn-dtf$t2WinsOut
  odd <- dtf$t1DrawsIn- dtf$t2DrawsOut
  old <- dtf$t1LosesIn - dtf$t2LosesOut
  #---------- win/draw/loss count differences
  dwin <- dtf$t1WinsIn-dtf$t2WinsIn
  dwout <- dtf$t1WinsOut-dtf$t2WinsOut
  ddin <- dtf$t1DrawsIn-dtf$t2DrawsIn
  ddout <- dtf$t1DrawsOut-dtf$t2DrawsOut
  dlin <- dtf$t1LosesIn-dtf$t2LosesIn
  dlout <- dtf$t1LosesOut-dtf$t2LosesOut
  pd <- dtf$t1Points-dtf$t2Points
  fd <- dtf$t1Form-dtf$t2Form
  # Per-row "mean form" over columns 13..16 (team1) and 39..42 (team2).
  # NOTE(review): mean(a, b, c, d) passes b/c/d as mean()'s trim/na.rm
  # arguments, so each value is effectively just the first column --
  # mean(c(...)) was probably intended.  Left as-is to preserve behaviour.
  mfd1<-c()
  mfd2<-c()
  for(i in 1:dim(dtf)[1]){mfd1[i] <- mean(dtf[i,13],dtf[i,14],dtf[i,15],dtf[i,16])}
  for(i in 1:dim(dtf)[1]){mfd2[i] <- mean(dtf[i,39],dtf[i,40],dtf[i,41],dtf[i,42])}
  #owd <- dtf$t1WinsIn-dtf$t2WinsOut
  #odd <- dtf$t1DrawsIn- dtf$t2DrawsOut
  #old <- dtf$t1LosesIn - dtf$t2LosesOut
  #---------- write some derived columns back into the GLOBAL dtf
  dtf$mfd1 <<-mfd1
  dtf$mfd2 <<-mfd2
  dtf$odd <<- odd
  dtf$old <<- old
  dtf$owd <<-owd
  #ttdf$mfd1 <-mfd1
  #ttdf$mfd2 <-mfd2
  #ttdf$odd <- odd
  #tdf$old<- old
  #tdf$owd <-owd
  #---------------- per-column form differences
  f1d <- dtf[,13]-dtf[,39]
  f2d <- dtf[,14]-dtf[,40]
  f3d <- dtf[,15]-dtf[,41]
  f4d <- dtf[,16]-dtf[,42]
  #-------------- assemble the differenced frame
  ndf <- data.frame(
    mfd1,mfd2,pd,fd,
    # f1d,f2d,f3d,f4d,
    t1adoe,t2adoe,t1e,t2e,
    owd,odd,old,
    dwin,dwout,ddin,ddout,dlin,dlout,
    datk,datkin,datkout,ddef,ddefin,ddefout,
    doav_ht,doav_ft,
    dav_htin,dav_htout,dav_ftin,dav_ftout
  )
  # Copy outcome, odds and identification columns straight from dtf.
  ndf$week <- dtf$week
  ndf$headOutcome <-dtf$headOutcome
  ndf$scoreOutcome<-dtf$scoreOutcome
  ndf$ht1pOutcome <-dtf$ht1pOutcome
  ndf$ht2pOutcome <-dtf$ht2pOutcome
  ndf$ggOutcome <-dtf$ggOutcome
  ndf$totHtScore <- dtf$totHtScore
  ndf$totFtScore <-dtf$totFtScore
  ndf$t1 <-dtf$t1
  ndf$t2<-dtf$t2
  ndf$bet_1<-dtf$bet_1
  ndf$bet_X<-dtf$bet_X
  ndf$bet_2<-dtf$bet_2
  ndf$bet_O<-dtf$bet_O
  ndf$bet_U<-dtf$bet_U
  ndf$t1Classification<-dtf$t1Classification
  ndf$t2Classification<-dtf$t2Classification
  ndf$mfd <- ndf$mfd1-ndf$mfd2
  ndf$t1Form <- dtf$t1Form
  ndf$t2Form <- dtf$t2Form
  ndf$f1d <- f1d
  ndf$f2d <- f2d
  ndf$f3d <- f3d
  ndf$f4d <- f4d
  # rm(datk,datkin,datkout,ddef,ddefin,ddefout,doav_ht,doav_ft,dav_htin,dav_htout,
  # dav_ftin,dav_ftout, owd,odd,old,dwin,dwout,ddin,ddout,dlin,dlout,pd,fd,mfd1,mfd2,f1d,f2d,f3d,f4d,
  # t1adoe,t2adoe,t1e,t2e )
  return(ndf);
}
# Save all six DTF objects for one competition into a single .RData file.
#
# The output path is derived from the training-data path:
# "Pred/Data" -> "DTF" and the "__Data" suffix -> ".dtf.RData"; the target
# directory is created when missing.  Relies on hDtf, csDtf, p1Dtf, p2Dtf,
# tftDtf and thtDtf being visible from the calling environment.
#
# @param file_path training-data path of the competition
fileMaker <- function(file_path){
  fileName <- gsub("Pred/Data", "DTF", file_path)
  fileName <- gsub("__Data", ".dtf.RData", fileName)
  pathSegment <- strsplit(fileName, "/")[[1]]
  # All segments but the last = the directory part.
  # (seq_len replaces 1:length(x)-1, whose precedence silently produced
  # index 0 plus 1..n-1.)
  dirName <- paste0(pathSegment[seq_len(length(pathSegment) - 1)], collapse = "/")
  if (!dir.exists(dirName)) {
    # mode must be an octal STRING: a numeric 753 is read as decimal and
    # coerced via as.octmode, yielding the wrong permissions ("1361")
    dir.create(dirName, recursive = TRUE, mode = "0753")
  }
  if (dir.exists(dirName)) {
    # Store every DTF object created for this competition in one file.
    save(hDtf, csDtf, p1Dtf, p2Dtf, tftDtf, thtDtf, file = fileName)
  }
  print(fileName)
}
# Map a training-data path to its DTF output directory and ensure the
# directory exists.
#
# "Pred/Data" is replaced with "DTF" and the trailing "__Data" suffix is
# dropped; the directory is created recursively when missing.
#
# @param trPath path of the competition's training-data file
# @return the (possibly newly created) DTF directory path
dirmaker<- function(trPath){
  dirName <- gsub("Pred/Data", "DTF", trPath)
  dirName <- gsub("__Data", "", dirName)
  if (!dir.exists(dirName)) {
    # mode must be an octal STRING; the original numeric 753 was read as
    # decimal and coerced to the wrong octal permissions
    dir.create(dirName, recursive = TRUE, mode = "0753")
  }
  dirName
}
# ---- Per-kind DTF file helpers ------------------------------------------
# Each DTF kind (head, score, p1, p2, ht, ft) is stored as
# <dirPath>/<kind>.dtf.RData.  The is*() functions test whether that file
# already exists; the *maker() functions save the corresponding DTF object
# (looked up from the calling environment) into it.  The twelve original
# copy-paste functions are kept, now sharing one path builder.

# Build the .RData path for one DTF kind inside a competition directory.
dtf_file <- function(dirPath, kind){
  paste0(dirPath, "/", kind, ".dtf.RData")
}

# TRUE when the head DTF file already exists.
ishead<-function(dirPath){
  file.exists(dtf_file(dirPath, "head"))
}
# Save the head DTF object (hDtf).
headmaker<- function(dirPath){
  save(hDtf, file = dtf_file(dirPath, "head"))
}
# TRUE when the correct-score DTF file already exists.
isscore<-function(dirPath){
  file.exists(dtf_file(dirPath, "score"))
}
# Save the correct-score DTF object (csDtf).
scoremaker<- function(dirPath){
  save(csDtf, file = dtf_file(dirPath, "score"))
}
# TRUE when the first-period DTF file already exists.
isp1<-function(dirPath){
  file.exists(dtf_file(dirPath, "p1"))
}
# Save the first-period DTF object (p1Dtf).
p1maker<- function(dirPath){
  save(p1Dtf, file = dtf_file(dirPath, "p1"))
}
# TRUE when the second-period DTF file already exists.
isp2<-function(dirPath){
  file.exists(dtf_file(dirPath, "p2"))
}
# Save the second-period DTF object (p2Dtf).
p2maker<- function(dirPath){
  save(p2Dtf, file = dtf_file(dirPath, "p2"))
}
# TRUE when the half-time-total DTF file already exists.
isht<-function(dirPath){
  file.exists(dtf_file(dirPath, "ht"))
}
# Save the half-time-total DTF object (thtDtf).
htmaker<- function(dirPath){
  save(thtDtf, file = dtf_file(dirPath, "ht"))
}
# TRUE when the full-time-total DTF file already exists.
isft<-function(dirPath){
  file.exists(dtf_file(dirPath, "ft"))
}
# Save the full-time-total DTF object (tftDtf).
ftmaker<- function(dirPath){
  save(tftDtf, file = dtf_file(dirPath, "ft"))
}
# Tear down the helper functions loaded for DTF creation.
#
# NOTE(review): rm() inside a function targets the function's own frame by
# default, so these calls do not remove global objects -- confirm whether
# envir = .GlobalEnv was intended.
remover <- function(){
  # We only save the DTF objects, not the entire workspace, so this
  # cleanup is not strictly required.
  rm(list = c("dtf", "ndf", "DTFLoader", "predAtt_Loader", "diffFunc"))
  DTFRemover()
  dataStructRemover()
  predAtt_Remover()
  libRemover()
}
# Attach every modelling package used by the DTF estimation scripts
# (e1071 provides svm(); the rest are tree/ensemble learners).
libLoader <- function(){
  pkgs <- c("plyr", "e1071", "C50", "randomForest",
            "ipred", "RWeka", "rpart", "tree")
  for (pkg in pkgs) {
    library(pkg, character.only = TRUE)
  }
}
# Detach (and unload) every package attached by libLoader(), in the same
# order the original explicit detach() calls used.
libRemover <- function(){
  pkgs <- c("plyr", "e1071", "C50", "randomForest",
            "ipred", "RWeka", "rpart", "tree")
  for (pkg in pkgs) {
    detach(paste0("package:", pkg), unload = TRUE, character.only = TRUE)
  }
}
# Source the per-market cross-validation estimation scripts.
# The repository root differs between Linux and Windows hosts (decided via
# the global `os` set at the top of this file).
DTFLoader <- function(){
  base <- "/home/user/Git"  # default root on Linux
  if (grepl("win", tolower(os))) {
    base <- "C:"
  }
  scripts <- c("HeadCrfvEstimation.R",
               "ScoreCrfvEstimation.R",
               "P1CrfvEstimation.R",
               "P2CrfvEstimation.R",
               "totHtScoreCrfvEstimation.R",
               "totFtScoreCrfvEstimation.R")
  for (s in scripts) {
    source(paste0(base, "/TotalPrediction/", s))
  }
}
# Drop the functions defined by the estimation scripts.
# NOTE(review): rm() defaults to the current (function) frame, so this does
# not actually remove the global definitions -- confirm intent.
DTFRemover <- function(){
  rm(list = c("headCrfvInit", "headPredFunc", "headTreBestChoser", "headCrfv",
              "scoreCrfvInit", "scorePredFunc", "scoreTreBestChoser", "scoreCrfv",
              "p2CrfvInit", "p2PredFunc", "p2TreBestChoser", "p2Crfv",
              "p1CrfvInit", "p1PredFunc", "p1TreBestChoser", "p1Crfv",
              "fulltotFtBet", "fullTotFtNoBet", "differencedTotFtBet", "differencedTotFtNoBet",
              "totHtCrfvInit", "totHtPredFunc", "totHtTreBestChoser", "totHtCrfv"))
}
# Source the per-market attribute/prediction-dataset scripts.
# The repository root differs between Linux and Windows hosts (decided via
# the global `os` set at the top of this file).
predAtt_Loader <- function(){
  base <- "/home/user/Git"  # default root on Linux
  if (grepl("win", tolower(os))) {
    base <- "C:"
  }
  scripts <- c("Head_AttPredDataset.R",
               "Score_AttPredDataset.R",
               "P1_AttPredDataset.R",
               "P2_AttPredDataset.R",
               "totFt_AttPredDataset.R",
               "totHt_AttPredDataset.R")
  for (s in scripts) {
    source(paste0(base, "/TotalPrediction/", s))
  }
}
# Drop the functions/datasets defined by the attribute-dataset scripts.
# NOTE(review): rm() defaults to the current (function) frame, so this does
# not actually remove the global definitions -- confirm intent.
predAtt_Remover <- function(){
  rm(list = c("fullHeadBet", "fullHeadNoBet", "differencedHeadBet", "differencedHeadNoBet",
              "fullScoreBet", "fullScoreNoBet", "differencedScoreBet", "differencedScoreNoBet",
              "full2pBet", "full2pNoBet", "differenced2pBet", "differenced2pNoBet",
              "full1pBet", "full1pNoBet", "differenced1pBet", "differenced1pNoBet",
              "totFtCrfvInit", "totFtPredFunc", "totFtTreBestChoser", "totFtCrfv",
              "fullTotHtBet", "fullTotHtNoBet", "differencedTotHtBet", "differencedTotHtNoBet"))
}
# Source the script describing the structure of the DTF object.
# The repository root differs between Linux and Windows hosts (decided via
# the global `os` set at the top of this file).
dataStructLoader <- function(){
  base <- "/home/user/Git"  # default root on Linux
  if (grepl("win", tolower(os))) {
    base <- "C:"
  }
  # the file with the description of the structure of the DTF obj
  source(paste0(base, "/TotalPrediction/dataStructure.R"))
}
# Drop the definitions made by dataStructure.R.
# NOTE(review): rm() defaults to the current (function) frame, so this does
# not actually remove the global definitions -- confirm intent.
dataStructRemover <- function(){
  rm(list = c("Instance", "AlgoData", "CleanScoreDtf", "CleanHeadDtf",
              "Clean2pDtf", "Clean1pDtf", "CleanTotFtDtf", "CleanTotHtDtf",
              "modelFunc", "attDtsFunc", "scoreResultCount", "headResultCount",
              "p2ResultCount", "p1ResultCount", "totFtResultCount", "totHtResultCount"))
}
# Smoke-test helper: writes the numbers 1..v and then the elements of
# `vec`, one per line, into a scratch file under the repository root, and
# returns v + 4.
#
# @param v   number of integer lines to write (1..v)
# @param vec vector whose elements are appended afterwards
# @return v + 4
test <- function(v, vec){
  base <- "/home/user/Git"  # default root on Linux
  if (grepl("win", tolower(os))) {
    base <- "C:"
  }
  dir_nam <- paste0(base, '/ff1/ff2/ff3/ff5')
  fil_nam <- paste(dir_nam, 'marioFile.mar', sep = "/")
  # mode must be an octal STRING; numeric 753 would be read as decimal
  dir.create(dir_nam, recursive = TRUE, mode = "0753")
  # seq_len / direct iteration avoid the 1:0 trap when v == 0 or vec is
  # empty (1:length(vec) would index vec[1] and vec[0] on empty input).
  for (i in seq_len(v)) {
    write(i, fil_nam, append = TRUE)
  }
  for (x in vec) {
    write(x, fil_nam, append = TRUE)
  }
  v + 4
}
#----------------------------------------------------
#----------------------------------------------------
#----------------------------------------------------
|
2fa9516d586ad18b5c5faf49c31ae5dd6d792710
|
56ea0cd71b0982ca04835a6e7179f00ab1ff94d8
|
/DutchessStarter.R
|
60af26b48047e9d1503214698e929ac2ddffc634
|
[] |
no_license
|
alizzo/Traffic_Data_HVHackathon
|
bb4f617e9819c06669dfef91c25d36d13e32f3a2
|
5d91ec7968078e66e8a9a231f87b36ea53b47767
|
refs/heads/main
| 2023-09-01T21:53:49.524240
| 2021-10-30T19:25:08
| 2021-10-30T19:25:08
| 422,969,908
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,826
|
r
|
DutchessStarter.R
|
# Load Libraries
library(tidyverse)
library(sf)
library(leaflet)
library(viridis)
# Read in the Shape File
# Municipality boundary polygons for Dutchess County.
municipalities <- read_sf("./dutchesscounty4326/muni_boundaries-polygon.shp")
# Fire & EMS station points; X/Y columns become geometry in CRS 4326
# (WGS 84 -- lon/lat).
fire_ems <- read_csv("fire_ems_stations-point.csv") %>%
  st_as_sf(coords = c("X", "Y"),
           crs = 4326, agr = "field")
# Police station points, same coordinate handling.
police <- read_csv("police_stations.csv") %>%
  st_as_sf(coords = c("X", "Y"),
           crs = 4326, agr = "field")
# Transform the data
# Title-case each element of a character vector: every space-separated
# word gets an upper-case first letter and a lower-cased remainder.
# Names of `s` are used on the result only when `s` is named.
#
# @param s character vector
# @return character vector of the same length, title-cased
capwords <- function(s) {
  # Title-case one element's words (a character vector) and rejoin them.
  cap_one <- function(words) {
    paste(toupper(substring(words, 1, 1)),
          tolower(substring(words, 2)),
          sep = "", collapse = " ")
  }
  # vapply guarantees a character result even for empty input
  # (sapply would return an empty list there).
  vapply(strsplit(s, split = " "), cap_one, character(1),
         USE.NAMES = !is.null(names(s)))
}
# Municipalities dataset transformation to proper projection system (4326 or WGS 84),
# plus an HTML popup label per polygon.
municipalities_clean <- municipalities %>%
  st_transform(4326) %>%
  mutate(municipalities_label = paste0('<b>Location:</b> ',
                                       capwords(NAME)))
# Fire/EMS popup labels (city + address).
fire_ems_clean <- fire_ems %>%
  rename(ANA = `ANA`) %>%
  mutate(popup_label = paste(paste0('<b>City: ', MCN, '</b>'),
                             paste0('Address: ', AAD),
                             sep = '<br/>'))
# Police popup labels (department + address).
police_clean <- police %>%
  mutate(popup_label = paste(paste0('<b>Dept.: ', DEPARTMENT, '</b>'),
                             paste0('Address: ', LOCATION),
                             sep = '<br/>'))
# Build the Map Using Leaflet
# NOTE(review): this OVERWRITES the real MCN column with random categories
# (1..5) so the demo palette has five levels -- confirm this is intentional.
fire_ems_clean$MCN <- factor(sample.int(5L, nrow(fire_ems_clean), TRUE))
factpal <- colorFactor(topo.colors(5), fire_ems_clean$MCN)
# Interactive map: municipality polygons, colour-coded Fire-EMS circle
# markers and standard Police markers, with a layer control toggle.
map <-leaflet(fire_ems) %>%
  addTiles(group="Dutchess County") %>%
  addPolygons(data = municipalities_clean,
              color = 'white',
              weight = 1.5,
              opacity = 1,
              fillColor = 'black',
              fillOpacity = .8,
              highlightOptions = highlightOptions(color = "#FFF1BE",
                                                  weight = 5),
              popup = ~municipalities_label) %>%
  addCircleMarkers(data = fire_ems_clean,
                   popup = ~popup_label,
                   stroke = F,
                   radius = 4,
                   fillColor = ~factpal(MCN),
                   fillOpacity = 1,
                   group = "Fire-EMS") %>%
  addMarkers(data = police_clean,
             popup = ~popup_label,
             group = "Police") %>%
  addLayersControl(
    baseGroups = c("Dutchess County"),
    overlayGroups = c("Fire-EMS", "Police"),
    options=layersControlOptions(collapsed=FALSE))
# Render the map (last expression of the script).
map
|
74fa2efe453a83b8096ab45ba9e8de7595e0f224
|
bfde25295f3330b108ba93d5f90e71382627c639
|
/A2_submission.R
|
823ed92a98cfaf131aee71613f4fb8c2617e40b2
|
[] |
no_license
|
jgow68/CDA2
|
84630ec4ab76c339980e7fb3287acce3b2559cf6
|
6a18358dee4a37a4d743326cbd176f6d98f5937a
|
refs/heads/master
| 2021-01-22T22:20:19.133256
| 2017-03-30T06:20:27
| 2017-03-30T06:20:27
| 85,531,396
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,724
|
r
|
A2_submission.R
|
# Q1 ----------------------------------------------------------------------
dat = data.frame(interview=c("no", "yes"), cases=c(195,46), controls=c(979,370))
dat = data.frame(med_status=c("no", "yes"), cases=c(979,195), controls=c(370,46)) # cases is non-participation
dat
# Q1 - use med status as predictor ----------------------------------------
fm = glm(cbind(cases,controls) ~ med_status, family=binomial, data=dat)
summary(fm) # med status coef is significant
anova(fm, test="Chisq") # med status coef is significant, there is a relationship between participation and medical aid
exp(coef(fm)[2]) # odds of non-participation from mothers having med status is 1.602 higher than non med-status
# calculate CI of log odds, then transform back
lor_CI = coef(fm)[2] + c(-1,1)*qnorm(0.975)*sqrt(diag(vcov(fm))[2])
exp(lor_CI) # CI does not include 1, med-status is a risk factor for participation
dat_white = data.frame(med_status=c("no", "yes"), cases=c(104,22), controls=c(10,2))
dat_black = data.frame(med_status=c("no", "yes"), cases=c(91,957), controls=c(36,368))
(fm_white = glm(cbind(cases,controls) ~ med_status, family=binomial, data=dat_white))
(fm_black = glm(cbind(cases,controls) ~ med_status, family=binomial, data=dat_black))
# both shows that med-status doesnt affect participation
anova(fm_white, test="Chisq")
anova(fm_black, test="Chisq")
# participation may have higher dependence on black / white race instead on med_status
dat_race = data.frame(race=c("black", "white"), cases=c(1048, 126), controls=c(404, 12))
(fm_race = glm(cbind(cases,controls) ~ race, family=binomial, data=dat_race))
anova(fm_race, test="Chisq") # zero residual deviance, coef race is significant
dat_race_nomed = data.frame(race=c("black", "white"), cases=c(957, 22), controls=c(368, 22))
(fm_race_nomed = glm(cbind(cases,controls) ~ race, family=binomial, data=dat_race_nomed))
anova(fm_race_nomed, test="Chisq")
dat_race_med = data.frame(race=c("black", "white"), cases=c(91, 104), controls=c(36, 10))
(fm_race_med = glm(cbind(cases,controls) ~ race, family=binomial, data=dat_race_med))
anova(fm_race_med, test="Chisq")
# Q1 - initial use interview as predictor ---------------------------------
fm = glm(cbind(cases,controls) ~ interview, family=binomial, data=dat)
summary(fm) # interview coef is significant
anova(fm, test="Chisq") # interview coef is significant, there is a relationship between participation and medical aid
exp(coef(fm)[2]) # odds of having medical aid from interviewed mothers is 0.624 times the odds of not interviewed
# calculate CI of log odds, then transform back
lor_CI = coef(fm)[2] + c(-1,1)*qnorm(0.975)*sqrt(diag(vcov(fm))[2])
exp(lor_CI) # CI does not include 1, participation is a risk factor for medical aid involvment
plot(cases/(cases+controls) ~ interview, data=dat)
plot(predict(fm))
dat_white = data.frame(interview=c("no", "yes"), cases=c(104,10), controls=c(22,2))
dat_black = data.frame(interview=c("no", "yes"), cases=c(91,36), controls=c(957,368))
(fm_white = glm(cbind(cases,controls) ~ interview, family=binomial, data=dat_white))
(fm_black = glm(cbind(cases,controls) ~ interview, family=binomial, data=dat_black))
# both shows that participation doesnt affect the medical aid status
anova(fm_white, test="Chisq")
anova(fm_black, test="Chisq")
# med aid may have higher dependence on black / white race instead on participation
dat_race = data.frame(race=c("black", "yes"), cases=c(127, 114), controls=c(1325, 24))
(fm_race = glm(cbind(cases,controls) ~ race, family=binomial, data=dat_race))
anova(fm_race, test="Chisq") # zero residual deviance
# Q2 ----------------------------------------------------------------------
data_set2 = read.csv("task2.csv", header=T) #show the tas2.csv layout
str(data_set2)
# Q2a ---------------------------------------------------------------------
# smoking and family as response, rest as explanatory variables
# min model is (family, smoking, race:sex:age)
fm = glm(Count ~ (.)^5, data_set2, family=poisson())
summary(fm)
drop1(fm, test="Chisq")
fm = update(fm, .~. -Family:Race:Sex:Age:Smoking_I) # drop 5 way interaction (p-value 0.05644)
drop1(fm, test="Chisq") # drop Family:Race:Sex:Age, p-value=0.7923
fm = update(fm, .~. -Family:Race:Sex:Age)
drop1(fm, test="Chisq") # drop Family:Race:Sex:Smoking_I, p-value 0.5966
fm = update(fm, .~. -Family:Race:Sex:Smoking_I)
drop1(fm, test="Chisq") # drop Family:Race:Sex, p-value 0.7459
fm = update(fm, .~. -Family:Race:Sex)
drop1(fm, test="Chisq") # drop ace:Sex:Age:Smoking_I, p-value 0.61798
fm = update(fm, .~. -Race:Sex:Age:Smoking_I)
drop1(fm, test="Chisq") # can't drop Race:Sex:Age, drop Race:Sex:Smoking_I, pva=0.57454
fm = update(fm, .~. -Race:Sex:Smoking_I)
drop1(fm, test="Chisq") # can't drop Race:Sex:Age, drop Family:Sex:Age:Smoking_I, pva=0.34809
fm = update(fm, .~. -Family:Sex:Age:Smoking_I)
drop1(fm, test="Chisq") # can't drop Race:Sex:Age, drop Sex:Age:Smoking_I , pva=0.81672
fm = update(fm, .~. -Sex:Age:Smoking_I)
drop1(fm, test="Chisq") # can't drop Race:Sex:Age, drop Family:Sex:Age , pva=0.29404
fm = update(fm, .~. -Family:Sex:Age)
drop1(fm, test="Chisq") # can't drop Race:Sex:Age, drop Family:Sex:Smoking_I , pva=0.12656
fm = update(fm, .~. -Family:Sex:Smoking_I)
drop1(fm, test="Chisq") # can't drop Race:Sex:Age, drop Family:Sex , pva=0.46289
fm = update(fm, .~. -Family:Sex)
drop1(fm, test="Chisq") # can't drop Race:Sex:Age, drop Sex:Smoking_I, pva=0.1887
fm = update(fm, .~. -Sex:Smoking_I)
drop1(fm, test="Chisq") # can't drop Race:Sex:Age, drop Family:Race:Age:Smoking_I, pva=0.0847
fm = update(fm, .~. -Family:Race:Age:Smoking_I)
drop1(fm, test="Chisq") # can't drop Race:Sex:Age, drop Family:Age:Smoking_I, pva=0.95417
fm = update(fm, .~. -Family:Age:Smoking_I)
drop1(fm, test="Chisq") # can't drop Race:Sex:Age, drop Family:Race:Age , pva=0.8473
fm = update(fm, .~. -Family:Race:Age )
drop1(fm, test="Chisq") # can't drop Race:Sex:Age, drop Race:Age:Smoking_I , pva=0.2882
fm = update(fm, .~. -Race:Age:Smoking_I )
drop1(fm, test="Chisq") # can't drop Race:Sex:Age, drop Family:Age , pva=0.26258
fm = update(fm, .~. -Family:Age)
drop1(fm, test="Chisq") # can't drop Race:Sex:Age, drop Family:Race:Smoking_I, pva=0.05113
fm = update(fm, .~. -Family:Race:Smoking_I)
drop1(fm, test="Chisq") # can't drop Race:Sex:Age, drop Race:Smoking_I, pva=0.450285
fm = update(fm, .~. -Race:Smoking_I)
drop1(fm, test="Chisq") # can't drop Race:Sex:Age, cant drop any more
# Final model: Count ~ Family + Race + Sex + Age + Smoking_I + Family:Race +
# Family:Smoking_I + Race:Sex + Race:Age + Sex:Age + Age:Smoking_I + Race:Sex:Age
pchisq(fm$deviance, fm$df.residual, lower.tail=F) # pval 0.334 reject H0, model is adequate
# state the conditional independence structure in the selected model
# Q2b ---------------------------------------------------------------------
# smoking as response, rest as explanatory variables
# min model is (smoking, family:race:sex:age)
# Q2b: refit the saturated Poisson log-linear model (all terms up to the
# 5-way interaction) and repeat backward elimination, this time keeping the
# minimal model for smoking as a response: (Smoking_I, Family:Race:Sex:Age).
fm = glm(Count ~ (.)^5, data_set2, family=poisson())
summary(fm)
drop1(fm, test="Chisq")
fm = update(fm, .~. -Family:Race:Sex:Age:Smoking_I) # drop 5 way interaction (p-value 0.05644)
drop1(fm, test="Chisq") # cannot drop Family:Race:Sex:Age , drop Family:Race:Sex:Smoking_I, pval 0.6502
fm = update(fm, .~. -Family:Race:Sex:Smoking_I)
drop1(fm, test="Chisq") # cannot drop Family:Race:Sex:Age , drop Race:Sex:Age:Smoking_I, pval 0.64504
fm = update(fm, .~. -Race:Sex:Age:Smoking_I)
drop1(fm, test="Chisq") # cannot drop Family:Race:Sex:Age , drop Race:Sex:Smoking_I , pval 0.53462
fm = update(fm, .~. -Race:Sex:Smoking_I )
drop1(fm, test="Chisq") # cannot drop Family:Race:Sex:Age , drop Family:Sex:Age:Smoking_I , pval 0.36547
fm = update(fm, .~. -Family:Sex:Age:Smoking_I)
drop1(fm, test="Chisq") # cannot drop Family:Race:Sex:Age , drop Sex:Age:Smoking_I , pval 0.79750
fm = update(fm, .~. -Sex:Age:Smoking_I)
drop1(fm, test="Chisq") # cannot drop Family:Race:Sex:Age , drop Family:Sex:Smoking_I, pval 0.10118
fm = update(fm, .~. -Family:Sex:Smoking_I)
drop1(fm, test="Chisq") # cannot drop Family:Race:Sex:Age , drop Sex:Smoking_I , pval 0.1994
fm = update(fm, .~. -Sex:Smoking_I )
drop1(fm, test="Chisq") # cannot drop Family:Race:Sex:Age , drop Family:Race:Age:Smoking_I, pval 0.0847
fm = update(fm, .~. -Family:Race:Age:Smoking_I)
drop1(fm, test="Chisq") # cannot drop Family:Race:Sex:Age , drop Family:Age:Smoking_I, pval 0.95417
fm = update(fm, .~. -Family:Age:Smoking_I)
drop1(fm, test="Chisq") # cannot drop Family:Race:Sex:Age , drop Race:Age:Smoking_I , pval 0.2845
fm = update(fm, .~. -Race:Age:Smoking_I)
drop1(fm, test="Chisq") # cannot drop Family:Race:Sex:Age , drop Family:Race:Smoking_I, pval 0.05025
fm = update(fm, .~. -Family:Race:Smoking_I)
drop1(fm, test="Chisq") # cannot drop Family:Race:Sex:Age , drop Race:Smoking_I , pval 0.46187
fm = update(fm, .~. -Race:Smoking_I )
drop1(fm, test="Chisq") # cannot drop Family:Race:Sex:Age , drop Age:Smoking_I , pval 0.05157
fm = update(fm, .~. -Age:Smoking_I)
drop1(fm, test="Chisq") # cannot drop Family:Race:Sex:Age , cant drop any other predictors
summary(fm)
# Deviance goodness-of-fit: p-value above 0.05 => FAIL to reject H0, so the
# selected model fits adequately.
pchisq(fm$deviance, fm$df.residual, lower.tail=F) # pval 0.079: fail to reject H0, model is adequate
# residuals test
library(boot)
fm.diag = glm.diag(fm)
# Standardized Pearson residuals laid out over all factor combinations.
round(ftable(xtabs(fm.diag$rp ~ Family + Race + Sex + Age + Smoking_I, data=data_set2)),2)
# overest smokers from mother family, black, female, age<13
# underest smokers from mother family, black, female, age>13
cbind(data_set2$Count,fitted(fm)) # check the fitted values against data
# Count ~ Family + Race + Sex + Age + Smoking_I + Family:Race +
# Family:Sex + Family:Age + Family:Smoking_I + Race:Sex + Race:Age + Sex:Age + Family:Race:Sex +
# Family:Race:Age + Family:Sex:Age + Race:Sex:Age + Family:Race:Sex:Age
tmp = xtabs(Count ~ Family + Race + Sex + Age + Smoking_I, data=data_set2)
tmp
ftable(tmp)
# Two-way marginal tables: collapse the 5-way table over every factor except
# the named pair, then compute odds ratios for smoking against each factor.
Race_Smoking = apply(tmp, c("Race", "Smoking_I"), sum)
Family_Smoking = apply(tmp, c("Family", "Smoking_I"), sum)
Sex_Smoking = apply(tmp, c("Sex", "Smoking_I"), sum)
Age_Smoking = apply(tmp, c("Age", "Smoking_I"), sum)
library(vcd)
or_race_smoking = oddsratio(Race_Smoking, log=F) # odds of white kids smoking is 0.9894 times the odds of black kids
or_family_smoking = oddsratio(Family_Smoking, log=F) # odds of children from family with mother only to smoke is 1.7467 times the odds of children from family with both parents
or_sex_smoking = oddsratio(Sex_Smoking, log=F) # odds of Male smoking is 0.8322 times the odds of Females?? SURPRISING!!
or_age_smoking = oddsratio(Age_Smoking, log=F) # odds of children >=13 smoking is 1.496 times the odds of childre <12
# sample size dominated by both parents, white
confint(or_race_smoking) # CI incl. 1
confint(or_family_smoking) # CI do not incl. 1
confint(or_sex_smoking) # CI incl. 1
confint(or_age_smoking) # CI do not incl. 1
# family and age significantly affects smoking patterns at 95% confidence level
# state the logit model equivalent to the selected loglinear model
# prepare the logit data set
dat.logit = cbind(expand.grid(A=levels(data_set2$Age), S=levels(data_set2$Sex),# need to relevel Sex, default set Female as first level
R=levels(data_set2$Race), F=levels(data_set2$Family)),
SN=data_set2$Count[data_set2$Smoking_I=="none"], SS=data_set2$Count[data_set2$Smoking_I=="some"])
# NOTE(review): the relevel below runs *after* dat.logit is built from
# levels(data_set2$Sex); if the new level order was meant to affect the grid
# (as the comment above suggests), it should come first -- confirm intent.
data_set2$Sex = relevel(data_set2$Sex, "Male")
dat.logit
fm.logit = glm(cbind(SN, SS) ~ F+R+S+A, dat.logit, family=binomial)
fm.logit$deviance; fm$deviance
summary(fm.logit)$call;summary(fm)$call # logit much simpler than log linear
update(fm.logit, .~. -A-R-S, dat.logit )$deviance; fm$deviance # matched log linear deviance
|
6b73ad3a1e1cc5a75fa2003406c68c7a11aeaec4
|
8b1d00ae218ab6b4c300083fb179bbc3d441e221
|
/scripts/topGO_vignetteTrial.R
|
f0b921b2cce9449de5b65572defe60bf79654420
|
[] |
no_license
|
davetgerrard/LiverProteins
|
79d1aa8ee265b27c6b161b22e189a4cba891dca6
|
d99c0adab3c71579151d72eb7a5ebb65921209d9
|
refs/heads/master
| 2021-01-01T17:58:07.123517
| 2011-08-05T15:12:10
| 2011-08-05T15:12:10
| 1,941,622
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,976
|
r
|
topGO_vignetteTrial.R
|
#########################################
# #
# Dave Gerrard #
# University of Manchester #
# 2011 #
# #
#########################################
# topGO vignette "quick start": build a topGOdata object from the ALL
# leukemia expression set, run classic Fisher / classic KS / elim KS
# enrichment tests, and compare the classic vs elim p-values in a scatterplot.
library(topGO)
library(ALL)
data(ALL)
data(geneList)  # per the topGO vignette, this also provides topDiffGenes()
affyLib <- paste(annotation(ALL), "db", sep = ".")
library(package = affyLib, character.only = TRUE)
sum(topDiffGenes(geneList))
sampleGOdata <- new("topGOdata", description = "Simple session", ontology = "BP",
    allGenes = geneList, geneSel = topDiffGenes, nodeSize = 10, annot = annFUN.db,
    affyLib = affyLib)
sampleGOdata
resultFisher <- runTest(sampleGOdata, algorithm = "classic", statistic = "fisher")
resultFisher
resultKS <- runTest(sampleGOdata, algorithm = "classic", statistic = "ks")
resultKS.elim <- runTest(sampleGOdata, algorithm = "elim", statistic = "ks")
allRes <- GenTable(sampleGOdata, classicFisher = resultFisher, classicKS = resultKS,
    elimKS = resultKS.elim, orderBy = "elimKS", ranksOf = "classicFisher",
    topNodes = 10)
pValue.classic <- score(resultKS)
pValue.elim <- score(resultKS.elim)[names(pValue.classic)]
gstat <- termStat(sampleGOdata, names(pValue.classic))
# Point size proportional to how many genes annotate each GO term (max 4).
gSize <- gstat$Annotated/max(gstat$Annotated) * 4

# Map per-term significant-gene counts to heat colors (hotter = more genes).
# BUG FIX: colMap() must be defined *before* its first use -- the original
# called it one line before the definition, which errors in a fresh session
# (the duplicated gCol/plot lines below it were an error-recovery artifact
# and have been dropped).  Also spelled out rep()'s `times` argument (the
# original relied on partial matching of `time=`) and used seq_along().
colMap <- function(x) {
    .col <- rep(rev(heat.colors(length(unique(x)))), times = table(x))
    .col[match(seq_along(x), order(x))]
}
gCol <- colMap(gstat$Significant)

plot(pValue.classic, pValue.elim, xlab = "p-value classic", ylab = "p-value elim",
    pch = 19, cex = gSize, col = gCol)
# Browse the available GO Biological Process term IDs (GOBPTerm environment).
BPterms <- ls(GOBPTerm)
head(BPterms)
# Filter the ALL expression set: keep probes expressed above log2(100) in at
# least 20% of samples and with IQR > 0.25.
library(genefilter)
selProbes <- genefilter(ALL, filterfun(pOverA(0.2, log2(100)), function(x) (IQR(x) >
0.25)))
eset <- ALL[selProbes, ]
selProbes
eset
# NOTE(review): hard-coded absolute Windows path -- only works on the
# author's machine; system.file("examples/ensembl2go.map", package = "topGO")
# would be portable.
geneID2GO <- readMappings(file="C:/Users/dave/Documents/R/win-library/2.11/topGO/examples/ensembl2go.map") ## replaced the file name here
geneID2GO
str(head(geneID2GO))
GO2geneID <- inverseList(geneID2GO)
str(head(GO2geneID))
geneNames <- names(geneID2GO)
head(geneNames)
# Pick a random 10% of genes as the "interesting" set (no set.seed, so the
# selection is not reproducible across runs).
myInterestingGenes <- sample(geneNames, length(geneNames)/10)
geneList <- factor(as.integer(geneNames %in% myInterestingGenes))
names(geneList) <- geneNames
str(geneList)
GOdata <- new("topGOdata", ontology = "MF", allGenes = geneList, annot = annFUN.gene2GO,
gene2GO = geneID2GO)
GOdata
# Class labels: 1 for T-cell samples (BT code starts with "T"), 0 for B-cell.
y <- as.integer(sapply(eset$BT, function(x) return(substr(x, 1, 1) == "T")))
table(y)
y
# Per-gene p-values for the B-cell vs T-cell comparison.
geneList <- getPvalues(exprs(eset), classlabel = y, alternative = "greater")
geneList
# Gene-selection predicate handed to topGOdata: TRUE for genes whose
# score (here a p-value) falls below the 0.01 cutoff.
topDiffGenes <- function(allScore) {
  allScore < 0.01
}
# Count genes passing the 0.01 p-value cutoff, then build the BP topGOdata
# object for the B-cell vs T-cell comparison (nodeSize = 5 prunes GO terms
# with fewer than 5 annotated genes).
x <- topDiffGenes(geneList)
sum(x)
GOdata <- new("topGOdata", description = "GO analysis of ALL data; B-cell vs T-cell",
ontology = "BP", allGenes = geneList, geneSel = topDiffGenes, annot = annFUN.db,
nodeSize = 5, affyLib = affyLib)
#############more detritus
#######THIS TRIAL SECTION WORKS
# Trial: build a topGOdata object directly from org.Hs.eg.db Entrez->GO
# annotations (no expression data), then compare classic vs elim KS tests.
library(topGO)
library(org.Hs.eg.db)
go2entrez <- annFUN.org("BP", mapping = "org.Hs.eg.db", ID = "Entrez")
allGenes <- unique(unlist(go2entrez ))
# Random 400-gene subset with dummy scores 1..400 (no set.seed, so not
# reproducible across runs).
someGenes <- sample(allGenes,400)
geneList <- 1:length(someGenes)
names(geneList) <- someGenes
# NOTE(review): relies on a topDiffGenes() defined elsewhere in this file.
GOdata <- new("topGOdata",
description = "Liver proteins data set",
ontology = "BP",
allGenes = geneList,
geneSelectionFun = topDiffGenes,
nodeSize = 5,
annot = annFUN.org,
mapping = "org.Hs.eg.db",
ID = "Entrez")
# Classic vs elim Kolmogorov-Smirnov enrichment, summarized in one table.
test.stat <- new("classicScore", testStatistic = GOKSTest, name = "KS tests")
resultKS <- getSigGroups(GOdata, test.stat)
test.stat <- new("elimScore", testStatistic = GOKSTest, name = "Fisher test", cutOff = 0.01)
resultElim <- getSigGroups(GOdata, test.stat)
allRes <- GenTable(GOdata, KS = resultKS , elim=resultElim, orderBy = "elim", ranksOf = "KS", topNodes=20)
#################
#prot_list <- list()
# Unique protein accessions from the protein->GO annotation table.
# NOTE(review): protGoMap is not created in this file chunk -- it is assumed
# to already exist in the session.
prot_list <- unique(protGoMap$DB_Object_ID)
# Return every GO term ID annotated to a single protein accession.
# `dataFrame` must carry DB_Object_ID and GO_ID columns (GAF-style mapping);
# `thisProt` is one accession.  Returns a zero-length vector when absent.
listGoPerProt <- function(dataFrame, thisProt) {
  hits <- which(dataFrame$DB_Object_ID == thisProt)
  dataFrame$GO_ID[hits]
}
# Build protein->GO and GO->protein mapping lists for topGO.
prot2go <- lapply(prot_list,FUN = function (x) listGoPerProt(protGoMap,x))
names(prot2go) <- prot_list
# Round-trip through inverseList() so both directions stay consistent.
go2prot <- inverseList(prot2go)
prot2go <- inverseList(go2prot)
#topGoToProt <- annFUN.GO2genes("BP", feasibleGenes = NULL, go2prot)
#topGoProtToGo.BP <- annFUN.gene2GO("BP", feasibleGenes = NULL, prot2go )
allGenes <- unique(names(prot2go ))
# Random 400-protein subset with dummy scores 1..400 (no set.seed, so not
# reproducible across runs).
someGenes <- sample(allGenes,400)
geneList <- 1:length(someGenes)
names(geneList) <- someGenes
#geneList <- 1:length(prot_list)
#names(geneList) <- prot_list
# topGOdata built from the custom GO->protein mapping rather than a chip
# annotation package.  Relies on a topDiffGenes() defined elsewhere in file.
GOdata <- new("topGOdata",
description = "Liver proteins data set",
ontology = "BP",
allGenes = geneList,
geneSelectionFun = topDiffGenes,
nodeSize = 5,
annot = annFUN.GO2genes,
GO2genes=go2prot
)
# Classic vs elim Kolmogorov-Smirnov enrichment, summarized in one table.
test.stat <- new("classicScore", testStatistic = GOKSTest, name = "KS tests")
resultKS <- getSigGroups(GOdata, test.stat)
test.stat <- new("elimScore", testStatistic = GOKSTest, name = "Fisher test", cutOff = 0.01)
resultElim <- getSigGroups(GOdata, test.stat)
allRes <- GenTable(GOdata, KS = resultKS , elim=resultElim, orderBy = "elim", ranksOf = "KS", topNodes=20)
######DEVELOPMENT
# Scratch/development notes: exploring Entrez<->Uniprot and GO mappings.
sampleGOdata <- new("topGOdata", description = "Simple session", ontology = "BP",
allGenes = geneList, geneSel = topDiffGenes, nodeSize = 10, annot = annFUN.db,
affyLib = affyLib)
library(org.Hs.eg.db) # not sure if this is already loaded.
raw.ent2up <- org.Hs.egUNIPROT
# Get the entrez gene IDs that are mapped to a Uniprot ID
mapped_genes <- mappedkeys(raw.ent2up)
# Convert to a list
ent2up <- as.list(raw.ent2up[mapped_genes])
#ent2up [1:5]
#inverseList(ent2up) ## could use this to get entrez IDs from Uniprot?
up2entrez <- inverseList(ent2up)
# NOTE(review): proteinByLiverSample is not defined in this file chunk --
# these two lookups assume it exists in the session.
up2entrez[[proteinByLiverSample$spAccession]]
up2entrez[proteinByLiverSample$spAccession]
## some proteins have multiple gene identifiers.
go2entrez <- annFUN.org("BP", mapping = "org.Hs.eg.db", ID = "Entrez")
head(go2entrez)
en2go <- inverseList(go2entrez)
##
BPterms <- ls(GOBPTerm) # loaded with topGO
GOdata <- new("topGOdata", ontology = "MF", allGenes = geneList, annot = annFUN.gene2GO,
gene2GO = geneID2GO)
y <- as.integer(sapply(eset$BT, function(x) return(substr(x, 1, 1) == "T")))
allGenes <- unique(unlist(go2entrez ))
#myInterestedGenes <- sample(allGenes, 500)
#geneList <- factor(as.integer(allGenes
#names(geneList) <- allGenes
################# From Using GO
# Interactive lookups of GO term metadata plus BP/MF ancestor, parent and
# child relations for a handful of term IDs.
GOTERM$"GO:0019083"
GOBPANCESTOR$"GO:0019083"
GOBPPARENTS$"GO:0019083"
GOMFCHILDREN$"GO:0019083"
GOTERM$"GO:0019084"
GOTERM$"GO:0019085"
GOTERM$"GO:0006350"
GOTERM$"GO:0019080"
GOTERM$"GO:0000003"
GOBPPARENTS$"GO:0019084"
GOTERM$"GO:0009299"
GOTERM$"GO:0019083"
#######THIS TRIAL SECTION WORKS
# Second working trial: topGOdata built from org.Hs.eg.db BP annotations,
# with the selection predicate (re)defined just below.
library(topGO)
library(org.Hs.eg.db)
go2entrez <- annFUN.org("BP", mapping = "org.Hs.eg.db", ID = "Entrez")
allGenes <- unique(unlist(go2entrez ))
# Gene-selection predicate for topGOdata: flags every score greater than 1
# (with the dummy 1..400 scores below, this selects all but the first gene).
topDiffGenes <- function(allScore) {
  allScore > 1
}
# Random 400-gene subset scored 1..400 (no set.seed, so not reproducible).
someGenes <- sample(allGenes,400)
geneList <- 1:length(someGenes)
names(geneList) <- someGenes
GOdata <- new("topGOdata",
description = "Liver proteins data set",
ontology = "BP",
allGenes = geneList,
geneSelectionFun = topDiffGenes,
nodeSize = 5,
annot = annFUN.org,
mapping = "org.Hs.eg.db",
ID = "Entrez")
# Classic vs elim Kolmogorov-Smirnov enrichment, summarized in one table.
test.stat <- new("classicScore", testStatistic = GOKSTest, name = "KS tests")
resultKS <- getSigGroups(GOdata, test.stat)
test.stat <- new("elimScore", testStatistic = GOKSTest, name = "Fisher test", cutOff = 0.01)
resultElim <- getSigGroups(GOdata, test.stat)
allRes <- GenTable(GOdata, KS = resultKS , elim=resultElim, orderBy = "elim", ranksOf = "KS", topNodes=20)
#goID <- allRes[1, "GO.ID"]
#print(showGroupDensity(GOdata, goID, ranks = TRUE)) ## doesn't work unless Bioconductor annotated chip.
#showSigOfNodes(GOdata, score(resultKS), firstSigNodes = 5, useInfo = "all")
#showSigOfNodes(GOdata, score(resultWeight), firstSigNodes = 5, useInfo = "def")
#printGraph(GOdata, resultKS, firstSigNodes = 5, fn.prefix = "tGO", useInfo = "all", pdfSW = TRUE)
#printGraph(GOdata, resultElim, firstSigNodes = 5, fn.prefix = "tGO", useInfo = "def", pdfSW = TRUE)
#####END OF TRIAL SECTION
##############from the vignette
# Second, verbatim copy of the topGO vignette "quick start" (duplicates the
# section at the top of this file): build a BP topGOdata from the ALL data
# and run classic Fisher / classic KS / elim KS enrichment tests.
library(topGO)
library(ALL)
data(ALL)
data(geneList)
affyLib <- paste(annotation(ALL), "db", sep = ".")
library(package = affyLib, character.only = TRUE)
sum(topDiffGenes(geneList))
sampleGOdata <- new("topGOdata", description = "Simple session", ontology = "BP",
allGenes = geneList, geneSel = topDiffGenes, nodeSize = 10, annot = annFUN.db,
affyLib = affyLib)
sampleGOdata
resultFisher <- runTest(sampleGOdata, algorithm = "classic", statistic = "fisher")
resultFisher
resultKS <- runTest(sampleGOdata, algorithm = "classic", statistic = "ks")
resultKS.elim <- runTest(sampleGOdata, algorithm = "elim", statistic = "ks")
allRes <- GenTable(sampleGOdata, classicFisher = resultFisher, classicKS = resultKS,
elimKS = resultKS.elim, orderBy = "elimKS", ranksOf = "classicFisher",
topNodes = 10)
# Compare classic vs elim KS p-values for every scored GO term.
pValue.classic <- score(resultKS)
pValue.elim <- score(resultKS.elim)[names(pValue.classic)]
gstat <- termStat(sampleGOdata, names(pValue.classic))
# Point size proportional to how many genes annotate each GO term (max 4).
gSize <- gstat$Annotated/max(gstat$Annotated) * 4

# Map per-term significant-gene counts to heat colors (hotter = more genes).
# BUG FIX: colMap() must be defined *before* its first use -- the original
# called it one line before the definition, which errors in a fresh session
# (the duplicated gCol/plot lines below it were an error-recovery artifact
# and have been dropped).  Also spelled out rep()'s `times` argument (the
# original relied on partial matching of `time=`) and used seq_along().
colMap <- function(x) {
    .col <- rep(rev(heat.colors(length(unique(x)))), times = table(x))
    .col[match(seq_along(x), order(x))]
}
gCol <- colMap(gstat$Significant)

plot(pValue.classic, pValue.elim, xlab = "p-value classic", ylab = "p-value elim",
    pch = 19, cex = gSize, col = gCol)
# Browse the available GO Biological Process term IDs (GOBPTerm environment).
BPterms <- ls(GOBPTerm)
head(BPterms)
# Filter the ALL expression set: keep probes expressed above log2(100) in at
# least 20% of samples and with IQR > 0.25.
library(genefilter)
selProbes <- genefilter(ALL, filterfun(pOverA(0.2, log2(100)), function(x) (IQR(x) >
0.25)))
eset <- ALL[selProbes, ]
selProbes
eset
# NOTE(review): hard-coded absolute Windows path -- only works on the
# author's machine.
geneID2GO <- readMappings(file="C:/Users/dave/Documents/R/win-library/2.11/topGO/examples/ensembl2go.map") ## replaced the file name here
geneID2GO
str(head(geneID2GO))
GO2geneID <- inverseList(geneID2GO)
str(head(GO2geneID))
geneNames <- names(geneID2GO)
head(geneNames)
# Random 10% "interesting" genes (no set.seed, so not reproducible).
myInterestingGenes <- sample(geneNames, length(geneNames)/10)
geneList <- factor(as.integer(geneNames %in% myInterestingGenes))
names(geneList) <- geneNames
str(geneList)
GOdata <- new("topGOdata", ontology = "MF", allGenes = geneList, annot = annFUN.gene2GO,
gene2GO = geneID2GO)
GOdata
# Class labels: 1 for T-cell samples (BT code starts with "T"), 0 for B-cell.
y <- as.integer(sapply(eset$BT, function(x) return(substr(x, 1, 1) == "T")))
table(y)
y
# Per-gene p-values for the B-cell vs T-cell comparison.
geneList <- getPvalues(exprs(eset), classlabel = y, alternative = "greater")
geneList
# Gene-selection predicate handed to topGOdata: TRUE for genes whose
# score (here a p-value) falls below the 0.01 cutoff.
topDiffGenes <- function(allScore) {
  allScore < 0.01
}
# Count genes passing the 0.01 p-value cutoff, then build the BP topGOdata
# object for the B-cell vs T-cell comparison (nodeSize = 5 prunes GO terms
# with fewer than 5 annotated genes).
x <- topDiffGenes(geneList)
sum(x)
GOdata <- new("topGOdata", description = "GO analysis of ALL data; B-cell vs T-cell",
ontology = "BP", allGenes = geneList, geneSel = topDiffGenes, annot = annFUN.db,
nodeSize = 5, affyLib = affyLib)
|
b063efeaeca24f59b0cd0f142f50d067bbb23248
|
aa103303a64aac3a17160833d8136cd6a7bd21a4
|
/LinearAlgebra/Unit_01/vectors.R
|
5cf3d6339f621f19e74c782d034708e6f431e69e
|
[] |
no_license
|
anhnguyendepocen/MSDS-Supervised-Learning
|
4e9ed791d686eae2bb7519a563ad05f9b5c98b5d
|
67dc32d564bdcd498137620f1efbbf2445a87364
|
refs/heads/master
| 2022-02-22T08:08:05.342690
| 2019-10-11T23:08:32
| 2019-10-11T23:08:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 63
|
r
|
vectors.R
|
# Build a 3x3 matrix (column-by-column fill) and compute its determinant.
m <- matrix(
  c(1,  2, -1,
    3, -4,  5,
    1, -8,  7),
  nrow = 3
)
det(m)  # this particular matrix is singular, so the determinant is 0
|
63c063b794ab78a9ccb77b1ce78a2720bf542650
|
7dd0f3d19b98750e34d2dfa62533cbd50fa18db5
|
/plot5.R
|
132937d2abedd8ebc32979857bb41048e6541510
|
[] |
no_license
|
gregorypierce/datascience-exdata-courseproject2
|
e551f29fb5ee9b87ffd24526efcf03ef569a39ec
|
4445d60d26a6b30c37fa6f5f6e168ff0b3da9e56
|
refs/heads/master
| 2016-09-01T20:51:24.448542
| 2015-07-25T05:37:54
| 2015-07-25T05:37:54
| 39,669,223
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,090
|
r
|
plot5.R
|
library(ggplot2)

# Plot 5: yearly PM2.5 emissions from motor-vehicle sources in Baltimore City
# (fips 24510) for 1999-2008, saved to plot5.png.

## set the project Directory
projectDirectory <- "/projects/datascience/exploraratorydata/courseproject2"
dataDirectory <- file.path(projectDirectory, "data")
setwd(projectDirectory)

## This first line will likely take a few seconds. Be patient!
NEI <- readRDS(file.path(dataDirectory, "summarySCC_PM25.rds"))
SCC <- readRDS(file.path(dataDirectory, "Source_Classification_Code.rds"))

## Identify motor-vehicle source codes via the SCC level-two description,
## then keep only the NEI records for those codes.
isVehicle <- grepl("vehicle", SCC$SCC.Level.Two, ignore.case = TRUE)
vehiclesSCC <- SCC$SCC[isVehicle]
vehiclesNEI <- NEI[NEI$SCC %in% vehiclesSCC, ]

## Restrict to Baltimore City records.
baltimoreNEI <- vehiclesNEI[vehiclesNEI$fips == 24510, ]

baltimoreNEIPlot <- ggplot(baltimoreNEI, aes(factor(year), Emissions)) +
  geom_bar(stat = "identity", fill = "grey") +
  theme_bw() +
  guides(fill = FALSE) +
  labs(x = "Year", y = expression("Total PM2.5 Emissions (Tons)")) +
  labs(title = expression("PM2.5 Motor Vehicle Source Emissions from 1999 - 2008 (Baltimore)"))

print(baltimoreNEIPlot)

## Copy the on-screen plot to a PNG file and close the device.
dev.copy(png, 'plot5.png')
dev.off()
|
0abead7ffa99418df8fc1dfbc532d93b0649eeff
|
97e3baa62b35f2db23dcc7f386ed73cd384f2805
|
/inst/app/app.R
|
d2dbc951cbdf95b988aebd26bf6b3eb7496089f0
|
[] |
no_license
|
conservation-decisions/smsPOMDP
|
a62c9294fed81fcecc4782ac440eb90a299bca44
|
48b6ed71bdc7b2cb968dc36cd8b2f18f0e48b466
|
refs/heads/master
| 2021-06-25T22:23:31.827056
| 2020-10-27T08:56:07
| 2020-10-27T08:56:07
| 161,746,931
| 7
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 36,169
|
r
|
app.R
|
source("helper.R")
## UI ####
# Shiny UI definition for the smsPOMDP app (shinydashboard layout).
# Left column: POMDP model parameters (persistence/detection probabilities,
# costs, and preset case studies).  Right column: plot controls plus three
# plotly panels (past actions, observations/belief, expected rewards).
# Pop-up help for each input is wired through bsplus modals.
ui <- shinydashboard::dashboardPage(
title = "POMDP solver: When to stop managing or surveying cryptic threatened species ?",
# HEADER #############################
shinydashboard::dashboardHeader(
title = "smsPOMDP",
# Header links: user guidance PDF and the project's GitHub page.
shiny::tags$li(
a(
strong("Building an app"),
href = "guidance.pdf",
height = 40,
title = "",
target = "_blank"
),
class = "dropdown"
),
shiny::tags$li(
a(
strong("ABOUT smsPOMDP"),
height = 40,
href = "https://github.com/conservation-decisions/smsPOMDP",
title = "",
target = "_blank"
),
class = "dropdown"
)
),
# SIDEBAR #####################
shinydashboard::dashboardSidebar(disable = TRUE),
# BODY ##############################
shinydashboard::dashboardBody(
# Styling for the red messages produced by shiny::validate() failures.
shiny::tags$head(shiny::tags$style(shiny::HTML(".shiny-output-error-validation
{color: red; font-size: large; font-weight: bold;}"))),
shiny::fluidRow(
#POMDP PARAMETERS ####
# NOTE(review): id "primariy2" (sic) is matched by the CSS selector further
# below -- keep the two in sync if the typo is ever corrected.
shiny::tags$div(class = "another-box", id = "primariy2",
shinydashboard::box(
title = "POMDP parameters", width = 3, solidHeader = TRUE, status = "primary",
# Probabilities ####
shiny::h3("Probabilities"),
bsplus::shinyInput_label_embed(
shiny::numericInput('p0', 'Local probability of persistence (if survey or stop)',
min = 0, max = 1, value = 0.9),
bsplus::bs_attach_modal(bsplus::shiny_iconlink(),id_modal = "modal_p0")
),
bsplus::shinyInput_label_embed(
shiny::numericInput('pm', 'Local probability of persistence (if manage)',
min = 0, max = 1, value = 0.94184),
bsplus::bs_attach_modal(bsplus::shiny_iconlink(),
id_modal = "modal_pm")
),
bsplus::shinyInput_label_embed(
shiny::numericInput('d0', 'Local probability of detection (if stop)',
min = 0, max = 1, value = 0.01),
bsplus::bs_attach_modal(
bsplus::shiny_iconlink(),
id_modal = "modal_d0")
),
bsplus::shinyInput_label_embed(
shiny::numericInput('dm', 'Local probability of detection (if manage)',
min = 0, max = 1, value = 0.01),
bsplus::bs_attach_modal(bsplus::shiny_iconlink(),
id_modal = "modal_dm")
),
bsplus::shinyInput_label_embed(
shiny::numericInput('ds', 'Local probability of detection (if survey)',
min = 0, max = 1, value = 0.78193),
bsplus::bs_attach_modal(
bsplus::shiny_iconlink(),
id_modal = "modal_ds")
),
# Costs ####
shiny::h3("Costs"),
bsplus::shinyInput_label_embed(
shiny::numericInput('V', 'Estimated economic value of the species ($/yr)',
value = 175.133),
bsplus::bs_attach_modal(bsplus::shiny_iconlink(),
id_modal = "modal_V")
),
bsplus::shinyInput_label_embed(
shiny::numericInput('Cm', 'Estimated cost of managing ($/yr)',
value = 18.784),
bsplus::bs_attach_modal(
bsplus::shiny_iconlink(),
id_modal = "modal_Cm")
),
bsplus::shinyInput_label_embed(
shiny::numericInput('Cs', 'Estimated cost of surveying ($/yr)',
min = 0, value = 10.840),
bsplus::bs_attach_modal(
bsplus::shiny_iconlink() ,
id_modal = "modal_Cs")
),
# Case studies ####
shiny::h3("Case studies"),
bsplus::shinyInput_label_embed(
shiny::selectInput("case_study", "Select case study",
choices = c("Sumatran tiger",
"Expensive management",
"Detection in management")),
bsplus::bs_attach_modal(
bsplus::shiny_iconlink(),
id_modal = "modal_case_study")
),
shiny::actionButton("reload", "Reload parameters")
),
# Dark-grey theme override for the parameter box above.
shiny::tags$style(shiny::HTML("
#primariy2 .box.box-solid.box-primary>.box-header {
color:#fff;
background:#666666
}
.box.box-solid.box-primary {
border-bottom-color:#666666;
border-left-color:#666666;
border-right-color:#666666;
border-top-color:#666666;
}
"))
),
# Plot parameters ####
shinydashboard::box(width = 3,
bsplus::shinyInput_label_embed(
shiny::numericInput("initial_belief", "Initial belief state",
value = 1, min = 0, max = 1),
bsplus::bs_attach_modal(
bsplus::shiny_iconlink(),
id_modal = "modal_initial_belief")
)
),
shinydashboard::box(width = 3,
bsplus::shinyInput_label_embed(
shiny::numericInput('Tmanage', "Duration of past data (time steps)",
value = 5, min = 0),
bsplus::bs_attach_modal(
bsplus::shiny_iconlink(),
id_modal = "modal_Tmanage")
)
),
shinydashboard::box(width = 3,
bsplus::shinyInput_label_embed(
shiny::numericInput('Tsim', "Duration of simulation (time steps)",
value = 10, min = 0, max = 20),
bsplus::bs_attach_modal(
bsplus::shiny_iconlink(),
id_modal = "modal_Tsim")
)
),
# plots####
# Three plotly panels rendered by the server: past actions, observations
# with the belief trajectory, and discounted expected rewards.
shinydashboard::box(width = 9,
bsplus::bs_attach_modal(bsplus::shiny_iconlink(),
id_modal = "modal_gif"),
"Choose actions performed in the past",
plotly::plotlyOutput("plot_actions", height = "350px")),
shinydashboard::box(width = 9,
"Choose observations following the actions",
plotly::plotlyOutput("plot_observations", height = "300px")),
shinydashboard::box(width = 9,
"Explore discounted expected rewards over time",
plotly::plotlyOutput("plot_reward", height = "300px"))
# add modals ####
# NOTE(review): the modal_* objects below are not defined in this file --
# presumably they come from helper.R (sourced at the top); confirm.
, modal_p0
, modal_pm
, modal_d0
, modal_dm
, modal_ds
, modal_V
, modal_Cm
, modal_Cs
, modal_initial_belief
, modal_Tmanage
, modal_Tsim
, modal_case_study
, modal_gif
# activate tooltips, popovers, and MathJax ####
, bsplus::use_bs_tooltip()
, bsplus::use_bs_popover()
, shiny::withMathJax()
)
)
)
# SERVER ###############################
server <- function(input, output, session){
#Inputs #####
p0 <- shiny::reactive({
shiny::validate(shiny::need(input$p0 >=0 & input$p0 <=1 , "Please select local probability of persistence (if survey or stop) between 0 and 1") )
input$p0
})
pm <- shiny::reactive({
shiny::validate( shiny::need(input$pm >=0 & input$pm<=1 , "Please select local probability of persistence (if manage) between 0 and 1") )
input$pm
})
d0 <- shiny::reactive({
shiny::validate( shiny::need(input$d0 >=0 & input$d0 <=1 , "Please select local probability of detection (if stop) between 0 and 1") )
input$d0
})
dm <- shiny::reactive({
shiny::validate( shiny::need(input$dm >=0 & input$dm <=1 , "Please select local probability of detection (if manage) between 0 and 1") )
input$dm
})
ds <- shiny::reactive({
shiny::validate( shiny::need(input$ds >=0 & input$ds <=1 , "Please select local probability of detection (if survey) between 0 and 1") )
input$ds
})
V <- shiny::reactive({
shiny::validate( shiny::need(input$V >=0 , "Please select estimated economic value of the species ($/yr) positive") )
input$V
})
Cm <- shiny::reactive({
shiny::validate( shiny::need(input$Cm >=0, "Please select estimated cost of managing ($/yr) positive") )
input$Cm
})
Cs <- shiny::reactive({
shiny::validate( shiny::need(input$Cs >=0, "Please select estimated cost of survey ($/yr) positive") )
input$Cs
})
Tmanage <- shiny::reactive({
shiny::validate( shiny::need(input$Tmanage >=0, "Please select horizon of past management positive") )
input$Tmanage
})
init_belief <- shiny::reactive({
shiny::validate( shiny::need(input$initial_belief >=0 & input$initial_belief <=1 , "Please select initial belief state (extant) between 0 and 1") )
c(input$initial_belief, 1-input$initial_belief)
}) #initial belief state
Tsim <- shiny::reactive({
shiny::validate( shiny::need(input$Tsim >=0, "Please select a positive duration of simulation") )
input$Tsim
})
Tplot <- shiny::reactive({
shiny::validate( shiny::need(input$Tsim >=0, "Please select a positive duration of simulation") )
max(10, input$Tsim)
})
# Treat inputs #####
input_past <- shiny::reactiveValues(
data_actions=c(),
actions = c(),
data_observations = c(),
observations = c(),
belief_extant = isolate({matrix(init_belief(), ncol = 2)}),
rewards = c()
)
data_action_reactive <- shiny::reactive({
return(input_past$data_actions)
})
data_observation_reactive <- shiny::reactive({
return(input_past$data_observations)
})
actions_past <- shiny::reactive({input_past$actions})
observations_past <- shiny::reactive({input_past$observations})
# change Tmanage ####
shiny::observeEvent(Tmanage(), {
if (input$Tmanage == 0){
input_past$data_actions <- c()
input_past$actions <- c()
input_past$data_observations <- c()
input_past$observations <- c()
input_past$belief_extant <- matrix(init_belief(), ncol = 2)
return()
}
if (is.null(input_past$data_actions)) {
actions <- c("Stop","Survey","Manage")
time_steps <- seq_len(Tmanage())
tab <- expand.grid(actions, time_steps)
tab$color <- "Off"
names(tab) <- c("action", "step", "color")
input_past$data_actions <- tab
} else {
data <- input_past$data_actions
Tmax <- max(data$step)
diff <- Tmanage()-Tmax
if (diff > 0){
actions <- c("Stop","Survey","Manage")
time_steps <- seq(Tmax+1,Tmanage())
tab <- expand.grid(actions, time_steps)
tab$color <- "Off"
names(tab) <- c("action", "step", "color")
input_past$data_actions <- rbind(data, tab)
} else {
input_past$data_actions <- data[which(data$step <= Tmanage()),]
}
}
if (is.null(input_past$data_observations)) {
obs <- c("Not seen","Seen")
time_steps <- seq_len(Tmanage())
tab2 <- expand.grid(obs, time_steps)
tab2$color <- "Off"
names(tab2) <- c("obs", "step", "color")
input_past$data_observations <- tab2
} else {
data <- input_past$data_observations
Tmax <- max(data$step)
diff <- Tmanage()-Tmax
if (diff > 0){
obs <- c("Not seen","Seen")
time_steps <- seq(Tmax+1,Tmanage())
tab <- expand.grid(obs, time_steps)
tab$color <- "Off"
names(tab) <- c("obs", "step", "color")
input_past$data_observations <- rbind(data, tab)
} else {
input_past$data_observations <- data[which(data$step <= Tmanage()),]
}
}
if ( (Tmanage()-length(input_past$actions)) < 0){
input_past$actions <- input_past$actions[seq_len(Tmanage())]
}
if ( (Tmanage()-length(input_past$observations)) < 0){
input_past$observations <- input_past$observations[seq_len(Tmanage())]
}
})
# click on action plot ####
shiny::observeEvent(plotly::event_data("plotly_click", source = "A"),{
d <- plotly::event_data("plotly_click", source = "A")
if (is.null(d)){
return()
}
isolate({
x <- d$x + Tmanage() +1
y <- d$y
tab <-input_past$data_actions
tab[which(tab$step == x),]$color <- "Off"
tab[which((tab$step == x)&(tab$action == y)),]$color <- "On"
input_past$data_actions <- tab
input_past$actions[x] <-d$y
})
})
# click on observation plot ####
shiny::observeEvent(plotly::event_data("plotly_click", source = "O"),{
d <- plotly::event_data("plotly_click", source = "O")
if (is.null(d)){
return()
}
isolate({
x <- d$x + Tmanage() +1
y <- d$y
tab <-input_past$data_observations
tab[which(tab$step == x),]$color <- "Off"
tab[which((tab$step == x)&(tab$obs == y)),]$color <- "On"
input_past$data_observations <- tab
input_past$observations[x] <-d$y
})
})
# beliefs ####
#reactive list of beliefs in the past, matrix
beliefs <- shiny::reactive({
if (length(observations_past()) != length(actions_past())
|any(is.na(observations_past()))
|any(is.na(actions_past())) ){
return(input_past$belief_extant)
} else {
input_past$belief_extant <- smsPOMDP::compute_belief_list(p0(), pm(), d0(), dm(), ds(),
V(), Cm(), Cs(),init_belief(),
actions_past(), observations_past())
return(input_past$belief_extant)
}
})
#reactive vector of current belief
current_belief <- shiny::reactive({
b <- beliefs()
return(b[nrow(b),])
# input_past$belief_extant[nrow(input_past$belief_extant), ]
})
# rewards ####
#reactive vector of rewards
rewards <- shiny::reactive({
if (length(observations_past()) != length(actions_past())
|any(is.na(observations_past()))
|any(is.na(actions_past())) ){
return(input_past$rewards)
} else {
input_past$rewards <- smsPOMDP::reward_belief(p0(), pm(), d0(), dm(), ds(),
V(), Cm(), Cs(),beliefs(), actions_past())
return(input_past$rewards)
}
})
#reactive datasets of simulations ####
data_sim <- shiny::reactive({smsPOMDP::simulations_tab(p0(), pm(), d0(), dm(), ds(), V(), Cm(), Cs(), current_belief(), Tsim())})
data_sim_ref <- shiny::reactive({smsPOMDP::simulations_tab(p0(), pm(), d0(), dm(), ds(), V(), Cm(), Cs(), init_belief(), Tmanage()+Tsim())})
# Plots ####
#optimal solution plot ####
optimal_solution <- shiny::reactive({
log_dir <- tempdir()
infile <- paste0(log_dir, "/optimal_sol.png")
png(infile, width = 1280, height = 720, units = "px")
smsPOMDP::graph(p0(), pm(), d0(), dm(),
ds(), V(), Cm(), Cs(), current_belief(),
size = 2)
dev.off()
png::readPNG(infile)
})
#actions plot ####
output$plot_actions <- plotly::renderPlotly({
xaxis <- list(
title = "Time steps",
autotick = FALSE,
ticks = "outside",
dtick = 1,
range = c(-Tmanage()-1, Tplot()+2)
)
yaxis <- list(type = "category",
categoryorder ="array",
categoryarray = c("Stop", "Survey", "Manage"),
title = "Actions")
if (Tmanage() >0){
if (length(unique(data_action_reactive()$color))==1){colors_palette = c("grey")} else {colors_palette = c("grey","red")}
if ((length(observations_past()) == length(actions_past()))
& (length(observations_past()) == Tmanage())){
plotly::plot_ly(
height = 350, source = "A") %>%
plotly::add_trace(x=~step-Tmanage()-1, y=~action, hoverinfo="text",text=~action,
data = data_action_reactive(), type = 'scatter',
mode = 'markers',
marker = list(size = 20, opacity = 0.8) ,
color = ~color,
colors = colors_palette,
showlegend = FALSE) %>%
plotly::layout( xaxis = xaxis,yaxis=yaxis, showlegend = TRUE
, images = list(
source = plotly::raster2uri(as.raster(optimal_solution())),
x = (Tmanage()+1)/(Tmanage()+Tplot()), y = 0.5,
sizex = (Tplot()-1)/(Tmanage()+Tplot()), sizey = 0.9,
xref = "paper", yref = "paper",
xanchor = "left", yanchor = "middle"
)
, legend = list(orientation = 'h', y = 1.1)
, margin = list(l=100, r=100)
)
} else {
plotly::plot_ly(
height = 350, source = "A") %>%
plotly::add_trace(x=~step-Tmanage()-1, y=~action, hoverinfo="text",text=~action,
data = data_action_reactive(), type = 'scatter',
mode = 'markers',
marker = list(size = 20, opacity = 0.8) ,
color = ~color,
colors = colors_palette,
showlegend = FALSE) %>%
plotly::layout( xaxis = xaxis,yaxis=yaxis, showlegend = FALSE
, margin = list(l=100, r=100)
)
}
} else if (Tmanage()==0){
xaxis <- list(
title = "Time steps",
autotick = FALSE,
ticks = "outside",
dtick = 1,
range = c(-Tmanage()-1, Tplot() + 2)
)
yaxis <- list(type = "category", categoryorder ="array",
categoryarray = c("Stop", "Survey", "Manage"),
visible = FALSE)
plotly::plot_ly(
height = 350, source = "A") %>%
plotly::layout( xaxis = xaxis,yaxis=yaxis, showlegend = TRUE
, images = list(
source = plotly::raster2uri(as.raster(optimal_solution())),
x = (Tmanage()+1)/(Tmanage()+Tplot()), y = 0.5,
sizex = (Tplot()-1)/(Tmanage()+Tplot()), sizey = 0.9,
xref = "paper", yref = "paper",
xanchor = "left", yanchor = "middle"
)
, legend = list(orientation = 'h', y = 1.1)
, margin = list(l=100, r=100)
)
}
})
#observations plot ####
  # Observation-history plot: past detections ("Seen"/"Not seen") on a
  # categorical primary axis, plus belief-in-extant trajectories on a
  # secondary axis — the user's current belief (red), the user's simulated
  # optimal future (green ribbon), and the reference-optimal trajectory
  # (blue ribbon). Which traces appear depends on how much history exists.
  output$plot_observations<- plotly::renderPlotly({
    # x axis: one tick per time step; past steps are negative, present is 0.
    xaxis <- list(
      title = "Time steps",
      autotick = FALSE,
      ticks = "outside",
      dtick = 1,
      range = c(-Tmanage()-1, Tplot() + 2)
    )
    yaxis <- list(
      type = "category",
      categoryorder ="array",
      categoryarray = c("Not seen", "Seen"),
      title = "Observations"
    )
    # Secondary (right) axis for belief values; range padded beyond [0, 1]
    # so markers at the extremes are not clipped.
    ay <- list(
      overlaying = "y",
      side = "right",
      title = "b(extant)",
      range = c(-0.3, 1.3)
    )
    if (Tmanage()>0){
      # Some management history exists: belief in the "extant" state is
      # the first column of the belief matrix, one row per past step.
      b <- beliefs()
      extant <- c(b[,1])
      xtime <- seq(1,length(extant))-Tmanage()-1
      # Single grey palette when no point is highlighted, grey + red otherwise.
      if (length(unique(data_observation_reactive()$color))==1){colors_palette = c("grey")} else {colors_palette = c("grey","red")}
      # NOTE(review): scalar `&&` would be more idiomatic in this scalar
      # `if` condition than elementwise `&` — behaviour is the same here.
      if ((length(observations_past()) == length(actions_past()))
          & (length(observations_past()) == Tmanage())){
        # History is complete (one observation per past action): show past
        # observations plus BOTH forward simulations (user's and reference).
        plotly::plot_ly(
          height = 300, source = "O") %>%
          plotly::add_trace(x=~step-Tmanage()-1, y=~obs, hoverinfo="text",text=~obs,
                            data = data_observation_reactive(), type = 'scatter',
                            mode = 'markers',
                            marker = list(size = 20, opacity = 0.8) ,
                            color= ~color,
                            colors = colors_palette,
                            showlegend = FALSE) %>%
          #simulations from present (user's optimal future, green)
          plotly::add_trace(x=seq(0, Tsim()), y=~mean_belief, name = "User's optimal future trajectory b(extant)",
                            data=data_sim(), yaxis = "y2", type = 'scatter',
                            mode = 'lines+markers', visible = TRUE,
                            line = list(color = "green"),
                            marker =list(color = "green"),
                            colors = "green") %>%
          plotly::add_ribbons(x=seq(0, Tsim()), ymin=~low_belief, ymax=~up_belief,
                              name="User's optimal future trajectory b(extant) 95%",
                              line=list(color="green", opacity=0.4, width=0),
                              data = data_sim(), yaxis = "y2", visible = TRUE,
                              fillcolor ="rgba(0,255,0,0.2)", showlegend = FALSE) %>%
          #current belief (red line over the past steps)
          plotly::add_trace(x=xtime, y=extant, name = "User's current b(extant)",
                            visible = TRUE, yaxis = "y2",
                            type = 'scatter', mode = 'lines+markers',
                            showlegend = TRUE, line = list(color = "red"),
                            marker =list(color = "red"), colors = "red") %>%
          #simulations from reference point (blue, from -Tmanage onward)
          plotly::add_trace(x=seq(-Tmanage(), Tsim()), y=~mean_belief, name = "Optimal trajectory b(extant)",
                            data=data_sim_ref(), yaxis = "y2",
                            type = 'scatter', mode = 'lines+markers',
                            visible = TRUE, line = list(color = "blue"),
                            marker =list(color = "blue"), colors="blue") %>%
          plotly::add_ribbons(x=seq(-Tmanage(), Tsim()), ymin=~low_belief, ymax=~up_belief,
                              name="Optimal trajectory b(extant) 95%", line=list(color="blue",
                                                                                 opacity=0.4, width=0),
                              data = data_sim_ref(), yaxis = "y2",
                              visible = TRUE, showlegend = FALSE, fillcolor ="rgba(0,0,255,0.2)") %>%
          plotly::layout( xaxis = xaxis, yaxis=yaxis,
                          yaxis2 = ay, legend = list(orientation = 'h',
                                                     # y = -0.3,
                                                     x=0)
                          , margin = list(l=100, r=100)
          )
      } else {
        # History is mid-step (action taken, observation pending): omit the
        # user's forward simulation; keep current belief + reference traces.
        plotly::plot_ly(
          height = 300, source = "O") %>%
          plotly::add_trace(x=~step-Tmanage()-1, y=~obs, hoverinfo="text",text=~obs,
                            data = data_observation_reactive(), type = 'scatter',
                            mode = 'markers',
                            marker = list(size = 20, opacity = 0.8) ,
                            color= ~color,
                            colors = colors_palette,
                            showlegend = FALSE) %>%
          #current belief
          plotly::add_trace(x=xtime, y=extant, name = "User's current b(extant)",
                            visible = TRUE, yaxis = "y2",
                            type = 'scatter', mode = 'lines+markers',
                            showlegend = TRUE, line = list(color = "red"),
                            marker =list(color = "red"), colors = "red"
          ) %>%
          #simulations from reference point
          plotly::add_trace(x=seq(-Tmanage(), Tsim()), y=~mean_belief, name = "Optimal trajectory b(extant)",
                            data=data_sim_ref(), yaxis = "y2",
                            type = 'scatter', mode = 'lines+markers',
                            visible = TRUE, line = list(color = "blue"),
                            marker =list(color = "blue"), colors = "blue") %>%
          plotly::add_ribbons(x=seq(-Tmanage(), Tsim()), ymin=~low_belief, ymax=~up_belief,
                              name="Optimal trajectory b(extant) 95%", line=list(color="blue",
                                                                                 opacity=0.4, width=0),
                              data = data_sim_ref(), yaxis = "y2", fillcolor = "rgba(0,0,255,0.2)",
                              visible = TRUE, showlegend = FALSE) %>%
          plotly::layout( xaxis = xaxis, yaxis=yaxis,
                          yaxis2 = ay, legend = list(orientation = 'h',
                                                     # y = -0.3,
                                                     x=0)
                          , margin = list(l=100, r=100)
          )
      }
    } else {
      # No management history yet: only the reference-optimal trajectory.
      plotly::plot_ly(
        height = 300, source = "O") %>%
        #simulations from reference point
        plotly::add_trace(x=seq(-Tmanage(), Tsim()), y=~mean_belief, name = "Optimal trajectory b(extant)",
                          data=data_sim_ref(), yaxis = "y2",
                          type = 'scatter', mode = 'lines+markers',
                          visible = TRUE, line = list(color = "blue"),
                          marker =list(color = "blue"), colors="blue") %>%
        plotly::add_ribbons(x=seq(-Tmanage(), Tsim()), ymin=~low_belief, ymax=~up_belief,
                            name="Optimal trajectory b(extant) 95%", line=list(color="blue",
                                                                               opacity=0.4, width=0),
                            data = data_sim_ref(), yaxis = "y2",
                            visible = TRUE, showlegend = FALSE, fillcolor ="rgba(0,0,255,0.2)") %>%
        plotly::layout( xaxis = xaxis, yaxis=yaxis,
                        yaxis2 = ay, legend = list(orientation = 'h',
                                                   # y = -0.3,
                                                   x=0)
                        , margin = list(l=100, r=100)
        )
    }
  })
# rewards plot ####
  # Expected-reward plot: the user's realised expected reward over past
  # steps (red), the user's simulated optimal future reward (green ribbon),
  # and the reference-optimal expected reward (blue ribbon). The branch
  # structure mirrors plot_observations: which traces are drawn depends on
  # how much management history exists and whether it is complete.
  output$plot_reward <- plotly::renderPlotly({
    if (Tmanage()>0){
      if (length(rewards())>=1){
        # At least one realised reward: past steps map to negative x values.
        xtime <- seq(1,length(rewards()))-Tmanage()-1
        xaxis <- list(
          autotick = FALSE,
          title = "Time steps",
          ticks = "outside",
          dtick = 1,
          range = c(-Tmanage()-1, Tplot() + 2)
        )
        yaxis <-list(
          title = "Expected reward"
        )
        # NOTE(review): elementwise `&` in a scalar `if` — works here, but
        # `&&` would be more idiomatic.
        if ((length(observations_past()) == length(actions_past()))
            & (length(observations_past()) == Tmanage())){
          # Complete history: draw user's future simulation (green),
          # realised reward (red, stitched to the first simulated value
          # at x = 0), and the reference trajectory (blue).
          plotly::plot_ly(height = 300, source = "R") %>%
            #simulations from present
            plotly::add_lines(x=seq(0, Tsim()), y=~mean_reward,
                              data=data_sim(), name = "User's optimal future expected reward",
                              visible = TRUE, line = list(color = "green"),
                              marker =list(color = "green"), colors="green") %>%
            plotly::add_ribbons(x=seq(0, Tsim()), ymin=~low_reward, ymax=~up_reward,
                                name="Reward 95%", data = data_sim(),
                                line=list(color="green", opacity=0.4, width=0),
                                visible = TRUE, showlegend = FALSE,
                                fillcolor ="rgba(0,255,0,0.2)") %>%
            #current instant reward (appends the x=0 simulated value so the
            #red line connects to the green trajectory)
            plotly::add_trace(x=c(xtime, 0), y=c(rewards(), data_sim()$mean_reward[1]),
                              name = "User's current expected reward",
                              visible = TRUE, type = 'scatter',
                              mode = 'lines+markers',
                              showlegend = TRUE, line = list(color = "red"),
                              marker =list(color = "red"), colors = "red") %>%
            #simulations from reference point
            plotly::add_lines(x=seq(-Tmanage(), Tsim()), y=~mean_reward,
                              data=data_sim_ref(), name = "Optimal expected reward",
                              line = list(color = "blue"),
                              marker =list(color = "blue"), colors="blue") %>%
            plotly::add_ribbons(x=seq(-Tmanage(), Tsim()), ymin=~low_reward, ymax=~up_reward,
                                name="Ref reward 95%", data = data_sim_ref(),
                                line=list(color="blue", opacity=0.4, width=0),
                                visible = TRUE, showlegend = FALSE,
                                fillcolor ="rgba(0,0,255,0.2)") %>%
            plotly::layout( xaxis = xaxis, yaxis=yaxis,
                            showlegend = TRUE, legend = list(orientation = 'h'
                                                             #, y = -0.4
                            )
                            , margin = list(l=100, r=100)
            )
        } else {
          # History mid-step: omit the user's forward simulation.
          plotly::plot_ly(height = 300, source = "R") %>%
            #current instant reward
            plotly::add_trace(x=xtime, y=rewards(), name = "User's current expected reward",
                              visible = TRUE, type = 'scatter',
                              mode = 'lines+markers',
                              showlegend = TRUE, line = list(color = "red"),
                              marker =list(color = "red"), colors = "red") %>%
            #simulations from reference point
            plotly::add_lines(x=seq(-Tmanage(), Tsim()), y=~mean_reward,
                              data=data_sim_ref(), name = "Optimal expected reward",
                              line = list(color = "blue"),
                              marker =list(color = "blue"), colors="blue") %>%
            plotly::add_ribbons(x=seq(-Tmanage(), Tsim()), ymin=~low_reward, ymax=~up_reward,
                                name="Ref reward 95%", data = data_sim_ref(),
                                line=list(color="blue", opacity=0.4, width=0),
                                visible = TRUE, showlegend = FALSE,
                                fillcolor ="rgba(0,0,255,0.2)") %>%
            plotly::layout( xaxis = xaxis, yaxis=yaxis,
                            showlegend = TRUE, legend = list(orientation = 'h'
                                                             #, y = -0.4
                            )
                            , margin = list(l=100, r=100))
        }
      } else {
        # Management has started but no reward recorded yet: only the
        # reference trajectory is drawn.
        xaxis <- list(
          autotick = FALSE,
          title = "Time steps",
          ticks = "outside",
          dtick = 1,
          range = c(-Tmanage()-1, Tplot() + 2)
        )
        yaxis <-list(
          title = "Expected reward"
        )
        plotly::plot_ly(height = 300, source = "R") %>%
          #simulations from reference point
          plotly::add_lines(x=seq(-Tmanage(), Tsim()), y=~mean_reward,
                            data=data_sim_ref(), name = "Optimal expected reward",
                            line = list(color = "blue"),
                            marker =list(color = "blue"), colors="blue") %>%
          plotly::add_ribbons(x=seq(-Tmanage(), Tsim()), ymin=~low_reward, ymax=~up_reward,
                              name="Ref reward 95%", data = data_sim_ref(),
                              line=list(color="blue", opacity=0.4, width=0),
                              visible = TRUE, showlegend = FALSE,
                              fillcolor ="rgba(0,0,255,0.2)") %>%
          plotly::layout( xaxis = xaxis, yaxis=yaxis,
                          showlegend = TRUE, legend = list(orientation = 'h'
                                                           #, y = -0.4
                          )
                          , margin = list(l=100, r=100)
          )
      }
    } else {
      # No management history at all: reference trajectory only.
      xaxis <- list(
        autotick = FALSE,
        title = "Time steps",
        ticks = "outside",
        dtick = 1,
        range = c(-Tmanage()-1, Tplot() + 2)
      )
      yaxis <-list(
        title = "Expected reward"
      )
      plotly::plot_ly(height = 300, source = "R") %>%
        #simulations from reference point
        plotly::add_lines(x=seq(-Tmanage(), Tsim()), y=~mean_reward,
                          data=data_sim_ref(), name = "Optimal expected reward",
                          line = list(color = "blue"),
                          marker =list(color = "blue"), colors="blue") %>%
        plotly::add_ribbons(x=seq(-Tmanage(), Tsim()), ymin=~low_reward, ymax=~up_reward,
                            name="Ref reward 95%", data = data_sim_ref(),
                            line=list(color="blue", opacity=0.4, width=0),
                            visible = TRUE, showlegend = FALSE,
                            fillcolor ="rgba(0,0,255,0.2)") %>%
        plotly::layout( xaxis = xaxis, yaxis=yaxis,
                        showlegend = TRUE, legend = list(orientation = 'h'
                                                         #, y = -0.4
                        )
                        , margin = list(l=100, r=100)
        )
    }
  })
# reload values depending on the case of study ################
shiny::observeEvent(input$reload, {
if (input$case_study == "Sumatran tiger"){
shiny::updateNumericInput(session, 'p0', value = 0.9)
shiny::updateNumericInput(session, 'pm', value = 0.94184)
shiny::updateNumericInput(session, 'd0', value = 0.01)
shiny::updateNumericInput(session, 'dm', value = 0.01)
shiny::updateNumericInput(session, 'ds', value = 0.78193)
shiny::updateNumericInput(session, 'V', value = 175.133)
shiny::updateNumericInput(session, 'Cm', value = 18.784)
shiny::updateNumericInput(session, 'Cs', value = 10.840)
} else if (input$case_study == "Expensive management"){
shiny::updateNumericInput(session, 'p0', value = 0.9)
shiny::updateNumericInput(session, 'pm', value = 0.94184)
shiny::updateNumericInput(session, 'd0', value = 0.01)
shiny::updateNumericInput(session, 'dm', value = 0.01)
shiny::updateNumericInput(session, 'ds', value = 0.78193)
shiny::updateNumericInput(session, 'V', value = 200)
shiny::updateNumericInput(session, 'Cm', value = 50)
shiny::updateNumericInput(session, 'Cs', value = 25)
} else if (input$case_study == "Detection in management"){
shiny::updateNumericInput(session, 'p0', value = 0.9)
shiny::updateNumericInput(session, 'pm', value = 0.94184)
shiny::updateNumericInput(session, 'd0', value = 0.01)
shiny::updateNumericInput(session, 'dm', value = 0.5)
shiny::updateNumericInput(session, 'ds', value = 0.78193)
shiny::updateNumericInput(session, 'V', value = 175.133)
shiny::updateNumericInput(session, 'Cm', value = 18.784)
shiny::updateNumericInput(session, 'Cs', value = 10.840)
}
})
}
shiny::shinyApp(ui, server)
|
cc6f94771e418cbfc5308c03ff0490c11592ee4c
|
f936ecec924cd1a5f430dd01e2540767a7df29e8
|
/R-ML/prepocx.R
|
8d317e79ca21f4f9cccfb76bef4897819e4b8cab
|
[] |
no_license
|
PiscatorX/Project-Roger-Dodger
|
b4359d79e7234e8385b4d6115b14b2a6524e888e
|
e402876c7aa3c6e92036fcb8dfbba872887e5e33
|
refs/heads/master
| 2023-07-29T12:22:00.861078
| 2021-09-09T07:23:32
| 2021-09-09T07:23:32
| 292,900,383
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,597
|
r
|
prepocx.R
|
library(tidyverse)
library(magrittr)
library(ggplot2)
library(ggpubr)
library(dplyr)
# Load the zeolite adsorption dataset (tab-separated). NOTE(review):
# `header = T` uses the reassignable alias T; prefer TRUE.
zeolite = read.table("zeolitex_final.tsv", sep = "\t", header = T)
# Inspect column names as a one-column data frame (result is discarded).
colnames(zeolite) %>% data.frame()
# Convert empty strings to NA across all columns.
zeolite <- zeolite %>% mutate_all(na_if,"")
##################### Multicollinearity Analysis ###############################
#http://www.sthda.com/english/articles/39-regression-model-diagnostics/160-multicollinearity-essentials-and-vif-in-r/
#A VIF value that exceeds 5 or 10 indicates a problematic amount of collinearity (James et al. 2014)
#James, Gareth, Daniela Witten, Trevor Hastie, and Robert Tibshirani. 2014. An Introduction to Statistical Learning: With Applications in R. Springer Publishing Company, Incorporated.
set.seed(1)
# [1] "Adsorbent"           "SA"                  "Vmicro"              "Vmeso"               "pore.size"
# [6] "Si_Al"               "Ag"                  "Ce"                  "Cu"                  "Ni"
# [11] "Zn"                  "La"                  "Cs"                  "Pd"                  "Nd"
# [16] "adsorbate"           "C_start"             "solvent"             "Batch_Dynamic"       "Oil_adsorbent_ratio"
# [21] "Temp"                "Capacity"
# Subset to the two columns used below.
# NOTE(review): the bare `filter` at the end of this pipe calls
# dplyr::filter() with no conditions (a no-op) — likely an unfinished edit.
zeolite_subset <- zeolite %>%
  select(Adsorbent, Capacity) %>% #,Vmicro,Vmeso,pore.size,Si_Al,C_start,solvent,Oil_adsorbent_ratio,Temp)
  filter
#C_start,solvent,Oil_adsorbent_ratio,Temp,Capacity
# 80/20 train split of the subset.
training_samples <- zeolite_subset %>% sample_frac(size = 0.8)
# Normalise underscores to hyphens in adsorbent names (done AFTER the
# split, so training_samples keeps the old spellings — TODO confirm intent).
zeolite_subset$Adsorbent <- gsub('_', '-', zeolite_subset$Adsorbent)
# NOTE(review): Adsorbent is a character column; lm() with a character
# response will error — this model call is likely a mistake.
simone1 <- lm(Adsorbent ~ Capacity, data = zeolite_subset)
summary(training_samples)
head(training_samples)
colnames(training_samples)
nrow(training_samples)
#training_samples %>% as_tibble() %>% View()
# NOTE(review): setdiff() takes two sets; the third argument `Vmeso` is
# not defined at this point and will raise an error if evaluated.
test_data <- setdiff(zeolite_subset, training_samples,Vmeso)
# Fit Capacity against all remaining predictors on the training split.
model1 <- lm(Capacity ~ ., data = training_samples)
factor(training_samples$Adsorbent)
levels(test_data$Adsorbent)
# Predict held-out capacities with the trained model.
predictions <- model1 %>% predict(test_data)
# ################################################################################
#
# N <- nrow(zeolite)
#
# missing <- zeolite %>% summarise_all(funs(100*sum(is.na(.))/N)) %>% data.frame() %>% round(2)
#
# #Inspect missing data
# t(missing)
#
# #write to file to keep a record
# write.table(missing, "zeolite.miss", sep="\t", row.names = F, quote = F)
#
# #Adsorbetcounts
# Adsorbent_analysis <-zeolite %>% group_by(Adsorbent) %>% summarise(Count=n()) %>% arrange(desc(Count))
#
# p <- ggbarplot(Adsorbent_analysis, x = "Adsorbent", y = "Count",
# fill = "Count",
# sort.by.groups = FALSE,
# sort.val = "asc")
#
# ggpar(p, x.text.angle = 45)
#
# Adsorbent_analysis %>% filter(Count<=2) %>% arrange(Adsorbent) %>% data.frame()
# #Singletone maybe typos
# #eg Ag-Y vs AgY
# #What about CsY could it be CeY
#
# #Missing hyphens may be an issue
# #Count duplicates when hyphen are removed
#
# gsub("-", "", Adsorbent_analysis$Adsorbent) %>%
# as.tibble() %>%
# group_by(value) %>%
# summarise(count=n()) %>%
# filter(count != 1) %>%
# data.frame()
#
# #These below need to be checked
# # 1 AgY 2
# # 2 CeY 2
# # 3 CuY 2
# # 4 HY 2
# # 5 NaY 2
# # 6 NiCeY 2
# # 7 NiY 2
#
# #Surface area
# ggdotchart(data = zeolite %>% filter(!is.na(SA)),
# x = "Adsorbent",
# y = "SA",
# color = "Adsorbent",
# sorting = "descending",
# ggtheme = theme_pubr()) +
# theme(legend.position = "none")
#
#
# SA <- zeolite %>% filter(!is.na(SA))
#
#
# ggplot(data=SA,
# aes(x = SA)) +
# geom_density(aes(y = ..count..)) +
# geom_vline(aes(xintercept = mean(SA)),
# linetype = "dashed", size = 0.6) +
# ylab("Density") +
# theme_pubr()
#
#
#
#
#
# ggviolin(data = SA,
# y = "SA",
# fill = "light blue",
# palette = c("#00AFBB", "#E7B800", "#FC4E07"),
# add = "boxplot")
#
# iqr <- summary(SA$SA)
#
# SA %>% filter(SA < 500 | SA > 700) %>% arrange(desc(SA))
#
#
# SA_fit <- lm(Capacity ~ SA, data = SA)
#
#
# summary(SA_fit)
#
#
# ggscatter(SA, x = "SA",
# y = "Capacity",
# add = "reg.line",
# conf.int = T) +
# stat_cor(label.x = 450, label.y = 60) +
# stat_regline_equation(label.x = 450, label.y = 65)
#
#
# ############################ Vmicro ############################################
#
#
# Vmicro <- zeolite %>% filter(!is.na(Vmicro))
#
#
#
# ggviolin(data = SA,
# y = "Vmicro",
# fill = "light blue",
# palette = c("#00AFBB", "#E7B800", "#FC4E07"),
# add = "boxplot")
#
#
# ggplot(Vmicro) +
# geom_density(aes(x = Vmicro)) +
# theme_pubr()
#
#
# colnames(Vmicro)
#
# ggplot(Vmicro) +
# geom_point(aes(x = Vmicro, y = Capacity, color = Adsorbent)) +
# theme_pubr()
#
#
#
#
# ggscatter(SA, x = "Vmicro",
# y = "Capacity",
# add = "reg.line",
# conf.int = T) +
# stat_cor() +
# stat_regline_equation(label.x = 0.35)
#
#
#
#
#
#
#
#
#
#
|
9073daeca9c5c8be3e2fce570f8abd55e69d6fe6
|
552d16746aeb43a11a7801f4ee94d55289ced977
|
/function_practice.R
|
a6d85a5c436e54e28fabf58ed70a5c38c994812a
|
[] |
no_license
|
saurabb2297/datasciencecoursera
|
2304e672acb4b579998b0fedcccbe9905140903e
|
1ea24067c60025a1585d91b339e2add5c047dd92
|
refs/heads/master
| 2022-09-28T16:34:08.348076
| 2020-06-06T06:37:09
| 2020-06-06T06:37:09
| 264,628,775
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 445
|
r
|
function_practice.R
|
# Return the sum of two numeric inputs.
#
# Args:
#   a, b: numbers (or numeric vectors; `+` recycles as usual).
# Returns: a + b, same shape rules as the `+` operator.
add2 <- function(a, b) {
  total <- a + b
  total
}
# Keep only the elements of `x` strictly greater than the threshold `n`.
#
# Args:
#   x: a vector.
#   n: numeric threshold (default 10).
# Returns: the subset of `x` with values > n. NA comparisons propagate
# exactly as with logical subsetting (NA elements yield NA in the result).
above <- function(x, n = 10) {
  x[x > n]
}
#calculating mean of each column of dataframe
#removeNA used to deal with missing values
# Compute the mean of each column of a data frame (or matrix).
#
# Args:
#   x: a data.frame or matrix with numeric columns.
#   removeNA: if TRUE (default), drop NA values before averaging.
# Returns: an unnamed numeric vector with one mean per column.
columnmean <- function(x, removeNA = TRUE){
  nc <- ncol(x)        # number of columns in x
  means <- numeric(nc) # preallocate result vector, one slot per column
  # seq_len() instead of 1:nc: for a zero-column input, 1:0 would iterate
  # over c(1, 0) and fail on x[, 1]; seq_len(0) correctly iterates zero times
  # and returns numeric(0).
  for (i in seq_len(nc)){
    means[i] <- mean(x[, i], na.rm = removeNA)
  }
  means # return the vector of column means
}
|
fe3e66ebc6c28313dc6709a07d5320685a4d7c16
|
8f2aa4469495a4983a669e44b038f97ce8dda877
|
/plot1.R
|
0d480b2536c114402cf6b7145373222a42e546ac
|
[] |
no_license
|
Juan-Yi/ExData_Plotting1
|
7deded198a75c617067f7a7ba2f9a8d304b431a1
|
eda76a3966ea986e529ff4bce2beee1516d280bc
|
refs/heads/master
| 2021-07-17T16:40:21.206514
| 2017-10-25T00:34:09
| 2017-10-25T00:34:09
| 107,892,587
| 0
| 0
| null | 2017-10-22T18:39:51
| 2017-10-22T18:39:51
| null |
UTF-8
|
R
| false
| false
| 383
|
r
|
plot1.R
|
# plot1.R: histogram of Global Active Power for 1-2 Feb 2007 from the
# household electric power consumption text export. Assumes the file is
# in the working directory and is ";"-separated.
power<-read.table("household_power_consumption.txt", sep = ";", header = TRUE)
# Coerce the power column to numeric; non-numeric placeholders (presumably
# "?" in this dataset — TODO confirm) become NA with a coercion warning.
power$Global_active_power<-as.numeric(as.character(power$Global_active_power))
# Keep only the two target days; dates are stored as d/m/yyyy strings.
power2day<- power[power$Date %in% c("1/2/2007","2/2/2007") ,]
# Red histogram of the two-day power distribution.
hist(power2day$Global_active_power,
     xlab = "Global Active Power(kilowatts)",
     ylab = "Frequency",
     main = "Global Active Power",
     col = "red",
     )
|
288a07fb2e9add0f7add78b2d188644e74695b6d
|
e3d9fdf5a0720b59afa52f62abef64c08b00e8e9
|
/users/Faith/Evenness/analysis/mixed_models/Abundance_Calcluate_Evenness.R
|
adae4646585dbd1301368e5f0c04c304469d5041
|
[] |
no_license
|
bioTIMEHub/bioTIME
|
4d5f960b137b4040de32426ff1c447a256147c38
|
e44a750827cae059f825dcc3c93ba6a0b7ae2a91
|
refs/heads/master
| 2021-06-08T09:53:41.387274
| 2021-03-30T12:42:53
| 2021-03-30T12:42:53
| 92,727,984
| 5
| 4
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,362
|
r
|
Abundance_Calcluate_Evenness.R
|
# Compute Pielou's evenness (J = H / ln(S)) per study and per year from
# rarefied abundance data, then write the result to CSV.
# NOTE(review): hard-coded setwd() to an absolute local path makes this
# script non-portable; prefer project-relative paths.
setwd("C:\\Users\\faj\\Documents\\OneDrive for Business\\research\\ch3 - metaanalysis\\data")
AbData <- read.csv("AbudnaceOnceRarefy.csv")
library(vegan)
library(reshape2)
#code to calculate evenness change
#----------------------------------------------------------------
head(AbData)
# Second column holds the (rarefied) abundance values.
names(AbData)[2] <- "abundance"
#select data to practice on: study 10, first recorded year
AbData10 <- AbData[AbData$Study_ID == 10,]
AbData10_y1 <- AbData10[AbData10$Year == AbData10$Year[1],]
#sum abundance for each species
evennessData <- aggregate(AbData10_y1$abundance, list(AbData10_y1$Species_Identity), sum)
H <- diversity(evennessData[,2], index = "simpson")#Simpson index
# NOTE(review): this practice block divides a Simpson index by S (no log),
# which is NOT Pielou's evenness; the loops below use Shannon / ln(S),
# which is the standard Pielou formula — confirm which was intended.
J <- H/specnumber(evennessData[,2])#calculate Pileu'e evenness
#calculate evenness each year in dataset 10 using a loop
EvenData10 <- data.frame(unique(AbData10$Year)) #make a dataframe to imupt data
names(EvenData10) <- "Year"
EvenData10$Evenness <- 0
for (y in unique(AbData10$Year)){
  AbData10_y <- AbData10[AbData10$Year == y,] #select data for approprate year
  evennessData_y <- aggregate(AbData10_y$abundance, list(AbData10_y$Species_Identity), sum) #how many individuals of each species
  H <- diversity(evennessData_y[,2])#Shannon index
  J <- H/log(specnumber(evennessData_y[,2]))#calculate Pileu'e evenness
  EvenData10$Evenness[EvenData10$Year == y] <- J
}
#make code to loop through all data: one evenness value per study-year
#---------------------------------------------------
EvenChange <- list()
i <- 1
for(s in unique(AbData$Study_ID)){
  AbData_s <- AbData[AbData$Study_ID == s,]
  EvenData_s <- data.frame(unique(AbData_s$Year)) #make a dataframe to imupt data
  names(EvenData_s) <- "Year"
  EvenData_s$Evenness <- 0
  EvenData_s$Study_ID <- s
  for (y in unique(AbData_s$Year)){
    AbData_s_y<- AbData_s[AbData_s$Year == y,] #select data for approprate year
    evennessData_y <- aggregate(AbData_s_y$abundance, list(AbData_s_y$Species_Identity), sum) #how many individuals of each species
    H <- diversity(evennessData_y[,2])#Shannon index
    J <- H/log(specnumber(evennessData_y[,2]))#calculate Pileu'e evenness
    EvenData_s$Evenness[EvenData_s$Year == y] <- J
  }
  EvenChange[[i]] <- EvenData_s
  i <- i +1
}
# Stack the per-study data frames and save.
evennessAbData <- do.call(rbind, EvenChange)
write.csv(evennessAbData, "evennessAbundance.csv")
|
bdcd73a5219db5cdf1066063bdb08a66dc0afe70
|
0ca71d16a83d6861cab501c19a62a664fdbca559
|
/ui.R
|
fdc4cc433502d98655fd80ed3003fb1fc367b043
|
[] |
no_license
|
checkmyschool/data-portal_shiny
|
6c27c5a1a898aac0d26fb3722a53238d67ab67d8
|
611dc59569671beb0d61dcd7b2552d6ea578765d
|
refs/heads/master
| 2020-08-02T04:42:19.081241
| 2019-10-14T02:16:40
| 2019-10-14T02:16:40
| 211,238,061
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 34,781
|
r
|
ui.R
|
# UI definition for the CheckMySchool Data Portal: a navbar app with six
# tabs — Welcome, User Guide, School Neediness Index Map, Data Explorer,
# School Profiles, and Data Set Builder. Styling comes from www/styles.css.
shinyUI(navbarPage(h5("CheckMySchool Data Portal", style = "color: #ffffff;"), theme = "styles.css",
                   # --- Welcome tab: project description and variable table ---
                   tabPanel(h5("Welcome", style = "color: #ffffff;"),
                            mainPanel(width = 11,
                                      column(10, offset = 1,
                                             h1("CheckMySchool School Neediness Index", align = "center"),
                                             h5("This School Neediness Index Map identifies which among
                                                the 44,751 public elementary and secondary schools in the
                                                country are in need of resources. The index consists of
                                                seven variables grouped into three categories: accessibility (remoteness,
                                                percentage of students receiving conditional cash transfers);
                                                amenities (water access, internet access, electricity access);
                                                and classroom condition (student-teacher ratio, student-classroom ratio).
                                                It made use of DepEd data from the Enhanced Basic Education Information
                                                System (E-BEIS), the National School Building Inventory conducted by the
                                                Education Facilities Division (EFD), and the Remoteness Index developed
                                                by the School Effectiveness Division (SED).", align = "center"),
                                             h5("Heather Baier and Angela Yost worked on this map and the study on School Neediness Index.
                                                They came to the Philippines last May to July 2018 as Summer Fellows of William & Mary’s Global
                                                Research Institute.", align = "center"),
                                             h1(" "),
                                             hr(),
                                             h1(" "),
                                             h4(tags$b("School Neediness Index Variables and Definitions"), align = "center"),
                                             div(tableOutput("variables_table"), align = "center"),
                                             div(img(src='all_logos.png', height = 550, width = 1000), align = "center"),
                                             hr()
                                      )
                            )
                   ),
                   #navbarMenu(h5("About", style = "color: #ffffff;"),
                   # --- User Guide tab: screenshots + usage text for each feature ---
                   tabPanel(h5("User Guide", style = "color: #ffffff;"),
                            h3(strong("School Neediness Index Map"), align = "center"),
                            h5("The first tab of the CMS Data Portal houses a map of every public schools in the Philippines with
                               filters available to choose which schools you would like to display.", align = "center"),
                            h5("Select your desired school pararameters using the selecters and slider inputs on the left to see
                               schools on the map that fit your desired specifications.", align = "center"),
                            div(img(src='tutorial1.png', height = 500, width = 900), align = "center"),
                            h1(" "),
                            h3(strong("Data Explorer"), align = "center"),
                            h5("The second tab of the CMS Data Portal, the Data Explorer, works much like the School Neediness Index Map,
                               except the data is displayed in a table instead of a map.", align = "center"),
                            h5("Click on the empty box below a column name to choose
                               the observation you would like to filter for and the table will adjust itself accordingly. For drop down menus,
                               you can filter for multiple variables. Click on the numbered boxes at the bottom right of the page to see the
                               next observations fitting your desired criteria.", align = "center"),
                            div(img(src='tutorial2.png', height = 500, width = 900), align = "center"),
                            h1(" "),
                            h3(strong("School Profiles"), align = "center"),
                            h5("The third tab of the CMS Data Portal, School Profiles, allows you to choose an individual school to see its
                               respective data.", align = "center"),
                            h5("Choose a school region in the right-side panel and continue to filter for School District, Divisions
                               and Name to find you desired school. The table on the top left shows the School Neediness Index data for the chosen school.
                               The histogram on the top right shows the distribution of the selected variable. The table on the bottom left shows the
                               basic data for each school. The pie chart on the bottom right shows the gender distribution of the selected school.", align = "center"),
                            div(img(src='tutorial3.png', height = 500, width = 900), align = "center"),
                            h1(" ")
                   ),
                   #tabPanel(h5("Methodology", style = "color: #000000;"),
                   #h2("Methodology heeeerrrreeee")
                   #)
                   #),
                   # tabPanel("About the School Neediness Index",
                   #
                   #          column(10, offset = 1,
                   #
                   #                 h1("School Neediness Index Methodology", align = "center"),
                   #
                   #                 h1(" "),
                   #
                   #                 h5("The variables in the School Neediness Index were determined based on focus group and
                   #                    individual discussions with school teachers and principals in Guimaras and Rizal.", align = "center"),
                   #
                   #                 h1(" "),
                   #
                   #                 hr(),
                   #
                   #                 fluidRow(
                   #
                   #                   column(10, offset = 1,
                   #
                   #                          tableOutput("variables_table")
                   #
                   #                   )
                   #
                   #                 ),
                   #
                   #                 hr()
                   #
                   #          )
                   #
                   # ),
                   # --- SNI Map tab: filter sidebar + leaflet map of schools ---
                   # Sliders default to the full observed range of each variable;
                   # NA-aware min/max is used where the column contains NAs.
                   tabPanel(h5("School Neediness Index Map", style = "color: #ffffff;"),
                            sidebarLayout(
                              sidebarPanel(
                                selectInput('region_map', "School Region", choices = unique(all_data$region)),
                                selectInput('year_map', "School Year", choices = c(2015, 2016, 2017), selected = 2015),
                                sliderInput("shi_score_map", "School Neediness Index Score", 0, max(all_data$shi_score),
                                            value = range(0, max(all_data$shi_score)), step = 0.1),
                                sliderInput("stratio_map", "Student Teacher Ratio", min(all_data$student_teacher_ratio), max(all_data$student_teacher_ratio),
                                            value = range(all_data$student_teacher_ratio), step = 1),
                                sliderInput("scratio_map", "Student Classroom Ratio", min(all_data$student_classroom_ratio), max(all_data$student_classroom_ratio),
                                            value = range(all_data$student_classroom_ratio), step = 1),
                                selectInput('water_map', "Access to Water", choices = c("Yes", "No"), multiple = TRUE, selected = c("Yes", "No")),
                                selectInput('internet_map', "Access to Internet", choices = c("Yes", "No"), multiple = TRUE, selected = c("Yes", "No")),
                                selectInput('elec_map', "Access to Electricity", choices = c("Yes", "No"), multiple = TRUE, selected = c("Yes", "No")),
                                sliderInput("ri_map", "Remoteness Index", min(all_data$remoteness_index, na.rm = TRUE), max(all_data$remoteness_index, na.rm = TRUE),
                                            value = range(all_data$remoteness_index, na.rm = TRUE), step = 100),
                                sliderInput("cct_map", "Percentage of Student's Recieving CCT's", min(all_data$cct_percentage, na.rm = TRUE), max(all_data$cct_percentage, na.rm = TRUE),
                                            value = range(all_data$cct_percentage, na.rm = TRUE), step = 10)
                              ),
                              mainPanel(
                                leafletOutput("map", width = '1150px', height = '850px')
                              )
                            )
                   ),
                   # --- Data Explorer tab: full dataset as a filterable DT table ---
                   tabPanel(h5("Data Explorer", style = "color: #ffffff;"),
                            DT::dataTableOutput("timeseries_table")
                   ),
                   # --- School Profiles tab: cascading region/division/district/
                   # school selectors (populated server-side via conditionalPanel),
                   # a locator map, and per-school-year sub-tabs of tables/charts ---
                   tabPanel(h5("School Profiles", style = "color: #ffffff;"),
                            sidebarLayout(
                              sidebarPanel(width = 3,
                                           selectInput('region_profile', "Select Region", choices = c("Select Region" = "", sort(unique(as.character(all_data$region))))),
                                           conditionalPanel("input.region_profile",
                                                            selectInput('division_profile', "Select Division", choices = c("All Divisions" = "")
                                                            )
                                           ),
                                           conditionalPanel("input.division_profile",
                                                            selectInput('district_profile', "Select District", choices = c("All Districts" = "")
                                                            )
                                           ),
                                           conditionalPanel("input.district_profile",
                                                            selectInput('school_profile', "Select School", choices = c("All Schools" = "")
                                                            )
                                           ),
                                           hr(),
                                           helpText("Select a school to see its resources and classroom conditions and how it stacks up to national averages."),
                                           leafletOutput('school_select_map')
                              ),
                              mainPanel(
                                tabsetPanel(
                                  # One sub-tab per school year; each shows the SNI
                                  # table, basic-data table, gender pie, and PWD chart.
                                  tabPanel("School Year 2015 - 2016",
                                           h1(" "),
                                           fluidRow(
                                             column(5, style = "background-color: #DCDCDC; border-radius: 5px; height: 500px; height: 700px",
                                                    div(h4(tags$u("School Neediness Index Data")), align = "center"),
                                                    tableOutput("snitable_profile_2015")
                                             ),
                                             column(1, " "),
                                             column(5, style = "background-color: #DCDCDC; border-radius: 2px; height: 700px",
                                                    div(h4(tags$u("Basic School Data")), align ="center"),#, style="color:red"),
                                                    tableOutput("p_table2_2015")
                                             ),
                                             column(1)
                                           ),
                                           hr(),
                                           fluidRow(
                                             column(5, #style = "border: 2px solid #DCDCDC; border-radius: 2px; height: 500px;",
                                                    div(h4(tags$u("Male to Female Student Ratio")), align ="center"),
                                                    plotOutput("distPie_2015")
                                             ),#columnrowclose
                                             column(1, h1(" ")),
                                             column(5, #style = "border: 2px solid #DCDCDC; border-radius: 2px; height: 500px;",
                                                    div(h4(tags$u("Distribution of Students with Disabilities")), align ="center"),
                                                    highchartOutput("pwdChart_2015")
                                             )#columnrowclose
                                           ),
                                           hr()#fluidrowclose
                                  ),#mainpanelrowclose
                                  tabPanel("School Year 2016 - 2017",
                                           h1(" "),
                                           fluidRow(
                                             column(5, style = "background-color: #DCDCDC; border-radius: 5px; height: 700px",
                                                    div(h4(tags$u("School Neediness Index Data")), align = "center"),
                                                    tableOutput("snitable_profile_2016")
                                             ),
                                             column(1, " "),
                                             column(5, style = "background-color: #DCDCDC; border-radius: 2px; height: 700px",
                                                    div(h4(tags$u("Basic School Data")), align ="center"),#, style="color:red"),
                                                    tableOutput("p_table2_2016")
                                             ),
                                             column(1)
                                           ),
                                           hr(),
                                           fluidRow(
                                             column(5, #style = "border: 2px solid #DCDCDC; border-radius: 2px; height: 500px;",
                                                    div(h4(tags$u("Male to Female Student Ratio")), align ="center"),
                                                    plotOutput("distPie_2016")
                                             ),#columnrowclose
                                             column(1, h1(" ")),
                                             column(5, #style = "border: 2px solid #DCDCDC; border-radius: 2px; height: 500px;",
                                                    div(h4(tags$u("Distribution of Students with Disabilities")), align ="center"),
                                                    highchartOutput("pwdChart_2016")
                                             )#columnrowclose
                                           ),
                                           hr()#fluidrowclose
                                  ),
                                  tabPanel("School Year 2017 - 2018",
                                           fluidRow(
                                             column(5, style = "background-color: #DCDCDC; border-radius: 5px; height: 700px",
                                                    div(h4(tags$u("School Neediness Index Data")), align = "center"),
                                                    tableOutput("snitable_profile_2017")
                                             ),
                                             column(1, " "),
                                             column(5, style = "background-color: #DCDCDC; border-radius: 2px; height: 700px",
                                                    div(h4(tags$u("Basic School Data")), align ="center"),#, style="color:red"),
                                                    tableOutput("p_table2_2017")
                                             ),
                                             column(1)
                                           ),
                                           hr(),
                                           fluidRow(
                                             column(5, #style = "border: 2px solid #DCDCDC; border-radius: 2px; height: 500px;",
                                                    div(h4(tags$u("Male to Female Student Ratio")), align ="center"),
                                                    plotOutput("distPie_2017")
                                             ),#columnrowclose
                                             column(1, h1(" ")),
                                             column(5, #style = "border: 2px solid #DCDCDC; border-radius: 2px; height: 500px;",
                                                    div(h4(tags$u("Distribution of Students with Disabilities")), align ="center"),
                                                    highchartOutput("pwdChart_2017")
                                             )#columnrowclose
                                           ),
                                           hr()#fluidrowclose
                                  )
                                )#sidebarlayoutclose
                              )
                            )
                   ),
                   # --- Data Set Builder tab: choose columns + geographic rows,
                   # preview the result, and download it as CSV ---
                   tabPanel(h5("Data Set Builder", style = "color: #ffffff;"),
                            fluidRow(
                              column(width = 4, offset = 1, style = "background-color: #DCDCDC; border-radius: 2px;",
                                     h3("Choose Columns"),
                                     checkboxGroupInput('columns', label = 'Choose Columns to include in CSV', choices = c('Remoteness Index' = 'remoteness_index',
                                                                                                                           "Total Number of Learners Receiving CCT's" = 'total_recieving_cct',
                                                                                                                           "Percentage of Students Recieving CCT's" = 'cct_percentage',
                                                                                                                           'Water Access' = 'original_water_boolean',
                                                                                                                           'Internet Access' = 'original_internet_boolean',
                                                                                                                           'Electricty Access' = 'original_electricity_boolean',
                                                                                                                           'Total Number of Learners with Gender Distribution' = 'total_enrollment',
                                                                                                                           "Total Number of Learners With Disability" = 'pwds')),
                                     # Per-disability breakdown only appears once the PWD column is ticked.
                                     conditionalPanel("input.columns.indexOf('Total Number of Learners With Disability') != -1",
                                                      checkboxGroupInput('pwd_breakdown', label = NULL, choices = c('Difficulty Seeing Manifestation' = 'ds_total',
                                                                                                                    'Cerebral Palsy' = 'cp_total',
                                                                                                                    "Difficulty Communicating Manifestation" = 'dcm_total',
                                                                                                                    "Difficulty Remembering, Concentrating, Paying Attention and Understanding based on Manifestation" = 'drcpau_total',
                                                                                                                    "Difficulty Hearing Manifestation" = 'dh_total',
                                                                                                                    "Autism Spectral Disorder" = 'autism_total',
                                                                                                                    "Difficulty Walking, Climbing and Grasping" = 'wcg_total',
                                                                                                                    "Emotional-Behavioral Disorder" = 'eb_total',
                                                                                                                    "Hearing Impairment" = 'hi_total',
                                                                                                                    "Intellectual Impairment" = 'id_total',
                                                                                                                    "Learning Impairment" = 'li_total',
                                                                                                                    "Multiple Disabilities" = 'md_total',
                                                                                                                    "Orthopedic/Physical Disorder" = 'pd_total',
                                                                                                                    "Special Health Problem/Chronic Illness" = 'shp_total',
                                                                                                                    "Speech Disorder" = 'speech_total',
                                                                                                                    "Visual Impairment Disorder" = 'vi_total',
                                                                                                                    "Intellectual Impairment" = 'ii_total',
                                                                                                                    "Orthopedic/Physical Disorder" = 'p_total'))
                                     )
                              ),
                              column(width = 2,
                                     h1(" ")
                              ),
                              column(width = 4, style = "background-color: #DCDCDC; border-radius: 2px;",
                                     h3("Choose Rows"),
                                     radioButtons('FilterGeo', label = 'Choose geographic breakdown to filter data',
                                                  choices = c('School Region', 'School Province', 'School Division', 'School District', 'School Municipality')
                                     ),
                                     # Exactly one of the five selectors below is shown,
                                     # matching the chosen geographic breakdown.
                                     conditionalPanel("input.FilterGeo == 'School Region'",
                                                      selectInput('QueryRegion', "Choose School Regions", choices = unique(all_data$region), multiple = TRUE)
                                     ),
                                     conditionalPanel("input.FilterGeo == 'School Province'",
                                                      selectInput('QueryProvince', "Choose School Provinces", choices = unique(all_data$province), multiple = TRUE)
                                     ),
                                     conditionalPanel("input.FilterGeo == 'School Division'",
                                                      selectInput('QueryDivision', "Choose School Divisions", choices = unique(all_data$division), multiple = TRUE)
                                     ),
                                     conditionalPanel("input.FilterGeo == 'School District'",
                                                      selectInput('QueryDistrict', "Choose School Districts", choices = unique(all_data$district), multiple = TRUE)
                                     ),
                                     conditionalPanel("input.FilterGeo == 'School Municipality'",
                                                      selectInput('QueryMunicipality', "Choose School Municipalities", choices = unique(all_data$municipality), multiple = TRUE)
                                     ),
                                     div(downloadButton('QueryBuilder', h4('Download CSV')), align = 'center')
                              )
                            ),
                            hr(),
                            div(tags$u(h3("Data Set Preview")), align = 'center'),
                            h1(" "),
                            DT::dataTableOutput("QueryTablePreview")
                   )
)
)
|
3d824809116ada929e6be510d1e4b318644aaac0
|
d892409b67c45508a018c4a3d46d490310ddd06e
|
/Codigoredes.R
|
1d13c50fda2c184c450e94220716a032b4f83f07
|
[] |
no_license
|
camilavalenciarod/TrabajoFinal
|
e57c7a3616cdd5d7648e36fba51f4d6bf2605e6c
|
c759e543e7a09881181cfc3726f306dc97dc78bc
|
refs/heads/master
| 2020-07-06T02:17:54.070106
| 2016-11-25T06:31:27
| 2016-11-25T06:31:27
| 74,062,893
| 0
| 0
| null | null | null | null |
ISO-8859-1
|
R
| false
| false
| 14,889
|
r
|
Codigoredes.R
|
install.packages('igraph')
install.packages('network')
install.packages('sna')
install.packages('ndtv')
install.packages('visNetwork')
library(igraph)
library(network)
library(sna)
library(ndtv)
library(visNetwork)
base_1 <- read.csv("C:/Users/camila.valencia/Desktop/Efectos Pares/red1.csv", sep=";")
base_2 <- read.csv("C:/Users/camila.valencia/Desktop/Efectos Pares/red2.csv", sep=";")
base_3 <- read.csv("C:/Users/camila.valencia/Desktop/Efectos Pares/red3.csv", sep=";")
base_4 <- read.csv("C:/Users/camila.valencia/Desktop/Efectos Pares/red4.csv", sep=";")
base_5 <- read.csv("C:/Users/camila.valencia/Desktop/Efectos Pares/red5.csv", sep=";")
base_6 <- read.csv("C:/Users/camila.valencia/Desktop/Efectos Pares/red6.csv", sep=";")
Atributos2 <- read.csv ("C:/Users/camila.valencia/Desktop/Efectos Pares/Atributos2.csv", sep=";")
## Network 1: studying together outside class (at least once a week).
# The network as an adjacency matrix.
network_1 <- as.matrix(base_1)
# igraph graph built from the matrix; basis for the centrality metrics below.
g_1 <- graph.adjacency(network_1)
# Vertex betweenness (the outer parentheses make the assignment print).
(b_1 <- betweenness(g_1, directed = FALSE))
# Vertex closeness over outgoing ties.
(c_1 <- closeness(g_1, mode = "out"))
# Vertex out-degree.
(d_1 <- degree(g_1, mode = "out"))
# `network`-package object, used only for plot.network().
red_1 <-as.network.matrix(network_1)
# Draw the raw network.
plot.network(red_1)
title(main = "Red de estudio fuera de clase (min 1 x semana)")
## Undirected igraph version of the first network (self-loops dropped) so
## vertex attributes can be attached and used for coloring.
m=as.matrix(base_1)
net=graph.adjacency(m,mode="undirected",weighted=NULL,diag=FALSE)
# Import the individuals' attributes.
V(net)$promedio=as.character(Atributos2$nota3[match(V(net)$name,Atributos2$Ind)]) # GPA category per vertex, matched against the Ind column of Atributos2.
V(net)$promedio
V(net)$color=V(net)$promedio
# Color by GPA category: 1 = low (red), 2 = medium (yellow), 3 = high (green).
V(net)$color=gsub("1","red",V(net)$color)
V(net)$color=gsub("2","yellow",V(net)$color)
V(net)$color=gsub("3","green",V(net)$color)
plot.igraph(net,vertex.label=NA,layout=layout.fruchterman.reingold, vertex.size=3)
V(net)$size=degree(net)*0.5 # Size vertices by degree for the second plot.
plot.igraph(net,vertex.label=NA,layout=layout.fruchterman.reingold)
title(main = "Red de estudio fuera de clase (min 1 x semana)")
l <- legend( "bottomright", inset = .02, cex = 1, bty = "n", legend = c("Bajo","Medio","Alto")
, title="Desempeño académico", pt.bg = c("red", "yellow", "green")
, pch = c(21,21,21))
V(net)$Sex=as.character(Atributos2$Genero[match(V(net)$name,Atributos2$Ind)])
# Create the gender attribute by matching the two tables on Ind.
V(net)$Sex # Print the attribute.
V(net)$color=V(net)$Sex # Color vertices by gender.
V(net)$color=gsub("0","purple",V(net)$color) # Women in purple.
V(net)$color=gsub("1","lightgreen",V(net)$color) # Men in light green.
V(net)$size=degree(net)*0.5 # Size vertices by degree.
plot.igraph(net,vertex.label=NA,layout=layout.fruchterman.reingold)
title(main = "Red de estudio fuera de clase (min 1 x semana)")
l <- legend( "bottomright", inset = .02, cex = 1, bty = "n", legend = c("Mujeres","Hombres")
, title="Genero", pt.bg = c("purple", "lightgreen")
, pch = 21)
V(net)$estrato=as.character(Atributos2$Estrato[match(V(net)$name,Atributos2$Ind)]) # Socioeconomic stratum per vertex, matched on Ind.
V(net)$estrato
V(net)$color=V(net)$estrato
# Strata binned in pairs: 1-2 (darkblue), 3-4 (blue1), 5-6 (lightblue).
V(net)$color=gsub("1","darkblue",V(net)$color)
V(net)$color=gsub("2","darkblue",V(net)$color)
V(net)$color=gsub("3","blue1",V(net)$color)
V(net)$color=gsub("4","blue1",V(net)$color)
V(net)$color=gsub("5","lightblue",V(net)$color)
V(net)$color=gsub("6","lightblue",V(net)$color)
plot.igraph(net,vertex.label=NA,layout=layout.fruchterman.reingold, vertex.size=3)
V(net)$size=degree(net)*0.5
plot.igraph(net,vertex.label=NA,layout=layout.fruchterman.reingold)
title(main = "Red de estudio fuera de clase (min 1 x semana)")
l <- legend( "bottomright", inset = .02, cex = 1, bty = "n", legend = c("1 y 2","3 y 4","5 y 6")
, title="Estratos Socioeconómicos", pt.bg = c("darkblue", "blue1", "lightblue")
, pch = c(21,21,21))
# "Ser Pilo Paga" scholarship beneficiaries (0 = no, 1 = yes).
V(net)$pilo=as.character(Atributos2$pilopaga[match(V(net)$name,Atributos2$Ind)])
V(net)$color=V(net)$pilo
V(net)$color=gsub("0","red",V(net)$color)
V(net)$color=gsub("1","darkred",V(net)$color)
plot.igraph(net,vertex.label=NA,layout=layout.fruchterman.reingold, vertex.size=3)
V(net)$size=degree(net)*0.5
plot.igraph(net,vertex.label=NA,layout=layout.fruchterman.reingold)
title(main = "Red de estudio fuera de clase (min 1 x semana)")
l <- legend( "bottomright", inset = .02, cex = 1, bty = "n", legend = c("Si","No")
, title="Beneficiario ser pilo paga", pt.bg = c("darkred", "red")
, pch = 21)
# Academic program (0 = Finance, 1 = Economics).
V(net)$programa=as.character(Atributos2$programa[match(V(net)$name,Atributos2$Ind)])
V(net)$color=V(net)$programa
V(net)$color=gsub("0","green",V(net)$color) # Finance program.
V(net)$color=gsub("1","blue",V(net)$color) # Economics program.
plot.igraph(net,vertex.label=NA,layout=layout.fruchterman.reingold, vertex.size=3)
V(net)$size=degree(net)*0.5
plot.igraph(net,vertex.label=NA,layout=layout.fruchterman.reingold)
title(main = "Red de estudio fuera de clase (min 1 x semana)")
l <- legend( "bottomright", inset = .02, cex = 1, bty = "n", legend = c("Finanzas y Comercio Internacional","Economía")
, title="Programa Academico", pt.bg = c("green", "blue")
, pch = 21)
## Network 2: social activities (at least once a week). Same pipeline as
## network 1: centrality metrics, then attribute-colored plots.
network_2 <- as.matrix(base_2)
g_2 <- graph.adjacency(network_2)
# Vertex betweenness (printed because of the outer parentheses).
(b_2 <- betweenness(g_2, directed = FALSE))
# Vertex closeness over outgoing ties.
(c_2 <- closeness(g_2, mode = "out"))
# Vertex out-degree.
(d_2 <- degree(g_2, mode = "out"))
red_2 <-as.network.matrix(network_2)
plot.network(red_2)
title(main = "Red de planes sociales (min 1 x semana)")
# Undirected igraph version (self-loops dropped) for the attribute plots.
m1=as.matrix(base_2)
net1=graph.adjacency(m1,mode="undirected",weighted=NULL,diag=FALSE)
# Import the individuals' attributes.
V(net1)$promedio=as.character(Atributos2$nota3[match(V(net1)$name,Atributos2$Ind)]) # GPA category per vertex, matched against the Ind column of Atributos2.
V(net1)$promedio
V(net1)$color=V(net1)$promedio
# Color by GPA category: 1 = low, 2 = medium, 3 = high.
V(net1)$color=gsub("1","red",V(net1)$color)
V(net1)$color=gsub("2","yellow",V(net1)$color)
V(net1)$color=gsub("3","green",V(net1)$color)
plot.igraph(net1,vertex.label=NA,layout=layout.fruchterman.reingold, vertex.size=3)
title(main = "Red de actividades sociales (min 1 x semana)")
l <- legend( "bottomright", inset = .02, cex = 1, bty = "n", legend = c("Bajo","Medio","Alto")
, title="Desempeño académico", pt.bg = c("red", "yellow", "green")
, pch = c(21,21,21))
V(net1)$Sex1=as.character(Atributos2$Genero[match(V(net1)$name,Atributos2$Ind)])
# Create the gender attribute by matching the two tables on Ind.
V(net1)$Sex1 # Print the attribute.
V(net1)$color=V(net1)$Sex1 # Color vertices by gender.
V(net1)$color=gsub("0","purple",V(net1)$color) # Women in purple.
V(net1)$color=gsub("1","lightgreen",V(net1)$color) # Men in light green.
plot.igraph(net1,vertex.label=NA,layout=layout.fruchterman.reingold, vertex.size=3)
V(net1)$size=degree(net1)* 0.5 # Size by degree (set after the plot above, so it only affects later plots).
title(main = "Red de actividades sociales (min 1 x semana)")
l <- legend( "bottomright", inset = .02, cex = 1, bty = "n", legend = c("Mujeres","Hombres")
, title="Genero", pt.bg = c("purple", "lightgreen")
, pch = 21)
V(net1)$estrato=as.character(Atributos2$Estrato[match(V(net1)$name,Atributos2$Ind)])
V(net1)$estrato
V(net1)$color=V(net1)$estrato
# Strata binned in pairs: 1-2, 3-4, 5-6.
V(net1)$color=gsub("1","darkblue",V(net1)$color)
V(net1)$color=gsub("2","darkblue",V(net1)$color)
V(net1)$color=gsub("3","blue1",V(net1)$color)
V(net1)$color=gsub("4","blue1",V(net1)$color)
V(net1)$color=gsub("5","lightblue",V(net1)$color)
V(net1)$color=gsub("6","lightblue",V(net1)$color)
plot.igraph(net1,vertex.label=NA,layout=layout.fruchterman.reingold, vertex.size=3)
title(main = "Red de actividades sociales (min 1 x semana)")
l <- legend( "bottomright", inset = .02, cex = 1, bty = "n", legend = c("1 y 2","3 y 4","5 y 6")
, title="Estratos Socioeconómicos", pt.bg = c("darkblue", "blue1", "lightblue")
, pch = c(21,21,21))
# "Ser Pilo Paga" scholarship beneficiaries.
V(net1)$pilo=as.character(Atributos2$pilopaga[match(V(net1)$name,Atributos2$Ind)])
V(net1)$color=V(net1)$pilo
V(net1)$color=gsub("0","red",V(net1)$color)
V(net1)$color=gsub("1","darkred",V(net1)$color)
plot.igraph(net1,vertex.label=NA,layout=layout.fruchterman.reingold, vertex.size=3)
title(main = "Red de actividades sociales (min 1 x semana)")
l <- legend( "bottomright", inset = .02, cex = 1, bty = "n", legend = c("Si","No")
, title="Beneficiario ser pilo paga", pt.bg = c("darkred", "red")
, pch = 21)
# Academic program.
V(net1)$programa=as.character(Atributos2$programa[match(V(net1)$name,Atributos2$Ind)])
V(net1)$color=V(net1)$programa
V(net1)$color=gsub("0","green",V(net1)$color) # Finance program.
V(net1)$color=gsub("1","blue",V(net1)$color) # Economics program.
plot.igraph(net1,vertex.label=NA,layout=layout.fruchterman.reingold, vertex.size=3)
title(main = "Red de actividades sociales (min 1 x semana)")
l <- legend( "bottomright", inset = .02, cex = 1, bty = "n", legend = c("Finanzas y Comercio Internacional","Economía")
, title="Programa Academico", pt.bg = c("green", "blue")
, pch = 21)
## Network 3: sports activities. Same pipeline as the previous networks.
network_3 <- as.matrix(base_3)
g_3 <- graph.adjacency(network_3)
# Vertex betweenness (printed because of the outer parentheses).
(b_3 <- betweenness(g_3, directed = FALSE))
# Vertex closeness over outgoing ties.
(c_3 <- closeness(g_3, mode = "out"))
# Vertex out-degree.
(d_3 <- degree(g_3, mode = "out"))
red_3 <-as.network.matrix(network_3)
plot.network(red_3)
title(main = "Red de actividades deportivas")
# Undirected igraph version (self-loops dropped) for the attribute plots.
m2=as.matrix(base_3)
net2=graph.adjacency(m2,mode="undirected",weighted=NULL,diag=FALSE)
# Import the individuals' attributes.
V(net2)$promedio=as.character(Atributos2$nota3[match(V(net2)$name,Atributos2$Ind)]) # GPA category per vertex, matched against the Ind column of Atributos2.
V(net2)$promedio
V(net2)$color=V(net2)$promedio
# Color by GPA category: 1 = low, 2 = medium, 3 = high.
V(net2)$color=gsub("1","red",V(net2)$color)
V(net2)$color=gsub("2","yellow",V(net2)$color)
V(net2)$color=gsub("3","green",V(net2)$color)
plot.igraph(net2,vertex.label=NA,layout=layout.fruchterman.reingold, vertex.size=3)
title(main = "Red de actividades deportivas (min 1 x semana)")
l <- legend( "bottomright", inset = .02, cex = 1, bty = "n", legend = c("Bajo","Medio","Alto")
, title="Desempeño académico", pt.bg = c("red", "yellow", "green")
, pch = c(21,21,21))
V(net2)$Sex=as.character(Atributos2$Genero[match(V(net2)$name,Atributos2$Ind)])
# Create the gender attribute by matching the two tables on Ind.
V(net2)$Sex # Print the attribute.
V(net2)$color=V(net2)$Sex # Color vertices by gender.
V(net2)$color=gsub("0","purple",V(net2)$color) # Women in purple.
V(net2)$color=gsub("1","lightgreen",V(net2)$color) # Men in light green.
plot.igraph(net2,vertex.label=NA,layout=layout.fruchterman.reingold, vertex.size=3)
title(main = "Red de actividades deportivas (min 1 x semana)")
l <- legend( "bottomright", inset = .02, cex = 1, bty = "n", legend = c("Mujeres","Hombres")
, title="Genero", pt.bg = c("purple", "lightgreen")
, pch = 21)
V(net2)$estrato=as.character(Atributos2$Estrato[match(V(net2)$name,Atributos2$Ind)])
V(net2)$estrato
V(net2)$color=V(net2)$estrato
# Strata binned in pairs: 1-2, 3-4, 5-6.
V(net2)$color=gsub("1","darkblue",V(net2)$color)
V(net2)$color=gsub("2","darkblue",V(net2)$color)
V(net2)$color=gsub("3","blue1",V(net2)$color)
V(net2)$color=gsub("4","blue1",V(net2)$color)
V(net2)$color=gsub("5","lightblue",V(net2)$color)
V(net2)$color=gsub("6","lightblue",V(net2)$color)
plot.igraph(net2,vertex.label=NA,layout=layout.fruchterman.reingold, vertex.size=3)
# NOTE(review): this title (and the two below) says "actividades sociales"
# but this section plots the sports network -- looks copy-pasted; verify.
title(main = "Red de actividades sociales (min 1 x semana)")
l <- legend( "bottomright", inset = .02, cex = 1, bty = "n", legend = c("1 y 2","3 y 4","5 y 6")
, title="Estratos Socioeconómicos", pt.bg = c("darkblue", "blue1", "lightblue")
, pch = c(21,21,21))
# "Ser Pilo Paga" scholarship beneficiaries.
V(net2)$pilo=as.character(Atributos2$pilopaga[match(V(net2)$name,Atributos2$Ind)])
V(net2)$color=V(net2)$pilo
V(net2)$color=gsub("0","red",V(net2)$color)
V(net2)$color=gsub("1","darkred",V(net2)$color)
plot.igraph(net2,vertex.label=NA,layout=layout.fruchterman.reingold, vertex.size=3)
title(main = "Red de actividades sociales (min 1 x semana)")
l <- legend( "bottomright", inset = .02, cex = 1, bty = "n", legend = c("Si","No")
, title="Beneficiario ser pilo paga", pt.bg = c("darkred", "red")
, pch = 21)
# Academic program.
V(net2)$programa=as.character(Atributos2$programa[match(V(net2)$name,Atributos2$Ind)])
V(net2)$color=V(net2)$programa
V(net2)$color=gsub("0","green",V(net2)$color) # Finance program.
V(net2)$color=gsub("1","blue",V(net2)$color) # Economics program.
plot.igraph(net2,vertex.label=NA,layout=layout.fruchterman.reingold, vertex.size=3)
title(main = "Red de actividades sociales (min 1 x semana)")
l <- legend( "bottomright", inset = .02, cex = 1, bty = "n", legend = c("Finanzas y Comercio Internacional","Economía")
, title="Programa Academico", pt.bg = c("green", "blue")
, pch = 21)
# Description of the remaining networks: only centrality metrics and a basic
# plot (no attribute coloring).
# Network 4: personal-advice ties.
network_4 <- as.matrix(base_4)
g_4 <- graph.adjacency(network_4)
(b_4 <- betweenness(g_4, directed = FALSE))
(c_4 <- closeness(g_4, mode = "out"))
(d_4 <- degree(g_4, mode = "out"))
red_4 <-as.network.matrix(network_4)
plot.network(red_4)
title(main = "Red de consejos personales")
# Network 5: people known before entering the university.
network_5 <- as.matrix(base_5)
g_5 <- graph.adjacency(network_5)
(b_5 <- betweenness(g_5, directed = FALSE))
(c_5 <- closeness(g_5, mode = "out"))
(d_5 <- degree(g_5, mode = "out"))
red_5 <-as.network.matrix(network_5)
plot.network(red_5)
title(main = "Conocidos antes de entrar a la UR")
# Network 6: individuals considered leaders.
network_6 <- as.matrix(base_6)
g_6 <- graph.adjacency(network_6)
(b_6 <- betweenness(g_6, directed = FALSE))
(c_6 <- closeness(g_6, mode = "out"))
(d_6 <- degree(g_6, mode = "out"))
red_6 <-as.network.matrix(network_6)
plot.network(red_6)
title(main = "Red de los considerados lideres")
|
966622ae1e99faf232577e90507ce08560d76fba
|
d072433fb4facac496d0337b4cf22b5e00cf6853
|
/man/flowers.Rd
|
1da9fea70267d0ca79f5b4510677fc7a36344d24
|
[] |
no_license
|
cran/asuR
|
3e3d745182d938acba825b723fc09b14cc16dbaa
|
578f3e0693d29fb8fd3056d52fa8913d5039cc79
|
refs/heads/master
| 2020-12-24T15:05:44.493897
| 2007-06-01T00:00:00
| 2007-06-01T00:00:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,538
|
rd
|
flowers.Rd
|
\name{flowers}
\alias{flowers}
\docType{data}
\title{Flower}
\description{
A data set with the dry mass of all flowers and the dry mass of the
total plant from 20 species growing at high and 20 species growing at low altitude.
}
\usage{data(flowers)}
\format{
A data frame with 40 observations on the following 3 variables.
\describe{
\item{\code{alt}}{a factor with levels \code{high} \code{low}}
\item{\code{flower}}{a numeric vector; dry mass of flowers in mg}
\item{\code{total}}{a numeric vector; dry mass of total plant in mg}
}
}
\details{
The data set contains a random subsample of the original data set.
}
\source{
Fabbro, T. & Koerner, Ch. (2004): \emph{Altitudinal differences in
flower traits and reproductive allocation.} FLORA 199, 70-81.
}
\examples{
data(flowers)
% ## having a look at the data
% # xyplot(log(flower) ~ log(total)|alt, data=flowers,
% # panel=function(x,y,...){
% # panel.xyplot(x,y,...)
% # panel.lmline(x,y,...)
% # })
## a model with two intercepts and two slopes
# m1 <- lm(log(flower) ~ alt/log(total) - 1, data=flowers)
#
## a model with two intercepts and one slope
# altdiff <- rbind("high-low"=c(1,-1))
# m2 <- lm(log(flower) ~ alt + log(total), data=flowers,
# contrasts=list(alt=mycontr(contr=altdiff)))
#
## are separate slopes needed?
# anova(m1, m2) # conclusion:
#
## no difference in slopes but difference in intercept
## for interpretation also test whether the slope is one!
}
\keyword{datasets}
|
10875d8efde3b62455a44c76cc72b209c15c0897
|
52a4315886671fb197e31d4f3e1fd665767814a8
|
/cachematrix.R
|
f4a394fa62d5a98d29c8da427572f6f1cc8e560f
|
[] |
no_license
|
julthida/ProgrammingAssignment2-master
|
d646d41c7287bb8410d215f80366f29d80c18bf6
|
62b9a5c7ce1304216fc2f0bda633e6080f879d29
|
refs/heads/master
| 2020-12-20T15:20:29.750768
| 2020-01-28T06:30:13
| 2020-01-28T06:30:13
| 236,121,145
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,024
|
r
|
cachematrix.R
|
## Caching the Inverse of a Matrix
## Two functions that create a special "matrix" object and cache its inverse.

#' Create a cache-aware matrix wrapper.
#'
#' Returns a list of closures sharing a private copy of the matrix `x` and a
#' cached inverse:
#'   set(y)      -- replace the stored matrix and drop the stale cache
#'   get()       -- return the stored matrix
#'   setinv(inv) -- store a computed inverse in the cache
#'   getinv()    -- return the cached inverse, or NULL if not yet computed
#'
#' Fixes relative to the original:
#'  - default is `matrix()`; the original `Matrix()` is undefined without the
#'    Matrix package, so the default argument errored;
#'  - state lives in this function's environment instead of the global
#'    environment (the original used top-level `<<-` and a global `cache`);
#'  - `setinv` stores exactly the value handed to it; the original
#'    `setinverse` recomputed a *transpose*, which is not the inverse.
#' The returned element names (set/get/setinv/getinv) match the original list,
#' so existing callers keep working.
makeCacheMatrix <- function(x = matrix()) {
  inverse <- NULL  # cached inverse; NULL means "not computed yet"
  set <- function(y) {
    x <<- y
    inverse <<- NULL  # invalidate the cache whenever the matrix changes
  }
  get <- function() x
  setinv <- function(inv) inverse <<- inv
  getinv <- function() inverse
  list(set = set, get = get, setinv = setinv, getinv = getinv)
}
## Computes the inverse of the special "matrix" returned by `makeCacheMatrix`,
## retrieving the inverse from the cache when it has already been calculated.
#' @param x a cache-matrix list created by makeCacheMatrix()
#'   (must provide get / getinv / setinv).
#' @param ... further arguments forwarded to solve().
#' @return the inverse of the matrix stored in `x`.
#'
#' Fixes relative to the original: the function now uses its argument `x`
#' instead of a global `cache` object, computes the inverse with solve()
#' (the original stored a transpose via setinv), and stores the result once.
cacheSolve <- function(x, ...) {
  # Return the cached inverse when available.
  inverse <- x$getinv()
  if (!is.null(inverse)) {
    message("getting cached data")
    return(inverse)
  }
  # Otherwise compute it, remember it for subsequent calls, and return it.
  data <- x$get()
  inverse <- solve(data, ...)
  x$setinv(inverse)
  inverse
}
|
498a273d144b017676e5bee06b6a11f127904072
|
002929791137054e4f3557cd1411a65ef7cad74b
|
/R/isEmpty.R
|
f31db6ee212f3c213d039bd559bace06d8e3503f
|
[
"MIT"
] |
permissive
|
jhagberg/nprcgenekeepr
|
42b453e3d7b25607b5f39fe70cd2f47bda1e4b82
|
41a57f65f7084eccd8f73be75da431f094688c7b
|
refs/heads/master
| 2023-03-04T07:57:40.896714
| 2023-02-27T09:43:07
| 2023-02-27T09:43:07
| 301,739,629
| 0
| 0
|
NOASSERTION
| 2023-02-27T09:43:08
| 2020-10-06T13:40:28
| null |
UTF-8
|
R
| false
| false
| 299
|
r
|
isEmpty.R
|
#' Test whether a vector is empty or consists solely of NA values.
#'
## Copyright(c) 2017-2020 R. Mark Sharp
## This file is part of nprcgenekeepr
#'
#' @return \code{TRUE} when \code{x} has no non-\code{NA} elements,
#'   otherwise \code{FALSE}.
#'
#' @param x vector of any type.
isEmpty <- function(x) {
  # Equivalent to stripping the NAs and checking for zero length:
  # all() over a zero-length logical vector is TRUE.
  all(is.na(x))
}
|
1cca483f6b110481df00925c30a4a5a50a910e3e
|
d2eda24acceb35dc11263d2fa47421c812c8f9f6
|
/R testing/Smoothing.R
|
588ae17fb56aeff1d8ba01c3591949de3d40bfe2
|
[] |
no_license
|
tbrycekelly/TheSource
|
3ddfb6d5df7eef119a6333a6a02dcddad6fb51f0
|
461d97f6a259b18a29b62d9f7bce99eed5c175b5
|
refs/heads/master
| 2023-08-24T05:05:11.773442
| 2023-08-12T20:23:51
| 2023-08-12T20:23:51
| 209,631,718
| 5
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,008
|
r
|
Smoothing.R
|
# Build a 15 x 16 grid and fill it by bilinear interpolation from four fixed
# corner values: first interpolate down the first and last columns, then step
# across each row.
grid = array(0, dim = c(15,16))
## Add Corners
grid[1,1] = 114
grid[1,16] = 121
grid[15,1] = 116
grid[15,16] = 121
# Per-row increments down the first (j = 1) and last (j = 16) columns;
# 14 steps span rows 1..15.
delta.j1 = (grid[15,1] - grid[1,1]) / 14
delta.j6 = (grid[15,16] - grid[1,16]) / 14
## Fill in first and last column
for (i in c(1:15)) {
grid[i,1] = grid[1,1] + delta.j1 * (i-1)
grid[i,16] = grid[1,16] + delta.j6 * (i-1)
}
## Fill in all values
# Walk across each row in steps of 1/15 of the row's end-to-end span
# (15 intervals between columns 1 and 16); column 16 is already set.
for (i in c(1:15)) {
delta = (grid[i,16] - grid[i,1]) / 15
for (j in 2:15) {
grid[i,j] = grid[i,j-1] + delta
}
}
#' Smooth a 2-D matrix with a normalized Gaussian kernel.
#'
#' Builds an n x n Gaussian stencil centred at (n+1)/2 with standard
#' deviation `sd`, normalizes it to sum to 1 (so a constant field is left
#' unchanged), and replaces every cell whose full n x n neighbourhood fits
#' inside `x` with the weighted neighbourhood sum. Border cells whose window
#' would fall outside the matrix keep their original values.
#'
#' @param x numeric matrix to smooth.
#' @param sd standard deviation of the Gaussian kernel, in cell units.
#' @param n  kernel width/height; assumed odd -- TODO confirm callers never
#'   pass an even n (an even n gives a non-integer window half-width).
#' @return a matrix with the same dimensions as `x`.
smooth.gaussian = function(x, sd = 0.2, n = 5) {
  # Separable kernel: the original built this with two nested loops over a
  # matrix of ones, which is exactly the outer product of the 1-D weights.
  w <- dnorm(seq_len(n), (n + 1) / 2, sd)
  stencil <- outer(w, w)
  stencil <- stencil / sum(stencil)  # normalize weights to sum to 1

  new <- x
  buffer <- (n + 1) / 2  # distance from a window centre to its edge, plus 1
  # Bug fix: the original loops stopped at dim - buffer, leaving the last
  # valid row/column of centres unsmoothed; valid centres run through
  # dim - buffer + 1 (window (i-buffer+1):(i+buffer-1) then ends exactly
  # at the matrix edge).
  for (i in buffer:(dim(x)[1] - buffer + 1)) {
    for (j in buffer:(dim(x)[2] - buffer + 1)) {
      new[i, j] = sum(stencil * x[(i - buffer + 1):(i + buffer - 1),
                                  (j - buffer + 1):(j + buffer - 1)])
    }
  }
  new
}
# Demo 1: smooth a random 250 x 160 binary field.
original = array(runif(40000, -1, 1) > 0, dim = c(250,160))
plot.image(z = original) # plot.image is presumably a package helper, not base R -- verify
test = smooth.gaussian(original, sd = 0.1, n = 9)
plot.image(z = test)
# Demo 2: smooth a satellite POC field. read.satellite / make.map.nga /
# add.map.layer / redraw.map appear to be package helpers -- not base R.
poc = read.satellite('C:/Data/Satellite/A20022492002256.L3m_8D_POC_poc_4km.nc', lon = c(-180, -140), lat = c(40, 80))
str(poc$field)
poc.smooth = smooth.gaussian(poc$field[[1]], 0.2, 3)
map = make.map.nga()
add.map.layer(map, poc$lon, poc$lat, poc$field[[1]], pal = 'parula', zlim = c(0, 1e3))
add.map.layer(map, poc$lon, poc$lat, poc.smooth, pal = 'parula', zlim = c(0, 1e3))
redraw.map(map)
# Demo 3: FRRF light curves -- E vs JPII for three runs loaded from disk
# (load.frrf is a package helper; hard-coded local path).
frrf = load.frrf('C:/Users/Tom Kelly/Desktop/FRRF/CTG-Act2Run/Auto-saved FLC data files/20220308/')
plot(frrf[[1]]$A$E,
frrf[[1]]$A$JPII,
type = 'l',
lwd = 3,
yaxs = 'i',
xaxs = 'i',
xlim = c(0,700),
xlab = 'E')
lines(frrf[[2]]$A$E,
frrf[[2]]$A$JPII,
lwd = 3,
col = 'dark green')
lines(frrf[[3]]$A$E,
frrf[[3]]$A$JPII,
lwd = 3,
col = 'dark green')
|
82fb82dd36f91bf1f2bbd87dfbfe48bd84060fb3
|
102c6eb2165121a04e4e57e7cf651d6d8d41c0e8
|
/Final Project/bank_branches/app.R
|
0f0684201d541359fa58550b5b63e761d702cebd
|
[] |
no_license
|
Jagdish16/CUNY_DATA_608
|
df2ec2b4cad594e6b7e0a88a345fb3a073fdea5b
|
6ee3d6b496f4d1ef85fd1e906cefebfd345cc361
|
refs/heads/master
| 2023-01-24T14:35:02.841799
| 2020-12-13T03:12:17
| 2020-12-13T03:12:17
| 293,076,162
| 0
| 0
| null | 2020-09-05T12:57:47
| 2020-09-05T12:57:46
| null |
UTF-8
|
R
| false
| false
| 3,379
|
r
|
app.R
|
#
# This is a Shiny web application. You can run the application by clicking
# the 'Run App' button above.
# Find out more about building applications with Shiny here:
#    http://shiny.rstudio.com/
#
# Name of application = bank_branches
library(shiny)
library(dplyr)
library(ggplot2)
library(rsconnect)
library(plotly)
library(kableExtra)
library(readr)
library(leaflet)
library(tidyverse)
library(maps)
#rsconnect::deployApp('~/ShinyApps/banks')
# Load the Bank Branches data for different years
all2020<-read_csv('ALL_2020.csv')
# Make a list of columns of interest
cols<-c('ASSET', 'CHARTER', 'CITYBR', 'CNTRYNA', 'DEPDOM', 'DEPSUM', 'DEPSUMBR', 'NAMEBR', 'NAMEFULL', 'REGAGNT', 'RSSDID', 'SIMS_LATITUDE', 'SIMS_LONGITUDE', 'SPECDESC', 'SPECGRP', 'STALPBR', 'STNAMEBR', 'UNINUMBR', 'YEAR', 'ZIPBR')
# Filter the data frame to select only the columns of interest
all2020<-all2020[ ,which((names(all2020) %in% cols)==TRUE)]
# Rename the columns to more intuitive names
all2020<-all2020%>%rename(state_code=STALPBR, region=STNAMEBR, total_assets=ASSET, city=CITYBR, domestic_deposits=DEPDOM, total_deposits=DEPSUM, branch_deposits=DEPSUMBR, branch=NAMEBR, bank_name=NAMEFULL,regulator=REGAGNT, specialization=SPECDESC, branch_id=RSSDID,zipcode=ZIPBR, location_id=UNINUMBR, spec_grp=SPECGRP, branch_lat=SIMS_LATITUDE, branch_lon=SIMS_LONGITUDE)
# Convert the region column to lower case to enable merging
all2020$region<-tolower(all2020$region)
# Create a new dataframe with the top 5 banks by branch count
# (top_n(5) keeps ties, so this may hold more than 5 names).
shortlist<-all2020%>%group_by(bank_name)%>%summarise(n=n())%>%arrange(desc(n))%>%top_n(5)
# Create a new dataframe for just the top banks by branch count
top5_2020<-all2020[all2020$bank_name %in% shortlist$bank_name,]
# Create a dataframe with the bank short names (pipe-separated file)
short<-read.table(file="bank_short_names.txt",header=TRUE,sep="|")
# Merge the dataframes so that the bank short name is part of the top bank dataset
top5_2020<-merge(top5_2020,short,by='bank_name')
# Filter for Bank of America branches
bankam_branches_2020<-top5_2020%>%filter(bank=="Bank of America")
# Create icon based on Bank of America logo
bankamIcon<-icons("bankam.png",iconWidth=8,iconHeight=8)
# Define UI for application
ui <- fluidPage(
# Application title
titlePanel("Bank of America Branches By State for 2020"),
# Sidebar with a selection for State
sidebarLayout(
sidebarPanel(
# NOTE(review): the commented-out bank selector references `top15`, which is
# not defined anywhere in this file.
#selectInput('bank',"Select Bank",top15$bank%>%unique,selected=NULL, multiple=FALSE,selectize=TRUE),
selectInput('state',"Select State",bankam_branches_2020$state_code%>%unique,selected=NULL,multiple=FALSE,selectize=TRUE),
),
# Show a spatial density map for selected bank and state
mainPanel(
leafletOutput("densityMap",width="100%",height=400)
)
)
)
# Server: re-filter the branch table whenever the selected state changes and
# render the branch markers on a leaflet map.
server<-shinyServer(function(input,output,session)
#server <- function(input, output)
{
# Create a new dataset by filtering branches by state only
bank_state<-reactive({bankam_branches_2020%>%filter(state_code==input$state)})
output$densityMap<-renderLeaflet({
leaflet()%>%addProviderTiles("CartoDB.Positron")%>%addMarkers(data=bank_state(),lng=~branch_lon,lat=~branch_lat,icon=bankamIcon,label=~branch,popup=~paste("Deposits = $",branch_deposits))
})
}
)
# Run the application
shinyApp(ui = ui, server = server)
|
a4808015bc8bf19c9e7613b9e68fdf4a0b890f7a
|
0500ba15e741ce1c84bfd397f0f3b43af8cb5ffb
|
/cran/paws.end.user.computing/man/appstream_delete_stack.Rd
|
ae0637391f0d738a829f56d40fb34ba649b46edd
|
[
"Apache-2.0"
] |
permissive
|
paws-r/paws
|
196d42a2b9aca0e551a51ea5e6f34daca739591b
|
a689da2aee079391e100060524f6b973130f4e40
|
refs/heads/main
| 2023-08-18T00:33:48.538539
| 2023-08-09T09:31:24
| 2023-08-09T09:31:24
| 154,419,943
| 293
| 45
|
NOASSERTION
| 2023-09-14T15:31:32
| 2018-10-24T01:28:47
|
R
|
UTF-8
|
R
| false
| true
| 665
|
rd
|
appstream_delete_stack.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/appstream_operations.R
\name{appstream_delete_stack}
\alias{appstream_delete_stack}
\title{Deletes the specified stack}
\usage{
appstream_delete_stack(Name)
}
\arguments{
\item{Name}{[required] The name of the stack.}
}
\description{
Deletes the specified stack. After the stack is deleted, the application streaming environment provided by the stack is no longer available to users. Also, any reservations made for application streaming sessions for the stack are released.
See \url{https://www.paws-r-sdk.com/docs/appstream_delete_stack/} for full documentation.
}
\keyword{internal}
|
74a34e80916da916e08f5cff55ba54e35b023617
|
facd83d2c6378682421bb7902191f1f49fa1836f
|
/R/cbk.periodic.R
|
6ecc288344f6a9769068682aec935989fddc6705
|
[] |
no_license
|
misasa/chelyabinsk
|
cbb9e3acdaaefb3254d01d38c42e404164dfa2d1
|
495e8bab926934467a3a7fd7c74bb1be69d2f094
|
refs/heads/master
| 2021-06-28T09:11:19.766488
| 2020-11-20T00:26:38
| 2020-11-20T00:26:38
| 69,550,402
| 0
| 2
| null | 2019-06-21T10:49:18
| 2016-09-29T09:04:01
|
R
|
UTF-8
|
R
| false
| false
| 1,431
|
r
|
cbk.periodic.R
|
#' @title Return properties of elements from a periotic-table
#'
#' @description Look up element properties in the package's bundled periodic
#'   table. When \code{property} is omitted, the whole table is returned as
#'   a dataframe; otherwise a named vector of that property, sorted in
#'   increasing order, is returned.
#'
#' @param property A name of PROPERTY that is one of 'atomicnumber',
#'   'volatility', or 'compatibility'
#' @return A numeric vector of element property with label or
#'   dataframe of periodic-table
#' @export
#' @importFrom utils read.csv
#' @examples
#' cbk.periodic()
#' cbk.periodic("atomicnumber")
#' cbk.periodic("volatility")
#' cbk.periodic("compatibility")
cbk.periodic <- function(property=NULL){
  ## EXAMPLE OUTPUT for cbk.periodic("atomicnumber"):
  ## H Li Be B C ... Th U  -> named vector sorted by atomic number.
  # Load the bundled periodic table (a dataframe keyed by element symbol).
  tbl <- cbk.read.dflame(cbk.path("periodic-dflame0.csv"), verbose = FALSE)
  # No property requested: hand back the whole table.
  if (is.null(property)) {
    return(tbl)
  }
  # Pull the requested column, label each value with its element symbol,
  # and present the result in increasing order.
  values <- tbl[, property]
  names(values) <- rownames(tbl)
  sort(values)
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.