blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
b3d43d37ae9e8723ee75e8461282701dd30c233b
|
88c805daf65c1aedf987ce92ad3ac087c052f06d
|
/R/alm.alg.R
|
0a389f26787a938782784de3604f292f3b3c9b86
|
[] |
no_license
|
Tao-Hu/ZIBBSeq
|
0dc29b8abf57bf0546d1cb247b74896882d612cb
|
808ca2898a86eeae4143421837b592459313f43e
|
refs/heads/master
| 2021-07-04T11:13:23.640267
| 2017-09-27T01:32:13
| 2017-09-27T01:32:13
| 104,956,050
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,117
|
r
|
alm.alg.R
|
alm.alg <- function(para.init, s, lambda, grpIdx, grpSize, X, Y, Y.c,
                    ziMatrix, K, rho, tol.fista, imax.fista,
                    tol.alm, imax.alm) {
  # Augmented-Lagrangian method (ALM) driver for the penalized ZIBB fit.
  #
  # para.init : initial parameter vector; the first (p+q+1)*m entries form the
  #             theta matrix (one column per response), the following K+1
  #             entries are the polynomial coefficients gamma.
  # Returns the parameter vector after the ALM iterations converge (relative
  # change below tol.alm) or after imax.alm iterations.
  n <- dim(Y)[1]
  m <- dim(Y)[2]
  p <- dim(X)[2]
  q <- dim(ziMatrix)[2]

  # Constraint residual: psi - sum_k gamma_k * x^(k-1), where x is the mean
  # linear predictor of each response column and psi is the re-parameterized
  # over-dispersion row of theta.  (This computation was previously duplicated
  # verbatim before and inside the ALM loop.)
  constraint.residual <- function(para) {
    theta <- matrix(para[1:((p + q + 1) * m)], p + q + 1, m)
    B <- theta[1:p, , drop = FALSE]
    psi <- theta[p + 1, ]
    gamma.k <- para[((p + q + 1) * m + 1):((p + q + 1) * m + K + 1)]
    x <- apply(X %*% B, 2, mean)  # m-vector of mean linear predictors
    poly <- rep(gamma.k[1], m)
    for (k in 2:(K + 1)) {
      poly <- poly + gamma.k[k] * (x^(k - 1))
    }
    psi - poly
  }

  # Initialize the Lagrange multipliers w from the initial residual.
  w.out <- rho * constraint.residual(para.init)
  para.out <- para.init
  para.old <- para.init
  for (iter in seq_len(imax.alm)) {
    # Solve the unconstrained subproblem (coordinate-descent FISTA).
    para.out <- cd.fista(para.old, s, lambda, grpIdx, grpSize, X, Y, Y.c,
                         ziMatrix, p, q, K, w.out, rho, tol.fista, imax.fista)
    # Dual update of the multipliers.
    w.out <- w.out + rho * constraint.residual(para.out)
    # Relative-change convergence check; 0/0 gives NaN which na.rm ignores.
    # NOTE(review): a parameter moving away from exactly 0 yields Inf here and
    # blocks early exit for that iteration — confirm this is intended.
    dpara <- (para.out - para.old) / para.old
    bmax <- max(abs(dpara), na.rm = TRUE)
    if (bmax < tol.alm) {
      break
    }
    # store previous updates
    para.old <- para.out
  }
  return(para.out)
}
|
3188507cfa0ef902150a7d79e3b3cb2a77db0d17
|
8f42c61ef8c4fd5807030397796896b571f65407
|
/functions.R
|
4b7209bdc748be4827cc536bd80591fb2f46334d
|
[] |
no_license
|
sidbdri/enrichment_rshiny
|
1109e136155b9a7f5e44bf462b767ef4c6f0b555
|
036532b1d981a60e278d8b16abf4f8322775d41b
|
refs/heads/master
| 2021-02-15T18:41:16.508562
| 2020-04-02T15:13:39
| 2020-04-02T15:13:39
| 244,921,318
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,289
|
r
|
functions.R
|
draw_venn <- function(query, ref, back){
  # Render a triple Venn diagram of the query, reference and background lists.
  # Each argument is one newline-delimited text blob.

  # Split a blob into a character vector, dropping empty lines.
  to_lines <- function(txt) {
    lines <- unlist(str_split(txt, "\n"))
    lines[lines != ""]
  }
  query <- to_lines(query)
  ref <- to_lines(ref)
  back <- to_lines(back)

  # Pairwise ($a3) and three-way ($a5) overlap sizes for the diagram.
  overlap12 <- length(calculate.overlap(x = list(query, ref))$a3)
  overlap13 <- length(calculate.overlap(x = list(query, back))$a3)
  overlap23 <- length(calculate.overlap(x = list(ref, back))$a3)
  overlap123 <- length(calculate.overlap(x = list(query, ref, back))$a5)

  venn <- draw.triple.venn(length(query), length(ref), length(back),
                           overlap12, overlap23, overlap13, overlap123,
                           col = c("lightpink", "lightblue", "lightgreen"),
                           fill = c("lightpink", "lightblue", "lightgreen"),
                           category = c("Query", "Reference", "Background"))
  return(venn)
}
scan_in <- function(filename){
  # Read whitespace-separated tokens from a file and rejoin them, one token
  # per line, into a single newline-delimited string.
  tokens <- scan(filename, what = character())
  paste(tokens, collapse = "\n")
}
calculate_intersect <- function(x, y){
  # Thin wrapper around base::intersect(); returns the common elements.
  intersect(x, y)
}
fisher_test <- function(contingency_tbl){
  # Run Fisher's exact test on a contingency table.
  #
  # Returns the `htest` result, or NULL when the table contains negative (or
  # missing) values so callers can report a helpful message instead of
  # fisher.test() erroring out.
  #
  # isTRUE() guards against NA entries: `all(x >= 0)` is NA when the table
  # has NAs, and a bare `if (NA)` would itself raise an error.
  if (isTRUE(all(contingency_tbl >= 0))) {
    test_result <- fisher.test(contingency_tbl)
  } else {
    test_result <- NULL
  }
  return(test_result)
}
print_results <- function(r){
  # Format a fisher.test result into a human-readable summary string.
  # fisher_test() returns NULL when the contingency table held negative
  # values, so guard for that case first and return the explanatory message.
  if (is.null(r)) {
    return("There are negative values in the contingency table. Did you enter the lists in the correct order?")
  }
  ci <- paste(r$conf.int[1], r$conf.int[2], sep=", ")
  paste("confidence interval: ", ci, "\n", "odds ratio: ", r$estimate, "\n", "pvalue: ", r$p.value, "\n")
}
contingency_table <- function(query, reference, background){
  # Build the 2x2 contingency table of query/reference membership counts
  # restricted to the background universe (input to fisher_test()).

  # Drop anything outside the background universe before counting.
  query <- query[query %in% background]
  reference <- reference[reference %in% background]
  n_query <- length(query)
  n_ref <- length(reference)
  n_back <- length(background)

  # Overlap counts between the three lists.
  both <- length(intersect(query, reference))
  query_only <- length(intersect(query, background)) - both
  ref_only <- length(intersect(reference, background)) - both
  neither <- n_back - ((n_query + n_ref) - both)

  # NOTE(review): the matrix is filled column-wise, so the off-diagonal cells
  # sit opposite to what the dimnames suggest (e.g. [2,1] holds "in query,
  # not in ref"). Fisher's exact test is invariant under transposition, so
  # downstream results are unaffected — confirm the labels are intended.
  tbl <- matrix(c(both, query_only, ref_only, neither), nrow=2)
  rownames(tbl) <- c("in.query", "not.in.query")
  colnames(tbl) <- c("in.ref", "not.in.ref")
  return(tbl)
}
|
b7076e0cdc578dc2121f89e66527228cb5b5120c
|
ab9e65ac9ba7d2839200ec27d2fa4fd6fb3c793d
|
/script/aula_11_script/sabesp.R
|
24811ad845fdddb1f7c61322e339c6588b1fc592
|
[] |
no_license
|
Droszczak/curso-r.github.com
|
f466af42160b19f2683945ffb87a72379c29b340
|
ea0eebf90f6a210aac4ac8811ccee71b80313426
|
refs/heads/master
| 2023-02-26T15:36:05.077321
| 2021-02-06T18:38:38
| 2021-02-06T18:38:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,767
|
r
|
sabesp.R
|
# Attach web-scraping (httr, rvest), wrangling (tidyr, lubridate, dplyr,
# stringr) and plotting (ggplot2, scales) dependencies.
# NOTE(review): library() is preferable to require() for hard dependencies —
# require() merely warns and returns FALSE when a package is missing.
require(httr)
require(rvest)
require(tidyr)
require(lubridate)
require(dplyr)
require(stringr)
require(ggplot2)
require(scales)
rm_accent <- function(x) {
  # Transliterate the input to plain ASCII, then strip the leftover
  # backtick/apostrophe marks the transliteration can leave behind.
  ascii <- iconv(x, to = "ASCII//TRANSLIT")
  gsub("`|\\'", "", ascii)
}
renomear <- function(x, ...) {
  # Assign the supplied labels (combined via c(...)) as the names of x and
  # return the renamed object.  The eval/substitute pair is kept as-is to
  # preserve the original evaluation semantics.
  novos_nomes <- eval(substitute(c(...)))
  names(x) <- novos_nomes
  x
}
# Download the Sabesp reservoir bulletin for one date and return it as a data
# frame (one row per reservoir system), or an empty data.frame() when the
# page cannot be parsed for that date.
#
# x: a date (anything as.Date() accepts, e.g. "2015-01-31").
baixa_sabesp <- function(x) {
link <- 'http://www2.sabesp.com.br/mananciais/DivulgacaoSiteSabesp.aspx'
# Initial GET only to harvest the ASP.NET form state required by the POST.
txt <- GET(link)
# NOTE(review): rvest::html() is deprecated in favour of read_html() —
# consider updating if the installed package version allows.
viewstate <- txt %>% content('text') %>% html %>% html_node('#__VIEWSTATE') %>% html_attr('value')
eventval <- txt %>% content('text') %>% html %>% html_node('#__EVENTVALIDATION') %>% html_attr('value')
data <- as.Date(x)
# Form payload simulating the page's date picker plus the submit image button.
dados <- list(cmbDia=day(data),
cmbMes=month(data),
cmbAno=year(data),
Imagebutton1.x='0',
Imagebutton1.y='0',
'__VIEWSTATE'=viewstate,
'__EVENTVALIDATION'=eventval)
r <- POST(link, body=dados, cookies=unlist(txt$cookies))
# Parsing is wrapped in try(): any scraping failure falls through to the
# empty-data.frame return below instead of aborting the caller's loop.
try({
# Reservoir names are recovered from the .gif image paths on the page.
nomes <- r %>% content('text') %>% html(encoding='UTF-8') %>% html_nodes('img') %>% html_attr('src')
nomes <- nomes %>% `[`(!str_detect(nomes, 'jpg')) %>% str_match('/(.+)\\.gif') %>% `[`(,2)
# Parse the #tabDados table: normalize titles, attach the reservoir name
# (4 rows per reservoir), coerce numbers, and spread to wide format.
d <- r %>%
content('text') %>%
html(encoding='UTF-8') %>%
html_node('#tabDados') %>%
html_table(fill=TRUE) %>%
renomear('titulo', 'info') %>%
select(1:2) %>%
filter(titulo!='') %>%
mutate(titulo=rm_accent(gsub(' +', '_', titulo)),
lugar=rep(nomes, each=4),
info=gsub('[^0-9.]', '', gsub(',', '.', info))) %>%
spread(titulo, info, convert=TRUE) %>%
mutate(volume_armazenado=volume_armazenado/100)
return(d)
})
return(data.frame())
}
# Download the last ~10 years of daily bulletins (one HTTP round-trip per
# day — slow and network-bound), then cache the result to disk.
datas <- today() - days(0:3650)
d_sabesp <- datas %>%
as.character %>%
data.frame(data=., stringsAsFactors=F) %>%
group_by(data) %>%
do(baixa_sabesp(as.character(.))) %>%
ungroup
save(d_sabesp, file='d_sabesp.RData')
load('d_sabesp.RData')
# Full time series of stored volume per reservoir system.
d_sabesp %>%
ggplot(aes(x=as.Date(data), y=volume_armazenado, colour=lugar)) +
geom_line() +
scale_y_continuous(labels=percent, limits=c(0,1.1), breaks=0:11/10) +
geom_hline(yintercept=0, colour='red') +
theme_bw()
# Year-over-year comparison: every year is re-dated onto a dummy 2014 axis so
# the seasonal curves overlay, faceted by reservoir.
d_sabesp %>%
mutate(data_date=as.Date(data),
ano=year(data_date),
data_mes=as.Date(sprintf('2014-%02d-%02d', month(data_date), day(data_date)))) %>%
filter(ano >= 2010) %>%
ggplot(aes(x=data_mes, y=volume_armazenado, colour=factor(ano), group=ano)) +
geom_line(size=.6) +
facet_wrap(~lugar) +
theme_bw() +
scale_x_date(breaks='1 month', labels = date_format("%b")) +
theme(axis.text.x=element_text(angle=45, hjust=1)) +
scale_colour_brewer(palette = 'Reds') +
scale_y_continuous(labels=percent, breaks=0:12/10) +
geom_hline(yintercept=0, colour='red')
|
c7061ded198d8691c7775ca8dfcc27f9b71bf4b9
|
5e87428e86d60c71315170df532ca4418063731b
|
/ReservingAutoInsurance.R
|
56f09070423188a4ba634584932ea8526a78d4a5
|
[] |
no_license
|
veeranalytics/ReservingAutoInsuranceProducts
|
733383f9f0bae8f791da8c4b675be5d1084be544
|
0b40026ab6532dd0fa788504901a9def035c3f63
|
refs/heads/master
| 2020-04-16T17:18:23.547620
| 2019-01-16T12:20:01
| 2019-01-16T12:20:01
| 165,771,366
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,798
|
r
|
ReservingAutoInsurance.R
|
# Deterministic chain-ladder reserving on a 7x7 incremental claims triangle.
# Import libraries
library(ChainLadder)
# Triangle size: number of origin years = number of development periods.
n <- 7
# Claim dataset: the first column holds the origin year,
# the second column the development
# year and the third column has the incremental payments / transactions.
Claims <- data.frame(originf = factor(rep(2007:2013, n:1)),
dev=sequence(n:1),
inc.paid= c(3511, 3215, 2266, 1712, 1059, 587,340,
4001, 3702, 2278, 1180, 956,629, 4355, 3932,
1946, 1522, 1238,4295, 3455, 2023, 1320, 4150,
3747,2320, 5102, 4548, 6283))
# To present the data in a triangle format, we can use the matrix function
(inc.triangle <- with(Claims, {M <- matrix(nrow=n, ncol=n,dimnames=list(origin=levels(originf), dev=1:n))
M[cbind(originf, dev)] <- inc.paid
M
}))
# It is the objective of a reserving exercise to forecast the future
# claims development in the bottom right corner of the triangle and
# potential further developments beyond development age 7
# cumulative development of claims
(cum.triangle <- t(apply(inc.triangle, 1, cumsum)))
# latest cumulative paid position of all origin years (the anti-diagonal)
(latest.paid <- cum.triangle[row(cum.triangle) == n - col(cum.triangle) + 1])
# add the cumulative paid data as a column to the data frame
Claims$cum.paid <- cum.triangle[with(Claims, cbind(originf, dev))]
# To start the reserving analysis, we plot the data:
# Side-by-side base plots: incremental (left) and cumulative (right) claims.
op <- par(fig=c(0,0.5,0,1), cex=0.8, oma=c(0,0,0,0))
with(Claims, {interaction.plot(x.factor=dev, trace.factor=originf, response=inc.paid,fun=sum, type="b", bty='n', legend=FALSE); axis(1, at=1:n)
par(fig=c(0.45,1,0,1), new=TRUE, cex=0.8, oma=c(0,0,0,0))
interaction.plot(x.factor=dev, trace.factor=originf, response=cum.paid,
fun=sum, type="b", bty='n'); axis(1,at=1:n)
})
mtext("Incremental and cumulative claims development",side=3, outer=TRUE, line=-3, cex = 1.1, font=2)
par(op)
library(lattice)
xyplot(cum.paid ~ dev | originf, data=Claims, t="b", layout=c(4,2),as.table=TRUE, main="Cumulative claims development")
# Chain-Ladder Algorithm: oldest method or algorithm for estimating reserves is
# the so-called chain-ladder method or loss development factor (LDF) method.
# The classical chain-ladder method is a deterministic algorithm to forecast claims based
# on historical data. It assumes that the proportional developments of claims from one development
# period to the next is the same for all origin periods.
# first step, the age-to-age link ratios fk are calculated as the volume
# weighted average development ratios of a cumulative loss development triangle from one
# age period to the next
# f[1] is the age 1->2 ratio, ..., f[n-1] the age (n-1)->n ratio.
f <- sapply((n-1):1, function(i) {
sum( cum.triangle[1:i, n-i+1] ) / sum( cum.triangle[1:i, n-i] )
})
# Initially we expect no further development after year 7. Hence, we set the last link ratio
# (often called the tail factor) to 1
# NOTE(review): `tail` shadows utils::tail() in this session — harmless here,
# but consider a name such as tail.factor.
tail <- 1
(f <- c(f, tail))
# The squaring of the claims triangle: fill the lower-right corner by
# multiplying each diagonal forward with the link ratios.
full.triangle <- cum.triangle
for(k in 1:(n-1)){
full.triangle[(n-k+1):n, k+1] <- full.triangle[(n-k+1):n,k]*f[k]
}
full.triangle
# The last column contains the forecast ultimate loss cost
(ultimate.paid <- full.triangle[,n])
# The cumulative products of the age-to-age development ratios provide the loss development
# factors for the latest cumulative paid claims for each row to ultimate
(ldf <- rev(cumprod(rev(f))))
# The inverse of the loss development factor estimates the proportion of claims developed to
# date for each origin year, often also called the gross up factors or growth curve
(dev.pattern <- 1/ldf)
# The total estimated outstanding loss reserve with this method is
(reserve <- sum (latest.paid * (ldf - 1)))
|
d45c677da7651b106be84a68cfb478e2761cf799
|
3b9312b9e66f57fc1573cbd5b5866fb987059a49
|
/bin/campy_dada2_processing.R
|
81e8f49148f9e5de2282b48e59be19b693e98470
|
[] |
no_license
|
gregmedlock/campy_murine_diets
|
a468b0fa123d699fa6f1d42d63d3c5d41db6094b
|
3f1216db2d17ec1ac6ef50cd876a65abf67c7feb
|
refs/heads/master
| 2021-09-15T01:51:00.455827
| 2018-05-23T19:18:48
| 2018-05-23T19:18:48
| 119,574,421
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,150
|
r
|
campy_dada2_processing.R
|
# DADA2 16S amplicon processing: filter/trim paired reads, learn error rates,
# infer sequence variants, merge pairs, remove chimeras, assign taxonomy,
# and save a phyloseq object for downstream analysis.
library(dada2)
library(ShortRead)
library(ggplot2)
library(phyloseq)
path <- "~/Documents/projects/campy_murine_diets/data/20170224_16S-35570571/"
fns <- list.files(path)
fns
### Load forward and reverse reads
# NOTE(review): the dot in ".fastq$" is an unescaped regex wildcard; it still
# matches the intended files, but "\\.fastq$" would be stricter.
fastqs <- fns[grepl(".fastq$", fns)]
fastqs <- sort(fastqs) # Sort ensures forward/reverse reads are in same order
fnFs <- fastqs[grepl("_R1", fastqs)] # Just the forward read files
fnRs <- fastqs[grepl("_R2", fastqs)] # Just the reverse read files
# Get sample names from the first part of the forward read filenames
sample.names <- sapply(strsplit(fnFs, "_"), `[`, 1)
# Fully specify the path for the fnFs and fnRs
fnFs <- file.path(path, fnFs)
fnRs <- file.path(path, fnRs)
# NOTE: reads are 250bp instead of 300 this time
plotQualityProfile(fnFs[[3]])
# Looks like first 10 bases and last 10 should be trimmed
plotQualityProfile(fnRs[[3]])
# reverse reads should be trimmed before first 10 and after 150
filt_path <- file.path(path, "filtered")
if(!file_test("-d", filt_path)) dir.create(filt_path)
filtFs <- file.path(filt_path, paste0(sample.names, "_F_filt.fastq.gz"))
filtRs <- file.path(filt_path, paste0(sample.names, "_R_filt.fastq.gz"))
# Filter
# NOTE(review): truncLen=c(240,160) does not match the trim positions suggested
# by the quality-profile comments above (250bp reads, reverse cut at 150) —
# confirm the intended truncation lengths.
for(i in seq_along(fnFs)) {
fastqPairedFilter(c(fnFs[i], fnRs[i]), c(filtFs[i], filtRs[i]),
truncLen=c(240,160), trimLeft=c(10,10),
maxN=0, maxEE=c(2,2), truncQ=2, rm.phix=TRUE,
compress=TRUE, verbose=TRUE)
}
# How does looser maxEE affect output? (2,2) was old
### Dereplication
derepFs <- derepFastq(filtFs, verbose=TRUE)
derepRs <- derepFastq(filtRs, verbose=TRUE)
# Name the derep-class objects by the sample names
names(derepFs) <- sample.names
names(derepRs) <- sample.names
derepFs[[1]]
# learn error rates (selfConsist=TRUE alternates inference and error estimation)
dadaFs.lrn <- dada(derepFs, err=NULL, selfConsist = TRUE, multithread=TRUE)
errF <- dadaFs.lrn[[1]]$err_out
dadaRs.lrn <- dada(derepRs, err=NULL, selfConsist = TRUE, multithread=TRUE)
errR <- dadaRs.lrn[[1]]$err_out
plotErrors(dadaFs.lrn[[1]], nominalQ=TRUE)
# Sequence-variant inference using the learned error rates.
dadaFs <- dada(derepFs, err=errF, multithread=TRUE)
dadaRs <- dada(derepRs, err=errR, multithread=TRUE)
dadaFs[[1]]
mergers <- mergePairs(dadaFs, derepFs, dadaRs, derepRs, verbose=TRUE)
head(mergers[[1]])
seqtab <- makeSequenceTable(mergers)
dim(seqtab)
table(nchar(getSequences(seqtab)))
# Chimera removal; the ratio below reports the fraction of reads retained.
seqtab.nochim <- removeBimeraDenovo(seqtab, verbose=TRUE)
dim(seqtab.nochim)
sum(seqtab.nochim)/sum(seqtab)
taxa <- assignTaxonomy(seqtab.nochim, "~/Documents/projects/campy_murine_diets/data/silva_nr_v123_train_set.fa.gz")
unname(head(taxa))
# create and save phyloseq object
library(phyloseq); packageVersion("phyloseq")
# Sample metadata (plate/well) parsed from sample names shaped "plate-well".
samples.out <- rownames(seqtab.nochim)
plate <- sapply(strsplit(samples.out, "-"), `[`, 1)
well <- sapply(strsplit(samples.out, "-"), `[`, 2)
samdf <- data.frame(Plate=plate, Well=well)
rownames(samdf) <- samples.out
ps <- phyloseq(otu_table(seqtab.nochim, taxa_are_rows=FALSE),
sample_data(samdf),
tax_table(taxa))
ps
# Save ps object
saveRDS(ps,"~/Documents/projects/campy_murine_diets/data/campy_phyloseq_obj_less_strict.rds")
# To read later, do:
#ps <- readRDS("path/to/ps.rds")
|
251eeb0c49a9f2918fcb8eaca49efd677a4e01af
|
86e9dd83e99996ff69767a94d5a1430880bffe2c
|
/C/12/12_lab.R
|
2090573616401062a57913c6fdf2777c69a2270a
|
[] |
no_license
|
biscofil/data_analysis_exercises
|
a068499c7e5e7d4160e204d15db0a783f7c465f9
|
f3a9a6c280557f9353ec3931ac23f2a421bd9d24
|
refs/heads/master
| 2020-03-17T21:45:29.596858
| 2018-05-20T23:24:03
| 2018-05-20T23:24:03
| 133,973,646
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 475
|
r
|
12_lab.R
|
# Lab 12: fit exponential and gamma distributions to April rainfall at De Bilt.
# KNMI export: first 12 lines are header text; comma-separated columns.
de_bilt <- read.table(file = "KNMI_20160831.txt", header = FALSE, skip = 12, sep = ',')
names(de_bilt)<-c("station", "yyyymmdd", "rainfall")
head(de_bilt)
# Extract the month from the yyyymmdd date stamp and keep April only.
month<-substr(de_bilt$yyyymmdd, 5, 6)
sel<-month=="04"
y<-de_bilt$rainfall[sel]
summary(y)
# Clamp negative rainfall to zero (presumably a sentinel for trace amounts
# in the KNMI data — confirm against the file's header notes).
y[y < 0]<-0
summary(y)
boxplot(y)
hist(y, freq = FALSE)
# Keep strictly positive rainfall for fitting the continuous distributions.
x<-y[y>0]
library(fitdistrplus)
fitmle<-fitdist(as.numeric(na.omit(x)), distr = "exp")
plot(fitmle)
fitmlg<-fitdist(as.numeric(na.omit(x)), distr = "gamma")
plot(fitmlg)
|
06d38b13f3d8beb5ec544ddf644445e6b5ce2865
|
10a2d7bc8930a90503609233c88aa06bc9477f29
|
/man/scale_colour_discrete.Rd
|
3a339e0891817411c6b5a2f0409b629258ecccaa
|
[] |
no_license
|
u6yuvi/MissingDataGUI
|
c2b3b39d6e24ce3b43fdee8864f3ee04c9bfe62e
|
7b0da584d2e4285b80dec9471158b73759f0abdd
|
refs/heads/master
| 2021-01-19T01:07:29.834674
| 2016-04-25T08:58:53
| 2016-04-25T08:58:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 357
|
rd
|
scale_colour_discrete.Rd
|
% Please edit documentation in R/utils.r
\name{scale_colour_discrete}
\alias{scale_colour_discrete}
\title{Change the discrete color scale for the plots generated by ggplot2}
\usage{
scale_colour_discrete(...)
}
\arguments{
\item{...}{parameters passed into the function}
}
\description{
Change the discrete color scale for the plots generated by ggplot2
}
|
f31520ee9533a5a2d3c9f93a9f2180d2dcaf54e1
|
0378bb8cbb6fc691bb2ab274a19b9a0a0e971eae
|
/Iris.R
|
ebd4494c6c98db317eda1b9cc31cdf666ddd0b73
|
[] |
no_license
|
mrunali3437/Random-Forest
|
fbcf19e4832b74984a34872b49eb9a0fb0dca3cd
|
adb4abffa3ef389a98b821cb1fb59dc967ca6e67
|
refs/heads/main
| 2022-12-25T14:49:06.555474
| 2020-10-13T08:43:35
| 2020-10-13T08:43:35
| 303,641,195
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,658
|
r
|
Iris.R
|
# Build and compare random forests (500/1000/1500 trees) on the iris dataset.
#we Build a Random Forest for the Iris Data
datasets::iris
data("iris")
# NOTE(review): attach() is discouraged; column access below already uses
# iris$... or data= arguments, so the attach appears unnecessary.
attach(iris)
library(caret)
library(randomForest)
summary(iris)
#The column Species is already in the form of a categorical Variable
names(iris) #Gives the Column Names
dim(iris) #Gives the Number of Rows and Columns
str(iris) #Gives the Entire Structure of the Data
#Standard Deviation
sd(iris$Sepal.Length)
sd(iris$Sepal.Width)
sd(iris$Petal.Length)
sd(iris$Petal.Width)
#Variance
var(iris$Sepal.Length)
var(iris$Sepal.Width)
var(iris$Petal.Length)
var(iris$Petal.Width)
boxplot(iris) #Displays the Boxplot for every column in the Dataset
pairs(iris) #Using Pairs Function we get plot for each and every Column
#Lets Divide the Data for building the model
inTraininglocal <- createDataPartition(iris$Species,p=.70,list = F) #.70 means 70% Partition
training<- iris[inTraininglocal,]
testing<- iris[-inTraininglocal,]
#Lets Build the Random forest Model with 500 trees
rf1 <- randomForest(Species~. , data = training, ntree = 500)
rf1
print(importance(rf1))
# Predict on the held-out 30% (column 5 = Species is dropped from the inputs).
pred1 <- predict(rf1, testing[,-5])
# NOTE(review): CrossTable() comes from the gmodels (or descr) package, which
# this script never loads — these calls will fail unless one of those packages
# is attached elsewhere; the table() lines below cover the same information.
CrossTable(testing[,5], pred1)
# Overall accuracy = sum of the confusion-matrix diagonal / total.
tab1 <- table(testing[,5], pred1)
sum(diag(tab1))/ sum(tab1)
# Repeat with 1000 trees.
rf2 <- randomForest(Species~. , data = training, ntree = 1000)
rf2
print(importance(rf2))
pred2 <- predict(rf2, testing[,-5])
CrossTable(testing[,5], pred2)
tab2 <- table(testing[,5], pred2)
sum(diag(tab2))/ sum(tab2)
# Repeat with 1500 trees.
rf3 <- randomForest(Species~. , data = training, ntree = 1500)
rf3
print(importance(rf3))
pred3 <- predict(rf3, testing[,-5])
CrossTable(testing[,5], pred3)
tab3 <- table(testing[,5], pred3)
sum(diag(tab3))/ sum(tab3)
|
00334789676740da73e4cd7c438d2ae7c4371f8d
|
9bd511b1c231447ba530284d0021aff0fa768cec
|
/CityDensity.R
|
d9f952323265a01fafdd8d780655154f55ba240f
|
[] |
no_license
|
enricomarcovergara/appdensitycalculation
|
291a5ccd3cead8a5e340c98f9b8c5184476d7658
|
0d0a36a072130b916f7adb53eec9d395ec653c70
|
refs/heads/master
| 2020-04-30T04:54:56.495091
| 2019-03-20T03:30:43
| 2019-03-20T03:30:43
| 176,619,119
| 0
| 4
| null | null | null | null |
UTF-8
|
R
| false
| false
| 780
|
r
|
CityDensity.R
|
#CITY DENSITY
# Compute population density per city/province from population and region-area
# CSVs, then report the five densest entries.
pop <- read.csv("population.csv")
regarea <- read.csv("regionarea.csv")
library(dplyr)
colnames(pop)
# Number of distinct cities/provinces per region.
numCity <- aggregate(pop$CityProvince,list(Region=pop$Region),function(x) length(unique(x)))
colnames(numCity)[2] <- c('TotalNumCity')
numCity
regarea
CityArea <- merge(numCity,regarea)
CityArea
# Average area per city within each region.
# NOTE(review): this divides CityArea$Area by numCity's TotalNumCity and so
# relies on both frames having the same row order; merge() sorts by the key —
# verify the rows still line up with numCity.
AveCityArea <- mutate(numCity,AveArea=CityArea$Area/TotalNumCity)
AveCityArea
# Total population per (region, city/province) pair.
popagg <- aggregate(pop$Population,list(Region=pop$Region,CityProvince=pop$CityProvince),FUN=sum)
colnames(popagg)[3] <- c('TotalPopulation')
colnames(popagg)
citymerge <- merge(popagg,AveCityArea)
colnames(citymerge)
# Density = total population / average city area; show the top five.
citydens <- mutate(citymerge,Density=TotalPopulation/AveArea)
top5citydens <- citydens[order(-citydens$Density),]
head(top5citydens,5)
|
538bef789e79cc4d96e5a9f3a1a5959026410e34
|
4786eec2f0b08b8df043c8288c047a66ce0c9099
|
/R/census.R
|
73e102c6040f20e3b63fbc4f8c45a72329a6d1ca
|
[] |
no_license
|
kosukeimai/eco
|
d516b4e3e521a0156ef1800b66706dcb524a43c6
|
75d961fa0454036d44494c5d9b07537b3b33bfd4
|
refs/heads/master
| 2022-11-17T01:25:39.808138
| 2022-11-03T01:19:25
| 2022-11-03T01:19:25
| 84,952,870
| 4
| 3
| null | 2020-08-11T17:58:33
| 2017-03-14T13:34:59
|
C
|
UTF-8
|
R
| false
| false
| 1,323
|
r
|
census.R
|
#' Black Illiteracy Rates in 1910 US Census
#'
#' This data set contains the proportion of the residents who are black, the
#' proportion of those who can read, the total population as well as the actual
#' black literacy rate and white literacy rate for 1040 counties in the US. The
#' dataset was originally analyzed by Robinson (1950) at the state level. King
#' (1997) recoded the 1910 census at county level. The data set only includes
#' those who are older than 10 years of age.
#'
#'
#' @name census
#' @docType data
#' @format A data frame containing 5 variables and 1040 observations
#' \tabular{lll}{ X \tab numeric \tab the proportion of Black residents in each
#' county\cr Y \tab numeric \tab the overall literacy rates in each county\cr N
#' \tab numeric \tab the total number of residents in each county \cr W1 \tab
#' numeric \tab the actual Black literacy rate \cr W2 \tab numeric \tab the
#' actual White literacy rate }
#' @references Robinson, W.S. (1950). ``Ecological Correlations and the
#' Behavior of Individuals.'' \emph{American Sociological Review}, vol. 15,
#' pp.351-357. \cr \cr King, G. (1997). \dQuote{A Solution to the Ecological
#' Inference Problem: Reconstructing Individual Behavior from Aggregate Data}.
#' Princeton University Press, Princeton, NJ.
#' @keywords datasets
NULL
|
b8f6ac0048e1289603c03f6b195d6fc241b3247e
|
260a1e3b07cf8a8935027b0a669febb0afe89f7c
|
/r101/readPSES.R
|
149cede99ad3bf5f8acaac8e188bc79519ff6e0d
|
[] |
no_license
|
M-IVI/R-Ottawa
|
b3dbf9ecbd504e59d6bfeda68d9c116c2b60a32d
|
90e0d79f2eb70bc089e0bb96d2288772c2b1fdad
|
refs/heads/master
| 2021-04-24T06:00:50.494020
| 2020-05-28T15:16:20
| 2020-05-28T15:16:20
| 250,088,950
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,639
|
r
|
readPSES.R
|
# readPSES.R
# D.Gorodnichy
# Global configuration for reading the Public Service Employee Survey (PSES)
# open datasets: the five organization-ID key columns plus default org IDs.
PSES_ID_COLS = c("LEVEL1ID" , "LEVEL2ID" , "LEVEL3ID" , "LEVEL4ID" , "LEVEL5ID" ); psesKeys = PSES_ID_COLS; COLS_PSES = PSES_ID_COLS
OPEN_CANADA_URL <- "https://www.canada.ca/content/dam/tbs-sct/documents/datasets/pses-saff/2018/2018_PSES_open_dataset_Ensemble_de_donn%C3%A9es_ouvertes_du_SAFF_2018.csv"
# Default organization ID path (five levels); 0 means "all" at that level.
myID0 = c(83, 200, 307, 416 , 0) # 2019
myID = myID0
# data to read ----
# NOTE(review): strUrlDocumentation2018local is assigned three times; only the
# last assignment (the accented 2018 local file) takes effect — confirm which
# of the three sources is actually intended.
strUrlDocumentation2018local <- "https://www.canada.ca/content/dam/tbs-sct/documents/datasets/2019_PSES_Supporting_Documentation_Document_de_reference_du_SAFF_2019.xlsx"
strUrlDocumentation2018local <- "source-data/2019_PSES_Supporting_Documentation_Document_de_reference_du_SAFF_2019.xlsx"
strUrlDocumentation2018local <- "source-data/2018_PSES_Supporting_Documentation_Document_de_référence_du_SAFF_2018.xlsx"
# Build the dtDepartments lookup table from the PSES supporting-documentation
# workbook (sheet 6): clean names, add acronyms (AADD), org-depth (IDlevel)
# and a hierarchical pathString, then write the result to CSV.
# NOTE(review): several bare `dtDepartments[...]` lines below are interactive
# inspection leftovers — inside a function they compute but display nothing.
createPsesDepartments <- function() {
dtDepartments <- read_excel(strUrlDocumentation2018local, sheet=6) %>% data.table();
dtDepartments[ str_detect(Organization,"Border")]
setkeyv(dtDepartments, PSES_ID_COLS)
dtDepartments[as.list(myID0)] # 83 200 304 418 0
dtDepartments[LEVEL1ID==83 & LEVEL2ID==200, .(.N,Organization) , by=LEVEL3ID] # 83 200 304 418 0
#51
dtDepartments[LEVEL1ID==83 & LEVEL2ID==200 & LEVEL3ID==307, .(.N,Organization) , by=LEVEL4ID] # 83 200 304 418 0
#9
dtDepartments[LEVEL1ID==83 & LEVEL2ID==200 &
LEVEL3ID==307 & LEVEL4ID==416, .(.N,Organization) , by=LEVEL5ID] # 83 200 307 416 0
#9
dtDepartments[, .N]; dtDepartments %>% names
# Keep the English description only and use it as the Organization name.
dtDepartments$`DESCRIPTION FR` <- NULL
setnames(dtDepartments, old="DESCRIPTION ENG", new="Organization")
dtDepartments[1]
cols <- PSES_ID_COLS # c("LEVEL1ID", "LEVEL2ID", "LEVEL3ID", "LEVEL4ID", "LEVEL5ID" )
# NOTE(review): cols is immediately overwritten with 1:5, so the named-column
# assignment above is dead — confirm the first five columns are the ID levels.
cols=1:5;
dtDepartments[, (cols):=lapply(.SD, as.integer), .SDcols=cols];
# . Add PS (0.0.0.0.0) ----
dtDepartments <- dtDepartments %>% rbind(data.table(0L,0L,0L,0L,0L,"Public Service"), use.names=F)
#. Truncate Dept name -----
dtDepartments$Organization.fullname <- dtDepartments$Organization
dtDepartments[ , Organization:= Organization %>% str_trunc(50,side="center", ellipsis = "...") ]
#. Replace `/` with `-` in names -----
dtDepartments$Organization<- gsub("/", "-", dtDepartments$Organization )
# . Add Acronyms (AADD) -----
dtDepartments$AADD <- abbreviate(dtDepartments$Organization, 1, named = FALSE)
#NB: some Acronyms are the same !
dtDepartments[, .(AADD,Organization)] #2404 / 2019: 3201
dtDepartments[, .(AADD,Organization)] %>% unique() # 2177:
# Strip parentheses and lower-case runs from the acronyms.
for (i in 1:5) dtDepartments[, AADD := str_replace (AADD, '\\(', "")]
dtDepartments[, AADD := str_replace (AADD, "[[:lower:]]+", "")]
# NOTE(review): "[[]:punct:]]+" looks like a typo for "[[:punct:]]+"; as
# written it matches literal '[', ':', 'p' ... characters instead — confirm.
dtDepartments$AADD <- str_replace(dtDepartments$AADD, "[[]:punct:]]+", "")
#for (i in 1:5) dtDepartments[, AADD := str_replace (AADD, "[:lower:]+", "")]
#for (i in 1:5) dtDepartments$AADD <- str_replace(dtDepartments$AADD, "[:punct:]+", "")
# dtDepartments[AADD == "I", AADD:= "N.A."]
dtDepartments[AADD == "I", AADD:= "N/A"]
# . Add IDlevel: depth of the org node (0 = whole Public Service, up to 5) ----
dtDepartments[ , IDlevel:=ifelse(LEVEL1ID == 0, 0,
ifelse(LEVEL2ID == 0, 1,
ifelse(LEVEL3ID == 0, 2,
ifelse(LEVEL4ID == 0, 3,
ifelse(LEVEL5ID == 0, 4, 5)))))]
# . factor(dtDepartments$IDlevel----
dtDepartments$IDlevel <- factor(dtDepartments$IDlevel, levels = order(dtDepartments$IDlevel,decreasing=T))
# . pathString: ORG_, LEV_ CBSA-HQ-ISTB-SED ----
# Walk each row's ancestor chain (via getLevel/getIDupto, defined elsewhere in
# the project) to fill LEV_*/ORG_* columns and a slash-separated pathString.
if(T) { # NB: this will take ~10 mins to compute !
for (i in 1:nrow(dtDepartments)) {
# if (dtDepartments[i,]$LEVEL2ID==999 | dtDepartments[i,]$LEVEL2ID==0)
# next;
id <- dtDepartments[i, (PSES_ID_COLS),with=F] %>% unlist; id
# if (getLevel(id)==0)
# next
ll <- getLevel(id);ll
getIDupto(id,ll)
# dtDepartments[i, BB_DD:=""]
setkeyv(dtDepartments, PSES_ID_COLS)
for (l in 0:getLevel(id)) {
# for (l in 1:getLevel(id)) {
x <- dtDepartments[as.list(getIDupto(id,l))]$AADD ; #x %>% print
xx <- dtDepartments[as.list(getIDupto(id,l))]$Organization.fullname %>%
str_trunc(40,ellipsis = "...")
dtDepartments[i, paste0("LEV_", l):= x]
dtDepartments[i, paste0("ORG_", l):= xx]
}
}
dtDepartments[ , pathString:=paste(LEV_1, LEV_2,LEV_3,LEV_4, sep = "/")]
for (i in 1:5) dtDepartments[, pathString := str_replace (pathString, '/NA', "")]
# dtDepartments[IDlevel==0, pathString:="All Public Service"]
# dtDepartments[IDlevel==1, pathString:=AADD]
}
# . Order by Key and Add order number: .I -----
setkeyv(dtDepartments, PSES_ID_COLS)
dtDepartments[, I:= .I]
# .[ remove 999. ie. "I can't find my unit"] -----
# .. Test Uniqueness of names,Org,pathString-----
dtDepartments %>% nrow() # [1] 2404
dtDepartments$Organization %>% unique() %>% length()# [1] 2177
dtDepartments$pathString %>% unique() %>% length()# [1] 2206 2267
dtDepartments$AADD %>% unique() %>% length()# [1] 1997
samePaths <- dtDepartments[, .N, by=pathString][N>1]$pathString
dtDepartments[pathString %in% samePaths]
#. [ order decreasing = T ] ----
# dtDepartments <- dtDepartments[order(Organization, decreasing = T)]
# . Save -----
dtDepartments[,.N]; dtDepartments %>% names
dtDepartments[c(1,.N)]
dtDepartments[LEVEL1ID==83]
fwrite(dtDepartments, "dtDepartments.csv", sep="\t");
fwrite(dtDepartments, "dtDepartments2019.csv", sep="\t");
#dtDepartments <- fread("dtDepartments.csv")
}
#. createPsesDepartments ----
# Disabled by default: flip F to T to rebuild dtDepartments from the workbook.
if (F)
createPsesDepartments()
#.................................................... ----
#............................................................... ----
createPsesScores <- function() {
if (F) { # I. Read 2018 /2019 only ----
# strUrl2018local <- "https://www.canada.ca/content/dam/tbs-sct/documents/datasets/2019_PSES_SAFF_%20subset-1_Sous-ensemble-1.csv"
#
# strUrl2018local <-
# "https://www.canada.ca/content/dam/tbs-sct/documents/datasets/Main_9.12.03.b_subset_2.csv"
# strUrl2018local <-
# "https://www.canada.ca/content/dam/tbs-sct/documents/datasets/Main_9.12.03.b_subset_3.csv"
# strUrl2018local <-
# "https://www.canada.ca/content/dam/tbs-sct/documents/datasets/Main_9.12.03.b_subset_4.csv"
# strUrl2018local <- "source-data/2018_PSES_open_dataset_Ensemble_de_données_ouvertes_du_SAFF_2018.csv"
strUrl2018local <- "source-data/Main_9.12.03.b_subset_2.csv"
strUrl2018local <- "source-data/2019_PSES_SAFF_ subset-1_Sous-ensemble-1.csv"
# 1.csv: 49301, 2.csv: 1888191 , 3.csv: 3177825 4.csv: 1804844
#
dtPSES <<- fread(strUrl2018local);
dtPSES %>% dim
dtPSES[LEVEL1ID==83, .N] # 83 200 307
# 32216 63751 32216
dtPSES[LEVEL1ID==83, .N, by=LEVEL2ID] # 83 200 307
# LEVEL2ID N
# <int> <int>
# 1: 0 32272
dtPSES[LEVEL1ID==83 & LEVEL2ID==200 ] # 83 200 307 416
dtPSES[LEVEL1ID==83 & LEVEL2ID==200 & LEVEL3ID==307 ] # 83 200 307 416
dtPSES[LEVEL1ID==83 & LEVEL2ID==200 & LEVEL3ID==307 & LEVEL4ID==416] # 83 200 307 416
setkeyv(dtPSES, PSES_ID_COLS)
dtPSES[as.list(myID0)]
dtPSES[as.list(getIDupto(myID0, 3))][QUESTION=="Q34"]
dtPSES[as.list(getIDupto(myID0, 3))][QUESTION=="Q34"]
myID <- getIDupto(myID0, 3);myID
dtPSES[as.list(myID)]
dtPSES[as.list(c(83,0,0,0,0))]$SURVEYR %>% unique()
dtPSES[as.list(c(83,0,0,0,0)), .(QUESTION,TITLE_E)][1:200] %>% unique()
if (T) { # . Keep 2018 only
dtPSES[ , .N, by = SURVEYR]
dtPSES[SURVEYR == 2018 ] # 1062480:
## dtPSES <<- dtPSES [SURVEYR==2018]
dtPSES[SURVEYR == 2018, .N ] #14878
dtPSES[SURVEYR == 2019, .N ] #17776
}
} else { # II. Read 2011-2018 files ----
# THIS NEEDS TO BE VALIDATED
########################################################### #
# 0.Read dtQmapping ----
########################################################### #
#Question number concordance with past surveys
#https://www.canada.ca/en/treasury-board-secretariat/services/innovation/public-service-employee-survey/2018/question-number-concordance-past-surveys-2018.html
cols <- c("n2018", "n2017","n2017a","n2014","n2011","n2008")
dtQmapping[, (cols):=lapply(.SD, as.integer), .SDcols=cols]
dtQmapping[, (cols):=lapply(.SD, function(x) sprintf("Q%02i",x)), .SDcols=cols]
dtQmapping[, (cols):=lapply(.SD, function(x) ifelse(x=="QNA", NA, x)), .SDcols=cols]
dtQmapping
#. Read 2011-2018 .csv data ----
strUrl2018local <- "source-data/2018_PSES_open_dataset_Ensemble_de_données_ouvertes_du_SAFF_2018.csv"
strUrl2017local <- "source-data/2017_PSES_SAFF_Open_dataset_Ensemble_donnees_ouvertes.csv"
strUrl2014local <- "source-data/2014-results-resultats.csv"
strUrl2011local <- "source-data/2011_results-resultats.csv"
dt2011 <- fread(strUrl2011local); dim(dt2011)# 22 cols
dt2014 <- fread(strUrl2014local); dim(dt2014)# 23 cols
dt2017 <- fread(strUrl2017local); dim(dt2017)# 23 cols
dt2018<- fread(strUrl2018local); dim(dt2018)
#. Remove AGREE column in 2014-2018 .csv data ----
dt2014$AGREE <- NULL
dt2017$AGREE <- NULL
dt2018$AGREE <- NULL
#. Rename nQ column in 2011 so we can merge by it----
setnames(dt2011, "V9", "nQ"); dt2011 %>% names
# . Rename question numbers in each set ----
dt2018$nQ %>% unique # Q01 2312923:
dt2017$nQ %>% unique # A_Q01 3289210:
dt2014$nQ %>% unique# A_Q01 1509724:
dt2011$nQ %>% unique# A_Q01 1069189:
dt2017[, nQ := nQ %>% substring(3)]
dt2014[, nQ := nQ %>% substring(3)]
dt2011[, nQ := nQ %>% substring(3)]
# . * Replace question via mapping ----
dt2011[
dtQmapping,
on = c(nQ = "n2011"),
nQ := n2018
]
dt2014[
dtQmapping,
on = c(nQ = "n2014"),
nQ := n2018
]
dt2017[
dtQmapping,
on = c(nQ = "n2017"),
nQ := n2018
]
# . rbind them together (dt2011-2018) ----
dtPSES <- dt2018 %>% rbind(dt2017) %>% rbind (dt2014) %>% rbind(dt2011, use.names=F)
rm(dt2018); rm(dt2017); rm(dt2014); rm(dt2011);
}
|
cd6d5adacd1a123f73b89ecb3bfdf6967078f948
|
1f617f5a6191411dd5e535be37d8608b031906c4
|
/man/baggr_compare.Rd
|
20279eb5d20a2e674bf836a4c1d266bc977144dd
|
[] |
no_license
|
cran/baggr
|
e63bf49aa3460db5dba567e9514b63b444bdc523
|
bc9c1c88d29eab82f2e623397da4e7d913d7c46c
|
refs/heads/master
| 2023-04-06T22:01:38.614702
| 2023-03-22T09:20:02
| 2023-03-22T09:20:02
| 218,299,846
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 4,653
|
rd
|
baggr_compare.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/baggr_compare.R
\name{baggr_compare}
\alias{baggr_compare}
\title{(Run and) compare multiple baggr models}
\usage{
baggr_compare(
...,
what = "pooling",
compare = c("groups", "hyperpars", "effects"),
transform = NULL,
prob = 0.95,
plot = FALSE
)
}
\arguments{
\item{...}{Either some (at least 1) objects of class \code{baggr}
(you should name your objects, see the example below)
or the same arguments you'd pass to \link{baggr}.
In the latter case you must specify \code{what} to compare.}
\item{what}{One of \code{"pooling"} (comparison between no, partial and
full pooling) or \code{"prior"} (comparison between prior and
posterior predictive). If pre-existing baggr models are
passed to \code{...}, this argument is ignored.}
\item{compare}{When plotting, choose between comparison of \code{"groups"}
(default), \code{"hyperpars"} (to omit group-specific estimates)
or (predicted) \code{"effects"}.
The \code{"groups"} option is not available when \code{what = "prior"}.}
\item{transform}{a function (e.g. exp(), log()) to apply to
the sample of group (and hyper, if \code{hyper=TRUE})
effects before plotting; when working with
effects that are on log scale,
exponent transform is used automatically,
you can plot on log scale by setting
transform = identity}
\item{prob}{Width of uncertainty interval (defaults to 95\%)}
\item{plot}{logical; calls \link{plot.baggr_compare} when running \code{baggr_compare}}
}
\value{
an object of class \code{baggr_compare}
}
\description{
Compare multiple \link{baggr} models by either
providing multiple already existing models as (named) arguments or
passing parameters necessary to run a \link{baggr} model.
}
\details{
If you pass parameters to the function you must specify
what kind of comparison you want, either \code{"pooling"}, which
will run fully/partially/un-pooled models and then compare them,
or \code{"prior"} which will generate estimates without the data
and compare them to the model with the full data. For more
details see \link{baggr}, specifically the \code{ppd} argument.
}
\examples{
\donttest{
# Most basic comparison between no, partial and full pooling
# (This will run the models)
# run model with just prior and then full data for comparison
# with the same arguments that are passed to baggr
prior_comparison <-
baggr_compare(schools,
model = 'rubin',
#this is just for illustration -- don't set it this low normally!
iter = 500,
prior_hypermean = normal(0, 3),
prior_hypersd = normal(0,2),
prior_hypercor = lkj(2),
what = "prior")
# print the aggregated treatment effects
prior_comparison
# plot the comparison of the two distributions
plot(prior_comparison)
# Now compare different types of pooling for the same model
pooling_comparison <-
baggr_compare(schools,
model = 'rubin',
#this is just for illustration -- don't set it this low normally!
iter = 500,
prior_hypermean = normal(0, 3),
prior_hypersd = normal(0,2),
prior_hypercor = lkj(2),
what = "pooling",
# You can automatically plot:
plot = TRUE)
# Compare existing models (you don't have to, but best to name them):
bg1 <- baggr(schools, pooling = "partial")
bg2 <- baggr(schools, pooling = "full")
baggr_compare("Partial pooling model" = bg1, "Full pooling" = bg2)
#' ...or simply draw from prior predictive dist (note ppd=T)
bg1 <- baggr(schools, ppd=TRUE)
bg2 <- baggr(schools, prior_hypermean = normal(0, 5), ppd=TRUE)
baggr_compare("Prior A, p.p.d."=bg1,
"Prior B p.p.d."=bg2,
compare = "effects")
# Compare how posterior predictive effect varies with e.g. choice of prior
bg1 <- baggr(schools, prior_hypersd = uniform(0, 20))
bg2 <- baggr(schools, prior_hypersd = normal(0, 5))
baggr_compare("Uniform prior on SD"=bg1,
"Normal prior on SD"=bg2,
compare = "effects", plot = TRUE)
# Models don't have to be identical. Compare different subsets of input data:
bg1_small <- baggr(schools[1:6,], pooling = "partial")
baggr_compare("8 schools model" = bg1, "First 6 schools" = bg1_small,
plot = TRUE)
}
}
\seealso{
\link{plot.baggr_compare} and \link{print.baggr_compare}
for working with results of this function
}
\author{
Witold Wiecek, Brice Green
}
|
c044a47f6d547f556dad8f4290ce1bb1c8428a21
|
14c5f84de6a6b5bcbb526c4bdc6e2eaac5aff9bd
|
/Youtube_search_results.R
|
3c0c266fe1e9b818d0ca5ed8ad67df72689bbef2
|
[] |
no_license
|
Nachtjagdgeschwader/Youtube_search_scrapping
|
f80c99069b5e02ace8ed3734c2e46dd0a56adfc1
|
ed85b6ada40cd3304b9544cb13f83e8c812d2eb0
|
refs/heads/master
| 2021-09-04T00:41:28.026740
| 2018-01-13T13:20:15
| 2018-01-13T13:20:15
| 85,432,487
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,158
|
r
|
Youtube_search_results.R
|
# Collect YouTube video URLs from saved search-result pages, driving the
# PhantomJS headless WebKit through RSelenium.
#
# Prerequisite: download PhantomJS from http://phantomjs.org/download.html
# and extract it to a folder of your choice.
install.packages("RSelenium")
library(RSelenium)
# Full path to phantomjs.exe on your hard drive.
psPath <-
  "C:/*/phantomjs-2.1.1-windows/bin/phantomjs.exe"
pJS <- phantom(pjs_cmd = psPath)
remDr <- remoteDriver(browserName = "phantomjs")
remDr$open()
# PhantomJS driver is now running.
# CSV file with one URL per row, each pointing at a YouTube search-results
# page with the desired filters already applied (may be copied from the
# YouTube interface once and then populated for all keywords).
# A sample InitialURLsYt.csv is provided.
URLs <-
  read.csv("C:/*/InitialURLsYt.csv",
           header = FALSE)$V1
URLs <- as.character(URLs)
# datalist1 accumulates one data frame of URLs per result page for the
# current keyword; datalist2 accumulates the combined result per keyword.
# Both are bound into a single data frame at the end.
datalist2 <- list()
n <- length(URLs)
library(tcltk)
pb <-
  tkProgressBar(
    title = "Collecting URLs",
    min = 0,
    max = n,
    width = 200
  )
for (i in seq_len(n)) {
  # Reset the per-keyword page list. Without this reset, pages collected
  # for earlier keywords leaked into the current keyword's result whenever
  # the current keyword produced fewer pages (bug in the original script).
  datalist1 <- list()
  tryCatch({
    Sys.sleep(0.001)
    setTkProgressBar(pb, i, label = paste(round(i / n * 100, 1), "% done"))
    # Open the search-results page for the i-th keyword.
    remDr$navigate(URLs[i])
    # Walk up to 100 result pages; pagination ends early (via the error
    # handler below) once the "Next" button is no longer present.
    for (k in 1:100) {
      # CSS selector of a video title link on a search-results page;
      # http://selectorgadget.com/ helps finding a unique selector.
      URLRaw <-
        remDr$findElements(
          using = 'css',
          "a.yt-uix-tile-link.yt-ui-ellipsis.yt-ui-ellipsis-2.yt-uix-sessionlink.spf-link"
        )
      # Extract the href attribute of every video-title link on the page.
      URL <- vapply(
        URLRaw,
        function(x) unlist(x$getElementAttribute("href")),
        character(1)
      )
      # One data frame of URLs per page. (The original wrapped this in
      # plyr::rbind.fill without ever loading plyr, which errored at
      # runtime; a plain data.frame is all that is needed here.)
      datalist1[[k]] <- data.frame(URL = URL, stringsAsFactors = FALSE)
      # Advance to the next results page by clicking the "Next" button.
      remDr$findElement(using = 'css',
                        ".yt-uix-button-size-default:nth-child(8) .yt-uix-button-content")$clickElement()
      Sys.sleep(1)
    }
  }, error = function(e) {
    # Reaching the last page (no "Next" button) raises an error; treat it
    # as the natural end of pagination for this keyword and move on.
  })
  # Combine all pages collected for this keyword.
  datalist2[[i]] <- do.call(rbind, datalist1)
}
# Combine the per-keyword results into one data frame of all URLs.
big_data <- do.call(rbind, datalist2)
nrow(big_data)
# Optional: shows how many URLs were collected.
big_data_ready <-
  as.data.frame(big_data,
                row.names = NULL,
                stringsAsFactors = FALSE)
# Save the result. write.csv2 always uses ";" as separator and writes the
# header itself, so no `sep`/`col.names` arguments are needed (the original
# passed both, which write.csv2 rejects with warnings). Replace the path
# with your own saving directory, or use file.choose().
write.csv2(big_data_ready, "C:/*/ResultURLYt.csv", row.names = FALSE)
remDr$close()
# Stop the PhantomJS process and tidy up the progress bar.
pJS$stop()
close(pb)
|
fcc67d4a12c681aa853673bbf58b5e1db6569336
|
5c6fbdd4764df04b72d938afb9227519e1d5e69f
|
/R/wunc.R
|
1987ce906d2e0667a8d942bd5b3a9bf9dd891666
|
[
"MIT"
] |
permissive
|
chainsawriot/nucommit
|
d3cae16a394a27b37d867148984fcdddcd7d533f
|
7ee9cf1556c3d593eb8459387b078c528e4734ee
|
refs/heads/master
| 2021-07-15T00:25:15.055126
| 2020-11-19T14:25:22
| 2020-11-19T14:25:22
| 225,418,499
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,343
|
r
|
wunc.R
|
# Normalise raw tweet input to the three columns used by every metric:
# user_id, created_at (POSIXct) and text. Named columns are used when all
# three are present; otherwise the first three columns are assumed to be,
# in order, user id, timestamp and content.
.clean <- function(input) {
  wanted <- c("user_id", "created_at", "text")
  if (all(wanted %in% names(input))) {
    output <- dplyr::select(input, user_id, created_at, text)
  } else {
    ## POSITIONAL ASSUMPTION: columns 1-3 are user id, timestamp, content.
    output <- input[, 1:3]
    names(output) <- wanted
  }
  # Parse character timestamps ("YYYY-MM-DD HH:MM:SS") into POSIXct.
  if (!inherits(output$created_at, "POSIXct")) {
    output$created_at <- lubridate::ymd_hms(output$created_at)
  }
  output
}
#' Calculate the numbers time series according to Freelon et al.
#'
#' This function calculates the 'numbers' time series from tweets according
#' to Freelon et al. (2019): the count of distinct users tweeting on each day.
#' @param input a data frame, either from rtweet::search_tweets or any one with the three columns meaning user id, timestamp and content.
#' @return A tibble with date and the 'numbers' metric.
#' @importFrom magrittr %>%
#' @references Freelon, D., McIlwain, C., & Clark, M. (2018). Quantifying the power and consequences of social media protest. New Media & Society, 20(3), 990-1011.
#' @export
calculate_numbers <- function(input) {
  tweets <- .clean(input)
  tweets %>%
    # Bucket every tweet into its calendar day.
    dplyr::mutate(created_at = lubridate::floor_date(created_at, unit = "day")) %>%
    # Collapse to one row per (day, user) pair ...
    dplyr::group_by(created_at, user_id) %>%
    dplyr::count() %>%
    dplyr::ungroup() %>%
    # ... then count distinct users per day.
    dplyr::count(created_at) %>%
    dplyr::arrange(created_at) %>%
    dplyr::rename(numbers = "n")
}
#' Calculate the unity time series according to Freelon et al.
#'
#' This function calculates the 'unity' time series from tweets according to Freelon et al. (2019):
#' per day, the Gini coefficient of the frequency distribution of hashtags used that day.
#' A high value indicates usage concentrated on few hashtags.
#' @param input a data frame, either from rtweet::search_tweets or any one with the three columns meaning user id, timestamp and content.
#' @return A tibble with date and the 'unity' metric.
#' @references Freelon, D., McIlwain, C., & Clark, M. (2018). Quantifying the power and consequences of social media protest. New Media & Society, 20(3), 990-1011.
#' @export
calculate_unity <- function(input) {
  input <- .clean(input)
  # Pipeline: bucket tweets by day; extract lower-cased hashtags with the
  # regex #[A-Za-z0-9]*; pool each day's hashtags into one list; flatten;
  # compute the Gini coefficient of the hashtag frequency table; rename to
  # `unity`.
  # NOTE(review): `list(unlist = hashtags)` merely names the list element
  # "unlist" rather than calling unlist(); the actual flattening happens in
  # the following purrr::map(al, unlist) -- confirm this is intentional.
  input %>% dplyr::mutate(created_at = lubridate::floor_date(created_at, unit = "day")) %>% dplyr::mutate(hashtags = stringr::str_extract_all(tolower(text), "#[A-Za-z0-9]*")) %>% dplyr::group_by(created_at) %>% dplyr::select(created_at, hashtags) %>% dplyr::group_by(created_at) %>% dplyr::summarise(al = list(unlist = hashtags)) %>% dplyr::mutate(al = purrr::map(al, unlist)) %>% dplyr::mutate(gini = purrr::map_dbl(al, ~reldist::gini(table(.)))) %>% dplyr::arrange(created_at) %>% dplyr::select(created_at, gini) %>% dplyr::rename(unity = "gini")
}
# Share of the users active on day i (row i of `user_hash`) who tweet again
# within the following `after_days` days.
# `user_hash` is a grouped tibble with columns `created_at` (day) and `data`
# (nested tibbles of user_id), as built inside calculate_commitment().
# Returns NA when no activity at all falls inside the look-ahead window.
.windowing_count <- function(i, user_hash, after_days = 3) {
  # Day and user set for row i.
  current_date <- user_hash[i, 1] %>% dplyr::pull()
  current_user <- dplyr::pull(user_hash[i, 2])[[1]]
  # All users seen in the half-open window (current_date, current_date + after_days].
  dplyr::filter(user_hash, created_at > current_date & created_at <= current_date + lubridate::days(after_days)) %>% dplyr::pull(data) %>% dplyr::bind_rows() -> all_users_next_3
  # bind_rows() of an empty selection yields a zero-column tibble.
  if(ncol(all_users_next_3) == 0) {
    return(NA)
  }
  # Fraction of today's users who also appear somewhere in the window.
  consistent_users <- intersect(current_user$user_id, all_users_next_3$user_id)
  ratio <- length(consistent_users) / nrow(current_user)
  return(ratio)
}
#' Calculate the commitment time series according to Freelon et al.
#'
#' This function calculates the 'commitment' time series from tweets according to Freelon et al. (2019):
#' per day, the fraction of that day's users who tweet again within the next \code{after_days} days.
#' @param input a data frame, either from rtweet::search_tweets or any one with the three columns meaning user id, timestamp and content.
#' @param after_days a number to determine what constitutes a repeat participation. Default to 3 days (Freelon et al.)
#' @return A tibble with date and the 'commitment' metric.
#' @references Freelon, D., McIlwain, C., & Clark, M. (2018). Quantifying the power and consequences of social media protest. New Media & Society, 20(3), 990-1011.
#' @export
calculate_commitment <- function(input, after_days = 3) {
  input <- .clean(input)
  # Build one row per day with that day's distinct users nested in `data`
  # (the count column `n` is only a vehicle for deduplication and is dropped).
  input %>% dplyr::mutate(created_at = lubridate::floor_date(created_at, unit = "day")) %>% dplyr::group_by(created_at, user_id) %>% dplyr::count() %>% dplyr::select(-n) %>% dplyr::ungroup() %>% dplyr::group_by(created_at) %>% tidyr::nest(data = user_id) -> user_hash
  # For every day, the repeat-participation ratio over the look-ahead window
  # (see .windowing_count).
  user_hash$commitment <- purrr::map_dbl(1:nrow(user_hash), .windowing_count, user_hash = user_hash, after_days = after_days)
  user_hash %>% dplyr::ungroup(created_at) %>% dplyr::select(created_at, commitment) %>% dplyr::arrange(created_at)
}
|
8c2045a13210499bdf7e62296227b5c76d340e8b
|
7f026bc3deee32e4732c13cd318cb32119c7dd69
|
/man/qar.sim.Rd
|
7fdb121f5f1a14406c10c8b80c1368b0f4be19f9
|
[] |
no_license
|
cran/TSA
|
109803777566ded77104af3a01e288c749daa97b
|
5050db06a645f31f2a37ac81a90fc5d2c590a25c
|
refs/heads/master
| 2022-07-28T07:23:53.254418
| 2022-07-05T10:36:22
| 2022-07-05T10:36:22
| 17,693,886
| 1
| 8
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,306
|
rd
|
qar.sim.Rd
|
\name{qar.sim}
\alias{qar.sim}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{Simulate a first-order quadratic AR model}
\description{
Simulates a first-order quadratic AR model with normally distributed noise.
}
\usage{
qar.sim(const = 0, phi0 = 0, phi1 = 0.5, sigma = 1, n = 20, init = 0)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{const}{intercept}
\item{phi0}{coefficient of the lag 1}
\item{phi1}{coefficient of the squared lag 1}
\item{sigma}{noise standard deviation}
\item{n}{sample size}
\item{init}{number of burn-in values}
}
\details{
The quadratic AR(1) model specifies that
\deqn{Y_t = \mathrm{const}+\phi_0 Y_{t-1}+\phi_1 Y^2_{t-1}+e_t}
where \eqn{e_t} are iid normally distributed with zero mean and standard
deviation \eqn{\sigma}. If \eqn{\sigma=0}, the model is deterministic.
}
\value{A simulated series from the quadratic AR(1) model, as a vector}
\author{Kung-Sik Chan}
\seealso{ \code{\link{tar.sim}}}
\examples{
set.seed(1234567)
plot(y=qar.sim(n=15,phi1=.5,sigma=1),x=1:15,type='l',ylab=expression(Y[t]),xlab='t')
y=qar.sim(n=100,const=0.0,phi0=3.97, phi1=-3.97,sigma=0,init=.377)
plot(y,x=1:100,type='l',ylab=expression(Y[t]),xlab='t')
acf(y,main='')
}
\keyword{methods}
|
6971962079d81bb8d2fe4c475a092df835b70b25
|
c485c6c823f09b5b23d3f7f0e3caa40332101ba1
|
/inst/RunScripts/Run_MQ_QuantTwoGroupAnalysis.R
|
6f52d3a3e2e4fe1a80cd2a7b5b99b52ec8fc6182
|
[] |
no_license
|
LucasKook/SRMService
|
e6d208dcfe09d7dacb950d24a4ddb7bd8751a121
|
dd2aa9f952a4ebd6706627d3e8a1b8cca5ce4a57
|
refs/heads/master
| 2022-02-21T08:46:48.792797
| 2019-01-29T13:01:01
| 2019-01-29T13:01:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,405
|
r
|
Run_MQ_QuantTwoGroupAnalysis.R
|
#
# If you are going to use results produced by the scripts please do cite the
# SRMService R package by providing the following URL
# www.github.com/protViz/SRMService
# by W.E. Wolski, J. Grossmann, C. Panse
#
# Two-group quantitative analysis of a MaxQuant proteinGroups.txt file
# via the SRMService Grp2Analysis workflow; stores the fitted object as
# package data (mqQuantMatrixGRP2).
rm(list=ls())
library(limma)
library(SRMService)
### Protein groups file
# Use the example proteinGroups.txt shipped with the SRMService package.
packagedir <- path.package("SRMService")
proteinGroupsFile <- file.path(packagedir, "samples/proteinGroups/proteinGroups.txt")
###
protein <- readr::read_tsv(proteinGroupsFile)
colnames(protein) <- make.names(colnames(protein))
# Cumulative protein counts by minimum peptide evidence, shown as a barplot.
tmp <- cumsum(rev(table(protein$Peptides)))
barplot(tmp[(length(tmp)-5):length(tmp)],ylim=c(0, length(protein$Peptides)),xlab='nr of proteins with at least # peptides')
# Raw-file names are encoded in the Intensity.* column names; the third
# "."-separated token is taken as the experimental condition.
rawF <- gsub("Intensity\\.", "", grep("Intensity\\.",colnames(protein),value=T) )
condition <- quantable::split2table(rawF)[,3]
annotation <- data.frame(Raw.file = rawF,
                         Condition = condition,
                         BioReplicate = paste("X",1:length(condition),sep=""),
                         Run = 1:length(condition),
                         IsotopeLabelType = rep("L",length(condition)),
                         stringsAsFactors = F)
###################################
### Configuration section
resultdir <- "output"
dir.create(resultdir)
#fix(annotation)
Experimentname = ""
# Maximum number of NAs tolerated per protein.
# NOTE(review): the computed value is immediately overwritten with the
# hard-coded 5 on the next line -- confirm which is intended.
nrNas = sum(!is.na(annotation$Condition)) - 1
nrNas = 5
nrPeptides = 2
# Reference condition for the two-group comparison.
# NOTE(review): likewise immediately overwritten with the hard-coded "WT".
reference=unique(annotation$Condition)[1]
reference="WT"
qvalueThreshold = 0.05
qfoldchange =1
# Persist the annotation actually used alongside the results.
write.table(annotation, file=file.path(resultdir, "annotationused.txt"))
####### END of user configuration ##
# source("R/Grp2Analysis.R")
grp2 <- Grp2Analysis(annotation, "Experimentname",
                     maxNA=nrNas,
                     nrPeptides=nrPeptides,
                     reference=reference,
                     numberOfProteinClusters = 20
)
grp2$setMQProteinGroups(protein)
grp2$setQValueThresholds(qvalue = qvalueThreshold,qfoldchange = qfoldchange)
# Save the fitted analysis object as package data.
mqQuantMatrixGRP2 <- grp2
head(mqQuantMatrixGRP2$getModPValuesCI())
usethis::use_data(mqQuantMatrixGRP2, overwrite = TRUE)
#readr::write_tsv(grp2$getResultTable(), path=file.path(resultdir,"pValues.csv"))
## REMOVE TO RENDER
# rmarkdown::render("vignettes/Grp2AnalysisHeatmap3.Rmd",bookdown::pdf_document2(), params=list(grp = grp2))
# rmarkdown::render("vignettes/Grp2Analysis.Rmd",bookdown::pdf_document2(), params=list(grp = grp2))
|
189bdcae676249f5ffd413b7ee306fa41d584bc7
|
b5cf7ad90c793fa7b26e075d85040b3421dceaaf
|
/plotconhadly.r
|
794573548eff00f95060e814ade7d8ed60063396
|
[] |
no_license
|
apsteinmetz/test
|
57915576b47c0a6a099f959ae9176f1e745658fa
|
5f75bfa064b6b4fe1a496ec95b4b4456e122bf87
|
refs/heads/master
| 2021-01-20T13:59:01.283202
| 2018-09-28T20:15:38
| 2018-09-28T20:15:38
| 82,726,456
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 952
|
r
|
plotconhadly.r
|
# hadley's plotcon 2016 talk: per-country linear models of life expectancy
# over time (gapminder data), using the nest / map / broom workflow.
library(tidyverse)
library(gapminder)
library(dplyr)
# Center year at 1950 so the model intercept reads as "life expectancy in 1950".
gapminder<-gapminder%>%mutate(year1950=year-1950)
# One row per country, with its yearly observations nested in a list-column.
by_country<-gapminder %>% group_by(continent,country) %>% nest()
# Linear trend of life expectancy for one country's data frame.
country_model <- function(df) {
  lm(lifeExp ~ year1950, data=df)
}
# Fit the model for every country.
models <-by_country %>% mutate(model = map(data,country_model))
# Attach model summaries: fit statistics (glance / rsq), coefficient table
# (tidy) and per-observation values (augment).
models<- models %>%
  mutate(
    glance = model %>% map(broom::glance),
    rsq = glance %>% map_dbl('r.squared'),
    tidy = model %>% map(broom::tidy),
    augment = model %>% map(broom::augment)
  )
# Model quality (R^2) per country, colored by continent.
models %>% ggplot(aes(rsq,reorder(country,rsq)))+geom_point(aes(colour=continent))
# Spread the coefficients into columns: `(Intercept)` (1950 level) and
# year1950 (yearly improvement).
ggd<-unnest(models,tidy)%>%select(continent,country,term,estimate,rsq)%>%spread(term,estimate)
# Yearly improvement vs. 1950 baseline, point size = model fit quality.
gg<-ggd%>%ggplot(aes(`(Intercept)`,year1950))
gg<-gg+geom_point(aes(colour=continent,size=rsq))
gg<-gg+xlab('Life Expectancy 1950')+ylab('Yearly Improvement')
gg<-gg+geom_smooth()
gg<-gg+scale_size_area()
gg
|
3d941da02d8e7af452216a5c9feef90d314e6c73
|
996790634a55078a7d1469286e54b7dff5b23b31
|
/man/pit_sample.Rd
|
8e896ce8fbf80fe14e641904d3d11d9b1369a940
|
[
"MIT"
] |
permissive
|
epiforecasts/scoringutils
|
d811ff7dc30e0f4ba6639ed6a42897793fe67b54
|
b7090104e0736f2f59a2b9771171a256f3af3514
|
refs/heads/main
| 2023-08-08T22:48:48.957717
| 2023-07-28T10:19:06
| 2023-07-28T10:19:06
| 240,501,300
| 32
| 13
|
NOASSERTION
| 2023-09-06T13:12:03
| 2020-02-14T12:16:42
|
R
|
UTF-8
|
R
| false
| true
| 4,136
|
rd
|
pit_sample.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pit.R
\name{pit_sample}
\alias{pit_sample}
\title{Probability Integral Transformation (sample-based version)}
\usage{
pit_sample(true_values, predictions, n_replicates = 100)
}
\arguments{
\item{true_values}{A vector with the true observed values of size n}
\item{predictions}{nxN matrix of predictive samples, n (number of rows) being
the number of data points and N (number of columns) the number of Monte
Carlo samples. Alternatively, predictions can just be a vector of size n.}
\item{n_replicates}{the number of draws for the randomised PIT for
integer predictions.}
}
\value{
A vector with PIT-values. For continuous forecasts, the vector will
correspond to the length of \code{true_values}. For integer forecasts, a
randomised PIT will be returned of length
\code{length(true_values) * n_replicates}
}
\description{
Uses a Probability Integral Transformation (PIT) (or a
randomised PIT for integer forecasts) to
assess the calibration of predictive Monte Carlo samples. Returns a
p-values resulting from an Anderson-Darling test for uniformity
of the (randomised) PIT as well as a PIT histogram if specified.
}
\details{
Calibration or reliability of forecasts is the ability of a model to
correctly identify its own uncertainty in making predictions. In a model
with perfect calibration, the observed data at each time point look as if
they came from the predictive probability distribution at that time.
Equivalently, one can inspect the probability integral transform of the
predictive distribution at time t,
\deqn{
u_t = F_t (x_t)
}
where \eqn{x_t} is the observed data point at time \eqn{t \textrm{ in } t_1,
…, t_n}{t in t_1, …, t_n}, n being the number of forecasts, and \eqn{F_t} is
the (continuous) predictive cumulative probability distribution at time t. If
the true probability distribution of outcomes at time t is \eqn{G_t} then the
forecasts \eqn{F_t} are said to be ideal if \eqn{F_t = G_t} at all times t.
In that case, the probabilities \eqn{u_t} are distributed uniformly.
In the case of discrete outcomes such as incidence counts,
the PIT is no longer uniform even when forecasts are ideal.
In that case a randomised PIT can be used instead:
\deqn{
u_t = P_t(k_t) + v * (P_t(k_t) - P_t(k_t - 1) )
}
where \eqn{k_t} is the observed count, \eqn{P_t(x)} is the predictive
cumulative probability of observing incidence k at time t,
\eqn{P_t (-1) = 0} by definition and v is standard uniform and independent
of k. If \eqn{P_t} is the true cumulative
probability distribution, then \eqn{u_t} is standard uniform.
The function checks whether integer or continuous forecasts were provided.
It then applies the (randomised) probability integral and tests
the values \eqn{u_t} for uniformity using the
Anderson-Darling test.
As a rule of thumb, there is no evidence to suggest a forecasting model is
miscalibrated if the p-value found was greater than a threshold of p >= 0.1,
some evidence that it was miscalibrated if 0.01 < p < 0.1, and good
evidence that it was miscalibrated if p <= 0.01. However, the AD-p-values
may be overly strict and there actual usefulness may be questionable.
In this context it should be noted, though, that uniformity of the
PIT is a necessary but not sufficient condition of calibration.
}
\examples{
data.table::setDTthreads(1) # only needed to avoid issues on CRAN
## continuous predictions
true_values <- rnorm(20, mean = 1:20)
predictions <- replicate(100, rnorm(n = 20, mean = 1:20))
pit <- pit_sample(true_values, predictions)
plot_pit(pit)
## integer predictions
true_values <- rpois(50, lambda = 1:50)
predictions <- replicate(2000, rpois(n = 50, lambda = 1:50))
pit <- pit_sample(true_values, predictions, n_replicates = 30)
plot_pit(pit)
}
\references{
Sebastian Funk, Anton Camacho, Adam J. Kucharski, Rachel Lowe,
Rosalind M. Eggo, W. John Edmunds (2019) Assessing the performance of
real-time epidemic forecasts: A case study of Ebola in the Western Area
region of Sierra Leone, 2014-15, \doi{10.1371/journal.pcbi.1006785}
}
\seealso{
\code{\link[=pit]{pit()}}
}
\keyword{metric}
|
9e114ed4c80154d68220476a731bc78cf8205530
|
f624abe47a57527cb58f4d34c482709a4961f949
|
/inst/shiny-metabase/components/main/body.R
|
c948ca9cb216bc021cdbb500d8072e31f0c6d127
|
[] |
no_license
|
zhuchcn/ShinyMetabase
|
ffdcd852844e7fcbb6cafb6ee6c58f2b342b3e90
|
8a61bb851132876977cb551e0241920cc9d75dd9
|
refs/heads/master
| 2020-04-27T15:07:48.624245
| 2019-03-18T17:45:05
| 2019-03-18T17:45:05
| 174,434,028
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,299
|
r
|
body.R
|
import::here(TabOverview, .from="../overview/overview.R")
import::here(TabNormality, .from="../normality/normality.R")
import::here(TabUnivariate, .from="../univariate/univariate.R")
import::here(TabMultivariate, .from="../multivariate/multivariate.R")
import::here(NetworkTuning, .from="../network/NetworkTuning.R")
import::here(NetworkVisual, .from="../network/NetworkVisual.R")
# Shiny dashboard body module. Instantiates every analysis tab module,
# renders them inside shinydashboard's body, and enables/disables the
# downstream tabs depending on whether data has been loaded in the
# overview tab.
Body = R6Class(
    "Body",
    inherit = ShinyModule,
    public = list(
        # Child tab modules; instantiated in initialize().
        tabOverview = NULL,
        # NOTE(review): tabQC is declared but never assigned or used in this
        # class -- possibly a leftover; confirm before removing.
        tabQC = NULL,
        tabNormality = NULL,
        tabUnivariate = NULL,
        tabMultivariate = NULL,
        tabNetworkTuning = NULL,
        tabNetworkVisual = NULL,
        # Construct all child tab modules (imported at the top of this file).
        initialize = function(){
            self$tabOverview = TabOverview$new()
            self$tabNormality = TabNormality$new()
            self$tabUnivariate = TabUnivariate$new()
            self$tabMultivariate = TabMultivariate$new()
            self$tabNetworkTuning = NetworkTuning$new()
            self$tabNetworkVisual = NetworkVisual$new()
        },
        # Dashboard body UI: static assets plus one tabItem per child module.
        ui = function(){
            dashboardBody(
                tags$link(href="style.css", rel = "stylesheet"),
                tags$script(src="script.js"),
                tags$script(src="cytoscape/bundle.js"),
                shinyjs::useShinyjs(),
                fluidRow(
                    tabItems(
                        tabItem( tabName = "tab_overview", self$tabOverview$ui()),
                        tabItem( tabName = "tab_normality", self$tabNormality$ui()),
                        tabItem( tabName = "tab_univariate", self$tabUnivariate$ui() ),
                        tabItem( tabName = "tab_multivariate", self$tabMultivariate$ui() ),
                        tabItem( tabName = "tab_network_tuning", self$tabNetworkTuning$ui() ),
                        tabItem( tabName = "tab_network_visual", self$tabNetworkVisual$ui() )
                    )
                )
            )
        },
        # Server logic: run the overview tab first; once it reports loaded
        # data, wire the remaining tabs together (normality feeds univariate,
        # which feeds multivariate; network tabs consume normalized data).
        # The custom messages toggle tab availability in the browser
        # (handled in script.js).
        server = function(input, output, session){
            states = self$tabOverview$call()
            observe({
                if(is.null(states$data())) {
                    session$sendCustomMessage("dataNotLoaded", list(
                        tabs = c(
                            "tab_normality",
                            "tab_univariate",
                            "tab_multivariate",
                            "tab_network"
                        )
                    ))
                } else {
                    session$sendCustomMessage("dataLoaded", list(
                        tabs = c(
                            "tab_normality",
                            "tab_univariate",
                            "tab_multivariate",
                            "tab_network"
                        )
                    ))
                    # Chain the tab modules: each call() returns reactive
                    # state consumed by the next tab.
                    norm_data = self$tabNormality$call(props = states)
                    stats_data = self$tabUnivariate$call(props = norm_data)
                    self$tabMultivariate$call(props = reactiveValues(
                        data = norm_data$data, statsData = stats_data
                    ))
                    self$tabNetworkTuning$call(props = norm_data)
                    self$tabNetworkVisual$call(props = norm_data)
                }
            })
        }
    )
)
|
2d3914f2fbc4eec8f561c5809f9068d428918181
|
cc5091d91fa7e614f45fb9b83b4dd7e9c96710e3
|
/man/roc_neat.Rd
|
438fe0d20ca3131732d2220b09c33f7982882a21
|
[
"BSD-2-Clause"
] |
permissive
|
gasparl/neatstats
|
814aeba4aee20c70f6f671953e870f47e2aa38aa
|
22171ed04211c8136e3e7532e56ca664194d5f35
|
refs/heads/master
| 2022-12-10T13:32:08.363769
| 2022-12-07T10:33:45
| 2022-12-07T10:33:45
| 187,226,036
| 4
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 4,598
|
rd
|
roc_neat.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/roc_neat.R
\name{roc_neat}
\alias{roc_neat}
\title{Difference of Two Areas Under the Curves}
\usage{
roc_neat(
roc1,
roc2 = NULL,
pair = FALSE,
greater = NULL,
ci = NULL,
hush = FALSE,
plot_rocs = FALSE,
roc_labels = "",
cutoff_auto = TRUE,
cutoff_custom = NULL
)
}
\arguments{
\item{roc1}{Receiver operating characteristic (ROC) \code{\link[pROC:roc]{
object}}, or, for plotting only, a \code{\link{list}} including any number
of such ROC objects.}
\item{roc2}{Receiver operating characteristic (ROC) \code{\link[pROC:roc]{
object}}, or, for plotting only, leave it as \code{NULL} (default) and
provide list for the first parameter (\code{roc1}).}
\item{pair}{Logical. If \code{TRUE}, the test is conducted for paired samples.
Otherwise (default) for independent samples.}
\item{greater}{\code{NULL} or string (or number); optionally specifies
one-sided test: either "1" (\code{roc1} AUC expected to be greater than
\code{roc2} AUC) or "2" (\code{roc2} AUC expected to be greater than
\code{roc1} AUC). If \code{NULL} (default), the test is two-sided.}
\item{ci}{Numeric; confidence level for the returned CIs (raw difference).}
\item{hush}{Logical. If \code{TRUE}, prevents printing any details to console
(and plotting).}
\item{plot_rocs}{Logical. If \code{TRUE}, plots and returns ROC curves.}
\item{roc_labels}{Optional character vector to provide legend label texts (in
the order of the provided ROC objects) for the ROC plot.}
\item{cutoff_auto}{Logical. If \code{TRUE} (default), optimal cutoffs
on the ROC plots are displayed.}
\item{cutoff_custom}{Custom cutoff to be indicated on the plot can be given
here in a \code{list}. The list index must exactly correspond to the index
of the list index of the AUC (given in \code{roc1}) for which the given
cutoff is intended.}
}
\value{
Prints DeLong's test results for the comparison of the two given AUCs
in APA style, as well as corresponding CI for the AUC difference.
Furthermore, when assigned, returns a list with \code{stat} (D value),
\code{p} (p value), and, when plot is added, ROC plot.
}
\description{
Comparison of two \code{\link[neatStats:t_neat]{areas under the
receiver operating characteristic curves}} (AUCs) and plotting any number of
ROC curves.
}
\note{
The main test statistics are calculated via
\code{\link[pROC:roc.test]{pROC::roc.test}} as DeLong's test (for both
paired and unpaired). The \code{roc_neat} function merely prints it in APA
style. The CI is calculated based on the p value, as described by Altman and
Bland (2011).
The ROC object may be calculated via \code{\link{t_neat}}, or directly with
\code{\link[pROC:roc]{pROC::roc}}.
}
\examples{
# calculate first AUC (from v1 and v2)
v1 = c(191, 115, 129, 43, 523,-4, 34, 28, 33,-1, 54)
v2 = c(4,-2, 23, 13, 32, 16, 3, 29, 37,-4, 65)
results1 = t_neat(v1, v2, auc_added = TRUE)
# calculate second AUC (from v3 and v4)
v3 = c(14.1, 58.5, 25.5, 42.2, 13, 4.4, 55.5, 28.5, 25.6, 37.1)
v4 = c(36.2, 45.2, 41, 24.6, 30.5, 28.2, 40.9, 45.1, 31, 16.9)
results2 = t_neat(v3, v4, auc_added = TRUE)
# one-sided comparison of the two AUCs
roc_neat(results1$roc_obj, results2$roc_obj, greater = "1")
# create a list of randomly generated AUCs
set.seed(1)
aucs_list = list()
for (i in 1:4) {
aucs_list[[i]] = t_neat(rnorm(50, (i-1)),
rnorm(50),
auc_added = TRUE,
hush = TRUE)$roc_obj
}
# depict AUCs (recognized as list)
roc_neat(aucs_list)
\donttest{
# with custom cutoffs depicted
roc_neat(aucs_list,
cutoff_custom = list(0.2),
cutoff_auto = FALSE)
roc_neat(aucs_list,
cutoff_custom = list(.1, c(-.5, 0), NULL, c(.7, 1.6)),
cutoff_auto = FALSE)
roc_neat(aucs_list,
cutoff_custom = list(.6, NULL, NULL, 1.1))
}
}
\references{
Altman, D. G., & Bland, J. M. (2011). How to obtain the confidence interval
from a P value. Bmj, 343(d2090). \doi{https://doi.org/10.1136/bmj.d2090}
DeLong, E. R., DeLong, D. M., & Clarke-Pearson, D. L. (1988). Comparing the
areas under two or more correlated receiver operating characteristic curves: a
nonparametric approach. Biometrics, 44(3), 837-845.
\doi{https://doi.org/10.2307/2531595}
Robin, X., Turck, N., Hainard, A., Tiberti, N., Lisacek, F., Sanchez, J. C., &
Muller, M. (2011). pROC: an open-source package for R and S+ to analyze and
compare ROC curves. BMC bioinformatics, 12(1), 77.
\doi{https://doi.org/10.1186/1471-2105-12-77}
}
\seealso{
\code{\link{t_neat}}
}
|
41561dce58b7f8479058bf72b4aa99ba53ef0c86
|
88481c995db9fcfa8e0fdc576ebaea210e289c9e
|
/code/referencePanel/calcPrecisionTPM_NoIsoFilt_MLE.R
|
ccb82d1ce3601c2f227afc00ce4b479062511c0f
|
[] |
no_license
|
mccabes292/actorPaper
|
9612e032b6bbe15f3f3710ed2af6185dcd086929
|
d204419c1c1f8dda7193ec44ce928833b5876c8b
|
refs/heads/master
| 2020-09-12T04:16:52.304561
| 2019-11-23T04:38:29
| 2019-11-23T04:38:29
| 222,301,991
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,090
|
r
|
calcPrecisionTPM_NoIsoFilt_MLE.R
|
library(readr)
library(magrittr)
library(DirichletMultinomial)
args=commandArgs(TRUE)
index=as.numeric(args[1])
calcDMMLE=function(gene){
print(gene)
ctsTemp=gtex[gtex$gene_id==gene,]
cs=colSums(ctsTemp[,-(1:2)])
ctsTemp=ctsTemp[,c(TRUE,TRUE,cs!=0)]
fit=DirichletMultinomial::dmn(t(ctsTemp[,-(1:2)]),k=1,verbose=TRUE )
est=fit@fit$Estimate
rownames(est)=(ctsTemp$feature_id)
return(est)
}
sampDat=read_tsv("/proj/milovelab/mccabe/proj/GTEx/data/GTEx_v7_Annotations_SampleAttributesDS.txt")
colN=read_table("/proj/milovelab/mccabe/proj/GTEx/data/colNames.txt")
colN2=unlist(colN[-(1:2),1])
matchCol=sampDat$SAMPID%in%colN2
strp=function(x) substr(x,1,15)
sampDatFull=sampDat[matchCol,]
list1=names(table(sampDatFull$SMTSD))
tissueType=list1[index]
sampDatRed=sampDatFull[sampDatFull$SMTSD==tissueType,]
dim(sampDatRed)
print(tissueType)
tissueName=paste(strsplit(gsub("-"," ",tissueType),"\\s+")[[1]],collapse="_")
print(tissueName)
#isoList=read.table("/proj/milovelab/mccabe/proj/GTEx/data/GTExSuffExprIsos.txt")
#isoList2=unlist(isoList)
gtex=read_tsv("/proj/milovelab/mccabe/proj/GTEx/data/GTExScaleTPM.txt")
dim(gtex)
gtex=gtex[,c("transcript_id","gene_id",sampDatRed$SAMPID) ]
dim(gtex)
intercept=data.frame("sample_id"=sampDatRed$SAMPID, "int"=rep(1,nrow(sampDatRed)))
#Find precision estimates using DRIMSeq
cn=colnames(gtex)
cn[1]="feature_id"
gtex=data.frame(gtex)
colnames(gtex)=cn
gtex=gtex[,c(1,2,3:ncol(gtex))]
dim(gtex)
rs=apply(gtex[,-(1:2)],1,sum)
gtex=gtex[rs>0,]
dim(gtex)
gene.cts <-rowsum(gtex[,-(1:2)], gtex$gene_id)
gene.cts.sum=apply(gene.cts>10,1,sum)
gtex=gtex[gtex$gene_id%in%names(gene.cts.sum[gene.cts.sum>0.7*nrow(intercept)]),]
numGenes=table(gtex$gene_id)
keepGenes=names(numGenes)[numGenes>1]
gtex=gtex[gtex$gene_id%in%keepGenes,]
geneList=unique(gtex$gene_id)
precVal=lapply(geneList,calcDMMLE)
precDF=data.frame("gene_id"=gtex$gene_id,"feature_id"=gtex$feature_id,"prec"=unlist(precVal))
write_delim(precDF,paste("/proj/milovelab/mccabe/proj/GTEx/data/precisionNoIsoFilt_MLE/",tissueName,"AlphaNoIsoFilt.txt",sep=""),delim="\t")
|
339b4e29fb88d54dd864f2961eed083f65dc9200
|
3877ee02e7deec476c64901c474a24ad56dcd431
|
/R/meta.retrieval.all.R
|
ba7e5fe168e5fc7c36b3a7586b4431281bddaac9
|
[] |
no_license
|
ropensci/biomartr
|
282d15b64b1d984e3ff8d7d0e4c32b981349f8ca
|
e82db6541f4132d28de11add75c61624644f6aa1
|
refs/heads/master
| 2023-09-04T09:40:15.481115
| 2023-08-28T15:56:25
| 2023-08-28T15:56:25
| 22,648,899
| 171
| 34
| null | 2023-09-14T12:28:02
| 2014-08-05T15:34:55
|
R
|
UTF-8
|
R
| false
| false
| 3,345
|
r
|
meta.retrieval.all.R
|
#' @title Perform Meta-Genome Retrieval of all organisms in all kingdoms of life
#' @description Download genomes, proteomes, cds, gff, rna, or assembly stats
#' files of individual species of all kingdoms of life.
#' @param db a character string specifying the database from which the genome
#' shall be retrieved:
#' \itemize{
#' \item \code{db = "refseq"}
#' \item \code{db = "genbank"}
#' \item \code{db = "emsembl"}
#' \item \code{db = "ensemblgenomes"}
#' }
#' @param type type of sequences that shall be retrieved. Options are:
#' \itemize{
#' \item \code{type = "genome"} :
#' for genome assembly retrieval; see also \code{\link{getGenome}}),
#' \item \code{type = "proteome"} :
#' (for proteome retrieval; see also \code{\link{getProteome}}),
#' \item \code{type = "cds"} :
#' (for coding sequence retrieval; see also \code{\link{getCDS}}),
#' \item \code{type = "gff"} :
#' (for annotation file retrieval in gff format; see also \code{\link{getGFF}}),
#' \item \code{type = "gtf"} :
#' (for annotation file retrieval in gtf format
#' (only for ensembl and ensemblgenomes); see also \code{\link{getGTF}}),
#' \item \code{type = "rna"} :
#' (for RNA file retrieval in fasta format; see also \code{\link{getRNA}}),
#' \item \code{type = "rm"} :
#' (for Repeat Masker output file retrieval; see also
#' \code{\link{getRepeatMasker}}),
#' \item \code{type = "assemblystats"} (for genome assembly quality stats
#' file retrieval; see also \code{\link{getAssemblyStats}}).
#' }
#' @param reference a logical value indicating whether or not a genome shall be downloaded if it isn't marked in the database
#' as either a reference genome or a representative genome. Options are:
#' \itemize{
#' \item \code{reference = FALSE} (Default): all organisms (reference, representative, and non-representative genomes) are downloaded.
#' \item \code{reference = TRUE}: organisms that are downloaded must be either a reference or representative genome. Thus, most genomes which are usually non-reference genomes
#' will not be downloaded.
#' }
#' @author Hajk-Georg Drost
#' @details This function aims to perform bulk retrieval of all genomes
#' of species for all kingdoms of life.
#' @examples
#' \dontrun{
#' # download all genomes from refseq
#' meta.retrieval.all(db = "refseq", type = "genome")
#' # download all vertebrate genomes from genbank
#' meta.retrieval.all(db = "genbank", type = "genome")
#' # download all vertebrate genomes from ensemblgenomes
#' meta.retrieval.all(db = "genbank", type = "ensemblgenomes")
#' }
#' @return a character vector storing the file paths of the retrieved files.
#' @seealso \code{\link{meta.retrieval}}
#' @export
meta.retrieval.all <- function(db = "refseq", type = "genome", reference = FALSE) {
message("Starting ", type, " meta retrieval process of all species individually from database: ", db," ...")
# retrieve all genomes from all kingdoms of life
paths <- unlist(lapply(getKingdoms(db = db),
function(x) meta.retrieval(x, type = type,
db = db,
group = NULL,
reference = reference)))
message("Meta retrieval process... finished!")
return(paths)
}
|
257f84ff46073ded29e7d62d1e83f657bb89bc07
|
32fbef51ecce23ecfe421ec8055c94e8170272a6
|
/man/getWRFDataDir.Rd
|
8bc88c542dcafea610b3fec7615b755f1838d2bc
|
[] |
no_license
|
MazamaScience/AirFireWRF
|
b10fcedfe6a3161eb95aa5c422edd8762be515e9
|
44c97372d491c49032630e3b112dba7d3c04a765
|
refs/heads/master
| 2023-01-08T17:07:24.743029
| 2020-11-03T22:11:44
| 2020-11-03T22:11:44
| 282,019,617
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 359
|
rd
|
getWRFDataDir.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/AirFireWRF.R
\name{getWRFDataDir}
\alias{getWRFDataDir}
\title{Get package data directory}
\usage{
getWRFDataDir()
}
\value{
Absolute path string.
}
\description{
Returns the package data directory where model data is located.
}
\seealso{
\link{WRFDataDir}
\link{setWRFDataDir}
}
|
0e7eeb40ad042b9db5a11ab245d12b49dbeb7ecf
|
2099a2b0f63f250e09f7cd7350ca45d212e2d364
|
/AI-Dataset/Summary_rnd/S0004370217301376.xml.A.R
|
a21a03bbc18fa4da598b45d1b1f11fe774d001de
|
[] |
no_license
|
Angela7126/SLNSumEval
|
3548301645264f9656b67dc807aec93b636778ef
|
b9e7157a735555861d2baf6c182e807e732a9dd6
|
refs/heads/master
| 2023-04-20T06:41:01.728968
| 2021-05-12T03:40:11
| 2021-05-12T03:40:11
| 366,429,744
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,642
|
r
|
S0004370217301376.xml.A.R
|
<html>
<head>
<meta name="TextLength" content="SENT_NUM:8, WORD_NUM:153">
</head>
<body bgcolor="white">
<a href="#0" id="0">But we now can use the semantics of speculative knowledge.In order to prove that {a mathematical formula}(M′,s′)⊨KiSφ, we assume that {a mathematical formula}t′∈Ri′(s′) and that {a mathematical formula}R′:(M′,t′)↔_Ai′(s′)(M″,t″), and we have to show that {a mathematical formula}(M″,t″)⊨φ.From {a mathematical formula}R:(M,s)↔_Ai(s)(M′,s′), {a mathematical formula}t′∈Ri′(s′), {a mathematical formula}Ai(s)=Ai′(s′), and back follows that there is a {a mathematical formula}t∈Ri(s) such that {a mathematical formula}(t,t′)∈R[Ai(s)].</a>
<a href="#1" id="1">Other relations between the non-propositional primitives are discussed in Section 5 on expressivity, e.g., we have that {a mathematical formula}Aiφ↔KiE(φ∨¬φ).</a>
<a href="#2" id="2">86], shown here as Fig. 11.</a>
<a href="#3" id="3">Logics over the class of all epistemic awareness models</a>
<a href="#4" id="4">We only consider awareness generated by propositional variables.</a>
<a href="#5" id="5">Assume that the accessibility relation is serial, transitive, and euclidean, i.e. {a mathematical formula}KD45.</a>
<a href="#6" id="6">As before (Proposition 46, Fig. 7), a distinguishing formula is {a mathematical formula}Aip, true in {a mathematical formula}(N1,w) and false in {a mathematical formula}(N1′,w′).</a>
<a href="#7" id="7">□ {a mathematical formula}L□≻LE{a mathematical formula}L(□,A)is more expressive than{a mathematical formula}L(KE,A).</a>
</body>
</html>
|
6b4b431dca5288383f49918d9707a162df093e86
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/isotone/examples/weighted.median.Rd.R
|
3da877dc1eac4801b173b050540b7811b1dfdc91
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 208
|
r
|
weighted.median.Rd.R
|
library(isotone)
### Name: weighted.median
### Title: Weighted Median
### Aliases: weighted.median
### Keywords: models
### ** Examples
y <- 1:9
w <- c(rep(1,5), rep(2,4))
res <- weighted.median(y, w)
|
ae7f387742102d11befe0f98c76bd79d7cac18bb
|
9aafde089eb3d8bba05aec912e61fbd9fb84bd49
|
/codeml_files/newick_trees_processed/9369_0/rinput.R
|
7e554b3e66aab86858b8070aaf82ea0e424bd250
|
[] |
no_license
|
DaniBoo/cyanobacteria_project
|
6a816bb0ccf285842b61bfd3612c176f5877a1fb
|
be08ff723284b0c38f9c758d3e250c664bbfbf3b
|
refs/heads/master
| 2021-01-25T05:28:00.686474
| 2013-03-23T15:09:39
| 2013-03-23T15:09:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 135
|
r
|
rinput.R
|
library(ape)
testtree <- read.tree("9369_0.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="9369_0_unrooted.txt")
|
4dbdb772bf5ecbcdbbf61ae75606bbfef828928d
|
6ce79966b1b89de1a6d6eb29cea945188c18652c
|
/R/algorithms__MovingKnots_MCMC.R
|
c906d525cf408cc6b330001367ff38a91ee752dd
|
[] |
no_license
|
feng-li/movingknots
|
d3041a0998f0873459814a09e413c714fff700c6
|
5f921070e4cd160a831c5191255f88dd7d4c850c
|
refs/heads/master
| 2021-06-10T00:18:57.172246
| 2021-03-22T05:56:44
| 2021-03-22T05:56:44
| 145,708,629
| 4
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,316
|
r
|
algorithms__MovingKnots_MCMC.R
|
#' MCMC for movingknots.
#'
#' Details are available in the paper.
#' @param gradhess.fun.name NA
#' @param logpost.fun.name NA
#' @param nIter NA
#' @param Params NA
#' @param Params4Gibbs NA
#' @param Params.sub.struc NA
#' @param Y NA
#' @param x0 NA
#' @param splineArgs NA
#' @param priorArgs NA
#' @param Params_Transform NA
#' @param propMethods NA
#' @param algArgs NA
#' @param crossvalid.struc NA
#' @param OUT.Params NA
#' @param OUT.accept.probs NA
#' @param burn.in NA
#' @param LPDS.sampleProp NA
#' @param track.MCMC NA
#' @return NA
#' @export
MovingKnots_MCMC <- function(gradhess.fun.name,
logpost.fun.name,
nIter,
Params,
Params4Gibbs,
Params.sub.struc,
Y,
x0,
splineArgs,
priorArgs,
Params_Transform,
propMethods,
algArgs,
crossvalid.struc,
OUT.Params,
OUT.accept.probs,
burn.in,
LPDS.sampleProp,
track.MCMC)
{
##----------------------------------------------------------------------------------------
## Update Knots locations (subsets), shrinkage and covariance jointly
##----------------------------------------------------------------------------------------
Running.date <- Sys.time() # The stating time
Start.Time <- proc.time() # The CPU time
cat("Updating Knots, Shrinkages, and Covariance >>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n\n")
MCMCPropFun <- function(iCross)
{
## Training sample
Y.iCross <- Y[crossvalid.struc$training[[iCross]], , drop = FALSE]
x.iCross <- x0[crossvalid.struc$training[[iCross]], , drop = FALSE]
for(iIter in 1:nIter) # loop nIter times
{
## a <- proc.time()
for (iPar in Params4Gibbs) # loop over parameters
{
for(iSub in 1:length(Params.sub.struc[[iPar]])) # update subsets
{
Sub4iPar <- Params.sub.struc[[iPar]][[iSub]]
if(!is.null(Sub4iPar)) # update if subsets are all fixed
{
param.cur <- matrix(Params[[iPar]][Sub4iPar])
algArgs.cur = algArgs[[iPar]]
## Special case to pass stepsize sequence.
if(tolower(propMethods[[iPar]]) %in% c("sgld"))
{
nInner = length(algArgs.cur[["stepsizeSeq"]]) / nIter
algArgs.cur[["stepsizeSeq"]] = (algArgs.cur[["stepsizeSeq"]]
[((iIter - 1)* nInner + 1):(iIter * nInner)])
}
out.iSub <- MHPropMain(param.cur = param.cur,
gradhess.fun.name = gradhess.fun.name,
logpost.fun.name = logpost.fun.name,
Params = Params,
Y.iCross = Y.iCross,
x.iCross = x.iCross,
callParam = list(id = iPar, subset = Sub4iPar),
splineArgs = splineArgs,
priorArgs = priorArgs,
algArgs = algArgs[[iPar]],
Params_Transform = Params_Transform,
propMethod = propMethods[[iPar]])
## Update the parameters in the parameters list.
param.cur.outMat = out.iSub$param.out
## Take the last one if inner loops are used in e.g. SGLD
Params[[iPar]][Sub4iPar] <- param.cur.outMat[, ncol(param.cur.outMat)]
## Save the acceptance probability
OUT.accept.probs[[iPar]][iSub, iIter, iCross] <- out.iSub$accept.prob
## Save the updated parameters for current iteration.
OUT.Params[[iPar]][Sub4iPar, , , iIter, iCross] <- param.cur.outMat
}
}
} # for (iPar in Params4Gibbs)
## Track the iterations
if(track.MCMC)
{
if(nIter>1000)
{
interval = 0.20
}
else
{
interval = 1/nIter
}
MCMC.trajectory(iIter, nIter, iCross, OUT.accept.probs, interval = .10)
}
## print(proc.time()-a)
}
return(list(OUT.Params = OUT.Params, OUT.accept.probs = OUT.accept.probs))
} # for(iCross in 1:nCross)
nCross <- length(crossvalid.struc$training)
cl <- getDefaultCluster()
if(length(cl) != 0)
{
clusterExport(cl, "nCross")
sink.parallel(cl)
MCMCPropOut = parLapply(cl = cl, X = as.list(1:nCross), fun = MCMCPropFun)
sink.parallel(cl, file = NULL)
}
else
{
MCMCPropOut = lapply(X = as.list(1:nCross), FUN = MCMCPropFun)
}
## Collecting results
for(iCross in 1:nCross){
for (iPar in Params4Gibbs) # loop over parameters
{
OUT.Params[[iPar]][,,,, iCross] = MCMCPropOut[[iCross]][["OUT.Params"]][[iPar]][,,,, iCross]
OUT.accept.probs[[iPar]][,, iCross] = MCMCPropOut[[iCross]][["OUT.accept.probs"]][[iPar]][,, iCross]
}
}
##----------------------------------------------------------------------------------------
## Sample coefficients from Normal
##----------------------------------------------------------------------------------------
cat("Updating Coefficients >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n\n")
## Average inner draws but keep the output structure.
OUT.Params1Inner = lapply(OUT.Params, function(x){
x4 = apply(x, c(1, 2, 4, 5), mean)
dimx = dim(x)
dimx[3] = 1
array(x4, dimx)
})
OUT.Params <- linear_post4coef(Y = Y,
x0 = x0,
OUT.Params = OUT.Params1Inner,
crossvalid.struc = crossvalid.struc,
nCross = nCross,
nIter = nIter,
splineArgs = splineArgs,
priorArgs = priorArgs,
Params_Transform = Params_Transform)
##----------------------------------------------------------------------------------------
## Compute the predictive density and LPDS
##----------------------------------------------------------------------------------------
## Update the LPDS
cat("Updating LPDS >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n\n")
OUT.LPDS <- LogPredScore(Y = Y,
x = x0,
logpost.fun.name = logpost.fun.name,
crossvaid.struc = crossvaid.struc,
splineArgs = splineArgs,
priorArgs = priorArgs,
OUT.Params = OUT.Params,
Params_Transform = Params_Transform,
burn.in = burn.in,
LPDS.sampleProp = LPDS.sampleProp)
cat("LPDS:", round(OUT.LPDS$LPDS, 2), "( n.se:", round(OUT.LPDS$nseLPDS, 2), ")",
"\n\n")
Finish.Time <- proc.time()
##########################################################################################
### Post Analysis
##########################################################################################
##----------------------------------------------------------------------------------------
## Computing posterior modes
##----------------------------------------------------------------------------------------
## Drop the burn-in
num.burn.in <- floor(nIter*burn.in)
## TODO: the posterior mode should be averaged with stepsize? Willing & Teh 2011, Eq(11).
OUT.Params.mode <- lapply(OUT.Params, function(x)
apply(x[,,, (num.burn.in+1):nIter,, drop=FALSE], c(1, 2, 5), mean))
OUT.Params.sd <- lapply(OUT.Params, function(x)
apply(x[,,, (num.burn.in+1):nIter,, drop=FALSE], c(1, 2, 5), sd))
OUT.Params.ineff <- lapply(OUT.Params, function(x)
apply(x[,,,(num.burn.in+1):nIter,, drop=FALSE], c(1, 2, 5), ineff))
OUT.accept.probs.mean <- lapply(OUT.accept.probs, function(x)
apply(x[, (num.burn.in+1):nIter, ,drop = FALSE],c(1, 3), mean))
##----------------------------------------------------------------------------------------
## Collecting system information
##----------------------------------------------------------------------------------------
SYS.INFO <- list(Running.date = Running.date,
Elapsed.Time <- Finish.Time - Start.Time,
OMP_NUM_THREADS = as.numeric(Sys.getenv("OMP_NUM_THREADS")),
System.info = as.list(Sys.info()))
##########################################################################################
## Save output
##########################################################################################
## save important output to global environment. "<<-"
OUT <- list()
OUT[["Params"]] <- OUT.Params
OUT[["Params.mode"]] <- OUT.Params.mode
OUT[["Params.sd"]] <- OUT.Params.sd
OUT[["Params.ineff"]] <- OUT.Params.ineff
OUT[["accept.probs"]] <- OUT.accept.probs
OUT[["accept.probs.mean"]] <- OUT.accept.probs.mean
OUT[["LPDS"]] <- OUT.LPDS
OUT[["SYS.INFO"]] <- SYS.INFO
return(OUT)
}
|
29c44da200b171e666b8e56b417985a63ac960f1
|
c3fced9fa3881b8d07000adfb5bebe4213eaa4a4
|
/ANALYSIS/DATA/control_variables.R
|
35f2fc68c33654eb754feed78455fe58124f25d8
|
[] |
no_license
|
rafael-schuetz/Pareto
|
ea9c06cb588113bbdf6a3b5da27a2d2a22f37dc8
|
74c414268d429373b83ccfb27bf222ae25b97c32
|
refs/heads/master
| 2022-04-13T11:36:56.587595
| 2020-04-08T18:31:48
| 2020-04-08T18:31:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,203
|
r
|
control_variables.R
|
### Kontrollvariablen für die DiD-Estimations
# Die id-fixed effects kontrollieren bereits für alle zeit-invarianten Eigenschaften, die
# zwischen den Einrichtungen variieren (z.B. Bundesland, Große / Kleine Stadt). Die time-fixed
# effects kontrollieren für Eigenschaften, die über die Zeit variieren, aber für die Einrichtungen
# konstant sind (z.B. allgemeiner Zeittrend). Aus diesem Grund werden nur Kontrollvariablen
# ermittelt, die über die Zeit variieren.
### Ermitteln relevanter zeit-varianter Kontrollvariablen
# Methode: Korrelation zwischen den relevanten Variablen und möglichen Kontrollvariablen aus
# den Mittagstisch Outcomes
# Erstellen eines Teildatensatzes, der die Zielvariablen (selfworth & dayToDaySkills),
# Treatment-Dummy und alle Variablen für den Mittagstisch umfasst
corMT_controls <- subset(dfcEF, select = c('selfworth',
'dayToDaySkills',
'treatEF',
"eatersPerMealNo", "newKidsNo", "cateringNo",
"mealsInInstitutionNo", "mealsNo", "breakfastsNo",
"lunchesNo", "snacksNo", "dinnersNo", "offersPerWeekNo",
"weeksOfferedNo", "daysOfferedNo", "DGECriteriaNo", "totalCost",
"subsidyRequest", "subsidy", "migrantBackgroundShare", "refugeesShare",
"unemploymentShare", "multipleChildrenHouseholdShare", "singleParentShare",
"povertyShare", "participateMore", "tasksLunch", "monthlyCooks", "weeklyCooks",
"shoppers", "ownIdeas", "stayLonger", "easyDishes", "dietaryKnowledge",
"appreciateHealthy", "foodCulture", "influenceHome", "cookAtHome", "askRecipes",
"moreConcentrated", "moreBalanced", "lessIll", "moreIndependent",
"betterTeamwork", "betterReading", "betterNumbers", "betterGrades",
"moreRegularSchoolVisits", "moreOpen", "moreConfidence",
"addressProblems", "proud", "success", "abiturNo", "mittlereReifeNo", "hauptschuleNo",
"noDegreeNo", "trainingStartedNo", "trainingCompletedNo", "enoughFood",
"enoughStaffLunch", "enoughStaffActivities", "qualitySatisfies", "regionalProducts",
"cultureReligion", "unsweetenedDrinks"))
# Erstellen einer Korrelationsmatrix anhand des Befehls rcorr() aus dem Package 'Hmisc'
library(Hmisc)
correlation_matrix_controls <- rcorr(as.matrix(corMT_controls))
# Speichern der p-Werte aus der Korrelationsmatrix in p
# Speicher der Korrelationskoeffizienten aus der Korrelationsmatrix in R
p <- correlation_matrix_controls$P
R <- correlation_matrix_controls$r
# Definition der Notation für das Signifikanzniveau in Sterne
# Wichtig ist, dass die Lehrzeichen bei den Sternen eingehalten werden
stars_significance <- ifelse(p < .0001, "****", ifelse(p < .001, "*** ",
ifelse(p < .01, "** ", ifelse(p < .05, "* ", " "))))
# Kürzen der Korrelationsmatrix auf zwei Kommazahlen
R <- format(round(cbind(rep(-1.11, ncol(corMT_controls)), R), 2))[,-1]
# Generierung einer neuen Matrix, welche die Korellationskoeffizienten mit den dazugehörigen
# Sterne für das Signifikanzniveau enthält
Rnew <- matrix(paste(R, stars_significance, sep=""), ncol=ncol(corMT_controls))
diag(Rnew) <- paste(diag(R), " ", sep="")
rownames(Rnew) <- colnames(corMT_controls)
colnames(Rnew) <- paste(colnames(corMT_controls), "", sep="")
# Auswählen der relevanten Spalten der neuen Korrelationsmatrix
library(dplyr)
Rnew <- Rnew [, 1:3]
view(Rnew)
# Kontrollvariablen für die Regression mit "selfworth" als Zielvariable:
# Alle Variablen des Mittagstischs, die sowohl mit der Zielvariable "selfworth" als auch mit der
# Treatment-Variable "TreatEF" korreliert sind und der Korrelationskoeffizient statistisch
# signifikant ist, werden als Kontrollvariablen in Betracht gezogen:
# dayToDaySkills, tasksLunch, monthlyCooks, weeklyCooks, shoopers, easyDishes, dietaryKnowledge,
# appreciateHealthy, foodCulture, moreConcentrated, moreBalanced, moreIndependent, moreOpen,
# moreConfidence, adressProblems, proud, enoughFood
# Kontrollvariablen für die Regression mit "dayToDaySkills" als Zielvariable:
# # Alle Variablen des Mittagstischs, die sowohl mit der Zielvariable "dayToDaySkills" als auch
# mit der Treatment-Variable "TreatEF" korreliert sind und der Korrelationskoeffizient statistisch
# signifikant ist, werden als Kontrollvariablen in Betracht gezogen:
# selfworth, DGECriterialNo, subsidy, tasksLunch, monthlyCooks, weeklyCooks, shoppers, easyDishes,
# dietaryKnowledge, appreciateHealthy, foodCulture, moreConcentrated, moreBalanced,
# moreIndependent, moreOpen, moreConfidence, adressProblems, proud, enoughFood
|
b43786dd50f77eb4bcdeaf17e69f65289e79b662
|
973d9b1632b1788e569892476321b545ef4c4192
|
/man/remove_named_cols_from_df.Rd
|
b8c98d3b0f4a4bfc011bae01085effdaf1660bfc
|
[] |
no_license
|
moj-analytical-services/costmodelr
|
8c9049b876b29ac9dd557e4e8271a49a4f154dbd
|
c95222b0800c5f725c156d5cbc4e86545bc4f8bb
|
refs/heads/master
| 2021-01-20T13:10:41.236112
| 2017-05-06T09:38:48
| 2017-05-06T09:38:48
| 90,454,724
| 0
| 2
| null | 2017-05-06T10:14:26
| 2017-05-06T10:14:26
| null |
UTF-8
|
R
| false
| true
| 302
|
rd
|
remove_named_cols_from_df.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{remove_named_cols_from_df}
\alias{remove_named_cols_from_df}
\title{Removed named columns from a dataframe}
\usage{
remove_named_cols_from_df(df, drops)
}
\description{
Removed named columns from a dataframe
}
|
79857a7b095dd5b1f88971ab9cbdf7e4fae5d959
|
3d557b698ecea160be63191450cac35defd10f58
|
/R/consensusBrlen.R
|
cadce68866c0d293429e36cd88537b0655c945f0
|
[] |
no_license
|
bomeara/utilitree
|
cb0601f105429ad292b27ae3cfeca29a6b348ad0
|
47c908ee5b3b1b285c255a22a29e51de5722f5f8
|
refs/heads/master
| 2016-08-08T16:23:59.109616
| 2014-07-29T20:46:39
| 2014-07-29T20:46:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,315
|
r
|
consensusBrlen.R
|
consensusBrlen <-
function(focalTree,sourceTreeList,type=c("proportion","mean_brlen","median_brlen","sd_brlen"),print.progress=TRUE,return.val="tree") {
type<-match.arg(type)
if (class(focalTree)!="phylo4") {
focalTree<-as(focalTree,"phylo4")
}
if (class(sourceTreeList[[1]])!="phylo4") {
sourceTreeList<-lapply(sourceTreeList,as,"phylo4")
}
allNodes<-nodeId(focalTree,"all")
allNodes<-allNodes[which(allNodes!=nodeId(focalTree,"root"))] #do not care about root edge
if (print.progress) {
print(c("nodeId","proportion","mean_brlen","median_brlen","sd_brlen"))
}
allResults<-sapply(allNodes,summarizeNode,focalTree,sourceTreeList,print.progress)
if (return.val=="tree") {
newEdgeLengths<-edgeLength(focalTree)
newNodeLabels<-nodeLabels(focalTree)
for (nodeIndex in 1:length(allNodes)) {
newLength<-allResults[which(row.names(allResults)==type),nodeIndex]
if (is.na(newLength)) {
newLength=0
}
newEdgeLengths[ which(names(newEdgeLengths)==getEdge(focalTree,allNodes[nodeIndex])) ]<-newLength
newNodeLabels[ which(names(newNodeLabels)==allNodes[nodeIndex]) ] <- round(allResults[which(row.names(allResults)=="proportion"),nodeIndex],2)
}
edgeLength(focalTree)<-newEdgeLengths
nodeLabels(focalTree)<-newNodeLabels
return(focalTree)
}
else {
return(allResults)
}
}
|
8961e5572d4db0d8f17d3d4ee618bbfe108abe28
|
289cc280222cc40f32686dc42c2ee68891e452ed
|
/man/hzar.meta.init.Rd
|
13ec7683baacc02f02ddff7980b5f5da2df0e91c
|
[] |
no_license
|
GrahamDB/hzar
|
6cd68626d54103f6be26c5a80c41d45ea267eb9a
|
fe52dfc553e69dd5367a8735b687992231f72e18
|
refs/heads/devel_v0.2
| 2023-05-25T16:51:49.440605
| 2019-10-23T18:39:57
| 2019-10-23T18:39:57
| 175,032,847
| 1
| 1
| null | 2023-05-16T23:59:44
| 2019-03-11T15:46:00
|
R
|
UTF-8
|
R
| false
| false
| 1,520
|
rd
|
hzar.meta.init.Rd
|
\name{hzar.meta.init}
\alias{hzar.meta.init}
\alias{hzar.meta.tune}
\alias{hzar.meta.fix}
\alias{hzar.meta.lower}
\alias{hzar.meta.upper}
\alias{hzar.meta.init<-}
\alias{hzar.meta.tune<-}
\alias{hzar.meta.fix<-}
\alias{hzar.meta.lower<-}
\alias{hzar.meta.upper<-}
\title{
Observe and Alter the model parameters in the clineMetaModel
}
\description{
This is a collection of methods to get or set attributes of the
various model parameters.
}
\usage{
hzar.meta.init(x)
hzar.meta.init(x) <- value
hzar.meta.tune(x)
hzar.meta.tune(x) <- value
hzar.meta.fix(x)
hzar.meta.fix(x) <- value
hzar.meta.lower(x)
hzar.meta.lower(x) <- value
hzar.meta.upper(x)
hzar.meta.upper(x) <- value
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{x}{
The \code{\link{clineMetaModel}} to use.
}
\item{value}{
The new value or values to set.
}
}
%% \details{
%% %% ~~ If necessary, more details than the description above ~~
%% }
\value{
Returns a list, with one numeric or boolean value per cline parameter.
%% ~Describe the value returned
%% If it is a LIST, use
%% \item{comp1 }{Description of 'comp1'}
%% \item{comp2 }{Description of 'comp2'}
%% ...
}
%% \references{
%% %% ~put references to the literature/web site here ~
%% }
\author{
Graham Derryberry \email{asterion@alum.mit.edu}
%% ~~who you are~~
}
%% \note{
%% %% ~~further notes~~
%% }
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
\code{\link{clineMetaModel}}
}
%% \examples{
%% }
|
dd416c97d78c02add5fe738773866369b0e17aab
|
1ea35aa8adc3131f178d873800c1c818343b9dec
|
/src/R/ROMOPOmics/man/readStandardTables.Rd
|
c9915d5920c6bd65f78672ad899c44c1cd5fae5a
|
[
"MIT"
] |
permissive
|
NCBI-Codeathons/OMOPOmics
|
9afa7abd4f59baa48248b73a823d5e50d0197663
|
c6f0293f99189cc682d04aef9f40e43a8878ca8b
|
refs/heads/master
| 2020-12-06T04:54:42.723704
| 2020-06-04T16:45:14
| 2020-06-04T16:45:14
| 232,348,286
| 7
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 517
|
rd
|
readStandardTables.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/readStandardTables.R
\name{readStandardTables}
\alias{readStandardTables}
\title{readStandardTables.R}
\usage{
readStandardTables(input_files)
}
\arguments{
\item{input_files}{List of CSV files to incorporate.}
}
\description{
Given a list of standardized table CSV files, function reads each and incorporates
into a single data frame, with each annotated with an "experiment" column
containing the basename of the origin file.
}
|
4e177570a9771b941fa589baea99302b28376e1c
|
5106c53826b243575b106fd00a3ac1539bd66cad
|
/man/plot_argo.Rd
|
ddf35fa17a5ccc1f1665826e5d27232a686f2887
|
[] |
no_license
|
cran/argo
|
ec1c58d544f3fc10c15e6d18c9d27d29c25dfb2e
|
cd77311a40ebb3f4aabbb350b567c337fd920236
|
refs/heads/master
| 2023-06-07T22:10:54.609142
| 2023-05-24T10:20:14
| 2023-05-24T10:20:14
| 236,549,033
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,193
|
rd
|
plot_argo.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot.R
\name{plot_argo}
\alias{plot_argo}
\title{Time series plot of ARGO applied on CDC's ILI data}
\usage{
plot_argo(GFT_xts, GC_GT_cut_date, model_names, legend_names, zoom_periods)
}
\arguments{
\item{GFT_xts}{dataframe with all predicted values}
\item{GC_GT_cut_date}{cutting date for switching datasets}
\item{model_names}{name of predicting models}
\item{legend_names}{legend for predicting models}
\item{zoom_periods}{vector of periods to zoom into}
}
\value{
a graph on the default plot window
}
\description{
This function is used to reproduce the ARGO plot.
}
\examples{
GFT_xts = xts::xts(exp(matrix(rnorm(1000), ncol=5)), order.by = Sys.Date() - (200:1))
names(GFT_xts) <- paste0("col", 1:ncol(GFT_xts))
names(GFT_xts)[1] <- "CDC.data"
zoom_periods = c()
for (i in 0:5){
zoom_periods = c(
zoom_periods,
paste0(zoo::index(GFT_xts)[i*30+1], "/", zoo::index(GFT_xts)[i*30+30])
)
}
plot_argo(
GFT_xts = GFT_xts,
GC_GT_cut_date = zoo::index(GFT_xts)[50],
model_names = colnames(GFT_xts)[-1],
legend_names = paste0(colnames(GFT_xts)[-1], "legend"),
zoom_periods = zoom_periods
)
}
|
addadb7f3ff2b00cfb77ecf1f2430b8dbf658136
|
27aff212b65e315f1f31d6a053027e253c04a8e4
|
/get_merge.R
|
489164727c8c691b280d3e993ce99fb3c2697265
|
[] |
no_license
|
aljrico/triple_a
|
f82ccefc8b8969a55bc14f270884ccf9e0e52658
|
427db264dd1ef09edeff7d0214d728e988b8ead4
|
refs/heads/master
| 2022-04-04T10:27:38.957945
| 2020-01-04T21:54:16
| 2020-01-04T21:54:16
| 177,411,242
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,048
|
r
|
get_merge.R
|
library(tidyverse)
library(zoo)
# Merge daily price data with sparser financial-statement data, producing one
# row per firm and trading day.
#
# prices:     data frame with at least `date` and `firm` columns (daily rows).
# financials: data frame with at least `date` and `firm` columns (statement
#             dates, e.g. quarterly).
# Returns a tibble in which financial figures are carried forward within each
# firm and all remaining character columns are coerced to numeric.
getMerge <- function(prices, financials){
  # Get common days: statement dates that do not occur in `prices` are shifted
  # forward one day at a time until they land on a date present in `prices`.
  # NOTE(review): `absences` is computed once, before the loop mutates the
  # dates; `k` advances to the next absent row only when the count of
  # unmatched dates actually drops. If some date can never be matched this
  # loop never terminates -- confirm against the expected inputs.
  k <- 1
  absences <- which(!(financials$date %in% prices$date))
  while(sum(!(unique(financials$date) %in% unique(prices$date))) > 0){
    count <- sum(!(unique(financials$date) %in% unique(prices$date)))
    financials$date[[absences[k]]] <- financials$date[[absences[k]]] + 1
    if(sum(!(unique(financials$date) %in% unique(prices$date))) < count) k <- k + 1
  }
  stocks <- unique(prices$firm)
  tidy_merged <- tibble()
  # Left join keeps every price row; financial columns are NA except on
  # (shifted) statement dates.
  merged <- left_join(prices,financials, by = c("date", "firm"))
  for(s in stocks){
    df <- merged %>% filter(firm == s)
    # Per firm: carry the last observation forward (zoo::na.locf), drop rows
    # that still contain NAs (before the first statement), de-duplicate, and
    # prepend to the accumulated result.
    tidy_merged <- df %>%
      na.locf() %>%
      na.omit() %>%
      distinct() %>%
      as_tibble() %>%
      rbind(tidy_merged)
  }
  # Restore column types -- presumably na.locf()'s matrix coercion leaves
  # everything as character (TODO confirm):
  tidy_merged$date <- as.Date(tidy_merged$date)
  tidy_merged$firm <- as.factor(tidy_merged$firm)
  cols <- colnames(tidy_merged)
  for(c in cols) if(is.character(tidy_merged[[c]])) tidy_merged[[c]] <- as.numeric(tidy_merged[[c]])
  tidy_merged <- tidy_merged %>% distinct()
  return(tidy_merged)
}
|
fa664597e3fdaa7cf476ec8252c459bb634d6ac6
|
5015d7aba85f3371535678e9f678144a5c064d5c
|
/1806_pilot_analysis/pilot-data_combine.R
|
8746a7d35ab0122010f0eb8a652ade0737cd8ef3
|
[] |
no_license
|
montero-melis/2018_replication_sheb-pulv2013
|
fa942a8b0c3b04578fcc3f022d00afd6e4ba46de
|
1ef3ab5e3e78935c8c6e539130acf14f59999aa8
|
refs/heads/master
| 2021-08-06T15:05:58.778015
| 2021-06-23T18:58:14
| 2021-06-23T18:58:14
| 134,530,954
| 0
| 2
| null | 2021-06-23T18:58:15
| 2018-05-23T07:29:40
|
Python
|
UTF-8
|
R
| false
| false
| 20,100
|
r
|
pilot-data_combine.R
|
## Process and combine individual participant data
# This script takes the pilot data after coding and combines it into single
# data files for each task, saving it to disk after some processing.
# Tasks: 1. Memory task, 2. Verb ratings, 3. Verb understanding.
library(dplyr) # for left_join
# ------------------------------------------------------------------------
# Functions
# ------------------------------------------------------------------------
## Copy-pasted and slightly modified from script "pilot-data_preprocess.R"
# gets name of data files in "mypath" that contain "expname" in their name
# List the coded data files for one experiment.
#
# expname: substring identifying the experiment; only csv files whose name
#          contains it are returned. NULL (the default) matches every csv
#          file, because paste0() silently drops a NULL argument.
# mypath:  directory to search.
# Returns a character vector of matching file names (not full paths),
# excluding files whose name starts with "TOCODE" (i.e. not yet coded).
get_data_filenames <- function(expname = NULL, mypath = "pilot_analysis/data_coding") {
  myfiles <- list.files(mypath)
  # match only csv files for the right experiment
  mymatch <- paste0(".*", expname, ".*\\.csv")
  myfiles <- myfiles[grepl(mymatch, myfiles)]
  # Exclude files that start with "TOCODE" (clearer than the original
  # perl lookahead "^(?!TOCODE)", identical result)
  myfiles[!startsWith(myfiles, "TOCODE")]
}
# example (interactive smoke test: list the memory-task data files and count them):
# example:
get_data_filenames("sheb_replic_pilot")
length(get_data_filenames("sheb_replic_pilot"))
## FUN that reads individual data files and combines them into single file
## after some processing
# (NB: Perhaps this could be done more economically using the lapply function,
# see https://www.youtube.com/watch?v=8MVgYu0y-E4, but I don't know if it would
# allow for the processing of individual files inside the for-loop, probably not.)
# Read the listed data files and combine them into a single data frame,
# applying task-dependent post-processing.
#
# file_list:   character vector of file names inside `mypath`.
# sep_default: field separator passed to read.csv().
# mypath:      directory containing the files (note: default ends in "/",
#              and the path is built by simple concatenation).
# Returns one data frame with all rows; for the memory task (detected via a
# "word4" column) a per-trial `score` column is added, and block/trial
# numbers are shifted to start at 1.
combine_files <- function(file_list = NULL, sep_default = ";",
                          mypath = "pilot_analysis/data_coding/") {
  # Read one file; if there is a coder column, apply the coder's initials
  # (in 1st row) to all rows.
  read_one <- function(f) {
    curr_df <- read.csv(paste0(mypath, f), sep = sep_default,
                        fileEncoding = "UTF-8")
    if ("coder" %in% names(curr_df)) curr_df$coder <- curr_df$coder[1]
    curr_df
  }
  # Collect into a list and bind once: avoids the O(n^2) copying of growing
  # a data frame with rbind() inside a loop.
  dfs <- lapply(file_list, read_one)
  df <- if (length(dfs) > 0) do.call(rbind, dfs) else data.frame()
  # some processing is done here but depends on the task:
  # Block and trial numbers should start at 1 (not zero)
  if ("block" %in% names(df)) df$block <- df$block + 1
  if ("trial" %in% names(df)) df$trial <- df$trial + 1
  # score (at trial level) is 1 iff all four words are reproduced in correct order
  if ("word4" %in% names(df)) { # this check is sufficient
    m <- as.matrix(df[, c("w1", "w2", "w3", "w4")])
    df$score <- as.numeric(rowSums(m) == 4) # same NA behavior as apply(m, 1, sum)
  }
  df
}
# ------------------------------------------------------------------------
# Memory task
# ------------------------------------------------------------------------
## Read individual data files and combine into a single file after some processing
# NOTE: relies on get_data_filenames()/combine_files() defined above. The
# head()/tail()/str() calls are interactive sanity checks with no side effects.
mem <- combine_files(get_data_filenames("sheb_replic_pilot"))
head(mem)
tail(mem)
str(mem)
length(unique(mem$participant)) # number of participants
## Process
# Participants 900 & 901 carried out 6 blocks (with additional word_duration
# settings of 250 and 350 ms)
table(mem[mem$participant %in% c(900, 901), "word_duration"])
# We remove the blocks corresponding to word_duration = 0.25 or 0.35
sum(mem$word_duration %in% c(.25, .35)) # 112 observations from 2 participants
mem <- mem[! mem$word_duration %in% c(.25, .35), ]
# We also want to adjust block number so that they lie between 1 and 4 (for
# comparability). This removes all traces of these participants being "different".
unique(mem[mem$participant %in% c(900, 901), c("participant", "block", "word_duration")])
# I solve this almost manually:
# (shift the remaining block numbers down so they form a consecutive 1..4 run)
# ppt 900
mem[mem$participant == 900, 3:7] # before
mem[with(mem, participant == 900 & block %in% 2:4), "block"] <- -1 + mem[with(mem, participant == 900 & block %in% 2:4), "block"]
mem[with(mem, participant == 900 & block %in% 6), "block"] <- -2 + mem[with(mem, participant == 900 & block %in% 6), "block"]
mem[mem$participant == 900, 3:7] # after
# ppt 901
mem[mem$participant == 901, 3:7] # before
mem[with(mem, participant == 901 & block %in% 3:5), "block"] <- -1 + mem[with(mem, participant == 901 & block %in% 3:5), "block"]
mem[mem$participant == 901, 3:7] # after
# check everything looks right
unique(mem[mem$participant %in% c(900, 901), c("participant", "block", "word_duration")])
table(mem$word_duration) # Only our 4 word_durations left in the data
# Files whose "comment" column is completely empty get this column filled with NAs
head(mem[is.na(mem$comment),])
# replace them by empty characters instead
mem[is.na(mem$comment), "comment"] <- ""
# Trials where all individual words were remembered, yet there is an error?
correct4but_error <- which(with(mem, score == 1 & error != ""))
mem[correct4but_error, ]
# All but one case involve shifts/transpositions -- we will treat them all as
# errors at the item level
mem$score[correct4but_error] <- 0
rm(correct4but_error)
# And vice versa?
mem[with(mem, score == 0 & error == ""), ] # these I corrected manually
# What comments are there?
mem$comment[mem$comment != ""]
# Some of them warrant removing the data rows, for example if ppts repeated
# words silently or if the recording is unclear; I remove these observations
# using regex that match those comments
myregex <- "(SILENTLY|UNCLEAR|NOT CLEAR|before the beep|MUMBLES)"
mymatch <- grepl(myregex, mem$comment)
sum(mymatch) # 20 matches
mem$comment[mymatch]
mem[mymatch,]
# remove those data rows
mem <- mem[!mymatch,]
rm(myregex, mymatch)
# Error coding should follow instructions in document
# team_instructions/data-coding_workflow.md (under "Types of errors")
table(mem$error)
# Replace "0" (zero) with "O" (coding typo for Omission)
mem$error <- gsub("0", "O", mem$error)
# create column that contains types of errors explicitly written out:
mem$error_expl <- mem$error
mem$error_expl <- gsub("O", "Omission", mem$error_expl)
mem$error_expl <- gsub("R", "Replacement", mem$error_expl)
mem$error_expl <- gsub("S", "Shift", mem$error_expl)
mem$error_expl <- gsub("A", "Addition", mem$error_expl)
mem$error_expl <- gsub(",", ", ", mem$error_expl)
# For plotting and analyses, express word_duration in ms rather than seconds
mem$word_duration <- mem$word_duration * 1000
mem$SOA <- mem$SOA * 1000
## Save a wide version (1 trial per row) and a long version (1 verb per row) to disk
## wide format
write.csv(mem, "pilot_analysis/data_pilot_memory-task_wide.csv",
          row.names = FALSE, fileEncoding = "UTF-8")
## long format
# I've used the answer in this thread:
# https://stackoverflow.com/questions/23945350/reshaping-wide-to-long-with-multiple-values-columns
# But I don't get everything in the syntax, e.g. in toy example below, why do
# the arguments to varying need to be passed in that order for the mapping to
# be correct? (so the w1, w2 etc. columns are passed before word1, word2, etc.)
# Toy example for illustration
mytest <- mem[1:4, c(7,10:17)]
mytest
# this comes out right
reshape(mytest, direction = 'long',
        varying = paste(c("w", "word"), rep(1:4, each = 2), sep = ""),
        timevar = 'wordInTrial',
        times = 1:4,
        v.names = c('verb', 'correct'),
        idvar = c('trial')
)
# but here the mapping of the 'verb' and 'correct' columns is wrong -- why??
reshape(mytest, direction = 'long',
        varying = paste(c("word", "w"), rep(1:4, each = 2), sep = ""),
        timevar = 'wordInTrial',
        times = 1:4,
        v.names = c('verb', 'correct'),
        idvar = c('trial')
)
# Anyway, here I apply it to the whole data set:
head(mem)
# One row per (participant, block, trial, word position).
mem_long <- reshape(mem[, c(3:8, 10:17)],
                    direction = 'long',
                    varying = paste(c("w", "word"), rep(1:4, each = 2), sep = ""),
                    timevar = 'wordInTrial',
                    times = 1:4,
                    v.names = c('verb', 'correct'),
                    idvar = c('participant', 'block', 'trial'))
# reorder rows in a more sensible format (follows the order of wide format)
mem_long <- mem_long[with(mem_long, order(participant, block, trial, wordInTrial)), ]
head(mem_long)
tail(mem_long)
# save to disk
write.csv(mem_long, "pilot_analysis/data_pilot_memory-task_long.csv",
          row.names = FALSE, fileEncoding = "UTF-8")
# ------------------------------------------------------------------------
# Verb bias task
# ------------------------------------------------------------------------
## Read individual data files and combine into a single file after some processing
# NOTE: depends on `mem_long` from the memory-task section above (used to
# recover each verb's intended arm/leg category).
get_data_filenames("verb_rating") # individual file names
length(get_data_filenames("verb_rating")) # number of data files
bias <- combine_files(get_data_filenames("verb_rating"), sep_default = ",")
head(bias)
tail(bias)
str(bias)
length(unique(bias$participant)) # number of participants
# change the verbs to lower case (as in other data files):
bias$verb <- tolower(bias$verb)
# add a column for the category we had in mind for each verb:
verb_categ <- unique(mem_long[, c("type", "verb")])
verb_categ$verb <- as.character(verb_categ$verb)
bias <- left_join(bias, verb_categ) # natural join, presumably just on "verb"
# Note that, due to some oversight, there is one verb that was used in the
# norming task, but not in the memory task:
sum(! unique(bias$verb) %in% verb_categ$verb)
unique(bias$verb)[! unique(bias$verb) %in% verb_categ$verb]
# and vice versa
sum(! verb_categ$verb %in% unique(bias$verb))
verb_categ$verb[! verb_categ$verb %in% unique(bias$verb)]
# This means that:
# "trek" has not been normed
# "pluck" has no type assigned, so let's assign it to "arm"
bias[bias$verb == "pluck", "type"] <- "arm"
# This means there are 57 arm words and 55 leg words for norming data
table(unique(bias[, c("verb", "type")])$type)
# rearrange columns
bias <- bias[, c("expName", "date", "participant", "trial", "verb", "type",
                 "rated_category", "rating")]
head(bias)
# save to disk
write.csv(bias, "pilot_analysis/data_verb-bias.csv",
          row.names = FALSE, fileEncoding = "UTF-8")
# ------------------------------------------------------------------------
# Verb comprehension task
# ------------------------------------------------------------------------
## There were two versions of this task:
# 1) a multiple choice version
get_data_filenames("multiple-choice") # individual file names
length(get_data_filenames("multiple-choice")) # 11 participants did this version
# 2) a free translation version
get_data_filenames("oral-input") # individual file names
length(get_data_filenames("oral-input")) # 6 participants did this version
## 1) Multiple choice version
multi <- combine_files(get_data_filenames("multiple-choice"), sep_default = ",")
head(multi)
tail(multi)
str(multi)
length(unique(multi$participant)) # number of participants
# change the verbs to lower case (as in other data files):
multi$verb <- tolower(multi$verb)
# As above, add a column for the category we had in mind for each verb:
# (verb_categ is defined in the verb-bias section above)
multi <- left_join(multi, verb_categ)
# Note that, as we saw above, "pluck" was used in the norming task, but not
# in the memory task:
unique(multi$verb)[! unique(multi$verb) %in% verb_categ$verb]
# and vice versa for "trek"
verb_categ$verb[! verb_categ$verb %in% unique(multi$verb)]
# Assign "pluck" to arm type
multi[multi$verb == "pluck", "type"] <- "arm"
# save to disk
write.csv(multi, "pilot_analysis/data_verb-understanding_multiple-choice.csv",
          row.names = FALSE, fileEncoding = "UTF-8")
# 2) Free translation version
transl <- combine_files(get_data_filenames("oral-input"), sep_default = ";")
head(transl)
tail(transl)
str(transl)
length(unique(transl$participant)) # number of participants
# change the verbs to lower case (as in other data files):
transl$verb <- tolower(transl$verb)
# As above, add a column for the category we had in mind for each verb:
# (verb_categ is defined in the verb-bias section above)
transl <- left_join(transl, verb_categ)
# Note that, as we saw above, "pluck" was used in the norming task, but not
# in the memory task:
unique(transl$verb)[! unique(transl$verb) %in% verb_categ$verb]
# and vice versa for "trek"
verb_categ$verb[! verb_categ$verb %in% unique(transl$verb)]
# Assign "pluck" to arm type
transl[transl$verb == "pluck", "type"] <- "arm"
# Look at the comments and recode where necessary (done by GMM)
transl_with_comments <- transl[transl$comment != "", -c(1:2, 5)]
# Accepted Swedish translations per verb, written as regular expressions.
# An entry (verb = v, pattern = p) means: any response for verb v whose
# transcription matches p is (re)scored as correct. This table replaces the
# original long run of copy-pasted grepl()/assignment pairs, which had
# already produced one bug (see "wrap" below). The original bare
# `transl[...]` inspection lines produced no output under source() and have
# been dropped; inspect `transl[hits, ]` interactively if needed.
accepted_translations <- c(
  bash    = "(slå|smälla|förstöra)",
  carve   = "(karva|skära|rista)",
  clutch  = "hålla",
  crawl   = "krypa",
  file    = "(lägga|arkivera)",
  flit    = "snabbt",
  grab    = "hålla fast",
  grip    = "hålla i",
  hike    = "gå",
  hop     = "hoppa",
  inch    = "närma sig l",
  leap    = "(hopp|ta stort steg)",
  mount   = "(bestiga|hoppa upp|hoppa på)",
  pace    = "gå (runt|otåligt)",
  plod    = "gå (slarvigt|med svårigheter)",
  roam    = "(vandra|dra runt)",
  roll    = "rulla",
  rub     = "(gnida|massera)",
  scoop   = "skopa",
  seize   = "(ta tag|tillfångata)",
  skate   = "skejta",
  skim    = "skumma",
  skip    = "(småhoppa|hoppsa)",
  slap    = "sl",                    # "slog/slå" for "slap" is ok
  slip    = "snubbla",
  slither = "(kräla|slingra)",
  snatch  = "(ta|fånga)",
  sprint  = "(springa|sprinta)",
  stagger = "gå klumpigt",
  stray   = "gå ifrån",
  stride  = "gå (fort|snabbt|beslutsamt)",
  stroll  = "promenera",
  strut   = "good news",
  traipse = "gå försiktigt",
  tread   = "går? försiktigt",
  trot    = "galoppera",             # preserves the leg association and overall meaning
  trudge  = "gå",
  twine   = "(fläta|nysta)",
  wobble  = "(stappla|pendla)",
  wrap    = "omge"                   # BUGFIX: original code assigned this correction
                                     # to verb "wobble" instead of "wrap"
)
# Apply every correction.
for (v in names(accepted_translations)) {
  hits <- grepl(accepted_translations[[v]], transl$ppt_translation) &
    transl$verb == v
  transl[hits, "score"] <- 1
}
# save to disk
write.csv(transl, "pilot_analysis/data_verb-understanding_free-translation.csv",
          row.names = FALSE, fileEncoding = "UTF-8")
|
cb08593c264eee65aa9f8eefb1b0d527d87ed02d
|
4db2b5ae0fa68ad90d26673359182c0a83a680ff
|
/man/check_s3_uri.Rd
|
9445067b6821164bf664542414986b7634a828a6
|
[] |
no_license
|
cran/botor
|
2ee5d62d9accef6419afa96806583d3783367aa8
|
6781c087746f7519c4fa0a3018e793f933353d3b
|
refs/heads/master
| 2023-03-16T03:13:26.698530
| 2023-03-12T14:00:02
| 2023-03-12T14:00:02
| 216,648,618
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 571
|
rd
|
check_s3_uri.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/checkmate.R
\name{check_s3_uri}
\alias{check_s3_uri}
\alias{assert_s3_uri}
\alias{test_s3_uri}
\alias{expect_s3_uri}
\title{Check if an argument looks like an S3 bucket}
\usage{
check_s3_uri(x)
}
\arguments{
\item{x}{string, URI of an S3 object, should start with \code{s3://}, then bucket name and object key}
}
\description{
Check if an argument looks like an S3 bucket
}
\examples{
check_s3_uri('s3://foo/bar')
check_s3_uri('https://foo/bar')
\dontrun{
assert_s3_uri('https://foo/bar')
}
}
|
3eaaa991080dc2484373f44ec6ace411df00bb39
|
b2f61fde194bfcb362b2266da124138efd27d867
|
/code/dcnf-ankit-optimized/Results/QBFLIB-2018/A1/Database/Letombe/Abduction/aim-200-3_4-yes1-4-00/aim-200-3_4-yes1-4-00.R
|
17f5adf85f1e3ced8f47fdb6aba8636f74342bd5
|
[] |
no_license
|
arey0pushpa/dcnf-autarky
|
e95fddba85c035e8b229f5fe9ac540b692a4d5c0
|
a6c9a52236af11d7f7e165a4b25b32c538da1c98
|
refs/heads/master
| 2021-06-09T00:56:32.937250
| 2021-02-19T15:15:23
| 2021-02-19T15:15:23
| 136,440,042
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 72
|
r
|
aim-200-3_4-yes1-4-00.R
|
95301355a18659f4a5af0c8219b71798 aim-200-3_4-yes1-4-00.qdimacs 1478 3316
|
1c12dcc8dc15a8cb1055624b94d16656022e4417
|
9bc58af42b1a2ac00b8b73283d0276930b46b0af
|
/man/QuartetStatus.Rd
|
734d095fbc74643af7571e2b777ddfcf4809f077
|
[] |
no_license
|
ms609/Quartet
|
039147600b4db97b6c59ee137ce0b9c6922553d6
|
7ce55d5c0867c6c67a3f8eabb93627a2917fb8c8
|
refs/heads/master
| 2023-05-23T23:26:07.326694
| 2023-04-20T18:24:46
| 2023-04-20T18:24:46
| 80,424,189
| 7
| 4
| null | 2022-08-30T13:10:05
| 2017-01-30T13:34:42
|
C++
|
UTF-8
|
R
| false
| true
| 6,258
|
rd
|
QuartetStatus.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/QuartetDistance.R, R/tqDist.r
\name{SharedQuartetStatus}
\alias{SharedQuartetStatus}
\alias{QuartetStatus}
\alias{ManyToManyQuartetAgreement}
\alias{TwoListQuartetAgreement}
\alias{SingleTreeQuartetAgreement}
\title{Status of quartets}
\usage{
SharedQuartetStatus(trees, cf = trees[[1]])
QuartetStatus(trees, cf = trees[[1]], nTip = NULL)
ManyToManyQuartetAgreement(trees, nTip = NULL)
TwoListQuartetAgreement(trees1, trees2)
SingleTreeQuartetAgreement(trees, comparison)
}
\arguments{
\item{trees}{A list of trees of class \code{\link[ape:read.tree]{phylo}},
with identically labelled tips.}
\item{cf}{Comparison tree of class \code{\link[ape:read.tree]{phylo}}. If unspecified,
each tree is compared to the first tree in \code{trees}.}
\item{nTip}{Integer specifying number of tips that could have occurred
in \code{trees}. Useful if comparing trees from different data sources that
contain non-overlapping tips.
If \code{NULL}, the default, then trees are assumed to contain the same tips.
If \code{TRUE}, then a vector is generated automatically by counting all unique
tip labels found in \code{trees} or \code{cf}.}
\item{trees1, trees2}{List or \code{multiPhylo} objects containing
trees of class \code{phylo}.}
\item{comparison}{A tree of class \code{\link[ape:read.tree]{phylo}} against
which to compare \code{trees}.}
}
\value{
\code{QuartetStatus()} returns a two dimensional array. Rows correspond to the input trees; the first row will report a perfect match if the first tree is specified as the comparison tree (or if \code{cf} is not specified). Columns list the status of each quartet:
\describe{
\item{N}{The total number of quartet \emph{statements} for two trees of \emph{n} leaves,
i.e. 2 \emph{Q}.}
\item{Q}{The total number of quartets for \emph{n} leaves.}
\item{s}{The number of quartets that are resolved identically in both trees.}
\item{d}{The number of quartets that are resolved differently in each tree.}
\item{r1}{The number of quartets that are resolved in tree 1, but not in tree 2.}
\item{r2}{The number of quartets that are resolved in tree 2, but not in tree 1.}
\item{u}{The number of quartets that are unresolved in both trees.}
}
\code{ManyToManyQuartetAgreement()} returns a three-dimensional array
listing, for each pair of trees in turn, the number of quartets in each
category.
\code{TwoListQuartetAgreement()} returns a three-dimensional array listing,
for each pair of trees in turn, the number of quartets in each category.
\code{SingleTreeQuartetAgreement()} returns a two-dimensional array listing,
for tree in \code{trees}, the total number of quartets and the
number of quartets in each category.
The \code{comparison} tree is treated as \code{tree2}.
}
\description{
Determines the number of quartets that are consistent within pairs of trees.
}
\details{
Given a list of trees, returns the number of quartet statements present in the
reference tree (the first entry in \code{trees}, if \code{cf} is not specified)
that are also present in each other tree. A random pair of fully resolved
trees is expected to share \code{choose(n_tip, 4) / 3} quartets.
If trees do not bear the same number of tips, \code{SharedQuartetStatus()} will
consider only the quartets that include taxa common to both trees.
From this information it is possible to calculate how many of all possible
quartets occur in one tree or the other, though there is not yet a function
calculating this; \href{https://github.com/ms609/Quartet/issues/new}{let us know}
if you would appreciate this functionality.
The status of each quartet is calculated using the algorithms of
Brodal \emph{et al}. (2013) and Holt \emph{et al}. (2014), implemented in the
tqdist C library (Sand \emph{et al}. 2014).
}
\section{Functions}{
\itemize{
\item \code{SharedQuartetStatus()}: Reports split statistics obtained after removing all
tips that do not occur in both trees being compared.
\item \code{ManyToManyQuartetAgreement()}: Agreement of each quartet, comparing each pair of
trees in a list.
\item \code{TwoListQuartetAgreement()}: Agreement of each quartet in trees in one list with
each quartet in trees in a second list.
\item \code{SingleTreeQuartetAgreement()}: Agreement of each quartet in trees in a list with
the quartets in a comparison tree.
}}
\examples{
data("sq_trees")
# Calculate the status of each quartet relative to the first entry in
# sq_trees
sq_status <- QuartetStatus(sq_trees)
# Calculate the status of each quartet relative to a given tree
two_moved <- sq_trees[5:7]
sq_status <- QuartetStatus(two_moved, sq_trees$ref_tree)
# Calculate Estabrook et al's similarity measures:
SimilarityMetrics(sq_status)
# Compare trees that include a subset of the taxa 1..10
library("TreeTools", quietly = TRUE, warn.conflicts = FALSE)
QuartetStatus(BalancedTree(1:5), BalancedTree(3:8), nTip = 10)
# If all taxa studied occur in `trees` or `cf`, set `nTip = TRUE`
QuartetStatus(BalancedTree(1:5), BalancedTree(3:10), nTip = TRUE)
# Calculate Quartet Divergence between each tree and each other tree in a
# list
QuartetDivergence(ManyToManyQuartetAgreement(two_moved))
# Calculate Quartet Divergence between each tree in one list and each
# tree in another
QuartetDivergence(TwoListQuartetAgreement(sq_trees[1:3], sq_trees[10:13]))
}
\references{
\itemize{
\item \insertRef{Brodal2013}{Quartet}
\item \insertRef{Estabrook1985}{Quartet}
\item \insertRef{Holt2014}{Quartet}
\item \insertRef{Sand2014}{Quartet}
}
}
\seealso{
\itemize{
\item Use splits (groups/clades defined by nodes or edges of the tree) instead
of quartets as the unit of comparison: \code{\link[=SplitStatus]{SplitStatus()}}.
\item Generate distance metrics from quartet statuses: \code{\link[=SimilarityMetrics]{SimilarityMetrics()}}.
}
Other element-by-element comparisons:
\code{\link{CompareQuartetsMulti}()},
\code{\link{CompareQuartets}()},
\code{\link{CompareSplits}()},
\code{\link{PairSharedQuartetStatus}()},
\code{\link{QuartetState}()},
\code{\link{SplitStatus}()}
}
\author{
\href{https://orcid.org/0000-0001-5660-1727}{Martin R. Smith}
(\href{mailto:martin.smith@durham.ac.uk}{martin.smith@durham.ac.uk})
}
\concept{element-by-element comparisons}
|
02bcc06a219068104ab8bb120583700f13b522a9
|
80b8f237d1faed2e1786aada66bfdd08bf2fec68
|
/R/plot_ckmeans.R
|
34ae4d3d85aedd72d5340bf27777d73633f9bf45
|
[
"MIT"
] |
permissive
|
TankredO/ckmeans
|
17bb71210f4d28adb82cf5dbeb5401c658d42bd6
|
f97d378e1a6ed328f263507dc0e5d83cae664478
|
refs/heads/master
| 2020-06-18T17:51:49.817535
| 2020-04-02T10:14:09
| 2020-04-02T10:14:09
| 196,389,548
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,383
|
r
|
plot_ckmeans.R
|
#' Generic plotting function for distances (distance matrices)
#' @title Plot distance matrix
#' @author Tankred Ott
#' @param x object to plot
#' @param ... further arguments passed to the class specific plotDist functions
#' @import graphics
#' @import grDevices
#' @export
plotDist <- function(x, ...) {
  # S3 dispatch on the class of `x` (the first argument, which is the
  # default dispatch object, so it need not be passed to UseMethod)
  UseMethod("plotDist")
}
#' Function to plot a distance matrix
#' @title Plot distance matrix with clusters
#' @description Plots a distance matrix with color coded clusters
#' @author Tankred Ott
#' @param x n*n (distance) matrix
#' Plot a distance (or similarity) matrix
#'
#' @description Renders a square matrix as a raster heat map with optional
#'   cluster annotation bars, axis labels taken from row names, and an
#'   optional color bar. Returns the row/column ordering used for plotting.
#' @param x square numeric matrix to plot
#' @param cl vector determining cluster membership
#' @param value_range vector with two elements c(d_min, d_max) determining the possible value range within the matrix. By default this will be the range of values in x.
#' @param ord vectors of indices for ordering of the matrix or boolean determining whether the matrix should be ordered
#' @param col vector of colors for the distance matrix
#' @param col_cl vector of colors or color ramp function for the clusters
#' @param plot_colorbar logical determining whether a color bar should be plotted
#' @param ... further arguments (recognized: mar, oma, cex.axis, is_similarity, cb_lwd, cb_cex)
#' @return named integer vector: the ordering of rows/columns used for plotting
#' @export
plotDist.matrix <- function(x, cl=NULL, value_range=NULL, ord=TRUE, col=NULL, col_cl=NULL, plot_colorbar=FALSE, ...) {
  # restore the caller's graphics state when we leave
  old_par <- par(no.readonly = TRUE)
  on.exit(par(old_par))
  # unpack ...
  dot <- list(...)
  # prepare par
  mar <- if(is.null(dot$mar)) c(0,3,3,0) else dot$mar
  oma <- if(is.null(dot$oma)) c(1,1,1,1) else dot$oma
  par(mar=mar, oma=oma)
  # reserve a narrow right-hand panel for the color bar
  # (fixed: removed stray trailing comma in the layout() call)
  if(plot_colorbar) layout(
    matrix(c(1,2), ncol = 2),
    widths = c(6,1)
  )
  # prepare x
  n <- nrow(x)
  # if x has no row names set row indices as row names
  if (is.null(row.names(x))) row.names(x) <- 1:n
  # order
  is_sim <- if(is.null(dot$is_similarity)) FALSE else dot$is_similarity
  if (length(ord) > 1) {
    # explicit ordering supplied by the caller
    x <- x[ord, ord]
  } else if (ord) {
    # seriate automatically; similarities are flipped to distances first
    ord <- seriation::seriate(as.dist(if(is_sim) max(x)-x else x), method = 'GW')[[1]]$order
    x <- x[ord, ord]
  } else ord <- 1:n
  if (!is.null(cl)) cl <- cl[ord]
  names(ord) <- row.names(x)
  # color Ramp for distance matrix
  col <- if(is.null(col)) {if(is_sim) c('white', '#24526E') else c('#24526E', 'white')} else col
  if (length(col) < 2) stop('Passed a single color as col argument but at least two colors are required!')
  cRamp <- colorRamp(col)
  # create empty plot
  plot(NULL, xlim = c(0, n), ylim = c(0, n), frame.plot = FALSE, axes = FALSE, xlab = '', ylab = '')
  # figure
  # if value range is NULL calculate it from the input matrix
  if (is.null(value_range)) value_range <- c(min(x), max(x))
  # NOTE(review): if a value_range narrower than range(x) is supplied,
  # x_scaled can leave [0, 1] and rgb() will error -- confirm intended contract
  x_scaled <- if (value_range[1] == value_range[2]) x
  else (x - value_range[1]) / (value_range[2] - value_range[1])
  dist_raster <- as.raster(
    matrix(
      # apply(cRamp(x_scaled), 1, function(col) rgb(col[1], col[2], col[3], maxColorValue = 255)),
      rgb(cRamp(x_scaled), maxColorValue = 255),
      ncol = n, nrow = n
    )
  )
  rasterImage(dist_raster, 0, 0, n, n, interpolate = FALSE)
  # plot cluster membership
  if(!is.null(cl)) {
    col_cl <- if(is.null(col_cl)) pals::kelly(22)[5:22] else col_cl
    if (is.function(col_cl)) {
      col_cl <- col_cl(length(unique(cl)))
    }
    if (length(unique(col_cl)) < length(unique(cl))) stop('Received less distinct colors than unique values in cl.')
    names(col_cl) <- unique(cl)
    cl_raster <- as.raster(col_cl[as.character(cl)])
    # vertical bar to the left of the matrix, horizontal bar above it
    rasterImage(cl_raster, -1.5, 0, -0.5, n, interpolate = FALSE)
    rasterImage(t(cl_raster), 0, n+0.5, n, n+1.5, interpolate = FALSE)
  }
  # plot lines to create "boxes" instead of a simple raster image
  abline(v = 1:(n-1), col='white', lwd=1)
  abline(h = 1:(n-1), col='white', lwd=1)
  # axis (cell centers are at i - 0.5)
  at <- 1:n - 0.5
  labels <- rownames(x)
  cex.axis <- if(is.null(dot$cex.axis)) 0.5 else dot$cex.axis
  axis(2, outer = FALSE, at = rev(at), labels = labels, cex.axis=cex.axis, pos=(ifelse(is.null(cl), 0, -1.5)), las=1, lwd=0, lwd.ticks = 1)
  axis(3, outer = FALSE, at = at, labels = labels, cex.axis=cex.axis, pos=ifelse(is.null(cl), n, n + 1.5), las=2, lwd=0, lwd.ticks = 1)
  # cl lines
  ## TODO: get start and end indices of ranges of the same value withing cl; plot v and h lines
  # plot color bar
  if(plot_colorbar) {
    plot(NULL, xlim = c(0, 1.0), ylim = value_range, frame.plot = FALSE, axes = FALSE, xlab = '', ylab = '')
    par(mar=c(0,2,3,2))
    cbar_raster <- as.raster(rev(rgb(t(sapply(seq(0, 1, length.out = 50), cRamp)), maxColorValue = 255)))
    rasterImage(cbar_raster, 0, value_range[1], 1, value_range[2], interpolate = TRUE)
    cb_lwd <- if(is.null(dot$cb_lwd)) 1 else dot$cb_lwd
    cb_cex <- if(is.null(dot$cb_cex)) 1 else dot$cb_cex
    at <- axisTicks(value_range, FALSE)
    axis(
      4, lwd = 0, lwd.ticks = cb_lwd,
      at = at, cex.axis=cb_cex
    )
    sapply(at, function(y) lines(c(0, 1.0), c(y,y), lwd=cb_lwd))
  }
  return(ord)
}
#' Distance plot for a cKmeans result
#' @title Plot ckmeans object as distance matrix
#' @description Converts the pairwise consensus matrix of a cKmeans result
#'   into a distance matrix (1 - consensus) and renders it via plotDist.
#' @param x cKmeans object
#' @param col vector of colors (optional)
#' @param ord vectors of indices for ordering the matrix (optional).
#' @param col_cl vector of colors for the clusters (optional)
#' @param plot_colorbar logical determining whether a color bar should be plotted
#' @param ... further arguments passed to the class specific plotDist functions
#' @return the ordering used for plotting, as returned by plotDist
#' @export
plotDist.ckmeans <- function(x, col = NULL, ord = TRUE, col_cl = NULL, plot_colorbar = TRUE, ...) {
  # consensus values lie in [0, 1]; 1 - consensus turns them into distances
  plotDist(
    x = 1 - x$pcc,
    cl = x$cc,
    value_range = c(0, 1),
    ord = ord,
    col = col,
    col_cl = col_cl,
    plot_colorbar = plot_colorbar,
    is_similarity = FALSE,
    ...
  )
}
#' Consensus plot for a cKmeans result
#' @title Plot ckmeans object as consensus matrix
#' @description Renders the pairwise consensus matrix of a cKmeans result
#'   directly (as a similarity) via plotDist.
#' @param x cKmeans object
#' @param col vector of colors (optional)
#' @param ord vectors of indices for ordering the matrix (optional).
#' @param col_cl vector of colors for the clusters (optional)
#' @param plot_colorbar logical determining whether a color bar should be plotted
#' @param ... further arguments passed to the class specific plotDist functions
#' @return the ordering used for plotting, as returned by plotDist
#' @export
plot.ckmeans <- function(x, col = NULL, ord = TRUE, col_cl = NULL, plot_colorbar = TRUE, ...) {
  # pcc is a similarity (consensus) matrix, so flag is_similarity = TRUE
  plotDist(
    x = x$pcc,
    cl = x$cc,
    value_range = c(0, 1),
    ord = ord,
    col = col,
    col_cl = col_cl,
    plot_colorbar = plot_colorbar,
    is_similarity = TRUE,
    ...
  )
}
# .cols: internal fallback palette of 128 distinct hex color codes for
# categorical plotting (presumably a "maximally distinct colors" list --
# TODO confirm source). Not exported; kept private via the leading dot.
.cols <- c("#000000", "#FFFF00", "#1CE6FF", "#FF34FF", "#FF4A46", "#008941", "#006FA6", "#A30059",
           "#FFDBE5", "#7A4900", "#0000A6", "#63FFAC", "#B79762", "#004D43", "#8FB0FF", "#997D87",
           "#5A0007", "#809693", "#FEFFE6", "#1B4400", "#4FC601", "#3B5DFF", "#4A3B53", "#FF2F80",
           "#61615A", "#BA0900", "#6B7900", "#00C2A0", "#FFAA92", "#FF90C9", "#B903AA", "#D16100",
           "#DDEFFF", "#000035", "#7B4F4B", "#A1C299", "#300018", "#0AA6D8", "#013349", "#00846F",
           "#372101", "#FFB500", "#C2FFED", "#A079BF", "#CC0744", "#C0B9B2", "#C2FF99", "#001E09",
           "#00489C", "#6F0062", "#0CBD66", "#EEC3FF", "#456D75", "#B77B68", "#7A87A1", "#788D66",
           "#885578", "#FAD09F", "#FF8A9A", "#D157A0", "#BEC459", "#456648", "#0086ED", "#886F4C",
           "#34362D", "#B4A8BD", "#00A6AA", "#452C2C", "#636375", "#A3C8C9", "#FF913F", "#938A81",
           "#575329", "#00FECF", "#B05B6F", "#8CD0FF", "#3B9700", "#04F757", "#C8A1A1", "#1E6E00",
           "#7900D7", "#A77500", "#6367A9", "#A05837", "#6B002C", "#772600", "#D790FF", "#9B9700",
           "#549E79", "#FFF69F", "#201625", "#72418F", "#BC23FF", "#99ADC0", "#3A2465", "#922329",
           "#5B4534", "#FDE8DC", "#404E55", "#0089A3", "#CB7E98", "#A4E804", "#324E72", "#6A3A4C",
           "#83AB58", "#001C1E", "#D1F7CE", "#004B28", "#C8D0F6", "#A3A489", "#806C66", "#222800",
           "#BF5650", "#E83000", "#66796D", "#DA007C", "#FF1A59", "#8ADBB4", "#1E0200", "#5B4E51",
           "#C895C5", "#320033", "#FF6832", "#66E1D3", "#CFCDAC", "#D0AC94", "#7ED379", "#012C58")
|
6385278e2f98ced2e9fdc68738e32d0b48ddea9c
|
b76879ca270a8d94a42ee2cf821ae24508d5a510
|
/man/get.cumulative.Rd
|
0b1d9e68a9478fd7e15f0559cab2da8da5b8cd32
|
[] |
no_license
|
cran/DCL
|
ee11184e6a8b74515ea856e3bd27acda5488d861
|
bd9aa3502f861a5c0ff0266730a438c7d0d7116a
|
refs/heads/master
| 2022-05-17T09:38:59.010683
| 2022-05-05T15:40:02
| 2022-05-05T15:40:02
| 17,678,627
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 583
|
rd
|
get.cumulative.Rd
|
\name{get.cumulative}
\alias{get.cumulative}
\title{Cumulative triangle
}
\description{
Switch from an incremental to a cumulative triangle
}
\usage{
get.cumulative( triangle )
}
\arguments{
\item{triangle}{
An incremental run-off triangle
}
}
\value{
The cumulative triangle
}
\author{
M.D. Martinez-Miranda, J.P. Nielsen and R. Verrall
}
\note{
The methods in the DCL package normally operate on incremental triangles
}
\seealso{
\code{\link{get.incremental}}
}
\examples{
data(NtriangleDCL)
get.cumulative(NtriangleDCL)
}
\keyword{manip}
|
0ed42b35c72fe6e2e469692a08e17ba72894363c
|
83ae358d90cb1c54c8be380bc7bd628a2f6ed530
|
/man/alka.Rd
|
6ee077c4a3d4c1d7a2ad7851067e2b0c593cba86
|
[] |
no_license
|
cran/Rlab
|
c7963e1210e2140fc6d397ff6a2cf289f0dd3bd2
|
c72e630626f6df15cf75ffd8b9ee7c85322aeda8
|
refs/heads/master
| 2022-05-28T16:35:40.306539
| 2022-05-04T22:10:02
| 2022-05-04T22:10:02
| 17,693,343
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 545
|
rd
|
alka.Rd
|
\name{alka}
\alias{alka}
\title{Alka-Seltzer dissolving times}
\description{
This data set contains the times in seconds that it takes Alka-Seltzer tablets to dissolve in water and 7UP at two different temperatures.
}
\format{
A data frame with 8 observations on the following 4 variables.
\describe{
\item{liquid}{: liquid (7UP or water)}
\item{temp}{: temperature (cool or warm)}
\item{time}{: time to dissolve (in seconds)}
    \item{block}{: blocking level for 2x2 factorial design}
}
}
\keyword{datasets}
|
8739aa896e562a05ee2d985499d41a5203f6314b
|
2280a98f3399445859a9b1ee7a1ff2d16046e445
|
/MicroMSRedX.R
|
b397bc04c7b81d10257bcb3323b9db7e03f5b852
|
[] |
no_license
|
SidBaj/Data-Science-R
|
b38b110972246898599339aa040e3a4a9327f384
|
c15aa1603ba2078b2e97027494e11c9d9dfb67c2
|
refs/heads/main
| 2023-02-15T02:24:59.894751
| 2021-01-06T13:54:27
| 2021-01-06T13:54:27
| 327,205,977
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 72,684
|
r
|
MicroMSRedX.R
|
#EDx Micromasters Data Science
# ---- package setup ----
# NOTE(review): install.packages() is normally run once interactively, not on
# every execution of an analysis script; consider removing after first run.
install.packages("gapminder")
install.packages("dslabs")
install.packages("tidyverse")  # fixed: package name must be a quoted string
install.packages("tidytext")
install.packages("textdata")
install.packages('caTools')
install.packages('matrixStats')
install.packages("Rborist")
library(gapminder)
library(tidyverse)
library(ggplot2)
library(dslabs)
library(ggrepel)
library(ggthemes)
library(gtools)
library(dplyr)
library(rvest)
library(broom)
library(dslabs)   # duplicate of the call above; harmless but redundant
library(textdata)
library(lubridate)
library(tidytext)
library(Lahman)
library(HistData)
library(caret)
library(e1071)
library(maps)
library(caTools)  # fixed: library() errors on failure, require() only warns
library(matrixStats)
library(Rborist)
library(randomForest)
#Course 2:Data Visulization with ggplot2
# Week1
#Basic Statistical Analysis to Understand the distribution
library(dslabs)
prop.table(table(heights$sex)) #Proportion
unique(heights$height) #Gives the unique values
table(unique(heights$height)) #Frequency table of unique values
length(heights$height) #Length
table(heights$height) #Simple Frequency table
mh <-filter(heights , sex=='Male') #Selecting heights of Males only
#Scaling the distribution of Male Heights
m <- scale(mh$height)
#Calculating proportion of values within 2 std of mean
v <- mean(abs(m)<2)
#Normal Distribution
n <- dnorm(mh$height,mean(mh$height),sd(mh$height))
plot(mh$height,n)
#Cummalative Distribution
c <- pnorm(mh$height,mean(mh$height),sd(mh$height))
plot(mh$height,c)
#Plotting both cdf and pdf on a graph
df <- data.frame(n,c,mh$height)
ggplot(data=df)+
geom_line(mapping=aes(x=mh$height,y=n))+
geom_line(mapping=aes(x=mh$height,y=c))
#Eg-To find % of people with height > 75in
1 -pnorm(75,mean(mh$height),sd(mh$height))
#Quantiles and the qnorm function
qu <- quantile(mh$height,0.5) #To find the median/mean value (observd)
qu1 <- qnorm(0.5,mean(mh$height),sd(mh$height)) #Predicted by CDF
#Getting the theoretical values of Quartiles
p <- c(0.25,0.5,0.75)
per <- quantile(mh$height,p)
per1 <- qnorm(p,mean(mh$height),sd(mh$height))
plot(per,per1)
abline(0,1)
#Comparing theoretical and practical values of percentiles
pr <- quantile(mh$height,seq(0.01,0.99,0.05)) #Observed
pr1 <- qnorm(seq(0.01,0.99,0.05),mean(mh$height),sd(mh$height))
plot(pr,pr1)
abline(0,1)
#Week 2
#Using ggplot2 on murders dataset: 1st Method
data(murders)
ggplot(data=murders)+
geom_point(aes(x=population,y=(total/population)*(10^7),color=region),size=1.5)+
geom_text(aes(x=population,y=(total/population)*(10^7),label=abb),nudge_x = 0.075,nudge_y = 0.075)+
geom_line(aes(x=population,y=mean(total/population)*(10^7)),lty=2)+
#scale_discrete_manual(aes(x=population,y=(total/population)*(10^7),guide=guide_legend(title = "Region of the US"),values=c("Orange","Blue","Green","Black")))+
scale_x_log10()+
scale_y_log10()+
xlab("Logarithm of Population")+
ylab("Logarithm of No of people died per 1Cr")+
ggtitle("US Murders Dataset")
#2nd Method:
rate = intercept=log10((sum(murders$total)/sum(murders$population))*10^6)
p1 <- murders %>% ggplot(aes(x=population/10^6,y=total,label=abb,col=region))
p2 <- p1 +geom_point() + geom_hline(yintercept = mean(murders$total))
p3 <- p2 + geom_text_repel(color="Black")
p4 <- p3 + scale_x_log10() + scale_y_log10() + geom_abline(intercept = rate,lty=2)
p5 <- p4 + scale_color_discrete(name="Regions of US")
p6 <- p5 +xlab("Population") +ylab("Total Murders in the State")+ggtitle("US Gun Murders By State")
#Loading ggthemes package
install.packages("ggthemes")
library(ggthemes) #To change the theme
p7 <- p6 +theme_economist()
install.packages("ggrepel")
#Labels do not overlap
#Examples of the heights dataset Hist in ggplot
ht <-heights%>%
filter(sex=="Female")%>%
ggplot(aes(x=height))
ht5<- ht +geom_histogram(fill="Black",col="White")
#Density Plot
ht1 <-heights%>%
filter(sex=="Female")%>%
ggplot(aes(x=height))
ht4 <- ht1 +geom_density()
#QQ plot
fm <-filter(heights,sex=="Female")
prm <- c(mean(fm$height),sd(fm$height))
ht2 <- heights%>%
filter(sex=="Female")%>%
ggplot(aes(sample=height))
ht3 <- ht2 +geom_qq(dparams = prm) +geom_abline()
#How to add plots next to each other
install.packages("gridExtra")
library(gridExtra)
grid.arrange(ht3,ht4,ht5,ncol=3)
#Week3
#Intro to dplyr
#summarize()
su <-heights%>%
filter(sex=="Female")%>%
summarize(m=mean(height),s=sd(height),mi=min(height),mx=max(height))
#Using dot operator
su %>% .$m
#Using the group_by()
murders %>%
group_by(region)%>%
summarise(meanmurderrate=(sum(total)/sum(population))*10^6,meanpop=mean(population))
iris %>%
group_by(Species)%>%
summarise(Mean_Length=mean(Sepal.Length),Mean_Width=mean(Sepal.Width),Petal_Length=mean(Petal.Length),Petal_Width=mean(Petal.Width))%>%
.$Mean_Length
#Using the arrange function()
mu <- murders
mu['DeathsperMillion'] <- (mu$total/mu$population)*(10^6)
mu %>% arrange(desc(DeathsperMillion))%>%head(10)
mtcars %>%
arrange(desc(mpg,cyl)) %>% head()
iris %>%
group_by(Species)%>%
arrange(desc(Petal.Length)) %>% top_n(25)
murders %>%
group_by(region)%>%
arrange(region,desc(population))
summarise(meanpop=mean(population))
#Week4: Gapminder
#arrange(),facet_wrap(),geom_text(),reorder(),logit(),limit(),breaks()
gapminder %>%
filter(year==2016)%>%
select(life_expectancy,country)%>%
arrange(desc(life_expectancy))
filter(gapminder,year %in% c(1962,2010))%>%
ggplot(aes(y=life_expectancy,x=fertility,col=continent))+
facet_wrap(.~year) +geom_point() #Using the facet function for multiple plots
pl2 <- filter(gapminder,year==2010)%>%
ggplot(aes(x=life_expectancy,y=fertility,col=continent))+
geom_point()
#Using the facet_wrap()
iris %>%
ggplot(aes(x=Sepal.Length,y=Petal.Length,col=Species))+
facet_wrap(.~Species) +geom_point()
mtcars %>%
ggplot(aes(x=disp,y=mpg,col=cyl))+
facet_wrap(.~cyl) + geom_point()+
ggtitle("Miles per Gallon vs. Displacement")
#Fertility rate of India over the years
#Comparing gdp of different countries over time
gapminder %>%
filter(country %in%c("India","China"))%>%
ggplot(aes(x=year,y=population/10000000,col=country)) +geom_line()
#With text labels
countries = c("South Korea","Germany")
labels <- data.frame(country = countries, x = c(1975, 1965), y = c(60, 72))
gapminder %>% filter(country %in% countries) %>%
ggplot(aes(year, life_expectancy, col = country)) +
geom_line() +
geom_text(data = labels, aes(x, y, label = country), size = 5) +
theme(legend.position = "none")
#Per Capita income of countries in $per day: Histogram
gapminder%>%
ggplot(aes(x=(gdp/population)/365))+
geom_histogram(binwidth = 1,fill="Grey",col="black")+
scale_x_continuous(trans = "log2")+
ggtitle("Per Capital GDP")
#Boxplot of per capita daily income stratified by region
p <- gapminder%>%
filter(year==2010)%>%
mutate(dpd = gdp/population/365,region=reorder(region,dpd,FUN=median))
p %>%ggplot(aes(y=dpd,region,fill=continent))+
theme(axis.text.x = element_text(angle=90,hjust=1))+
geom_boxplot()+geom_point()
#Side by side comparison of west and developing world
west <- c("Western Europe", "Northern Europe", "Southern Europe", "Northern America", "Australia and New Zealand")
#Income distribution of West vs. Developing world
c1 <- gapminder %>% filter(year==1970 & !is.na(gdp)) %>% .$country
c2 <- gapminder%>% filter(year==2010 & !is.na(gdp)) %>% .$country
c3 <- intersect(c1,c2)
gapminder %>%
filter(year %in% c(1970,2010),country %in% c3)%>%
mutate(dollar_per_day = (gdp/population/365),regn = ifelse(region %in% west,"West","Rest"))%>%
ggplot(aes(y=dollar_per_day,region,fill=factor(year)))+
geom_boxplot()+
scale_y_continuous(trans="log2")+
theme(axis.text.x = element_text(angle=90,hjust=1))
gm <- gapminder %>%
filter(year %in% c(1970,2010) &country %in% c3)%>%
mutate(dollar_per_day = (gdp/population/365),regn = ifelse(region %in% west,"West","Developing"))%>%
ggplot(aes(y=dollar_per_day,region,fill=factor(year)))+
geom_boxplot()+
scale_y_continuous(trans="log2")+
theme(axis.text.x = element_text(angle=90,hjust=1))
afr <- c("Eastern Africa","Northern Africa")
gap <- gapminder %>%
mutate(group = case_when(
.$region %in% west ~ "The West",
.$region %in% afr ~"North Africa",
.$region %in% c("Eastern Asia","South-Eastern Asia") ~ "Asia",
.$region =="Southern Asia" ~"Southern Asia",
.$region %in% c("Central America","South America","Caribbean") ~"Latin America",
.$continent == "Africa" &.$region !="Northern Africa" ~"Subsaharan Africa",
.$region %in% c("Melanesia", "Micronesia", "Polynesia") ~ "Pacific Islands"))
gap <- gap %>%
filter(year == 2010 & !is.na(gdp) & !is.na(infant_mortality) & !is.na(group)) %>%
group_by(country,group) %>%
summarize(income = sum(gdp)/sum(population)/365,
infant_survival_rate = 1 - sum(infant_mortality/1000*population)/sum(population))
gap %>% arrange(income)
gap %>% ggplot(aes(income, infant_survival_rate,label=country ,color = group)) +
scale_x_continuous(trans = "log2", limit = c(0.25, 150)) +
scale_y_continuous(trans = "logit", limit = c(0.875, .9981),
breaks = c(.85, .90, .95, .99, .995, .998)) +
geom_label(size = 3, show.legend = FALSE) +geom_point()
gap1 <- gapminder %>%
filter(year==1970)%>%
select(country,population,gdp,continent)%>%
mutate(pop=log2(population))%>%
arrange(desc(gdp))%>%
top_n(20)
gap1%>%
filter(!is.na(gdp))%>%
mutate(country = reorder(country,gdp,FUN = mean))%>%
ggplot(aes(gdp,country,fill=continent)) +geom_col()
gapminder %>%
mutate(country=reorder(country,population,FUN=median))%>%
ggplot(aes(population,country)) +geom_col()
heights %>% ggplot(aes(sex, height)) + geom_jitter(width = 0.2,alpha=0.4)
#Slope Chart
gapminder %>%
filter(year %in% c(1970,2010),!is.na(life_expectancy),country %in% c("India","China","Pakistan","United States","France","Finland"))%>%
ggplot(aes(y=life_expectancy,x=year,col=country))+geom_point()+geom_line()
t1 <-titanic_train %>%
filter(!Fare==0 &Survived==0)%>%
group_by(Survived)%>%
ggplot(aes(x=Fare))+geom_boxplot()+scale_x_continuous(trans="log2")
t2 <-titanic_train %>%
filter(!Fare==0 &Survived==1)%>%
group_by(Survived)%>%
ggplot(aes(x=Fare))+geom_boxplot()+scale_x_continuous(trans="log2")
titanic_train%>%
filter(Fare > 0)%>%
ggplot(aes(Survived, Pclass)) +
geom_boxplot() +
scale_y_continuous(trans = "log10") +
geom_jitter(alpha = 0.2,width = 0.2)
t <- titanic_train %>%
filter(Pclass==3 & Survived==0)
titanic%>%
ggplot(aes(Age,y=..count..,fill=Survived))+geom_density(alpha=0.2)+facet_grid(Sex~Pclass)
stars%>%
ggplot(aes(x=temp,y=magnitude,color=type))+geom_point()
temp_carbon %>%
filter(!is.na(temp_anomaly)) %>%
filter(year %in% c(1880,2018))%>%select(temp_anomaly)
p <- temp_carbon %>%
filter(!is.na(temp_anomaly))%>%
ggplot(aes(x=year,y=temp_anomaly))+geom_line()
temp_carbon %>%
filter(!is.na(ocean_anomaly),!is.na(land_anomaly),!is.na(temp_anomaly))%>%
ggplot(aes(x=year,y=temp_anomaly))+geom_line(col='Black')+
geom_line(aes(x=year,y=ocean_anomaly),col='Blue')+geom_line(aes(x=year,y=land_anomaly),col="Red")
greenhouse_gases %>%
ggplot(aes(x=year,y=concentration)) +
geom_line() +
facet_grid(gas~., scales = "free")+
geom_vline(xintercept = 1850)
ylab("Concentration (ch4/n2o ppb, co2 ppm)") +
ggtitle("Atmospheric greenhouse gas concentration by year, 0-2000")
greenhouse_gases %>%
filter(year==2000)%>%
select(gas,concentration)
temp_carbon%>%
filter(!is.na(carbon_emissions))%>%
select(year,carbon_emissions)%>%
ggplot(aes(x=year,y=carbon_emissions))+
geom_line()
temp_carbon%>%
filter(year %in% c(2014,1960))%>%
select(carbon_emissions)
historic_co2%>%
ggplot(aes(x=year,y=co2,col=source))+xlim(-3000,2018)+geom_line()+geom_vline(
xintercept = 1850
)
c <- gapminder$country
data("gapminder")
g1 <- gapminder %>%
filter(country %in% c("India","Pakistan","China","United States","United Kingdom"),!is.na(lifeExp))%>%
ggplot(aes(x=year,y=pop,col=country)) +geom_line()
#Course 3 Probability
#Discrete Variables probability
set.seed(1)
set.seed(1, sample.kind="Rounding")
#Monte Carlo simulation for discrete variables/probabilities
#Without replacement
beads <- rep(c("Red","Blue"),c(20,80))
samp <- sample(beads,10)
re <- replicate(20000,sample(beads,10))
#With replacement
sa <- sample(beads,10,replace = TRUE)
#Permutation and Combinations
#Creating a deck of cards
suits <- c("Diamonds", "Clubs", "Hearts", "Spades")
numbers <- c("Ace", "Deuce", "Three", "Four", "Five", "Six", "Seven", "Eight", "Nine", "Ten", "Jack", "Queen", "King")
deck <- expand.grid(Suit=suits,Numbers=numbers)
deck <- paste(deck$Numbers,deck$Suit)
#Creating different outcomes
king <- paste("King",suits) #Outcomes of drawing a king
mean(deck %in% king) #Probability of drawing a king
queen <- paste("Queen",suits) #Outcomes of drawing a Queen
mean(deck %in% queen) #Probability of drawing a Queen
install.packages("gtools")
library(gtools)
#Probability of drawing two consecutive kings
total <- permutations(52,2,v=deck)
k <- total[,1]
s <- total[,2]
prob <- mean(k %in% king & s %in% king)/mean(k %in% king)
#Birthday problem
#Probability of atleast 2 people sharing birthdays for a group of 50 people
bday <- replicate(1000,any(duplicated(sample(1:365,50,replace = TRUE))))
#Calculating the above probability for different group sizes
n <- seq(1:100)
# Birthday problem: probability that in a group of n people at least two
# share a birthday, estimated from B Monte Carlo simulations.
# n: group size; B: number of simulations.
# fixed: B had no default, so the later call `sapply(n, dd)` errored with
# "argument B is missing"; a default keeps the old two-argument calls working.
dd <- function(n, B = 1000){
  f <- replicate(B, {
    b <- sample(1:365, n, replace = TRUE)   # draw n birthdays uniformly
    any(duplicated(b))                      # TRUE if any two coincide
  })
  mean(f)                                   # fraction of simulations with a match
}
plt <- sapply(n,dd)
plot(n,plt)
#Calculating the result for different values of monte carlo simulations(B)
B1 <- 10^seq(1,5,len=100)
# Birthday-match probability for a fixed group size (default n = 22),
# estimated from B1 Monte Carlo simulations. Used to study how the estimate
# stabilizes as the number of simulations grows.
comp_prob <- function(B1, n = 22){
  matches <- replicate(B1, {
    bdays <- sample(1:365, n, replace = TRUE)
    any(duplicated(bdays))
  })
  mean(matches)
}
sims <- sapply(B1,comp_prob)
plot(log10(B1),sims,type = "l") #Plotting the results vs. No of sims
#Monty Hall Problem
B2 <- 10000
switch <- replicate(B2,{
doors <- as.character(1:3)
prize <- sample(c("Car","Goat","Goat"))
prize_door <- doors[prize=="Car"]
my_pick <- sample(doors,1)
show <- sample( doors[!doors %in% c(my_pick,prize_door)],1)
switch <- doors[!doors %in% c(my_pick,show)]
switch == prize_door
})
mean(switch)
#Example
#Odds of winning a game
outcome <- c(0,1)
b <- permutations(2,r=10,v=outcome,repeats.allowed = TRUE,set = FALSE)
mean(rowSums(b)>=6)
#Doing the same thing by 10,000 monte carlo simulations
#Doing this for different series lengths (from 1 to 25 by 2)
#Assuming the first game is won by the team
# Probability of winning a best-of-n series given game 1 is already won:
# the team needs at least (n-1)/2 wins in the remaining n-1 games.
# Estimated from 10000 Monte Carlo simulations with fair-coin games.
# NOTE(review): this name is later shadowed by a `results` data frame further
# down the script; the last assignment wins at run time.
results <- function(n){
  series_won <- replicate(10000, {
    games <- sample(c(0, 1), n - 1, replace = TRUE, prob = c(0.5, 0.5))
    sum(games) >= (n - 1) / 2
  })
  mean(series_won)
}
n <- seq(1,25,2)
pr <- sapply(n,results)
plot(n,pr)
#Assignment questions EDx
library(gtools)
library(tidyverse)
j <- c("Jamaica", "Jamaica", "Jamaica")
runners <- c("Jamaica", "Jamaica", "Jamaica", "USA", "Ecuador", "Netherlands", "France", "South Africa")
oly <- replicate(10000,{
k <- sample(runners,3)
all(k %in% j)
})
r <- seq(2,12,1)
a <- function(r){
b <- combinations(r,2)
nrow(b)
}
b <- sapply(r,a)
esoph %>%
filter(tobgp=="30+" & alch)%>%
summarise(sum(ncases),sum(ncontrols))
#Continuous Variables probability
#Proportion of male student above 70.5 inches
library(dslabs)
# Empirical CDF helper: proportion of male students whose height is at most
# n inches (reads the dslabs `heights` dataset from the enclosing scope).
pro <- function(n){
  male_students <- filter(heights, sex == "Male")
  summarise(male_students, mean(height <= n))
}
#Plotting proportions for different heights (CDF)
n <- seq(50,83,0.5)
pr <- sapply(n,pro)
plot(n,pr)
#Using the pnorm() function for CDF
ht <- heights %>% filter(sex=="Female")%>% .$height
ht <- sort(ht,decreasing = FALSE)
avg <- mean(ht)
s <- sd(ht)
p <- pnorm(ht,avg,s)
df <- data.frame(ht,p)
df%>%
ggplot(aes(x=ht,y=p))+
geom_point()
#Using dnorm() for PDF
ht <- heights %>% filter(sex=="Female")%>% .$height
ht <- sort(ht,decreasing = FALSE)
avg <- mean(ht)
s <- sd(ht)
d <- dnorm(ht,avg,s)
df <- data.frame(ht,d)
df%>%
ggplot(aes(x=ht,y=d))+
geom_point()
#Plotting both PDF & CDF on single graph
df1 <- data.frame(ht,p,d)
df1 %>%
ggplot()+
geom_point(aes(ht,p),color="Red")+
geom_point(aes(ht,d))
#Using rnorm() for Monte-Carlo Simulation
r <- rnorm(length(ht),avg,s)
hist(r)
#Using qnorm() to get exact values for different quantiles
sw <- seq(0,1,0.025)
q <- qnorm(sw,avg,s)
#Probability of the casino losing money.
#Monte Carlo on Sampling model
sam <- replicate(10000,{
s <- sample(c(-1,1),prob=c(9/19,10/19),size = 1000,replace = TRUE)
sum(s)
})
mean(sam<=0)
d <- dnorm(sam,mean(sam),sd(sam))
df <- data.frame(d,sam)
df%>%
ggplot(aes(df,..density..))+
geom_line(aes(x=sam,y=d),color="Blue",lwd=1)+
geom_histogram(aes(sam))
#Probability of winning on 10,000 bets on green using CLT
p_green <- 2/38
p_not_green <- 36/38
s <- sample(c(17,-1),size=10000,replace = TRUE,prob=c(p_green,p_not_green))
#Probability of winning on 10,000 bets on green using 10,000 Monte-Carlos
p_green <- 2/38
p_not_green <- 36/38
mcs <- replicate(10000,{
s1 <- sample(c(17,-1),size=100,replace = TRUE,prob=c(p_green,p_not_green))
mean(s1>0)
})
1 - pnorm(0,mean(mcs),sd(mcs))
se <- sqrt(44)*(abs(-0.25-1)*sqrt(0.2*0.8))
score <- sample(c(-0.25,1),prob=c(0.8,0.2),size=44,replace = TRUE)
m <- ((0*0.75)+(0.25))
set.seed(21)
b <- seq(0.25,0.95,0.05)
# Simulate 10000 total exam scores for a 44-question test where each answer
# scores +1 with probability b and -0.25 with probability 1-b.
# Returns the vector of 10000 simulated totals.
mct <- function(b){
  replicate(10000, {
    answers <- sample(c(-0.25, 1), prob = c(1 - b, b), size = 44, replace = TRUE)
    sum(answers)
  })
}
#The BIG SHORT
#Sampling model for determining loan defaults
#Giving out 1000 loans of 200,000$ each & default rate = 2%
loans <- sample(c(-200000,0),size = 1000,prob = c(0.02,0.98),replace = TRUE)
exp_value <- 1000*((0.02)*(-200000)+(0.98)*0)
se <- sqrt(1000)*(abs(-200000)*sqrt(0.02*0.98))
#Probability for determining interest rates to minimise defaults
#Case1: Probability of loss as a function of different interest rates
rat <- seq(0,0.035,0.001)
# Probability that the bank loses money at interest rate `rat`:
# each simulation draws 1000 loans that either default (-$200,000, prob 2%)
# or pay rat * $180,000 interest; the function returns the fraction of
# 10000 simulations where the mean payoff per loan is negative.
default <- function(rat){
  avg_payoff <- replicate(10000, {
    loans <- sample(c(-200000, rat * 180000), prob = c(0.02, 0.98),
                    size = 1000, replace = TRUE)
    mean(loans)
  })
  mean(avg_payoff < 0)
}
def_ault <- sapply(rat,default)
#Case2: Probability of profit as a function of different interest rates
# Complement of `default`: probability that the bank makes money at interest
# rate `rat`, i.e. the fraction of 10000 simulations where the mean payoff
# per loan (over 1000 loans, 2% default rate) is positive.
payback <- function(rat){
  avg_payoff <- replicate(10000, {
    loans <- sample(c(-200000, rat * 180000), prob = c(0.02, 0.98),
                    size = 1000, replace = TRUE)
    mean(loans)
  })
  mean(avg_payoff > 0)
}
pay_back <- sapply(rat,payback)
#Plotting the results
data.frame(rat,def_ault,pay_back)%>%
ggplot()+
geom_point(aes(rat,def_ault),color="Red")+
geom_point(aes(rat,pay_back),color="Green")
#Insurance Problem!!
set.seed(29)
# Insurance simulation: probability that total profit over 1000 policies
# falls below -$1,000,000. Each of the 10000 simulations perturbs the death
# probability (0.015 plus a uniform draw from [-0.01, 0.01]); each policy
# pays out -$150,000 on death or collects `premium` otherwise.
profits <- function(premium = 3268){
  totals <- replicate(10000, {
    p_death <- 0.015 + sample(seq(-0.01, 0.01, length = 100), 1)
    policies <- sample(c(-150000, premium), size = 1000,
                       prob = c(p_death, 1 - p_death), replace = TRUE)
    sum(policies)
  })
  mean(totals < -10^6)
}
prft <- sapply(seq(3200,3250,1),profits)
df <- data.frame(seq(3200,3250,1),prft)
#Course 4: Inference and Modeling
#Monte-Carlo Simulation to confirm CLT
n <- seq(10,200,10)
p <- 0.6
props <- function(n=100){
pr <- replicate(1000,{
s <- sample(c(0,1),size=n,replace=TRUE,prob=c(1-p,p))
mean(s)-0.6
})
pr
}
se_sim <- sapply(n, props)
se_calc <- sqrt(p*(1-p))/sqrt(n)
data.frame(n,se_sim,se_calc)%>%
ggplot()+
geom_point(aes(x=n,y=se_sim),color="Red")+
geom_point(aes(x=n,y=se_calc),color="Blue")
#Monte-Carlo Simulation of Confidence Intervals
inside <- replicate(10000,{
s <- sample(c(0,1),size=100,replace=TRUE,prob=c(0.2,0.8))
m <- mean(s)
std <- sqrt(m*(1-m)/100)
between(0.8,m-(1.96*std),m+ (1.96*std))
})
#Creating 95% confidence intervals for the spread for 2016 US polls
#How many of those intervals actually got the correct value i.e 2.1%
polls <- polls_us_election_2016 %>%
filter(state=='U.S.' & enddate>=2016-10-31)%>%
group_by(pollster)%>%
filter(n()>6)%>%
mutate(spread=rawpoll_clinton-rawpoll_trump)%>%
mutate(me=2*1.96*sqrt(rawpoll_clinton*(100-rawpoll_clinton)/samplesize))%>%
mutate(lower=spread-me,upper=spread+me)%>%
mutate(gotit=ifelse(2.1>=lower & 2.1<=upper,TRUE,FALSE))%>%
select(spread,samplesize,pollster,grade,me,lower,upper,gotit)
#Plot of all the 13 pollster's histograms of spreads
polls %>%
group_by(pollster)%>%
ggplot()+
geom_point(aes(x=spread,y=pollster))+
geom_vline(aes(xintercept=2.1))
#Aggreate of all the polls to calculate the spread(POLL AGGREGATION)
agg <- polls %>% ungroup() %>% select(samplesize,spread,pollster)
d_hat <- agg %>% summarise(d=sum(spread*samplesize)/sum(samplesize))%>% .$d
p_hat <- (100+d_hat)/2
se_d <- 2*qnorm(0.975)*sqrt(p_hat*(100-p_hat)/sum(agg$samplesize))
#Aggregate by pollster
poll_agg <- polls %>%
group_by(pollster)%>%
summarise(se=2*sqrt(p_hat*(100-p_hat)/sum(samplesize)),d=sum(spread*samplesize)/sum(samplesize))%>%
mutate(p=(100+d)/2)
#Heights dataset assgn
males <- heights %>%filter(sex=='Male')%>% .$height
male_sample <- sample(males,size=50,replace = TRUE)
#Bayesian/Posterior Probability
#The Observed Poll Data ~(d,sigma)
polls <- polls_us_election_2016 %>%
filter(state=='U.S.' & enddate>="2016-10-31" &
(grade %in% c("A+","","A","A-","B+") | is.na(grade)))%>%
mutate(spread=rawpoll_clinton/100-rawpoll_trump/100)
one_poll_per_pollster <- polls %>% group_by(pollster)%>%
filter(enddate==max(enddate))%>%
ungroup()
results <- one_poll_per_pollster %>%
summarise(avg=mean(spread),se= sd(spread)/sqrt(length(spread)))
#Historical Data ~(mu=0,tau=0.035)
#Posterior mean = B*mu + (1-B)*Y; B = sigma^2/sigma^2+tau^2
mu <- 0
tau <- 0.035
Y <- results$avg
sigma <- results$se
B <- sigma^2/(sigma^2 + tau^2)
pos_mean <- (B*mu)+(1-B)*Y
pos_se <- sqrt(1/(1/sigma^2+1/tau^2))
#95% credible interval
pos_mean + c(-1.96,1.96)*pos_se
1-pnorm(0,pos_mean,pos_se)
#Predicting the Electoral college
results <- polls_us_election_2016 %>%
filter(state != "U.S." &
!grepl("CD", "state") &
enddate >= "2016-10-31" &
(grade %in% c("A+", "A", "A-", "B+") | is.na(grade))) %>%
mutate(spread = rawpoll_clinton/100 - rawpoll_trump/100) %>%
group_by(state) %>%
summarize(avg = mean(spread), sd = sd(spread), n = n()) %>%
mutate(state = as.character(state))
results <- results %>% arrange(desc(abs(avg)))
results <- left_join(results,results_us_election_2016,by="state")
results <- results %>%
mutate(sd = ifelse(is.na(sd), median(results$sd, na.rm = TRUE), sd))
mu <- 0
tau <- 0.02
results %>% mutate(sigma = sd/sqrt(n),
B = sigma^2/ (sigma^2 + tau^2),
posterior_mean = B*mu + (1-B)*avg,
posterior_se = sqrt( 1 / (1/sigma^2 + 1/tau^2))) %>%
arrange(abs(posterior_mean))
#Monte-Carlo Simulation of Election Night Results with general bias of 3%
g_bias <- 0.03
Clinton_EV <- replicate(1000,{
results %>% mutate(sigma=sqrt(sd^2/n+g_bias^2),
B = sigma^2/(sigma^2+tau^2),
posterior_mean = B*mu + (1-B)*avg,
posterior_se = sqrt(1/(1/tau^2+1/sigma^2)),
simulated_result <- rnorm(length(posterior_mean),posterior_mean,posterior_se),
clinton=ifelse(simulated_result>0,electoral_votes,0))%>%
summarize(clinton=sum(clinton))%>%
.$clinton+7
})
mean(Clinton_EV>269)
#Chi-Square Test
# Column totals of awards/applications across all disciplines, then a
# funded / not-funded breakdown by gender (inputs to the chi-square test).
# fixed: funs() was deprecated in dplyr 0.8 and later removed; across() is
# the supported replacement and produces the same column sums.
totals <- research_funding_rates%>%
  select(-discipline)%>%
  summarise(across(everything(), sum))%>%
  summarise(yes_men=awards_men,no_men=applications_men-awards_men,
            yes_women=awards_women,no_women=applications_women-awards_women)
#Creating a two by two contigency table
# compute overall funding rate
funding_rate <- totals %>%
summarize(percent_total = (yes_men + yes_women) / (yes_men + no_men + yes_women + no_women)) %>%
.$percent_total
#Cross-Contingency table
cross_tab <- tibble(awarded = c("no", "yes"),
men = c(totals$no_men ,totals$yes_men),
women = c(totals$no_women , totals$yes_women))
#Performing the chi_sq test
cross_tab%>%select(-awarded)%>%chisq.test()
odds_men <- cross_tab$men[2]/cross_tab$men[1]
odds_women <- cross_tab$women[2]/cross_tab$women[1]
#Brexit polling assignment
library(tidyverse)
options(digits = 3)
# load brexit_polls object
library(dslabs)
data(brexit_polls)
# Brexit polls: per-poll "remain" share estimate and 95% CI for the spread.
# fixes: (1) `=` instead of `<-` inside mutate() so se_x_hat becomes a real
# column rather than a side-effect assignment; (2) keep samplesize and spread
# in the select() -- the summarise() on the next line needs both and errored
# when they were dropped.
brex <- brexit_polls %>% mutate(p_hat = (1 + spread) / 2)
june_polls <- brex %>%
  mutate(se_x_hat = sqrt(p_hat * (1 - p_hat) / samplesize), sp = 2 * se_x_hat) %>%
  mutate(l = spread - qnorm(0.975) * sp, h = spread + qnorm(0.975) * sp,
         hit = ifelse(-.038 >= l & -.038 <= h, TRUE, FALSE)) %>%
  select(poll_type, hit, samplesize, spread)
june_polls %>% group_by(poll_type) %>%
  summarise(N = sum(samplesize), spread = sum(spread * samplesize) / N, p_hat = (1 + spread) / 2)
brexit_long <- brexit_polls %>%
gather(vote, proportion, "remain":"undecided") %>%
mutate(vote = factor(vote))
##Course 5## Data Wrangling
##Reshaping data
#gather(),spread(),seperate(),unite()
dt <- read.csv("C:\\Users\\lalit\\Downloads\\datasets_180_408_data.csv")
gap <- gapminder %>% select(country,year,gdpPercap)
wid <- gap %>% spread(key=year,value=gdpPercap)
tidy <- wid %>% gather("year","gdpPerCap",'1952':'2007')
age_group <- c(20,30,40,50)
my_time <- c(3,7,8,9)
my_participants <- c(25,32,21,63)
your_time <- c(5,6,3,1)
your_participants <- c(45,33,46,76)
dataf <- data.frame(cbind(age_group,my_time,my_participants,your_time,your_participants))
co2_wide <- data.frame(matrix(co2, ncol = 12, byrow = TRUE)) %>%
setNames(1:12) %>%
mutate(year = as.character(1959:1997))
co2_tidy <- co2_wide %>% gather(month,co2,-year)
tmp <- gather(admissions,key,value,admitted:applicants)
data(admissions)
dat <- admissions %>% select(-applicants)
tmp2 <- tmp %>% unite(column_name,c(key,gender))
#Joining Data ## left_join(),right_join(),inner_join(),full_join(),semi_join(),anti_join()
# Set Operators ## intersect(),union(),setdiff(),setequal()
t1 <- mtcars[1:10,]
t2 <- mtcars[8:20,]
rownames(intersect(t1,t2))
#Example##
f1 <- c(2,3,4)
f2 <- c(3,4,5)
full_join(f1,f2)
##Example Dataset ##
install.packages("Lahman")
library(Lahman)
top <- Batting %>% filter(yearID==2016)%>%top_n(10,HR)
hitt <- AwardsPlayers %>% filter(yearID==2016)
#Web Scraping ##
library(rvest)
url <- "https://en.wikipedia.org/wiki/Murder_in_the_United_States_by_state"
page <- read_html(url)
t <- html_nodes(page,"table")
t <- t[2]
d <- html_table(t)
d <- d %>% setNames(c("state", "population", "total", "murders", "gun_murders", "gun_ownership", "total_rate", "murder_rate", "gun_murder_rate"))
#Example#
h <- read_html("http://www.foodnetwork.com/recipes/alton-brown/guacamole-recipe-1940609")
recipe <- h %>% html_node(".o-AssetTitle__a-HeadlineText") %>% html_text()
prep_time <- h %>% html_node(".m-RecipeInfo__a-Description--Total") %>% html_text()
ingredients <- h %>% html_nodes(".o-Ingredients__a-Ingredient") %>% html_text()
guacamole <- list(recipe, prep_time, ingredients)
#Assignment##
urrl <- "http://www.stevetheump.com/Payrolls.htm"
dt <- read_html(urrl)
nd <- html_nodes(dt,"table")
html_text(nd[8])
tabl<- html_table(nd[8])
tab_1 <- html_table(nd[[10]])
tab_2 <- html_table(nd[[19]])
tab_1 <- tab_1[2:30,]
tab_2 <- tab_2[2:31,]
tab_2 <- tab_2 %>% setNames(c("Team","Payroll"))
#Example2#
library(rvest)
library(tidyverse)
url <- "https://en.wikipedia.org/w/index.php?title=Opinion_polling_for_the_United_Kingdom_European_Union_membership_referendum&oldid=896735054"
ht <- read_html(url)
tbls <- html_nodes(ht,"table")
n <- html_table(tbls[5],fill=TRUE)
## ##
library(rvest)
url <- "https://web.archive.org/web/20181024132313/http://www.stevetheump.com/Payrolls.htm"
h <- read_html(url)
nodes <- html_nodes(h, "table")
tab_1<- html_table(nodes[[10]])
tab_1 <- tab_1[2:31,2:4]
tab_1 <- setNames(object=tab_1,c("Team","Payroll","Average"))
tab_2<- html_table(nodes[[19]])
tab_2 <- tab_2[2:31,] %>% setNames(c("Team","Payroll","Average"))
#String Processing basics##
#Load the US Murders Dataset from the webpage##
url <- "https://en.wikipedia.org/w/index.php?title=Gun_violence_in_the_United_States_by_state&direction=prev&oldid=810166167"
murders_raw <- read_html(url) %>%
html_nodes("table") %>%
html_table() %>%
.[[1]]%>%
setNames(c("state", "population", "total", "murder_rate"))
commas <- function(x) any(str_detect(x, ","))
murders_raw %>% summarize_all(funs(commas))
murders_raw <- murders_raw %>%
mutate(population=str_replace_all(population,",",""),total=parse_number(total))
murders_new <- murders_raw %>% mutate_at(2:3, parse_number)
#String Processing ## Part2##
library(dslabs)
data("reported_heights")
# Flag height entries that are not usable numbers in [smallest, largest]
# inches. Non-numeric strings coerce to NA (warnings suppressed) and are
# flagged alongside out-of-range values. Returns a logical vector.
not_defined <- function(x, smallest = 50, largest = 84){
inches <- suppressWarnings(as.numeric(x))
too_small <- inches < smallest
too_large <- inches > largest
is.na(inches) | too_small | too_large
}
problems <- reported_heights %>%
filter(not_defined(height)) %>%
.$height
length(problems)
df <- reported_heights %>% mutate(ht=suppressWarnings(as.numeric(height))) %>%
filter(is.na(ht) | ht > 84 | ht < 50) %>% .$height
##Regex## str_detect(),str_subset(),str_replace(),str_remove()
str_subset(reported_heights$height,"cm")
str_view(reported_heights$height,"cm|feet")
str_subset(reported_heights$height,"\\d")
str_detect(reported_heights$height,"inches") %>% sum()
##Regex## Anchors and quantifiers##
##Anchors ^ start $end ##
pattern <- "^\\d{1,2}$"
m <- c("23","1","456")
p2 <- "^(\\d)(')(\\d)$"
h1 <- c("5'8","6'2","5'1","3'4")
str_replace(h1,p2,replacement ="\\1 \\2" )
str_extract(h1,p2)
##Additional Quantifiers ##
p4 <- "^[4-7]\\s*'\\s*\\d{1,2}$"
pat <- "^([4-7])\\s*[.|,|\\s+]\\s*(\\d*)$"
problems_solved <- df %>% str_replace("feet|ft|foot","'")%>%
str_replace("inches|in|\"","") %>%
str_replace(pat,"\\1'\\2") %>%
str_remove_all(" ")%>%
str_subset(p4)
#Pattern with groups () , str_match(), str_match(),str_extract()
yes <- c("5,6","6,8")
pat <- "^([4-7])\\s*[.|,|\\s+]\\s*(\\d*)$"
str_subset(df,pat)
str_replace(df,pat,"\\1'\\2")
#extract(),separate(),str_trim()
pr <- data.frame(x=problems_solved)
pr %>% separate(x,c("feet","inches"),sep="'")
pr %>% extract(x,c("feet","inches"),regex ="(\\d)'(\\d*)")
##Renaming with recode()##
names <- USArrests %>% filter(str_length(rownames(USArrests))>12) %>% rownames(USArrests)
new_names <- recode(names,"Massachusetts"='MS')
##Date and Time Mining##
#Date#
ymd_hms(now())
dates <- sample(polls_us_election_2016$startdate,10) %>% sort()
month <- months(dates)
day(dates)
year(dates)
#Time
Sys.time()
now()
now() %>% minute()
now()%>%second()
now()%>%hour()
##TextMining## unnest_tokens()
install.packages("tidytext")
install.packages("textdata")
library(tidytext)
#Trump twitter text analysis##
data("trump_tweets")
##Where did the tweets come from?##
trump_tweets %>% count(source) %>% arrange(desc(n))
##Creating the dataset##
campaign_tweets <- trump_tweets %>%
extract(source, "source", "Twitter for (.*)") %>%
filter(source %in% c("Android", "iPhone") &
created_at >= ymd("2015-06-17") &
created_at < ymd("2016-11-08")) %>%
filter(!is_retweet) %>%
arrange(created_at)
#Visualizing the dataset##
ds_theme_set()
campaign_tweets %>%
mutate(hour = hour(with_tz(created_at, "EST"))) %>%
count(source, hour) %>%
group_by(source) %>%
mutate(percent = n / sum(n)) %>%
ungroup %>%
ggplot(aes(hour, percent, color = source)) +
geom_line() +
geom_point() +
labs(x = "Hour of day (EST)",
y = "% of tweets",
color = "")
##unnest_token()##
example <- data_frame(line = c(1, 2, 3, 4),
text = c("Roses are red,", "Violets are blue,", "Sugar is sweet,", "And so are you."))
example %>% unnest_tokens(wrd,text,token="sentences") #For breaking into sentences
example %>% unnest_tokens(wrd,text,token="words") ##For breaking into sentences##
#Lookahead Regex## (?=) positive, (?!) negative
strng <- c("aqic","aqrr")
pat <- "q(?=i)"
str_subset(strng,pat)
pat1 <- "q(?!i)"
str_subset(strng,pat1)
##Lookbehind## (?<=),(?<!)
pat2 <- "(?<=q)s"
str_subset("ghqs",pat2)
pat4 <- "(?<!a)w"
str_subset(c("eraw","adswwfdg"),pat4)
##grep(),grepl(),sub(),gsub()
##Trump tweets text mining##
pattern <- "([^A-Za-z\\d#@']|'(?![A-Za-z\\d#@]))"
tweet_words <- campaign_tweets %>%
mutate(text = str_replace_all(text, "https://t.co/[A-Za-z\\d]+|&", "")) %>%
unnest_tokens(word,text,token="regex",pattern=pattern)%>%
filter(!word %in% stop_words$word & !str_detect(word,"^\\d+$")) %>% mutate(word=str_replace(word,"^'",""))
##Getting the top 10 words by source##Android/iPhone
sourcetweets <- tweet_words %>% count(word,source) %>% arrange(desc(n))
#Given a word, how many time was it tweeted from an iPhone and an Android phone##
wordsourcewise <- sourcetweets %>% spread(key = source,value=n,fill=0)
##1.Given a word, what were the odds that it was tweeted from an Android phone as opposed to an iPhone##O
##2. Given an android phone, how likely was it that the the respective word was tweeted as opposed to other words##OR
android_words_odds <- wordsourcewise %>% filter(Android != 0 & iPhone !=0) %>% mutate(Anodds=Android/iPhone) %>%
mutate(Anoddsratio = (Android/iPhone)/( (sum(Android)-Android)/ (sum(iPhone)-iPhone) ) ) %>%
arrange(desc(Anoddsratio))
iphone_words_odds <- wordsourcewise %>% filter(Android != 0 & iPhone !=0) %>% mutate(iodds=iPhone/Android) %>%
mutate(ioddsratio = (iPhone/Android)/( (sum(iPhone)-iPhone)/ (sum(Android)-Android) ) ) %>%
arrange(desc(ioddsratio))
#Sentiment analysis of the tweets##
sentiments
get_sentiments("bing")
get_sentiments("nrc") %>% select(word,sentiments)
#Odds of sentiments according to the source## Android vs. iPhone##
nrc <- get_sentiments("nrc") %>% select(word,sentiment)
sentiment_counts <- tweet_words %>% left_join(nrc,by="word") %>% count(source,sentiment) %>% spread(source,n) %>%
filter(sentiment != "none") %>%
mutate(Android_odds = Android/iPhone, Word_odds_android = Android/(sum(Android)-Android)) %>% arrange(desc(Word_odds_android))
#Odds Ratio##
library(broom)
log_or <- sentiment_counts %>%
mutate( log_or = log( (Android / (sum(Android) - Android)) / (iPhone / (sum(iPhone) - iPhone))),
se = sqrt( 1/Android + 1/(sum(Android) - Android) + 1/iPhone + 1/(sum(iPhone) - iPhone)),
conf.low = log_or - qnorm(0.975)*se,
conf.high = log_or + qnorm(0.975)*se) %>%
arrange(desc(log_or))
#Visualization##
log_or %>% mutate(sentiment=reorder(sentiment,log_or),) %>% ggplot(aes(x = sentiment, ymin = conf.low, ymax = conf.high)) +
geom_errorbar() +
geom_point(aes(sentiment, log_or)) +
ylab("Log odds ratio for association between Android and sentiment") +
coord_flip()
#Important datetime functions()##
#weekdays(),rounddate(),month()
##Datetime assessment##
weekdays(brexit_polls$enddate) %>% table()
data("movielens")
movielens %>% mutate(date = year(as_datetime(timestamp))) %>% count(date) %>% arrange(desc(n))
#Project Gutenberg Assessment##
install.packages("gutenbergr")
library(gutenbergr)
words <- gutenberg_download(1342) %>% unnest_tokens(word,text) %>% filter(!word %in% stop_words$word & !str_detect(word,"\\d"))
afinn <- get_sentiments("afinn")
words %>% inner_join(afinn,by="word") %>% filter(value==4) %>% nrow()
##Project Hurricane Maria##
fn <- system.file("extdata", "RD-Mortality-Report_2015-18-180531.pdf", package="dslabs")
system("cmd.exe", input = paste("start", fn))
txt <- pdftools::pdf_text(fn)
x <- txt[9] %>% str_split(pattern = "\n")
s <- x[[1]] %>% str_trim()
str_which(s,"SEP")
str_split(s[2],"\\s+",simplify = T) %>% length()
str_which(s,"Total")
str_count(s,"\\d+")
df <- s %>% str_split("\\s+",simplify = T)
df <- df[1:34,1:5] %>% as.data.frame(df)
name <- c("SEP","a","b","c","d")
colnames(df) <- name
df <- df[3:34,1:5]
df <- df %>% filter(as.numeric(SEP) < 31) %>% mutate(SEP=as.character(SEP))
str_split_fixed(str_remove_all(s,"[^\\d\\s]"),"\\s+",n=6)[,1:5]
df %>% ggplot()+geom_point(aes(x=SEP,y=a),color="Blue") + geom_point(aes(x=SEP,y=b),color="Green") +
geom_point(aes(x=SEP,y=c),color="Red") + geom_vline(aes(xintercept = 20))
##Course 7##
##Regression##
?Teams()
head(Teams)
Teams %>% filter(yearID %in% 1961:2001) %>% mutate(AB=X2B/G,R=X3B/G) %>% select(AB,R) %>% cor()
##Galtons Dataset##
data(galton)
library(galton)
data("GaltonFamilies")
set.seed(1989)
female_heights <- GaltonFamilies%>%
filter(gender == "female") %>%
group_by(family) %>%
sample_n(1) %>%
ungroup() %>%
select(mother, childHeight) %>%
rename(daughter = childHeight)
r <- cor(female_heights$mother,female_heights$daughter)
slope <- r*sd(female_heights$daughter)/sd(female_heights$mother)
##Linear Models#
galton_heights <- GaltonFamilies %>%
filter(gender == "male") %>%
group_by(family) %>%
sample_n(1) %>%
ungroup() %>%
select(father, childHeight) %>%
rename(son = childHeight)
##RSS##
# Residual sum of squares of the line son = beta0 + beta1 * father.
#
# @param beta0 Intercept.
# @param beta1 Slope.
# @param data  Data frame with `son` and `father` columns. Defaults to the
#              global `galton_heights` so the existing sapply() call keeps
#              working. (Fix: the original accepted `data` but ignored it and
#              always read the global.)
# @return Scalar RSS.
rss <- function(beta0, beta1, data = galton_heights){
resid <- data$son - (beta0 + beta1*data$father)
sum(resid^2)
}
b = seq(0,1,len=nrow(galton_heights))
results <- data.frame(beta1=b,rss=sapply(X=b,FUN=rss,beta0=25))
results %>% ggplot(aes(beta1, rss)) + geom_line() +
geom_line(aes(beta1, rss))
bat_02 <- Batting %>% filter(yearID == 2002) %>%
mutate(pa = AB + BB, singles = (H - X2B - X3B - HR)/pa, bb = BB/pa) %>%
filter(pa >= 100) %>%
select(playerID, singles, bb)
bat_01 <- Batting %>% filter(yearID %in% 1999:2001) %>%
mutate(pa = AB + BB, singles = (H - X2B - X3B - HR)/pa, bb = BB/pa) %>%
filter(pa >= 100) %>%
group_by(playerID)%>%
summarise(s=mean(singles),b=mean(bb))%>%
select(playerID,s,b)
df <- inner_join(bat_02,bat_01,by="playerID")
##do(), broom()##
dat <- Teams %>% filter(yearID %in% 1961:2001) %>%
mutate(HR = round(HR/G, 1),
BB = BB/G,
R = R/G) %>%
select(HR, BB, R) %>%
filter(HR >= 0.4 & HR<=1.2)
dat %>% group_by(HR) %>% do(tidy(lm(R~BB,data=.),conf.int=TRUE)) %>%
filter(term=="BB")%>%ggplot(aes(x=HR,y=estimate,ymin=conf.low,ymax=conf.high)) + geom_point()+geom_errorbar()
##Excercise##
set.seed(1)
galton <- GaltonFamilies %>%
group_by(family, gender) %>%
sample_n(1) %>%
ungroup() %>%
gather(parent, parentHeight, father:mother) %>%
mutate(child = ifelse(gender == "female", "daughter", "son")) %>%
unite(pair, c("parent", "child"))
##Exercise##
set.seed(1)
t <- Teams %>%filter(yearID %in% 1961:2018)%>% group_by(yearID) %>% select(HR,BB,R) %>%
do(tidy(lm(R~BB+HR,data=.),conf.int=T)) %>% ungroup() %>% filter(term=="BB") %>%
select(yearID,term,estimate)
t %>% ggplot(aes(x=yearID,y=estimate)) +geom_point()+geom_smooth(method="lm")
##Assessment 1## Linear Regression
model <- Teams %>%
filter(yearID %in% 1961:2001) %>%
mutate(avg_attendance = attendance/G,runs_per_game=R/G,homeruns_per_game=HR/G) %>%
lm(avg_attendance~runs_per_game+homeruns_per_game+W+yearID,data=.)
data <- Teams %>% filter(yearID %in% 2002) %>% mutate(homeruns_per_game=HR/G,runs_per_game=R/G,ag=attendance/G) %>%
select(homeruns_per_game,runs_per_game,W,yearID,ag)
attendance <- data.frame(predicted = predict(model,data),actual= data$ag)
##Scatterplot between actual and predicted attendance per game in 2002##
attendance %>% ggplot(aes(x=actual,y=predicted)) + geom_point() + geom_smooth(method="lm")
##Correlation is not causation##
admissions %>% group_by(gender) %>%
summarise(total_admitted = round(sum(admitted / 100 * applicants)),
not_admitted = sum(applicants) - sum(total_admitted)) %>%
select(-gender) %>%
do(tidy(chisq.test(.)))
##Monte Carlo Simulations of p-value##
x <- rnorm(100,10,1)
y = rnorm(100,15,1)
r <- replicate(1000000,{
sample_x <- sample(x,size=20,replace = TRUE)
sample_y <- sample(y,size=20,replace=TRUE)
p <- tidy(lm(sample_x~sample_y),conf.int=T) %>%
filter(term=="sample_y")%>% select(p.value)
p
})
##Confounding Assessment##
library(dslabs)
data("research_funding_rates")
research_funding_rates
dat <- research_funding_rates %>%
mutate(discipline = reorder(discipline, success_rates_total)) %>%
rename(success_total = success_rates_total,
success_men = success_rates_men,
success_women = success_rates_women) %>%
gather(key, value, -discipline) %>%
separate(key, c("type", "gender")) %>%
spread(type, value) %>%
filter(gender != "total")
## Course 8## Machine Learning ##
##Heights Dataset##
x <- heights$height
y <- heights$sex
i <- createDataPartition(y,times=1,p=0.5,list=FALSE)
train_set <- heights[i,]
test_set <- heights[-i,]
y_hat <- sample(c("Male","Female"),nrow(test_set),replace = TRUE) %>%
factor(levels=levels(test_set$sex))##Random Sampling
mean(y_hat==test_set$sex)
y_HAT <- ifelse(train_set$height>65,"Male","Female") %>%
factor(levels=levels(test_set$sex))
sequence <- seq(61,70)
# Accuracy of the "Male if taller than x" cutoff rule.
#
# @param x    Height cutoff in inches.
# @param data Data frame with `height` and `sex` columns; defaults to the
#             global `train_set` so the existing sapply(sequence, acc_train)
#             call keeps working.
# @return Proportion of rows whose predicted sex matches data$sex.
acc_train <- function(x, data = train_set){
y_hat <- ifelse(data$height>x,"Male","Female")
mean(y_hat==data$sex)
}
accuracy_train <- sapply(sequence,acc_train)
# Accuracy of the "Male if taller than x" cutoff rule on held-out data.
#
# @param x    Height cutoff in inches.
# @param data Data frame with `height` and `sex` columns; defaults to the
#             global `test_set` so the existing sapply(sequence, acc_test)
#             call keeps working.
# @return Proportion of rows whose predicted sex matches data$sex.
acc_test <- function(x, data = test_set){
y_hat <- ifelse(data$height>x,"Male","Female")
mean(y_hat==data$sex)
}
accuracy_test <- sapply(sequence,acc_test)
ac <- data.frame(train_acc=accuracy_train,test_acc=accuracy_test,cutoff=sequence)
##Confusion Matrix##
##Recall = TP/(TP+FN) , Precision = TP/(TP+FP)
confusionMatrix(data=y_HAT,reference = test_set$sex)
F_meas(data=y_HAT,reference = factor(test_set$sex))
##F-1 Score ##
# Accuracy of the "Male if taller than x" rule on the global train_set.
# NOTE(review): this re-defines the identical helper declared earlier in the
# file; only one copy is needed.
acc_train <- function(x){
y_hat <- ifelse(train_set$height>x,"Male","Female")
mean(y_hat==train_set$sex)
}
accuracy_train <- sapply(sequence,acc_train)
# Accuracy of the "Male if taller than x" rule on the global test_set.
# NOTE(review): this re-defines the identical helper declared earlier in the
# file; only one copy is needed.
acc_test <- function(x){
y_hat <- ifelse(test_set$height>x,"Male","Female")
mean(y_hat==test_set$sex)
}
accuracy_test <- sapply(sequence,acc_test)
ac <- data.frame(train_acc=accuracy_train,test_acc=accuracy_test,cutoff=sequence)
cutoff <- seq(61, 70)
# F1 score of the "Male if taller than x" cutoff rule (caret::F_meas).
#
# @param x    Height cutoff in inches.
# @param data Data frame with `height` and `sex`; defaults to the global
#             `train_set`, preserving the existing usage.
# @return F1 of the predictions against data$sex.
F_1 <- function(x, data = train_set){
# Fix: take factor levels from the data actually being scored — the original
# scored train_set but borrowed the levels of test_set$sex.
y_hat <- ifelse(data$height > x, "Male", "Female") %>%
factor(levels = levels(data$sex))
F_meas(data = y_hat, reference = factor(data$sex))
}
library(dslabs)
library(dplyr)
library(lubridate)
data(reported_heights)
dat <- mutate(reported_heights, date_time = ymd_hms(time_stamp)) %>%
filter(date_time >= make_date(2016, 01, 25) & date_time < make_date(2016, 02, 1)) %>%
mutate(type = ifelse(day(date_time) == 25 & hour(date_time) == 8 & between(minute(date_time), 15, 30), "inclass","online")) %>%
select(sex, type)
y <- factor(dat$sex, c("Female", "Male"))
x <- dat$type
new_dat <- dat %>% mutate(y_hat=ifelse(type=="online","Male","Female"),y_hat=factor(y_hat))
library(caret)
set.seed(2, sample.kind="Rounding")
data(iris)
iris <- iris[-which(iris$Species=='setosa'),]
y = iris$Species
test_index <- createDataPartition(iris,times=1,p=0.5,list=FALSE)
test <- iris[test_index,]
train <- iris[-test_index,]
trainsp <- factor(train$Species,levels = c("virginica","versicolor"))
sw <- seq(min(test$Sepal.Width),max(test$Sepal.Width),by=0.1)
sl <- seq(min(test$Sepal.Length),max(test$Sepal.Length),by=0.1)
pw <- seq(min(train$Petal.Width),max(train$Petal.Width),by=0.1)
pl <- seq(min(train$Petal.Length),max(train$Petal.Length),by=0.1)
# Accuracy of a single-feature cutoff classifier: predict "virginica" when
# the feature exceeds x, "versicolor" otherwise.
#
# @param x      Cutoff value.
# @param values Numeric feature vector to threshold. Defaults to
#               train$Petal.Length so existing calls still run — but note the
#               original hard-coded Petal.Length, so the sapply() sweeps over
#               sw/sl/pw below were all silently scoring Petal.Length; pass
#               the matching feature explicitly to fix that.
# @param truth  Factor of true species, levels c("virginica","versicolor");
#               defaults to the global trainsp.
# @return Proportion of correct predictions.
SL <- function(x, values = train$Petal.Length, truth = trainsp){
y_hat <- ifelse(values>x,'virginica', 'versicolor')
y_hat <- factor(y_hat,levels = c("virginica","versicolor"))
mean(y_hat==truth)
}
# Accuracy of a single-feature cutoff classifier (predict "virginica" above x).
# NOTE(review): this was an exact duplicate of SL above, also hard-coding
# Petal.Length; it is generalized here the same way so a feature vector can
# be supplied explicitly.
#
# @param x      Cutoff value.
# @param values Numeric feature vector; defaults to train$Petal.Length for
#               backward compatibility with existing calls.
# @param truth  Factor of true species, levels c("virginica","versicolor");
#               defaults to the global trainsp.
# @return Proportion of correct predictions.
S2 <- function(x, values = train$Petal.Length, truth = trainsp){
y_hat <- ifelse(values > x, "virginica","versicolor")
y_hat <- factor(y_hat,levels=c("virginica","versicolor"))
mean(y_hat==truth)
}
swidth <- sapply(sw,SL) %>% max()
slength <- sapply(sl, SL) %>% max()
pwidth <- sapply(pw, SL) %>% max()
plength <- sapply(pl, SL) %>% max()
y_hat <- ifelse(test$Petal.Width>1.6 | test$Petal.Length>4.7,'virginica','versicolor')
set.seed(1, sample.kind = "Rounding") # if using R 3.6 or later
disease <- sample(c(0,1), size=1e6, replace=TRUE, prob=c(0.98,0.02))
test <- rep(NA, 1e6)
test[disease==0] <- sample(c(0,1), size=sum(disease==0), replace=TRUE, prob=c(0.90,0.10))
test[disease==1] <- sample(c(0,1), size=sum(disease==1), replace=TRUE, prob=c(0.15, 0.85))
ps <- seq(0, 1, 0.1)
heights %>%
mutate(g = cut(height, quantile(height, ps), include.lowest = TRUE)) %>%
group_by(g) %>%
summarize(p = mean(sex == "Male"), height = mean(height)) %>%
qplot(height, p, data =.)
Sigma <- 9*matrix(c(1,0.5,0.5,1), 2, 2)
dat <- MASS::mvrnorm(n = 10000, c(69, 69), Sigma) %>%
data.frame() %>% setNames(c("x", "y"))
##Linear Regression##
galton_heights <- GaltonFamilies %>%
filter(childNum == 1 & gender == "male") %>%
select(father, childHeight) %>%
rename(son = childHeight)
y <- galton_heights$son
test_index <- createDataPartition(y, times = 1, p = 0.5, list = FALSE)
train_set <- galton_heights %>% slice(-test_index)
test_set <- galton_heights %>% slice(test_index)
model <- train_set %>% lm(son~father,data=.)
y_hat <- predict(model,test_set)
set.seed(1) # if using R 3.6 or later
n <- 100
Sigma <- 9*matrix(c(1.0, 0.5, 0.5, 1.0), 2, 2)
dat <- MASS::mvrnorm(n = 100, c(69, 69), Sigma) %>%
data.frame() %>% setNames(c("x", "y"))
# Monte-Carlo RMSE of a simple linear model y ~ x fitted on the global `dat`.
# Returns a numeric vector of n RMSE values, one per random 50/50 split.
# NOTE(review): depends on the globals `dat` and caret::createDataPartition;
# the body is a single replicate() expression, hence no braces on the function.
rmse <- function(n)
replicate(n,{
y <- dat$y
test_index <- createDataPartition(y, times = 1, p = 0.5, list = FALSE)
# train on the rows outside test_index, evaluate on the held-out half
train_set <- dat[-test_index,]
test_set <- dat[test_index,]
model <- lm(y~x,data=train_set)
y_hat <- predict(model,test_set)
# root-mean-squared prediction error on the test half
sqrt(mean((y_hat- test_set$y)^2))
})
# Mean and SD of the Monte-Carlo RMSE distribution over n replicates.
#
# @param n Number of simulation replicates to run via rmse().
# @return A list of two elements: list(mean, sd), as before.
f <- function(n){
# Fix: run the simulation once and summarize it. The original called rmse(n)
# twice, so the reported mean and sd came from two different random samples.
errs <- rmse(n)
me <- mean(errs)
se <- sd(errs)
l <- list(me,se)
return(l)
}
n <- c(100)
sapply(n,f)
##Logistic Regression## glm(family='binomial')
heights %>%
mutate(x = round(height)) %>%
group_by(x) %>%
filter(n() >= 10) %>%
summarize(prop = mean(sex == "Female")) %>%
ggplot(aes(x, prop)) +
geom_point()
test_index <- createDataPartition(y, times = 1, p = 0.5, list = FALSE)
train_set <- heights %>% slice(-test_index)
test_set <- heights %>% slice(test_index)
lm_fit <- mutate(train_set, y = as.numeric(sex == "Female")) %>% lm(y ~ height, data = .)
p_hat <- predict(lm_fit, test_set)
y_hat <- ifelse(p_hat > 0.5, "Female", "Male") %>% factor()
confusionMatrix(y_hat, test_set$sex)
heights %>%
mutate(x = round(height)) %>%
group_by(x) %>%
filter(n() >= 10) %>%
summarize(prop = mean(sex == "Female")) %>%
ggplot(aes(x, prop)) +
geom_point() +
geom_abline(intercept = lm_fit$coef[1], slope = lm_fit$coef[2])
glm_fit <- train_set %>% mutate(y=as.numeric(sex=='Female')) %>%
glm(y~height,data=.,family='binomial')
p_hat_logit <- predict(glm_fit,newdata=test_set,type='response')
y_hat_logit <- ifelse(p_hat_logit>0.5,"Female","Male") %>% factor()
set.seed(2, sample.kind="Rounding") #if you are using R 3.6 or later
# Simulate a binary-outcome data set where x | y=1 ~ N(mu_1, sigma_1) and
# x | y=0 ~ N(mu_0, sigma_0), then split it 50/50 into train and test sets.
#
# @param n                Number of observations.
# @param p                P(y = 1).
# @param mu_0,mu_1        Class-conditional means of x.
# @param sigma_0,sigma_1  Class-conditional SDs of x.
# @return list(train = ..., test = ...) of data frames with columns x, y.
make_data <- function(n = 1000, p = 0.5,
mu_0 = 0, mu_1 = 2,  # fix: default was `z`, an undefined global name
sigma_0 = 1, sigma_1 = 1){
y <- rbinom(n, 1, p)
f_0 <- rnorm(n, mu_0, sigma_0)
f_1 <- rnorm(n, mu_1, sigma_1)
# each observation draws x from its own class's distribution
x <- ifelse(y == 1, f_1, f_0)
test_index <- createDataPartition(y, times = 1, p = 0.5, list = FALSE)
list(train = data.frame(x = x, y = as.factor(y)) %>% slice(-test_index),
test = data.frame(x = x, y = as.factor(y)) %>% slice(test_index))
}
set.seed(1, sample.kind="Rounding") #if you are using R 3.6 or later
delta <- seq(0, 3, len = 25)
res <- sapply(delta, function(d){
dat <- make_data(mu_1 = d)
fit_glm <- dat$train %>% glm(y ~ x, family = "binomial", data = .)
y_hat_glm <- ifelse(predict(fit_glm, dat$test) > 0.5, 1, 0) %>% factor(levels = c(0, 1))
mean(y_hat_glm == dat$test$y)
confusionMatrix(data=y_hat_glm,reference = dat$test$y)$overall['Accuracy']
})
qplot(delta,res)
##Bin Smoothing## ksmooth()
span <- 7
fit <- with(polls_2008,ksmooth(day, margin, x.points = day, kernel="normal", bandwidth =span))
polls_2008 %>% mutate(smooth = fit$y) %>%
ggplot(aes(day, margin)) +
geom_point(size = 3, alpha = .5, color = "grey") +
geom_line(aes(day, smooth), color="red")
#Local Regression## loess()
library(purrr)
library(pdftools)
fn <- system.file("extdata", "RD-Mortality-Report_2015-18-180531.pdf", package="dslabs")
dat <- map_df(str_split(pdf_text(fn), "\n"), function(s){
s <- str_trim(s)
header_index <- str_which(s, "2015")[1]
tmp <- str_split(s[header_index], "\\s+", simplify = TRUE)
month <- tmp[1]
header <- tmp[-1]
tail_index <- str_which(s, "Total")
n <- str_count(s, "\\d+")
out <- c(1:header_index, which(n==1), which(n>=28), tail_index:length(s))
s[-out] %>%
str_remove_all("[^\\d\\s]") %>%
str_trim() %>%
str_split_fixed("\\s+", n = 6) %>%
.[,1:5] %>%
as_data_frame() %>%
setNames(c("day", header)) %>%
mutate(month = month,
day = as.numeric(day)) %>%
gather(year, deaths, -c(day, month)) %>%
mutate(deaths = as.numeric(deaths))
}) %>%
mutate(month = recode(month, "JAN" = 1, "FEB" = 2, "MAR" = 3, "APR" = 4, "MAY" = 5, "JUN" = 6,
"JUL" = 7, "AGO" = 8, "SEP" = 9, "OCT" = 10, "NOV" = 11, "DEC" = 12)) %>%
mutate(date = make_date(year, month, day)) %>%
dplyr::filter(date <= "2018-05-01")
span <- 60 / as.numeric(diff(range(dat$date)))
fit <- dat %>% mutate(x = as.numeric(date)) %>% loess(deaths ~ x, data = ., span = span, degree = 1)
dat %>% mutate(smooth = predict(fit, as.numeric(date)), day=yday(date),year=as.character(year(date))) %>%
ggplot() +
geom_line(aes(day, smooth,col=year), lwd = 2)
##Matrices## matrix(),dim(),t()
library(tidyverse)
library(dslabs)
if(!exists("mnist")) mnist <- read_mnist()
class(mnist$train$images)
x <- mnist$train$images[1:1000,]
y <- mnist$train$labels[1:1000]
grid <- matrix(x[3,], 28, 28)
image(1:28, 1:28, grid)
# flip the image back
image(1:28, 1:28, grid[, 28:1])
#Summing row values in matrix rowsums() rowSd()
avg <- rowSums(x)
rowMeans(x)
data_frame(labels = as.factor(y), row_averages = avg) %>%
qplot(labels, row_averages, data = ., geom = "boxplot")
apply(x,1,sd)
sds <- apply(x,2,sd)
##Filtering matrices on conditions##
colsd <- apply(x,2,sd)
new_x <- x[,colsd>60]
##sweep() to perform rowise or colwise ops
mymat <- matrix(data=c(1:10),5,2)
sweep(mymat,2,colSums(mymat)) %>% sweep(2,1.581139,FUN="/")
x <- matrix(rnorm(1000),100,10)
dim(x)[1]
x <- sweep(x,1,1:nrow(x),FUN="+")
x <- x+ seq(nrow(x))
class(mnist_27)
#KNN Model## dist() in used to find distance b/w points and predictors ##knn3()
if(!exists("mnist")) mnist <- read_mnist()
ind <- which(mnist$train$labels %in% c(2,7)) %>% sample(500)
#the predictors are in x and the labels in y
x <- mnist$train$images[ind,]
y <- mnist$train$labels[ind]
distance <- dist(x)
as.matrix(distance)[1:3,1:3]
##Example##
library(dslabs)
data(tissue_gene_expression)
tis <- as.matrix(dist(tissue_gene_expression$x))
image(tis)
##KNN Example## knn3()
x=as.matrix(mnist_27$train[,2:3])
y = mnist_27$train$y
knn_model <- knn3(y~x_1+x_2,k=5,data=mnist_27$train)
y_hat_knn <- predict(knn_model,mnist_27$test,type = "class")
confusionMatrix(data=y_hat_knn,reference = mnist_27$test$y)
##Sample example on logistic##
log_model <- glm(y~.,data=mnist_27$train,family = "binomial")
p_hat <- predict(log_model,mnist_27$test,type="response")
y_hat <- ifelse(p_hat>0.5,7,2) %>% factor()
confusionMatrix(data=y_hat,reference = mnist_27$test$y)
##KNN Example##
set.seed(1,sample.kind = "Rounding")
y <- heights$sex
x <- heights$height
# Split the global x (heights) and y (sex) into a 50/50 train/test pair.
# Returns list(train = ..., test = ...) of data frames with columns x, y.
# NOTE(review): depends on globals x, y and caret/dplyr; there is no set.seed
# inside, so each call produces a different split.
md <- function(){
test_index <- createDataPartition(y, times = 1, p = 0.5, list = FALSE)
list(train = data.frame(x = x, y = as.factor(y)) %>% slice(-test_index),
test = data.frame(x = x, y = as.factor(y)) %>% slice(test_index))
}
mydata <- md()
neigh <- seq(1,101,3)
f1 <- sapply(neigh,function(d){
kn <- knn3(y~x,data=mydata$train,k=d)
yy <- predict(kn,mydata$test,type="class")
f1 <- F_meas(data=yy,reference = mydata$test$y)
return(f1)
})
##Another KNN Example###
set.seed(1,sample.kind = "Rounding")
y <- tissue_gene_expression$y
x <- tissue_gene_expression$x
# 50/50 train/test split of the current globals x and y — redefined here so
# it now operates on the tissue_gene_expression matrices assigned just above.
# NOTE(review): same body as the earlier md(); only the globals it captures
# differ, which is fragile. Passing x and y as arguments would be safer.
md <- function(){
test_index <- createDataPartition(y, times = 1, p = 0.5, list = FALSE)
list(train = data.frame(x = x, y = as.factor(y)) %>% slice(-test_index),
test = data.frame(x = x, y = as.factor(y)) %>% slice(test_index))
}
mydata <- md()
neigh <- seq(1,11,2)
f1 <- sapply(neigh,function(d){
kn <- knn3(y~.,data=mydata$train,k=d)
yy <- predict(kn,mydata$test,type="class")
acc <- mean(yy== mydata$test$y)
return(acc)
})
# Evaluate a k = 1 kNN classifier on the held-out tissue split.
kn <- knn3(y~.,data=mydata$train,k=1)
yy <- predict(kn,mydata$test,type="class")
# Fix: mean() has no `reference` argument and the mean of a factor is NA;
# accuracy is the share of predictions that match the truth.
acc <- mean(yy == mydata$test$y)
# Fix: return() at top level is an error outside a function; just print it.
acc
##Cross Validation## colttests()
set.seed(1996, sample.kind="Rounding") #if you are using R 3.6 or later
# Simulate n observations of p pure-noise predictors and a random binary outcome.
n <- 1000
p <- 10000
x <- matrix(rnorm(n*p), n, p)
colnames(x) <- paste("x", 1:ncol(x), sep = "_")
y <- rbinom(n, 1, 0.5) %>% factor()
##t-test()##
#install.packages("BiocManager")
#BiocManager::install("genefilter")
library(genefilter)
tt <- colttests(x, y) ## Column-wise p-value
statistically_significant <- which(tt$p.value<0.01)
# Fix: subset x only after `statistically_significant` exists — the original
# computed x_subset before defining the index vector it uses.
x_subset <- x[ ,statistically_significant]
##Running the predictions with significant predictors##
splitrule <- trainControl(method = "cv",number = 10)
# NOTE(review): `trainControl=` is not an argument of caret::train, so `fit`
# silently ignores the CV spec; `fit1` uses the correct `trControl=` name.
fit <- train(x_subset, y,method='glm',trainControl=splitrule)
fit1 <- train(x_subset, y,method='glm',trControl=splitrule)
ggplot(fit)
##Tuning##
train(tissue_gene_expression$x,tissue_gene_expression$y,method='knn',tuneGrid=data.frame(k=seq(1,7,2)),trControl=trainControl(verboseIter=T,number=17,method = "cv"))
##Bootstrapping##
library(dslabs)
library(caret)
data(mnist_27)
# set.seed(1995) # if R 3.5 or earlier
set.seed(1995, sample.kind="Rounding") # if R 3.6 or later
indexes <- createResample(mnist_27$train$y, 10)
x=sapply(indexes, function(ind){
sum(ind == 3)
})
sum(x)
##Estimating 75th quantile by 10,000 MCS##
set.seed(1,sample.kind = "Rounding")
data <- replicate(10000,{
q <- rnorm(100,0,1)
return(quantile(q,0.75))
})
##Bootstrap sample##
set.seed(1,sample.kind = "Rounding")
y <- rnorm(100,0,1)
bots <- createResample(y,10000)
ss <- sapply(bots,function(i){
qnt <- quantile(y[i],0.75)
qnt
})
##Naive Bayes##
library("caret")
data("heights")
y <- heights$height
set.seed(2)
test_index <- createDataPartition(y, times = 1, p = 0.5, list = FALSE)
train_set <- heights %>% slice(-test_index)
test_set <- heights %>% slice(test_index)
# Estimating averages and standard deviations
params <- train_set %>%
group_by(sex) %>%
summarize(avg = mean(height), sd = sd(height))
params
# Estimating the prevalence
pi <- train_set %>% summarize(pi=mean(sex=="Female")) %>% pull(pi)
pi <- 0.5
# Getting an actual rule
x <- test_set$height
f0 <- dnorm(x, params$avg[2], params$sd[2])
f1 <- dnorm(x, params$avg[1], params$sd[1])
p_hat_bayes <- f1*pi / (f1*pi + f0*(1 - pi))
jd <- data.frame(height=x,given_male_p_height=f0,given_female_p_height=f1,given_height_p_female=p_hat_bayes)
jd %>% ggplot(aes(x=height,y=p_hat_bayes))+geom_line() + geom_point()
predictions <- ifelse(p_hat_bayes>0.5,"Female","Male") %>% factor()
sensitivity(data=predictions,reference = test_set$sex)
##QDA and LDA Example##
data("tissue_gene_expression")
# set.seed(1993) #if using R 3.5 or earlier
set.seed(1993, sample.kind="Rounding") # if using R 3.6 or later
ind <- which(tissue_gene_expression$y %in% c("cerebellum", "hippocampus"))
y <- droplevels(tissue_gene_expression$y[ind])
x <- tissue_gene_expression$x[ind, ]
x <- x[, sample(ncol(x), 10)]
df <- data.frame(x,y)
model <- train(y~.,data=df,method="lda",preProcess="center")
model$finalModel
t(model$finalModel$means) %>% data.frame() %>%
mutate(predictor_name = rownames(.)) %>%
ggplot(aes(cerebellum, hippocampus, label = predictor_name)) +
geom_point() +
geom_text() +
geom_abline()
set.seed(1993) #set.seed(1993, sample.kind="Rounding") if using R 3.6 or later
ind <- which(tissue_gene_expression$y %in% c("cerebellum", "hippocampus"))
y <- droplevels(tissue_gene_expression$y[ind])
x <- tissue_gene_expression$x[ind, ]
x <- x[, sample(ncol(x), 10)]
modelq <- train(y~.,data=df,method="qda")
t(modelq$finalModel$means) %>% data.frame() %>%
mutate(predictor_name = rownames(.)) %>%
ggplot(aes(cerebellum, hippocampus, label = predictor_name)) +
geom_point() +
geom_text() +
geom_abline()
set.seed(1993, sample.kind="Rounding") # if using R 3.6 or later
y <- tissue_gene_expression$y
x <- tissue_gene_expression$x
x <- x[, sample(ncol(x), 10)]
df <- data.frame(x,y)
model <- train(y~.,data=df,method="lda",preProcess="center")
##Regression Trees## rpart() package()
data("olive")
olive <- select(olive, -area)
# Predict region using KNN
library(caret)
fit <- train(region ~ ., method = "rpart",
tuneGrid = data.frame(cp= seq(0, 0.05, len=25)),
data = olive)
d <- knn3(y~x,tissue_gene_expression,k=3)
train(tissue_gene_expression$x,tissue_gene_expression$y,method='knn',tuneGrid=data.frame(k=seq(1,7,2)))
##Using rpart() package##
data("polls_2008")
train_rpart <- rpart(margin~.,data=polls_2008,control=rpart.control(cp=0)) #rpart package##
polls_2008 %>% mutate(y_hat=predict(train_rpart)) %>%
ggplot()+geom_point(aes(day,margin))+geom_line(aes(day,y_hat),col="Red")
rmse <- sqrt(mean((predict(train_rpart,polls_2008)-polls_2008$margin)^2))
tr <- train(margin~.,data=polls_2008,method="rpart",tuneGrid=data.frame(cp=seq(0,0.1,len=20)),trControl=trainControl(method = "cv",verboseIter = T),metric="RMSE",maximize=F)
##Another example## rpart() and train()##
##getModelinfo() to study tuning parameter##
mtc <- select(mtcars,c("mpg","hp","wt","qsec"))
tunn <- train(mpg~.,data=mtc,method="rpart",tuneGrid=data.frame(cp=0.02),trControl=trainControl(method = "cv"))
modl <- mtc %>% lm(mpg~.,data=.)
RMSE(predict(modl,mtc),obs=mtc$mpg)
rtree <- rpart(mpg~.,data=mtc,control=rpart.control(cp=0.02)) #rpart##
y_hat <- predict(rtree,mtc)
RMSE(pred = y_hat,obs=mtc$mpg)
##Random Forest##
rf <- randomForest::randomForest(mpg~.,data=mtc,importance=T)
##Examples##
library(rpart)
n <- 1000
sigma <- 0.25
# set.seed(1) # if using R 3.5 or ealier
set.seed(1, sample.kind = "Rounding") # if using R 3.6 or later
x <- rnorm(n, 0, 1)
y <- 0.75 * x + rnorm(n, 0, sigma)
dat <- data.frame(x = x, y = y)
fit <- rpart(y~x,data=dat) #Regression Tree
plot(fit)
text(fit)
dat %>%
mutate(y_hat = predict(fit)) %>%
ggplot() +
geom_point(aes(x, y)) +
geom_step(aes(x,y_hat))
library(randomForest)
fit <- randomForest(y~x,data=dat)
dat %>%
mutate(y_hat = predict(fit)) %>%
ggplot() +
geom_point(aes(x, y)) +
geom_step(aes(x, y_hat), col = "red")
plot(fit)
getModelInfo("glm")
modelLookup("knn")
##Parameter Tuning##
getModelInfo("glm")
modelLookup("glm")
train_knn <- train(y ~ ., method = "knn", data = mnist_27$train)
ggplot(train_knn, highlight = T)
#Example#
data("tissue_gene_expression")
set.seed(1991,sample.kind = "Rounding")
t <- data.frame(x=tissue_gene_expression$x,y=tissue_gene_expression$y)
c = data.frame(mtry=seq(50,200,25))
mod <- train(y~.,data=t,method="rpart",tuneGrid=c,control = rpart.control(minsplit = 0))
ggplot(mod)
plot(mod$finalModel)
text(mod$finalModel)
#Random Forest##
mod <- train(y~.,data=t,method="rf",tuneGrid=c,nodesize=1)
varImp(mod)
##Titanic dataset##
install.packages("titanic")
library(titanic) # loads titanic_train data frame
# 3 significant digits
options(digits = 3)
# clean the data - `titanic_train` is loaded with the titanic package
titanic_clean <- titanic_train %>%
mutate(Survived = factor(Survived),
Embarked = factor(Embarked),
Age = ifelse(is.na(Age), median(Age, na.rm = TRUE), Age), # NA age to median age
FamilySize = SibSp + Parch + 1) %>% # count family members
select(Survived, Sex, Pclass, Age, Fare, SibSp, Parch, FamilySize, Embarked)
set.seed(42,sample.kind="Rounding")
i <- createDataPartition(titanic_clean$Survived,p=0.2,list=F)
training <- titanic_clean %>% slice(-i)
test <- titanic_clean %>% slice(i)
table(titanic_clean$Survived)
##Random guessing##
set.seed(3,sample.kind="Rounding")
ans <- sample(c(0,1),size=length(test$Survived),replace = T) %>% factor()
mean(ans==test$Survived)
##Survived proportion by sex##
test %>% select(Survived,Sex) %>% group_by(Sex) %>% summarise(m=mean(Survived==1))
preds <- ifelse(test$Sex=="female",1,0) %>% factor()
mean(preds==test$Survived)
##Survived proportion by Pclass##
options(digits = 3)
test %>% select(Survived,Pclass,Sex) %>% group_by(Pclass,Sex) %>% summarise(m=mean(Survived==1)) %>% filter(m>0.5)
pred <- ifelse(test$Sex=="female" & test$Pclass != "3",1,0) %>% factor()
mean(test$Survived==pred)
##Only sex model##
test %>% select(Survived,Sex) %>% group_by(Sex) %>% summarise(m=mean(Survived==1))
preds <- ifelse(test$Sex=="female",1,0) %>% factor()
F_meas(data=preds,reference = test$Survived)
##Only Class model##
test %>% select(Survived,Pclass) %>% group_by(Pclass) %>% summarise(m=mean(Survived==1))
ps <- ifelse(test$Pclass=="1",1,0) %>% factor()
F_meas(data=ps,reference = test$Survived)
#Sex and Class model##
test %>% select(Survived,Pclass,Sex) %>% group_by(Pclass,Sex) %>% summarise(m=mean(Survived==1)) %>% filter(m>0.5)
pred <- ifelse(test$Sex=="female" & test$Pclass != "3",1,0) %>% factor()
mean(test$Survived==pred)
F_meas(data=pred,reference = test$Survived)
#lda model##
new <- select(test,Fare)
set.seed(1,sample.kind="Rounding")
LDA <- train %>% select(Fare,Survived) %>% train(Survived~.,data=.,method="qda")
mean(predict(LDA,new)==test$Survived)
##qda model##
new <- select(test,Fare)
set.seed(1,sample.kind="Rounding")
LDA <- train %>% select(Fare,Survived) %>% train(Survived~.,data=.,method="lda")
mean(predict(LDA,new)==test$Survived)
##logistic regression##
set.seed(1,sample.kind="Rounding")
logis <- glm(Survived~.,data=training,family = "binomial")
p_hat <- predict(logis,test,type = "response")
y_hat <- ifelse(p_hat>0.5,1,0)%>% factor()
mean(y_hat==test$Survived)
confusionMatrix(data=y_hat,reference = test$Survived)
##KNN using train() to tune k-values## Bootstrapped knn model##
set.seed(6,sample.kind = "Rounding")
k_param <- train(Survived~.,data=training,tuneGrid=data.frame(k=seq(3, 51, 2)),method="knn")
ggplot(k_param)
k_param$bestTune
res <- predict(k_param,test)
mean(res==test$Survived)
##Cross-Validated knn model##
set.seed(8,sample.kind = "Rounding")
k_param_cv <- train(Survived~.,data=training,tuneGrid=data.frame(k=seq(3, 51, 2)),method="knn",trControl=
trainControl(method = "cv",number=10,verboseIter=T))
ggplot(k_param_cv)
k_param$bestTune
res <- predict(k_param_cv,test)
mean(res==test$Survived)
##Classification tree## Bootstrapped
set.seed(10,sample.kind = "Rounding")
tree <- train(Survived~.,data=training,tuneGrid=data.frame(cp=seq(0, 0.05, 0.002)),method="rpart")
ggplot(tree) #CP vs. Accuracy##
tree$bestTune #Best cp value##
fm <- tree$finalModel #Final Model##
plot(fm)
text(fm)
res <- predict(tree,test)
mean(res==test$Survived)
new_test_data <- data.frame(Sex=c("male","female","female","male","female","female","male"),
Pclass=c("0","2","3","0","3","1","1"),Age=c(28,0,0,5,0,17,17),
Fare=c(0,0,8,0,25,0,0),SibSp=c(0,0,0,4,0,2,2),Parch=c(0,0,0,0,0,0,0),
FamilySize=c(1,1,1,5,1,3,3),Embarked=c('S','C','Q','S','Q','C','C'))
new_test_data <- new_test_data %>% mutate(Sex=as.character(Sex),Pclass=as.integer(Pclass),Age=as.numeric(Age),Fare=as.numeric(Fare),SibSp=as.integer(SibSp),Parch=as.integer(Parch),
FamilySize=as.numeric(FamilySize),Embarked=as.factor(Embarked))
res <- predict(tree,new_test_data)
##Random Forest##
set.seed(14,sample.kind = "Rounding")
RF <- train(Survived~.,data=training,tuneGrid=data.frame(mtry=seq(1:7)),method="rf",ntree=100)
ggplot(RF) #CP vs. Accuracy##
RF$bestTune #Best cp value##
fm <- RF$finalModel #Final Model##
plot(fm)
text(fm)
res <- predict(RF,test)
mean(res==test$Survived)
##Mnist example##
set.seed(123,sample.kind = "Rounding")
mnist <- read_mnist()
index <- sample(nrow(mnist$train$images),10000)
x <- mnist$train$images[index,]
colnames(x) <- 1:ncol(x)
y <- factor(mnist$train$labels[index])
t_index <- sample(nrow(mnist$test$images),10000)
x_test <- mnist$test$images[t_index,]
colnames(x_test) <- 1:ncol(x_test)
y_test <- factor(mnist$test$labels[t_index])
#Removing columns/predictors with 0 variance##
##nearZeroVar() gives predictors with almost 0 variance##
sds <- colSds(x)
qplot(sds, bins = 256)
nzv <- nearZeroVar(x)
length(nzv)
col_index <- setdiff(1:ncol(x),nzv)
#KNN Model##
n <- 1000
b <- 5
control <- trainControl(method = "cv", number = b, p = .9)
train_knn <- train(x[,col_index],y,
method = "knn",
tuneGrid = data.frame(k = c(3,5,7)),
trControl = control)
fit_knn <- knn3(x[ ,col_index], y, k = 3)
y_hat_knn <- predict(fit_knn,
x_test[, col_index],
type="class")
cm <- confusionMatrix(y_hat_knn, factor(y_test))
cm$overall["Accuracy"]
cm$byClass[,1:2]
library(Rborist)
control <- trainControl(method="cv", number = 5, p = 0.8)
grid <- expand.grid(minNode = c(1,5) , predFixed = c(10, 15, 25, 35, 50))
train_rf <- train(x[, col_index], y,
method = "Rborist",
nTree = 50,
trControl = control,
tuneGrid = grid,
nSamp = 5000)
ggplot(train_rf)
train_rf$bestTune
fit_rf <- Rborist(x[, col_index], y,
nTree = 1000,
minNode = train_rf$bestTune$minNode,
predFixed = train_rf$bestTune$predFixed)
y_hat_rf <- factor(levels(y)[predict(fit_rf, x_test[ ,col_index])$yPred])
cm <- confusionMatrix(y_hat_rf, y_test)
cm$overall["Accuracy"]
##Random Forest##
library(randomForest)
x <- mnist$train$images[index,]
y <- factor(mnist$train$labels[index])
rf <- randomForest(x, y, ntree = 50)
imp <- importance(rf)
image(matrix(imp, 28, 28))
p_max <- predict(fit_knn, x_test[,col_index])
#Ensemble##
p_rf <- predict(rf, x_test)$census
##Example##
set.seed(1, sample.kind = "Rounding") # if using R 3.6 or later
models <- c("glm", "lda", "naive_bayes", "svmLinear", "knn", "gamLoess", "multinom", "qda", "rf", "adaboost")
new_models <- models[ind]
data("mnist_27")
fits <- lapply(new_models, function(model){
print(model)
train(y ~ ., method = model, data = mnist_27$train)
})
acc <- sapply(fits, function(fit) min(fit$results$Accuracy))
ind <- which(acc>0.8)
res <- sapply(fits,function(fits){
predict(fits,mnist_27$test)
})
resl <- apply(res,1,function(x) names(which.max(table(x)))) ##Rowwise most common element
resl <- factor(resl)
confusionMatrix(resl,mnist_27$test$y)
##Movie recommendation example##
data("movielens")
movielens %>%
dplyr::count(userId) %>%
ggplot(aes(n)) +
geom_histogram(bins = 30, color = "black") +
scale_x_log10() +
ggtitle("Movies")
set.seed(755)
test_index <- createDataPartition(y = movielens$rating, times = 1,
p = 0.2, list = FALSE)
train_set <- movielens[-test_index,]
test_set <- movielens[test_index,]
test_set <- test_set %>%
semi_join(train_set, by = "movieId") %>%
semi_join(train_set, by = "userId")
# Root-mean-squared error between observed and predicted ratings.
#
# @param true_ratings Numeric vector of observed values.
# @param predicted_ratings Numeric vector of predictions, same length as
#   `true_ratings` (recycling applies otherwise, as in the usual R rules).
# @return A single numeric: the square root of the mean squared residual.
RMSE <- function(true_ratings, predicted_ratings){
  residuals <- true_ratings - predicted_ratings
  sqrt(mean(residuals ^ 2))
}
##Movielens##
movielens %>% group_by(movieId) %>% summarise(m=count(userId)) %>% top_n(10)
year_wise_ratings <- movielens %>% group_by(year) %>% summarise(ratings=length((rating))) %>% arrange(desc(ratings))
movies_after_1993 <- movielens %>%
filter(year >= 1993) %>%
group_by(title) %>%
summarize(n = n(), years = 2018 - first(year),
rating = mean(rating)) %>%
mutate(rate = n/years) %>%
top_n(25, rate) %>%
arrange(desc(rate))
movies_after_1993 %>% ggplot(aes(x=rate,y=rating)) + geom_point() + geom_smooth() + geom_hline(aes(yintercept=4.01))
time_rating <- movielens %>% mutate(date=as_datetime(timestamp),newdate=round_date(date,unit="day")) %>% group_by(newdate) %>%
summarise(rt = mean(rating))
time_rating %>% ggplot(aes(x=newdate,y=rt)) + geom_point() + geom_smooth()
##Ratings by genre#
movielens %>% select(-date) %>% group_by(genres) %>% summarise(n=n(),m=mean(rating),s=sd(rating)) %>% filter(n>1000) %>%
arrange(desc(m))
#Regularization##
set.seed(1986, sample.kind="Rounding") # if using R 3.6 or later
n <- round(2^rnorm(1000, 8, 1))
set.seed(1, sample.kind="Rounding") # if using R 3.6 or later
mu <- round(80 + 2*rt(1000, 5))
range(mu)
schools <- data.frame(id = paste("PS",1:1000),
size = n,
quality = mu,
rank = rank(-mu))
set.seed(1, sample.kind="Rounding") # if using R 3.6 or later
mu <- round(80 + 2*rt(1000, 5))
scores <- sapply(1:nrow(schools), function(i){
scores <- rnorm(schools$size[i], schools$quality[i], 30)
scores
})
schools <- schools %>% mutate(score = sapply(scores, mean))
overall <- mean(sapply(scores, mean))
alphas <- seq(10,250)
rmse <- sapply(alphas, function(alpha){
score_reg <- sapply(scores, function(x) sum(x)/(length(x)+alpha))
sqrt(mean((score_reg - schools$quality)^2))
})
schools %>% mutate(score_reg = score_reg) %>%
top_n(10, score_reg) %>% arrange(desc(score_reg))
##Matrix Factorization##
set.seed(1987,sample.kind = "Rounding")
#if using R 3.6 or later, use `set.seed(1987, sample.kind="Rounding")` instead
n <- 100
k <- 8
Sigma <- 64 * matrix(c(1, .75, .5, .75, 1, .5, .5, .5, 1), 3, 3)
m <- MASS::mvrnorm(n, rep(0, 3), Sigma)
m <- m[order(rowMeans(m), decreasing = TRUE),]
y <- m %x% matrix(rep(1, k), nrow = 1) + matrix(rnorm(matrix(n*k*3)), n, k*3)
colnames(y) <- c(paste(rep("Math",k), 1:k, sep="_"),
paste(rep("Science",k), 1:k, sep="_"),
paste(rep("Arts",k), 1:k, sep="_"))
# Draw a numeric matrix as a heat map with row 1 at the TOP (plain image()
# puts row 1 at the bottom), using a fixed 9-step red/blue diverging palette,
# grid lines between cells, and the matrix column names along the x axis.
#
# x    - numeric matrix to display
# zlim - numeric range mapped onto the colour scale (defaults to range(x))
# ...  - further graphical parameters forwarded to image()
my_image <- function(x, zlim = range(x), ...){
  # RdBu palette reversed so that low values map to blue, high to red
  colors = rev(RColorBrewer::brewer.pal(9, "RdBu"))
  cols <- 1:ncol(x)
  rows <- 1:nrow(x)
  # rev(rows) flips the matrix vertically; t() matches image()'s (x, y)
  # orientation; drop = FALSE keeps single-row input a matrix
  image(cols, rows, t(x[rev(rows),,drop=FALSE]), xaxt = "n", yaxt = "n",
        xlab="", ylab="", col = colors, zlim = zlim, ...)
  # grid lines on the half-cell boundaries
  abline(h=rows + 0.5, v = cols + 0.5)
  # column labels, drawn perpendicular to the axis (las = 2)
  axis(side = 1, cols, colnames(x), las = 2)
}
my_image(y)
my_image(cor(y), zlim = c(-1,1))
range(cor(y))
axis(side = 2, 1:ncol(y), rev(colnames(y)), las = 2)
s <- svd(y)
ss_y <- sapply(1:ncol(y),function(x){
z <- as.numeric(y[,x])
sum(z^2)
})
ss_yv <- apply((y%*%s$v)^2, 2, sum)
d <- data.frame(y=ss_y,yv=ss_yv,c=1:ncol(y))
d %>% ggplot()+geom_point(aes(x=c,y,col="Red"))+geom_point(aes(x=c,yv))
##Course 9##
##########################################################
# Create edx set, validation set (final hold-out test set)
##########################################################
# Note: this process could take a couple of minutes
if(!require(tidyverse)) install.packages("tidyverse", repos = "http://cran.us.r-project.org")
if(!require(caret)) install.packages("caret", repos = "http://cran.us.r-project.org")
if(!require(data.table)) install.packages("data.table", repos = "http://cran.us.r-project.org")
library(tidyverse)
library(caret)
library(data.table)
# MovieLens 10M dataset:
# https://grouplens.org/datasets/movielens/10m/
# http://files.grouplens.org/datasets/movielens/ml-10m.zip
dl <- tempfile()
download.file("http://files.grouplens.org/datasets/movielens/ml-10m.zip", dl)
ratings <- fread(text = gsub("::", "\t", readLines(unzip(dl, "ml-10M100K/ratings.dat"))),
col.names = c("userId", "movieId", "rating", "timestamp"))
movies <- str_split_fixed(readLines(unzip(dl, "ml-10M100K/movies.dat")), "\\::", 3)
colnames(movies) <- c("movieId", "title", "genres")
# if using R 4.0 or later:
movies <- as.data.frame(movies) %>% mutate(movieId = as.numeric(movieId),
title = as.character(title),
genres = as.character(genres))
movielens <- left_join(ratings, movies, by = "movieId")
# Validation set will be 10% of MovieLens data
set.seed(1, sample.kind="Rounding") # if using R 3.5 or earlier, use `set.seed(1)`
test_index <- createDataPartition(y = movielens$rating, times = 1, p = 0.1, list = FALSE)
edx <- movielens[-test_index,]
temp <- movielens[test_index,]
# Make sure userId and movieId in validation set are also in edx set
validation <- temp %>%
semi_join(edx, by = "movieId") %>%
semi_join(edx, by = "userId")
# Add rows removed from validation set back into edx set
removed <- anti_join(temp, validation)
edx <- rbind(edx, removed)
rm(dl, ratings, movies, test_index, temp, movielens, removed)
nrow(edx)
ncol(edx)
unique(edx$rating)
table(factor(edx$rating))
length(unique(factor(edx$movieId)))
length(unique(factor(edx$userId)))
edx %>% filter(genres %in% c("Drama","Comedy","Thriller","Romance")) %>%
group_by(genres) %>%summarise(n=length(movieId))
edx %>% filter(genres=="Drama") %>% select(rating)%>%
nrow()
edx %>% group_by(title) %>% summarise(r=length(userId)) %>%
arrange(desc(r)) %>% head()
rats <- edx %>% mutate(ratings=rating) %>% select(ratings) %>%
group_by(ratings) %>% summarise(n=n())
ra <- data.frame(rats)
ra %>% filter(ratings != c(1.0,2.0,3.0,4.0,5.0)) %>% sum(n)
|
5e60cfd42d7f1e1b4e5d5c50a21434fe64170a3f
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/Plasmidprofiler/examples/save_files.Rd.R
|
4611d8d99d29275eea10e3bdbc3920a7854b5c38
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 206
|
r
|
save_files.Rd.R
|
library(Plasmidprofiler)
### Name: save_files
### Title: Save Files
### Aliases: save_files
### ** Examples
## Not run:
##D save_files(report, plot.png=1, report.csv=1, webpage=NA)
## End(Not run)
|
8a3650bc048fed8147d0d7a3d29436049a1ca765
|
52befa499933e19c59748f938829ac855d05b8f5
|
/ms/figures/script_figures_analytical.R
|
7f4ac07bdd062ec83406229571d5abf2937e86f1
|
[] |
no_license
|
DominiqueGravel/ms_diversification
|
664a642ca6a7687939a2a5bd665497b7b4d9b30e
|
3acb5adace062ef83f2ee4294bfed14d01b8aa40
|
refs/heads/master
| 2021-09-14T17:32:59.890887
| 2018-04-20T18:57:06
| 2018-04-20T18:57:06
| 37,342,956
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,671
|
r
|
script_figures_analytical.R
|
# Speciation rate as a function of interaction strength I: a baseline u0
# plus a component u1 that decays exponentially with I at rate a_u.
# Vectorized over I (and over the other arguments via the usual R rules).
u <- function(u0, u1, a_u, I) {
  decay <- exp(-a_u * I)
  u0 + u1 * decay
}
# First extinction component: starts at 1 when I = 0 and decays
# exponentially (rate a_e1) toward the asymptotic floor e0.
e1 <- function(e0, a_e1, I) {
  (1 - e0) * exp(-a_e1 * I) + e0
}
# Second extinction component: rises from e1 at I = 0 toward the ceiling
# 1 - e2 as I grows.  NOTE: the parameters e1 and e2 intentionally shadow
# the functions of the same name defined above; here they are plain
# numeric rates (kept for backward compatibility with named-argument calls).
e2 <- function(e1, e2, a_e2, I) {
  ceiling_rate <- 1 - e2
  ceiling_rate - (ceiling_rate - e1) * exp(-a_e2 * I)
}
C = 0.1
dev.new(width = 20, height = 8)
#par(mar = c(5,6,2,1),mfrow =c(3,2))
par(mar = c(5,6,2,1),mfrow = c(1,2))
#-----------------------------------------------
# FIGURE 1: Competition
C = 0.1
R = seq(0, 250, 1)
I = C*R
S = u(u0 = 0, u1 = 0.5, a_u = 0.1, I)
E = e1(e0 = 0, a_e1 = Inf, I) + e2(e1 = 0.1, e2 = 0.8, a_e2 = 0.1, I)
# Panel A
plot(R, S, type = "l", ylim = c(0,0.8), xlab = "Species richness", ylab =
"Rate", col = "darkblue", lwd = 2.5, cex.lab = 2.5, cex.axis = 2)
lines(R, E, col = "darkred",lwd = 2.5)
abline(v = 110, lty = 3, lwd = 1)
arrows(x0 = 5, y0 = 0.55 , x1 = 105, y1 = 0.55, length = 0.25, )
text(x = 110, y = 0.6, cex = 2.5, labels = "Net diversification", pos = 2)
arrows(x0 = 215, y0 = 0.55, x1 = 115, y1 = 0.55, length = 0.25, )
text(x = 125, y = 0.6, cex = 2.5, labels = "Net extinction", pos = 4)
legend("topright",bty = "n", legend = c("Speciation", "Extinction"), col =
c("darkblue","darkred"), lty = 1, lwd = 2.5, cex = 2)
# Panel B
plot(R, (S - E), type = "l", xlab = "Species richness", ylab
= "Per species diversification rate", lwd = 2.5, cex.lab = 2.5, cex.axis = 2)
abline(h = 0, lty = 3, lwd = 1)
abline(v = 110, lty = 3, lwd = 1)
dev.copy2pdf(file = 'competition.pdf')
#-----------------------------------------------
# FIGURE 2: Mutualism
par(mar = c(5,6,2,1),mfrow = c(1,2))
C = 0.1
R = seq(0, 250, 1)
I = C*R
S = u(u0 = 0.5, u1 = 0.5, a_u = 0.1, I)
E = e1(e0 = 0.2, a_e1 = 0.2, I) + e2(e1 = 0, e2 = 0, a_e2 = 0, I)
# Panel A
plot(R, S, type = "l", ylim = c(0,1), xlab = "Species richness", ylab =
"Rate", col = "darkblue", lwd = 2.5, cex.lab = 2.5, cex.axis = 2)
lines(R, E, col = "darkred",lwd = 2.5)
arrows(x0 = 85, y0 = 0.75 , x1 = 200, y1 = 0.75, length = 0.25, )
text(x = 195, y = 0.8, cex = 2.5, labels = "Net diversification", pos = 2)
legend("topright",bty = "n", legend = c("Speciation", "Extinction"), col =
c("darkblue","darkred"), lty = 1, lwd = 2.5, cex = 2)
# Panel B
plot(R, (S - E), type = "l", xlab = "Species richness", ylab
= "Per species diversification rate", lwd = 2.5, cex.lab = 2.5, cex.axis = 2)
abline(h = 0, lty = 3, lwd = 1)
dev.copy2pdf(file = 'mutualism.pdf')
#-----------------------------------------------
# FIGURE 3: Predation
par(mar = c(5,6,2,1),mfrow = c(1,2))
C = 0.1
R = seq(0, 250, 1)
I = C*R
S = u(u0 = 0.6, u1 = -0.4, a_u = 0.1, I)
E = e1(e0 = 0, a_e1 = 0.5, I) + e2(e1 = 0, e2 = 0.1, a_e2 = 0.05, I)
# Panel A
plot(R, S, type = "l", ylim = c(0,1), xlab = "Species richness", ylab =
"Rate", col = "darkblue", lwd = 2.5, cex.lab = 2.5, cex.axis = 2)
lines(R, E, col = "darkred",lwd = 2.5)
abline(v = 180, lty = 3, lwd = 1)
abline(v = 35, lty = 3, lwd = 1)
arrows(x0 = 30, y0 = 0.0 , x1 = 0, y1 = 0.0, length = 0.25, )
text(x = 35, y = 0.1, cex = 2.5, labels = "Net
extinct.", pos = 2)
arrows(x0 = 40, y0 = 0.0 , x1 = 175, y1 = 0.0, length = 0.25, )
text(x = 165, y = 0.1, cex = 2.5, labels = "Net diversification", pos = 2)
arrows(x0 = 250, y0 = 0.0 , x1 = 185, y1 = 0.0, length = 0.25, )
text(x = 190, y = 0.1, cex = 2.5, labels = "Net
extinction", pos = 4)
legend("topright",bty = "n", legend = c("Speciation", "Extinction"), col =
c("darkblue","darkred"), lty = 1, lwd = 2.5, cex = 2)
# Panel B
plot(R, (S - E), type = "l", xlab = "Species richness", ylab
= "Per species diversification rate", lwd = 2.5, cex.lab = 2.5, cex.axis = 2)
abline(h = 0, lty = 3, lwd = 1)
abline(v = 180, lty = 3, lwd = 1)
abline(v = 35, lty = 3, lwd = 1)
dev.copy2pdf(file = 'predation.pdf')
|
3a75a95893670973a9c39ae4a3e63d8ae037c023
|
7db4fbc1ad431152941743b8c7eae53ecca1ce51
|
/ui.R
|
3f77eb58004283cfbed2f0de49e96d26b3d6b59a
|
[] |
no_license
|
macadamia/ChillCalculator
|
1cdde28e6dfef6b5db6ab6b20176ac4b366189cb
|
c66b414720bd3dae06ebe8f4de992a21e54712dc
|
refs/heads/master
| 2021-01-11T18:59:57.556729
| 2018-06-06T03:59:47
| 2018-06-06T03:59:47
| 79,286,488
| 1
| 0
| null | 2018-05-24T05:51:15
| 2017-01-18T00:22:33
|
CSS
|
UTF-8
|
R
| false
| false
| 12,194
|
r
|
ui.R
|
#Chill Units Calculator
#ui.R
shinyUI(
fluidPage(
includeCSS( "www/assets/v3/css/qg-main.css"),
tags$head(
tags$script(src = "js.cookie.js")
),
useShinyjs(),
extendShinyjs('www/myJSCode.js'),
tags$head(includeScript("google_analytics.js"),
tags$head(tags$style('.headerRow{background-color: #4E7707;}')),
tags$div(id="fb-root"),
#tags$meta(http-equiv="Content-Type", content="text/html; charset=utf-8"),
tags$meta(name="description", content="calculates the chill and heating degrees for plant growth"),
tags$meta(name="keywords", content="chill, chill portions, chilling units, chill hours, growing degree days, growing degree hours, temperature, Australia, fruit, nut, trees "),
tags$link(rel="schema.DCTERMS", href="http://purl.org/dc/terms/"),
tags$link(rel="schema.AGLSTERMS", href="http://www.agls.gov.au/agls/terms/"),
tags$meta(name="DCTERMS.creator", scheme="AGLSTERMS.GOLD", content="c=AU; o=The State of Queensland; ou=DAF; ou=UNIT H&FS"),
tags$meta(name="DCTERMS.publisher", scheme="AGLSTERMS.AglsAgent", content="corporateName=The State of Queensland; jurisdiction=Queensland"),
tags$meta(name="DCTERMS.created", content="2017-7-12"),
tags$meta(name="DCTERMS.modified", content="2017-7-12"),
tags$meta(name="DCTERMS.title", content="Chill and Heat Calculator"),
tags$meta(name="DCTERMS.alternative", content="Add your heading"),
tags$meta(name="DCTERMS.description", content="DESCRIPTION"),
tags$meta(name="DCTERMS.subject", scheme="AGLSTERMS.APAIS", content="SUBJECT"),
tags$meta(name="AGLSTERMS.function", scheme="AGLSTERMS.AGIFT", content="FUNCTION"),
tags$meta(name="DCTERMS.type", scheme="DCTERMS.DCMIType", content="Text"),
tags$meta(name="AGLSTERMS.documentType", scheme="AGLSTERMS.agls-document", content="guidelines"),
tags$meta(name="DCTERMS.audience", scheme="AGLSTERMS.agls-audience", content=""),
tags$meta(name="DCTERMS.jurisdiction", scheme="AGLSTERMS.AglsJuri", content="Queensland"),
tags$meta(name="DCTERMS.license", scheme="DCTERMS.URI", content="https://creativecommons.org/licenses/by/4.0/"),
#tags$meta(http-equiv="X-UA-Compatible", content="IE=edge"),
tags$meta(name="viewport", content="width=device-width, initial-scale=1"),
tags$link(rel="shortcut icon", href="assets/v3/images/favicon.ico"),
tags$script("https://ajax.googleapis.com/ajax/libs/jquery/2.1.3/jquery.min.js"),
tags$link(href="//fonts.googleapis.com/css?family=Lato:100,300,400,700,900,100italic,300italic,400italic,700italic,900italic", rel="stylesheet", type="text/css"),
tags$noscript(
tags$link(href="assets/v3/css/qg-noscript.css", rel="stylesheet", type="text/css", media="all")
),
tags$link(href="assets/v3/css/qg-documentation.css", rel="stylesheet", type="text/css", media="all"),
tags$script("https://ajax.googleapis.com/ajax/libs/jquery/2.1.3/jquery.min.js"),
tags$script("assets/v3/lib/ext/butterfly/jquery.resize-events.js"),
tags$script("assets/v3/lib/ext/butterfly/jquery.history.js"),
tags$script("assets/v3/lib/ext/butterfly/jquery.butterfly.js"),
tags$script("assets/v3/js/qg-main.js")
),
fluidRow(
column(width=12,uiOutput("Logos"))
),
fluidRow(class = 'headerRow',
column(width=12,uiOutput("Share"))
),
fluidRow(class = 'headerRow',
column(width=3, align = 'left', uiOutput("SelectedLocation")),
column(width=2, align = 'center',uiOutput("yearOutput")),
column(width=2, align = 'center',uiOutput("dateStart")),
column(width=2, align = 'center',uiOutput("dateEnd")),
column(width=3, align = 'center',uiOutput("baseTemp"))
),
tabsetPanel(id='tabs',
tabPanel("Introduction",
includeHTML('Introduction.html')
),
tabPanel("Instructions",
includeHTML('HowToUse.html')
),
tabPanel("Upload",
sidebarLayout(
# Sidebar panel for inputs ----
sidebarPanel(
# Input: Select a file ----
fileInput("file1", "Choose CSV File",
multiple = FALSE,
accept = c("text/csv",
"text/comma-separated-values,text/plain",
".csv")),
#use grower input or not
checkboxInput("source", "Use This Data Set for Calculations", FALSE),
# Input: Checkbox if file has header ----
checkboxInput("header", "My Data Has a Header", TRUE),
# # Input: Select separator ----
# radioButtons("sep", "Separator",
# choices = c(Comma = ",",
# Semicolon = ";",
# Tab = "\t"),
# selected = ","),
#Date format
radioButtons("dateformat", "Date Format",
choices = c("dd/mm/yyyy hh:mm" = "%d/%m/%Y %H:%M",
"dd/mm/yy hh:mm" = "%d/%m/%y %H:%M",
"dd-mm-yyyy hh:mm" = "%d-%m-%Y %H:%M",
"dd-mm-yy hh:mm" = "%d-%m-%y %H:%M"),
selected = "%d/%m/%Y %H:%M"),
# Horizontal line ----
tags$hr(),
# Input: Select number of rows to display ----
radioButtons("disp", "Display",
choices = c(Head = "head",
Tail = "tail",
All = "all"),
selected = "head")
),
# Main panel for displaying outputs ----
mainPanel(
# Output: Data file ----
tableOutput("contents")
)
)
),
tabPanel("Locations", value='Locations',busyIndicator("Calculation In progress",wait = 0),
fluidPage(
fluidRow(
column(width=2,
selectInput("Region", label = h4("Select Region"),choices = list("Granite Belt" = 1, "NSW" = 2, "Victoria" = 3, 'Tasmania' = 4, 'SA' = 5, 'southern WA' = 6), selected = 1),
textInput("Location", label = h4("Search For Station"),value=''),
uiOutput("BuildStnLocations"),
textInput("Town", label = h4("Town Starts With..."),value=''),
htmlOutput("StationInfo"),
uiOutput("NTowns")
),
column(width=4, align = 'left',
checkboxInput("KeepLocation","Remember this station",F,width='100%'),
actionButton("recentre","Recentre")
),
column(width=9,
leafletOutput("map", width='100%',height='600px' )
)
)
)
),
tabPanel("Chill", value='Chill',busyIndicator("Calculation In progress",wait = 0),
fluidPage(
fluidRow(
column(width=6,
radioButtons("cType", inline = T, label = h4("Chill"), choices = list("Portions" = 1, "Hours" = 2, "Units" = 3),selected = 1)
)
),#fluidRow
fluidRow(
plotlyOutput("chillPlot")
)
) #fluidPage
),
tabPanel("Growing Degrees", value ='Growing Degrees', busyIndicator("Calculation In Progress",wait = 0),
fluidPage(
fluidRow(
column(width=3,
radioButtons("gType", label = h4("Growing Degree"), choices = list("Hours" = 1, "Days" = 2),inline = T,selected = 2)
)
),
fluidRow(
plotlyOutput("GDHPlot")
)
)#fluidPage
),
tabPanel("Temperature/Rainfall", value ='Temperature', busyIndicator("Calculation In Progress",wait = 0),
fluidPage(
fluidRow(
plotlyOutput("TempPlot")
),
fluidRow(
plotlyOutput("RainPlot")
)
)
),
tabPanel('Details',
fluidRow(
column(width=12,
includeHTML('Additional.html'),
h4("References"),
helpText("Anderson, J., Richardson, E., & Kesner, C. (1986). Validation of chill unit and flower bud phenology models for 'Montmorency' sour cherry. Acta Horticulturae, 184, 74-78."),
helpText("Bennett JP (1949) Temperature and bud rest period. Calif Agric 3 (11), 9-12"),
helpText('Darbyshire, R., K. Pope and I. Goodwin (2016). An evaluation of the chill overlap model to predict flowering time in apple tree. Scientia Horticulturae 198: 142-149.'),
helpText("Erez A, Fishman S, Linsley-Noakes GC, Allan P (1990). The dynamic model for rest completion in peach buds. Acta Hortic 276, 165-174"),
helpText('Ghariani, K. and R. L. Stebbins (1994). Chilling requirements of apple and pear cultivars. Fruit Varieties Journal 48: 215.'),
helpText("Luedeling E, Kunz A and Blanke M, 2013. Identification of chilling and heat requirements of cherry trees - a statistical approach. International Journal of Biometeorology 57,679-689."),
helpText("Luedeling, E., 2017. chillR: Statistical methods for phenology analysis in temperate fruit trees. R package version 0.66, URL http://cran.r-project.org/web/packages/chillR/."),
helpText("Richardson EA, Seeley SD, Walker DR (1974) A model for estimating the completion of rest for Redhaven and Elberta peach trees. HortScience 9(4), 331-332"),
helpText("Weinberger JH (1950) Chilling requirements of peach varieties. Proc Am Soc Hortic Sci 56, 122-128")
)
)
),
tabPanel("About & Legal",
h4("About This Site"),
helpText("This site is being developed to deliver up-to-date and historical information on the accumulation of chill and growing degree days."),
helpText("This work was undertaken for the project AP12029 Understanding apple and pear production systems in a changing climate funded by Horticulture Innovation Australia Limited using the Apple and Pear Industry levy and funds from the Australian Government. Additional financial support was contributed by Department of Agriculture and Fisheries (Qld), Department of Economic Development, Jobs, Transport and Resources (Vic), Department of Agriculture and Food Western Australia and Pomewest (WA)."),
helpText('Based on or contains data provided by the State of Queensland (Department of Science, Information Technology and Innovation) [2016]. In consideration of the State permitting use of this data you acknowledge and agree that the State gives no warranty in relation to the data (including accuracy, reliability, completeness, currency or suitability) and accepts no liability (including without limitation, liability in negligence) for any loss, damage or costs (including consequential damage) relating to any use of the data. Data must not be used in breach of the privacy laws.'),
hr(),
helpText("If you would like further information please contact: "),
HTML("<a href=mailto:heidi.parkes@daf.qld.gov.au?subject=Chill%20Calculator>Dr Heidi Parkes, Qld Dept. of Agriculture and Fisheries</a>"),
helpText("For Technical issues: "),
HTML("<a href=mailto:Neil.White@daf.qld.gov.au?subject=Chill%20Calculator>Dr Neil White, Qld Dept. of Agriculture and Fisheries</a> <br/><br/>"),
helpText("© State of Queensland, Department of Agriculture and Fisheries and Horticulture Innovation Australia Ltd, 2018.")
),
tabPanel("News",
includeHTML('News.html')
)
) #tabset
)#fluidPage
)
|
9029e1b4f0439a50accc49382345d9c08ffd77ea
|
7a74dcdfcbfa695a44188c0144a972e76e36c785
|
/app.R
|
cc496f087c944ba2317dc0e2cb5a3c08808d4c72
|
[] |
no_license
|
jhk0530/dynamicUI
|
aacf6451d1e3e52325c18976e283ae497263ecba
|
e5373b3d683df69920fbf711609ebd8b4581a714
|
refs/heads/master
| 2021-01-06T10:36:14.444129
| 2020-02-18T07:36:56
| 2020-02-18T07:36:56
| 241,299,073
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 612
|
r
|
app.R
|
library(shiny)
library(shinyjs)
# Page layout for the dynamic-UI demo: a numeric input (1..5) selecting how
# many sliders to generate, a placeholder filled in server-side, and the
# button that triggers the generation.
ui = function(){
  fluidPage(
    shinyjs::useShinyjs(),  # required once so shinyjs::hide() works in server
    numericInput('num', 'num label', value = 1, min = 1, max = 5),
    uiOutput('temp'),       # dynamic slider container rendered by the server
    actionButton('btn', 'btn label')
  )
}
# Server: on the first button press, hide the configuration inputs and
# render `num` sliders in the `temp` placeholder.
server = function(input, output, session){
  output$temp = renderUI({
    # Before the button has ever been clicked its value is 0 -> render nothing
    if(input$btn==0){return(NULL)}
    # Read input$btn so this renderUI re-runs on every click
    input$btn
    # One-shot UI: remove the controls once the sliders are generated
    shinyjs::hide('num')
    shinyjs::hide('btn')
    cnt = as.integer(input$num)
    # Build cnt sliders with ids sld1..sldN; each gets its own range/value
    lapply(1:cnt, function(i){
      sliderInput(inputId = paste0('sld',i), label = paste0('sldLab',i), min = i, max = i+5, value = i+1)
    })
  })
}
shinyApp(ui,server)
|
684b09aa6f557db735c56c244664f7c81329a0b9
|
9719c43e784f48e79c81c7151ada584c105bbe11
|
/R/ffsea-shiny.R
|
c08e19fd582a57a69bcb96407e31a6d4e2c9e0b0
|
[
"Apache-2.0"
] |
permissive
|
edavidaja/FacileAnalysis
|
ade3de9b07fb4d614a04dce7783843dfc57d5ce4
|
8f96cdf41904d606f81294f4ff169c658113dd86
|
refs/heads/main
| 2023-09-04T21:15:56.014307
| 2021-11-17T20:35:35
| 2021-11-17T20:35:35
| 430,765,832
| 0
| 0
|
NOASSERTION
| 2021-11-22T15:38:31
| 2021-11-22T15:38:30
| null |
UTF-8
|
R
| false
| false
| 11,092
|
r
|
ffsea-shiny.R
|
#' Full interactive executation of GSEA, soup to nuts.
#'
#' For now, feature set enrichment analysis are only performed downstream of
#' a `FacileAnalysisResult`. This requires that we have an ffsea method defined
#' for the specific class of the result, ie. `ffsea.FacilePcaAnalysisResult`.
#'
#' @rdname interactive-ffsea
#'
#' @export
#' @importFrom shiny reactive
#'
#' @param x A FacileAnalysisResult that has an implemented `ffsea.*` method
#' @param gdb A `GeneSetDb` to use for the FSEA.
#' @examples
#' gdb <- sparrow::exampleGeneSetDb()
#' dge.crc <- FacileData::exampleFacileDataSet() %>%
#' FacileData::filter_samples(indication == "CRC") %>%
#' flm_def(covariate = "sample_type", numer = "tumor", denom = "normal",
#' batch = "sex") %>%
#' fdge(method = "voom")
#' if (interactive()) {
#' fres <- ffseaGadget(dge.crc, gdb)
#' }
# Launches the ffsea shiny gadget over an existing FacileAnalysisResult.
# Validates inputs, lifts the static GeneSetDb into a reactive (the module
# API expects reactives), and delegates all UI plumbing to frunGadget().
# Returns the analysis result ("faro") produced interactively.
ffseaGadget <- function(x, gdb, title = "Feature Set Enrichment Analysis",
                        height = 800, width = 1000, viewer = "browser", ...,
                        debug = FALSE) {
  assert_class(x, "FacileAnalysisResult")
  assert_class(gdb, "GeneSetDb")
  # Wrap the GeneSetDb in a reactive so the run module can treat it uniformly
  rgdb <- reactive(gdb)
  # retval = "faro" asks the gadget runner to hand back only the analysis
  # result object, not the full module state
  frunGadget(ffseaAnalysis, ffseaAnalysisUI, x, aresult = x, gdb = rgdb,
             title = title, height = height, width = width, viewer = viewer,
             ..., retval = "faro", debug = debug)
}
#' A moodule that encapsulates configuring and running ffsea, and a view to
#' interact with the results.
#'
#' @noRd
#' @export
# Shiny module server pairing the ffsea "run" (configure + execute) and
# "view" (interact with results) sub-modules, and revealing the view box
# only once a result exists.  Returns a classed container holding both
# sub-module values plus the module namespace.
ffseaAnalysis <- function(input, output, session, rfds, aresult, gdb, ...,
                          debug = FALSE) {
  res <- callModule(ffseaRun, "run", rfds, aresult, gdb, ..., debug = debug)
  # NOTE(review): view is wired with debug = FALSE rather than the module's
  # `debug` argument — looks intentional here, but confirm upstream
  view <- callModule(ffseaView, "view", rfds, res, ..., debug = FALSE)
  # Only show the view UI when there is an FfseaAnalysisResult ready
  observe({
    toggleElement("viewbox", condition = initialized(res))
  })
  vals <- list(
    main = res,
    view = view,
    .ns = session$ns)
  class(vals) <- c("ReactiveFacileFseaAnalysisResultContainer",
                   "ReactiveFacileAnalysisResultContainer")
  vals
}
#' @noRd
#' @export
#' @importFrom shinyjs hidden
# UI for the ffsea analysis module: a "run" configuration box plus a "view"
# results box that starts hidden (revealed by ffseaAnalysis() once a result
# is available).
#
# @param id module namespace id
# @param ... currently unused; kept for module-UI calling conventions
# @param debug forwarded to ffseaViewUI().  Bug fix: the original body
#   referenced `debug` without it being a parameter of this function, which
#   raised "object 'debug' not found" when the UI was built.
ffseaAnalysisUI <- function(id, ..., debug = FALSE) {
  ns <- NS(id)
  tagList(
    tags$div(
      id = ns("runbox"),
      box(title = "Configure Feature Set Analysis",
          width = 12,
          ffseaRunUI(ns("run")))),
    hidden(
      tags$div(
        id = ns("viewbox"),
        box(title = "Feature Set Analysis Results", solidHeader = TRUE,
            width = 12, ffseaViewUI(ns("view"), debug = debug)))))
}
# Run ==========================================================================
#' @section Custom Run Configuration:
#' The options presented to the user for running a feature set enrichment
#' analysis on a `FacilePcaAnalysisResult` will be different than the ones
#' made available for an enrichment analysis over a `FacileTtestAnalysisResult`
#' or even a `FacileAnovaAnalysisResult`.
#'
#' As such, each type of result should define a UI that accepts the appropriate
#' parameters for its corresponding `ffsea.*` method, and a server function
#' that extract and invokes the function.
#'
#' @rdname interactive-ffsea
#' @export
#' @importFrom shiny eventReactive withProgress
#' @importFrom shinyWidgets updatePickerInput
#' @importFrom sparrow GeneSetDb
#' @importFrom sparrow.shiny GeneSetDb.ReactiveGeneSetDb
#' @param aresult A `FacileAnalysisResult` that has a `ffsea.*` method defined.
#' @param gdb A `reactive(GeneSetDb)` object
# Server module that configures and executes a feature set enrichment
# analysis (ffsea) over a FacileAnalysisResult. Returns a reactive
# container whose `faro` element yields the enrichment result after the
# user presses "Run".
ffseaRun <- function(input, output, session, rfds, aresult, gdb, ...,
debug = FALSE) {
# Unwrap the underlying analysis result object once it is initialized.
ares <- reactive({
req(initialized(aresult))
faro(aresult)
})
# When the AnalysisResult changes, update the runopts UI based on the specific
# subclass of the AnalysisResult we have at play.
#
# Because the GeneSetDb knobs to subset collection and specify geneset size
# are buried in the run options menu, we pass this down to the runOpts
# module.
runopts <- callModule(ffseaRunOpts, "runopts", rfds, aresult = aresult,
gdb = gdb, ..., debug = debug)
# Updates the set-enrichment methods when the analysis result changes.
available_methods <- reactive({
ares. <- req(ares())
ffsea_methods(ares.)
})
# Repopulate the method picker, grouped by method type, whenever the
# available methods change.
observeEvent(available_methods(), {
methods <- req(available_methods())
choices <- split(methods[["method"]], methods[["type"]])
# Sub groups of length 1 break out of the grouping structure, one way
# to fix that if they exist is outlined here:
# https://github.com/rstudio/shiny/issues/1938#issuecomment-363942532
choices <- lapply(choices, function(xc) {
if (length(xc) == 1L) list(xc) else xc
})
# Only pre-select the first rank-based method; if no rank-based method is
# applicable (unlikely), this evaluates to NULL anyway.
selected <- choices[["ranks"]][1L]
# Rename the 'ora' group to the friendlier "over representation".
ora.idx <- which(names(choices) == "ora")
if (length(ora.idx)) names(choices)[ora.idx] <- "over representation"
opts <- NULL
updatePickerInput(session, "ffsea_methods", selected = selected,
choices = choices, choicesOpt = opts)
})
# The analysis is runnable once at least one method is selected and both
# the analysis result and the run-options module are initialized.
runnable <- reactive({
!unselected(input$ffsea_methods) &&
initialized(ares()) &&
initialized(runopts)
})
# Enable/disable the Run button to track runnability.
observe({
runnable. <- runnable()
ftrace("runnable: ", as.character(runnable.))
toggleState("runbtn", condition = runnable.)
})
observe({
ftrace("Run button pressed: ", as.character(input$runbtn))
})
# Execute ffsea (with a progress indicator) each time Run is pressed.
fsea_res <- eventReactive(input$runbtn, {
req(runnable())
gdb.args <- list(
x = ares(),
fsets = GeneSetDb(runopts$gdb),
# min.gs.size = runopts$gdb$min.gs.size(),
# max.gs.size = runopts$gdb$max.gs.size(),
#
# Note that we don't set min/max sizes anymore because they were
# pre-specified in the universe, and depending on what features exist
# in the object under test, further filtering might happen which may
# be surprising
min.gs.size = 2,
max.gs.size = Inf)
methods <- list(methods = input$ffsea_methods)
method.args <- runopts$args()
args <- c(gdb.args, methods, method.args)
withProgress({
do.call(ffsea, args)
}, message = "Running Enrichment Analysis")
})
vals <- list(
faro = fsea_res,
.ns = session$ns)
# TODO: fix class hierarchy
classes <- c("ReactiveFacileFseaAnalysisResult",
"ReactiveFacileAnalysisResult",
"FacileFseaAnalysisResult")
class(vals) <- classes
vals
}
#' @noRd
#' @export
#' @importFrom shinyWidgets pickerInput
#' @importFrom shiny actionButton column fluidRow tags tagList
# UI for the ffsea "run" submodule: a multi-select method picker, the
# run-options dropdown, and the Run button, laid out on one row.
ffseaRunUI <- function(id, ..., debug = FALSE) {
  ns <- NS(id)
  method_picker <- pickerInput(ns("ffsea_methods"), "Methods",
                               choices = NULL, multiple = TRUE)
  # Vertical padding aligns these widgets with the labeled picker.
  opts_menu <- tags$div(
    style = "padding-top: 1.7em",
    ffseaRunOptsUI(ns("runopts"), width = "350px"))
  run_button <- actionButton(ns("runbtn"), "Run", style = "margin-top: 1.7em")
  tagList(
    fluidRow(
      column(4, method_picker),
      column(1, opts_menu),
      column(1, run_button)))
}
# View =========================================================================
#' Responsible for the shiny view of a FacileFseaAnalysisResult
#'
#' @noRd
#' @export
#' @importFrom shiny observeEvent reactiveValues validate
#' @param rfds the reactive facile data store
#' @param ares The `FacileFseaAnalysisResult`
# Server module for viewing a FacileFseaAnalysisResult: an overview table,
# a filterable GSEA result table wired to a gene-set contrast viewer, and a
# "other gene sets containing the selected genes" table.
ffseaView <- function(input, output, session, rfds, aresult, ...,
debug = FALSE) {
# NOTE(review): `state` is initialized here but no code in this function
# ever assigns into it, so the `selected_features` / `selected_sets`
# reactives returned below always yield these empty tibbles.
state <- reactiveValues(
gsview_select = tibble(assay_name = character(), feature_id = character()),
set_select = tibble(collection = character(), name = character())
)
# Unwrap the underlying analysis result once it is initialized.
ares <- reactive({
req(initialized(aresult))
faro(aresult)
})
# Wrap the inner SparrowResult in the container sparrow.shiny modules expect.
mgc <- reactive({
res <- req(ares())
mgres <- result(res)
# TODO: validate() isn't working here, only works inside a
# `output$xxx <- render*({})` block, which is generating outpout into the
# shiny app
validate(
need(is(mgres, "SparrowResult"), "SparrowResult can't be found")
)
sparrow.shiny::SparrowResultContainer(mgres)
})
# Method/FDR filter shared by the result table and the summary views.
gs_result_filter <- callModule(
sparrow.shiny::mgResultFilter,
"mg_result_filter", mgc)
# Overview Tab ...............................................................
output$gseaMethodSummary <- renderUI({
mgc. <- req(mgc())
tagList(
tags$h4("GSEA Analyses Overview"),
sparrow.shiny::summaryHTMLTable.sparrow(
mgc.$sr, mgc.$methods,
gs_result_filter$fdr(),
p.col = "padj.by.collection")
)
})
# GSEA Results Tab ...........................................................
gs_viewer <- callModule(
sparrow.shiny::geneSetContrastView,
"geneset_viewer",
mgc, maxOptions = 500, feature_table_filter = "top", server = TRUE)
# A table of GSEA statistics/results for the given method and fdr threshold
# The table is wired to the gs_viewer so that row clicks can signal updates
# to the contrast viewer
gs_table_browser <- callModule(
sparrow.shiny::mgTableBrowser,
"mg_table_browser",
mgc,
method=gs_result_filter$method,
fdr=gs_result_filter$fdr,
server=TRUE)
# clicks on gsea result table update the contrast view
observeEvent(gs_table_browser$selected(), {
.mgc <- req(mgc())
geneset <- req(gs_table_browser$selected())
sparrow.shiny::updateActiveGeneSetInContrastView(session, gs_viewer,
geneset, .mgc)
})
# A table of other genesets that brushed genes in the contrast viewer
# belong to. This table is also wired to the contrast viewer, so that
# a click on a row of the table will update the contrast view, too.
other_genesets_gsea <- callModule(
sparrow.shiny::mgGeneSetSummaryByGene,
"other_genesets_gsea",
mgc, features = gs_viewer$selected,
method = gs_result_filter$method,
fdr = gs_result_filter$fdr)
# Expose the (currently always-empty, see NOTE above) selection state.
vals <- list(
selected_features = reactive(state$gsview_select),
selected_sets = reactive(state$set_select),
.ns = session$ns)
vals
}
#' @noRd
#' @export
#' @importFrom shiny fluidRow NS tags uiOutput wellPanel
# UI for the ffsea "view" submodule: an analyses-overview panel, a
# side-by-side contrast viewer + result-table row, and a bottom row for
# other gene sets containing the selected genes.
ffseaViewUI <- function(id, rmd = FALSE, ..., debug = FALSE) {
  ns <- NS(id)
  overview_panel <- tags$div(
    style = "margin-bottom: 10px; padding: 5px; background-color: white",
    title = "GSEA Results",
    uiOutput(ns("gseaMethodSummary")))
  contrast_col <- column(
    5, style = "padding: 0",
    sparrow.shiny::mgResultFilterUI(ns("mg_result_filter")),
    wellPanel(sparrow.shiny::geneSetContrastViewUI(ns("geneset_viewer"))))
  table_col <- column(
    7,
    sparrow.shiny::mgTableBrowserUI(ns("mg_table_browser")))
  other_sets_row <- fluidRow(
    column(
      12,
      tags$h4("Other Gene Sets with Selected Genes"),
      sparrow.shiny::mgGeneSetSummaryByGeneUI(ns("other_genesets_gsea"))))
  tagList(
    overview_panel,
    fluidRow(contrast_col, table_col),
    other_sets_row)
}
|
642c6446a0818817955daa528501804b8a0b430d
|
017414614b3d26ea10faa775fc3d4e630752ddd1
|
/man/tar_cue.Rd
|
a63d97f5b7ea3da502fc1d6a741cb39d84d708fb
|
[
"MIT"
] |
permissive
|
krlmlr/targets
|
4822815b7ae412af115296e5010de2edc61d1c50
|
a8cbf46ce5d2274bd623085be749af3059ce6083
|
refs/heads/main
| 2023-04-13T16:43:18.213413
| 2021-04-21T20:29:07
| 2021-04-21T20:29:07
| 360,385,953
| 1
| 0
|
NOASSERTION
| 2021-04-22T03:55:53
| 2021-04-22T03:55:53
| null |
UTF-8
|
R
| false
| true
| 3,932
|
rd
|
tar_cue.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tar_cue.R
\name{tar_cue}
\alias{tar_cue}
\title{Declare the rules that cue a target.}
\usage{
tar_cue(
mode = c("thorough", "always", "never"),
command = TRUE,
depend = TRUE,
format = TRUE,
iteration = TRUE,
file = TRUE
)
}
\arguments{
\item{mode}{Cue mode. If \code{"thorough"}, all the cues apply unless
individually suppressed. If \code{"always"}, then the target always
runs. If \code{"never"}, then the target does not run unless the
metadata does not exist or the last run errored.}
\item{command}{Logical, whether to rerun the target if command changed
since last time.}
\item{depend}{Logical, whether to rerun the target if the value of one
of the dependencies changed.}
\item{format}{Logical, whether to rerun the target if the user-specified
storage format changed. The storage format is user-specified through
\code{\link[=tar_target]{tar_target()}} or \code{\link[=tar_option_set]{tar_option_set()}}.}
\item{iteration}{Logical, whether to rerun the target if the user-specified
iteration method changed. The iteration method is user-specified through
\code{\link[=tar_target]{tar_target()}} or \code{\link[=tar_option_set]{tar_option_set()}}.}
\item{file}{Logical, whether to rerun the target if the file(s) with the
return value changed or at least one is missing.}
}
\description{
Declare the rules that mark a target as outdated.
}
\details{
\code{targets} uses internal metadata and special cues
to decide if a target is up to date.
A target is outdated if one of the following cues is met
(checked in the order given below). \code{tar_cue()} can activate
or suppress many of these cues. See the user manual for details.
\enumerate{
\item There is no metadata record of the target.
\item The target errored last run.
\item The target has a different class than it did before.
\item The cue mode equals \code{"always"}.
\item The cue mode does not equal \code{"never"}.
\item The \code{command} metadata field (the hash of the R command)
is different from last time.
\item The \code{depend} metadata field (the hash of the immediate upstream
dependency targets and global objects) is different from last time.
\item The storage format is different from last time.
\item The iteration mode is different from last time.
\item A target's file (either the one in \verb{_targets/objects/}
or a dynamic file) does not exist or changed since last time.
}
A target's dependencies can include functions, and these functions are
tracked for changes using a custom hashing procedure. When a function's
hash changes, the function is considered invalidated, and so are any
downstream targets with the \code{depend} cue turned on. The
\code{targets} package computes the hash of a function in the following way.
\enumerate{
\item Deparse the function with \code{targets:::deparse_safe()}. This
function computes a string representation of the function
that removes comments and standardizes whitespace so that
trivial changes to formatting do not cue targets to rerun.
\item Manually remove any literal pointers from the function string
using \code{targets:::mask_pointers()}. Such pointers arise from
inline compiled C/C++ functions.
\item Compute a hash on the preprocessed string above using
\code{targets:::digest_chr64()}.
}
Those functions themselves have dependencies, and those dependencies
are detected with \code{codetools::findGlobals()}.
Dependencies of functions may include other global functions or
global objects. If a dependency of a function is invalidated,
the function itself is invalidated, and so are any dependent
targets with the \code{depend} cue turned on.
}
\examples{
# The following target will always run when the pipeline runs.
x <- tar_target(x, download_data(), cue = tar_cue(mode = "always"))
}
\seealso{
Other targets:
\code{\link{tar_target_raw}()},
\code{\link{tar_target}()}
}
\concept{targets}
|
ab296ac9603a4a1a619baaf3f28803dcce52e764
|
6f4b03aff4fe842a787409355ca322f41e7ff42f
|
/Plot1.R
|
028098c528877c20d21f91740e210af54487445c
|
[] |
no_license
|
c0rn06/ExData_Plotting1
|
417f0c94fccb9dbe5d295f0a1752bc0f319f91b1
|
869f1274d6c8f20c23725bc8ee975cf27a93d1d1
|
refs/heads/master
| 2021-01-20T07:51:34.501291
| 2017-05-02T16:08:52
| 2017-05-02T16:08:52
| 90,052,422
| 0
| 0
| null | 2017-05-02T16:03:53
| 2017-05-02T16:03:52
| null |
UTF-8
|
R
| false
| false
| 671
|
r
|
Plot1.R
|
## Read the full household power-consumption file; "?" marks missing values.
ptable <- read.table(
  "household_power_consumption.txt",
  header = TRUE, sep = ";",
  colClasses = c("character", "character", "double", "double", "double",
                 "double", "double", "double", "numeric"),
  na.strings = "?")
## Keep only the two days of interest (dates are d/m/yyyy strings).
ptabledata <- ptable[ptable$Date %in% c("1/2/2007", "2/2/2007"), ]
## Render a red histogram of Global Active Power to a 480x480 PNG.
png("plot1.png", width = 480, height = 480)
hist(ptabledata$Global_active_power,
     col = "red",
     xlab = "Global Active Power (kilowatts)",
     main = "Global Active Power")
dev.off()
|
47756df4cdef3aeff65604f2955406da605470a0
|
ce3ffe0654f6754363e3cbbec727e7fd31fa4571
|
/r_tennis/tennis.r
|
f6f9967e0009f2a48db3dea6db5756a35788524d
|
[] |
no_license
|
rpln/dennis
|
e963db45c66efc716cb85d9488d8b81856048cf9
|
2cb1be34124f06042073dc8d4ce56ba7e0c5d0c5
|
refs/heads/master
| 2022-04-17T10:16:03.331087
| 2020-03-25T20:48:01
| 2020-03-25T20:48:01
| 180,239,017
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 782
|
r
|
tennis.r
|
library(R6)
# Euclidean distance between two coordinate vectors of equal length.
# NOTE(review): this masks stats::dist() once the file is sourced.
dist <- function(location_1, location_2) {
  delta <- location_1 - location_2
  sqrt(sum(delta^2))
}
# R6 class wrapping a Redis connection for exchanging JSON-encoded state.
# Fields:
#   verbose   - logging level; >= 2 logs every get/set to the console
#   r         - a Redis connection object exposing $SET()/$GET() methods
#               (presumably a redux-style client -- TODO confirm)
#   framerate - frames per second; stored here but not used by this class
Tennis <- R6Class("Tennis",
public = list(
verbose = NULL,
r = NULL,
framerate = NULL,
# Store the Redis connection, frame rate and verbosity level.
initialize = function(redis_conn, framerate, verbose = 1){
self$verbose = verbose
self$r = redis_conn
self$framerate = framerate
},
# setting and getting from redis
# Serialize `x` to JSON and store it under key `what`.
set = function(x, what){
self$r$SET(what, jsonlite::toJSON(x))
if(self$verbose>=2) cat("set", what, " ", x, "\n")
},
# Fetch key `what` and decode the stored JSON back into an R object.
get = function(what){
x <- jsonlite::fromJSON(self$r$GET(what))
if(self$verbose>=2) cat("get", what, " ", x, "\n")
return(x)
}
)
)
|
338611126e2a7093f6c983e306d24092045b5ff6
|
82ebca79ba8d951dcc595e9a7d75dbad04922434
|
/R/RSlo.R
|
a8c982dcce5b9bd859115b9961c66c028a96212f
|
[] |
no_license
|
cran/MGBT
|
a291743388abe24f6f5ce72a334b0dec1c191324
|
aa4356208f05d6036f3f425ec37c656da345bba0
|
refs/heads/master
| 2021-07-31T05:57:05.060470
| 2021-07-21T17:30:02
| 2021-07-21T17:30:02
| 218,823,042
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,099
|
r
|
RSlo.R
|
#COHN****|==|====-====|====-====|====-====|====-====|====-====|====-====|==////////
#COHN
#COHN Alternative test: RST from Rosner (1975a, TNMX 18(2) pp. 221-227)
#COHN (adjusted to look only for Low Outliers
#COHN p. 169
#COHN RST <- function(x,k,n=length(x)){
#COHN y=sort(x);(y[k]-mean(y[(k+1):(n-k)]))/sqrt(var(y[(k+1):(n-k)]))}
# Rosner-style statistic (RST, Rosner 1975a) adjusted to look only for LOW
# outliers: studentizes the r-th smallest value against the mean and
# standard deviation of the sample with its r smallest and r largest
# observations removed.
"RSlo" <- function(x, r, n = length(x)) {
  srt <- sort(x)
  core <- srt[(r + 1):(n - r)]
  (srt[r] - mean(core)) / sd(core)
}
# RST in COHN_MGBT_LowOutliers(R).txt is only used by testMGBvsN3()
# rstdat <-
# c(-1.056, -1.008, -0.340, +0.533, +0.109, +0.661, +1.638, -0.413, -0.667, -0.576,
# +1.207, -0.550, +2.290, +0.504, -2.215, +2.139, -0.048, -0.909, +0.967, -0.143)
# mean(rstdat) # 0.10615 matches Rosner (1975, table 11)
# sd(rstdat) # 1.14496 matches Rosner (1975, table 11)
#RST <- function(x, r=0) {
# n <- length(x); x <- sort(x)
# rsta <- function(k) sum(sapply((k+1):(n-k), function(i) x[i]/(n-2*k)))
# rstb <- function(k) sum(sapply((k+1):(n-k), function(i) (x[i]-rsta(k))^2/(n-2*k-1)))
# max(abs(x[r]-rsta(r))/rstb(r))
#}
|
5aa2b2fda1a97be4203a14a3c833aa586d1a5bbd
|
20ffda6c995a3c7fccb8015d4d972d7326c02576
|
/Getting_Trees_And_Data/R/functions.R
|
4c547bd46fdf7682f19195494a54c5f99e3dae1e
|
[] |
no_license
|
JBaily/phylometh_exercises
|
abc659ae8e8325ddab31565b63ed66d6c6a0d88c
|
fcd78aebbc7cd68bab7e84a8a577c5b0db82980c
|
refs/heads/main
| 2023-04-08T17:48:48.696199
| 2021-04-21T20:31:24
| 2021-04-21T20:31:24
| 332,876,886
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 184
|
r
|
functions.R
|
# Write three renderings of a phylogenetic tree to a single PDF at `file`:
# a default layout, a fan layout, and an unlabeled fan with thin edges.
# `phy` is presumably an ape "phylo" object (the type = "fan" and
# show.tip.label arguments belong to ape's plot.phylo) -- TODO confirm.
plot_tree <- function(phy, file) {
  pdf(file=file, width=20, height=20)
  plot(phy)
  plot(phy, type="fan")
  plot(phy, type="fan", show.tip.label=FALSE, edge.width=0.1)
  dev.off()  # close the PDF device so the file is flushed to disk
}
|
22d62d3cb0cf8c0246a40b601eab15b52dc0b3fe
|
2e2b4e090626d27adf065735751efe31248dcbbd
|
/BGLS.R
|
5d04be2554fb1493bd4628491e6c1a5e68bd5a5a
|
[] |
no_license
|
P-R-McWhirter/skycamT_variable_classification_functions
|
62742e89da01dee71bd690119f6079389721c7c4
|
ae32976f882a8fc999843571a4d0b82c5aed6538
|
refs/heads/master
| 2021-02-18T21:16:55.109810
| 2020-03-05T18:26:20
| 2020-03-05T18:26:20
| 245,238,114
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,283
|
r
|
BGLS.R
|
# Bayesian Generalized Lomb-Scargle (BGLS) periodogram for the sky-survey
# object nearest to (data$RA, data$DEC). Pulls the object's light curve
# from the `sktobs` ODBC database, scans trial periods in [plow, phigh]
# (days), plots the normalized probability vs. period, prints the top 50
# candidate periods, and returns the elapsed wall-clock time.
#
# NOTE(review): database credentials are hard-coded below -- these should
# be moved out of source control. The ODBC `channel` is also never closed
# (no odbcClose()).
BGLS <- function(data, radii = 0.001, lim = 100, plow = 0.5, phigh = 100, ofac = 1, spur = F, jit = 0){
starttime <- Sys.time()
ra <- as.numeric(as.vector(data$RA))
declin <- as.numeric(as.vector(data$DEC))
# Widen the RA search window by 1/cos(dec) so it spans `radii` degrees on-sky.
radra <- radii / abs(cos(declin*pi/180))
channel <- odbcConnect("sktobs", uid = "ross", pwd = "Ccqabw00")
objects <- sqlQuery(channel, paste("SELECT DISTINCT usnoref, entries, RA, DEClin FROM objdat WHERE RA BETWEEN '",
ra-radra, "' AND '", ra+radra, "' AND DEClin BETWEEN '", declin-radii, "' AND '",
declin+radii, "' AND entries > '", lim, "'", sep=""))
# Keep only the catalogue object geometrically closest to the query position.
geodist <- sqrt((declin - objects$DEClin)^2.0 + (ra - objects$RA)^2.0)
mindist <- which.min(geodist)
objects <- objects[mindist,]
info <- sqlQuery(channel, paste("SELECT MJD, Rcat, Rcaterr FROM obsdat100split WHERE usnoref = '",
objects$usnoref[1], "'", sep=""))
info <- unique(info)
#info <- info[which(info[,1] <= min(info[,1])+500),]
#info <- info[sample(1:nrow(info), nrow(info)),]
#info <- info[1:5000,]
#info <- info[order(info[,1]),]
t <- info$MJD
y <- info$Rcat
err <- info$Rcaterr
# Frequency grid: `ofac` oversamples n points between 1/phigh and 1/plow.
n_steps <- as.integer(ofac*length(t)*(1/plow - 1/phigh))
f <- seq(from = 1/phigh, to = 1/plow, length.out = n_steps)
f <- sort(f)
# Mean-subtract and rescale magnitudes (and errors) to max |y| = 1.
y <- y - mean(y)
nor <- max(abs(y))
y <- y / nor
err <- err / nor
err2 <- err*err + jit*jit  # add white-noise "jitter" in quadrature
if (spur == T){
y <- rep(0, length(t))  # spur mode: flat signal, probes the window function
}
w <- 1/err2
W <- sum(w)
bigY <- sum(w*y)
logp <- rep(0, length(f))
p <- rep(0, length(f))
constants <- rep(0, length(f))
exponents <- rep(0, length(f))
for (i in 1:length(f)){
wi <- 2 * pi * f[i]
# GLS phase offset that decouples the sine and cosine terms.
theta <- 0.5 * atan2(sum(w*sin(2*wi*t)), sum(w*cos(2*wi*t)))
st <- (wi * t) - theta
cosx <- cos(st)
sinx <- sin(st)
wcosx <- w*cosx
wsinx <- w*sinx
C <- sum(wcosx)
S <- sum(wsinx)
YCh <- sum(y*wcosx)
YSh <- sum(y*wsinx)
CCh <- sum(wcosx*cosx)
SSh <- sum(wsinx*sinx)
# K, L, M are the quadratic/linear/constant coefficients of the
# marginalized log-posterior; the degenerate branches guard divide-by-zero.
if (CCh != 0 & SSh != 0){
K <- (C*C*SSh + S*S*CCh - W*CCh*SSh)/(2*CCh*SSh)
L <- (bigY*CCh*SSh - C*YCh*SSh - S*YSh*CCh)/(CCh*SSh)
M <- (YCh*YCh*SSh + YSh*YSh*CCh)/(2*CCh*SSh)
constants[i] <- 1/(sqrt(CCh*SSh*abs(K)))
}
else if (CCh == 0){
K <- (S*S - W*SSh)/(2*SSh)
L <- (bigY*SSh - S*YSh)/(SSh)
M <- (YSh*YSh)/(2*SSh)
constants[i] <- 1/(sqrt(SSh*abs(K)))
}
else if (SSh == 0){
K <- (C*C - W*CCh)/(2*CCh)
L <- (bigY*CCh - C*YCh)/(CCh)
M <- (YCh*YCh)/(2*CCh)
constants[i] <- 1/(sqrt(CCh*abs(K)))
}
if (K > 0){
print("K is positive, This should not happen.")
}
# Probability in base-10 logs to avoid overflow, then exponentiated.
exponents[i] <- (M - ((L*L)/(4*K)))
logp[i] <- log10(constants[i]) + (exponents[i] * log10(exp(1)))
p[i] <- 10^(as.numeric(logp[i]))
if (p[i] < 0.00001){
p[i] <- 0  # clamp negligible probabilities to exactly zero
}
}
p <- p / max(p)  # normalize so the best period has probability 1
plot(1/f, p, type = "l")
print(1 / f[order(p, decreasing = T)][1:50])
print(p[order(p, decreasing = T)][1:50])
Sys.time() - starttime  # returned value: elapsed time
}
# BGLS periodogram over an in-memory light curve `ts` (columns: time,
# magnitude, error) instead of a database query. Same marginalized-posterior
# evaluation as BGLS() above. Returns (invisibly, because the last
# expression is an assignment) a list with the normalization constant, the
# frequency grid, and the normalized (log-)probabilities.
BGLS2 <- function(ts, plow = 0.5, phigh = 100, ofac = 1, dt = NULL, lent = NULL, spur = F, jit = 0, plot = F){
starttime <- Sys.time()
t <- ts[,1]
y <- ts[,2]
err <- ts[,3]
# `dt`/`lent` may be supplied by a caller (e.g. SBGLS2) so that partial
# light curves are evaluated on the full curve's frequency grid.
if (is.null(dt)){
dt <- max(t) - min(t)
}
if (is.null(lent)){
lent <- length(t)
}
n_steps <- as.integer(ofac*lent*(1/plow - 1/phigh))
f <- seq(from = 1/phigh, to = 1/plow, length.out = n_steps)
f <- sort(f)
# Mean-subtract and rescale to max |y| = 1; add jitter in quadrature.
y <- y - mean(y)
nor <- max(abs(y))
y <- y / nor
err <- err / nor
err2 <- err*err + jit*jit
if (spur == T){
y <- rep(0, length(t))
}
w <- 1/err2
W <- sum(w)
bigY <- sum(w*y)
logp <- rep(0, length(f))
p <- rep(0, length(f))
constants <- rep(0, length(f))
exponents <- rep(0, length(f))
# Per-frequency marginalized posterior; see comments in BGLS() for the
# meaning of theta and the K/L/M coefficients.
for (i in 1:length(f)){
wi <- 2 * pi * f[i]
theta <- 0.5 * atan2(sum(w*sin(2*wi*t)), sum(w*cos(2*wi*t)))
st <- (wi * t) - theta
cosx <- cos(st)
sinx <- sin(st)
wcosx <- w*cosx
wsinx <- w*sinx
C <- sum(wcosx)
S <- sum(wsinx)
YCh <- sum(y*wcosx)
YSh <- sum(y*wsinx)
CCh <- sum(wcosx*cosx)
SSh <- sum(wsinx*sinx)
if (CCh != 0 & SSh != 0){
K <- (C*C*SSh + S*S*CCh - W*CCh*SSh)/(2*CCh*SSh)
L <- (bigY*CCh*SSh - C*YCh*SSh - S*YSh*CCh)/(CCh*SSh)
M <- (YCh*YCh*SSh + YSh*YSh*CCh)/(2*CCh*SSh)
constants[i] <- 1/(sqrt(CCh*SSh*abs(K)))
}
else if (CCh == 0){
K <- (S*S - W*SSh)/(2*SSh)
L <- (bigY*SSh - S*YSh)/(SSh)
M <- (YSh*YSh)/(2*SSh)
constants[i] <- 1/(sqrt(SSh*abs(K)))
}
else if (SSh == 0){
K <- (C*C - W*CCh)/(2*CCh)
L <- (bigY*CCh - C*YCh)/(CCh)
M <- (YCh*YCh)/(2*CCh)
constants[i] <- 1/(sqrt(CCh*abs(K)))
}
if (K > 0){
print("K is positive, This should not happen.")
}
exponents[i] <- (M - ((L*L)/(4*K)))
logp[i] <- log10(constants[i]) + (exponents[i] * log10(exp(1)))
p[i] <- 10^(as.numeric(logp[i]))
if (p[i] < 0.00001){
p[i] <- 0
}
}
norm <- max(p)  # remember pre-normalization peak
p <- p / max(p)
if (plot == T){
plot((1/f), p, type = "l")
}
#print(logp[which.max(p)])
Sys.time() - starttime
sp.out <- list(norm = norm, f = f, p = p, logp = log10(p))
}
# BGLS periodogram (identical core to BGLS2) that additionally fits three
# weighted linear models at the end: a sinusoid at the best-found frequency,
# an intercept-only model, and a one-day-period sinusoid. Returns a list
# with the periodogram plus the three fitted models.
BGLSgen <- function(ts, plow = 0.5, phigh = 100, ofac = 1, dt = NULL, lent = NULL, spur = F, jit = 0, plot = F){
starttime <- Sys.time()
t <- ts[,1]
y <- ts[,2]
err <- ts[,3]
if (is.null(dt)){
dt <- max(t) - min(t)
}
if (is.null(lent)){
lent <- length(t)
}
n_steps <- as.integer(ofac*lent*(1/plow - 1/phigh))
f <- seq(from = 1/phigh, to = 1/plow, length.out = n_steps)
f <- sort(f)
y <- y - mean(y)
nor <- max(abs(y))
y <- y / nor
err <- err / nor
err2 <- err*err + jit*jit
if (spur == T){
y <- rep(0, length(t))
}
w <- 1/err2
W <- sum(w)
bigY <- sum(w*y)
logp <- rep(0, length(f))
p <- rep(0, length(f))
constants <- rep(0, length(f))
exponents <- rep(0, length(f))
# Same per-frequency marginalized posterior as BGLS()/BGLS2().
for (i in 1:length(f)){
wi <- 2 * pi * f[i]
theta <- 0.5 * atan2(sum(w*sin(2*wi*t)), sum(w*cos(2*wi*t)))
st <- (wi * t) - theta
cosx <- cos(st)
sinx <- sin(st)
wcosx <- w*cosx
wsinx <- w*sinx
C <- sum(wcosx)
S <- sum(wsinx)
YCh <- sum(y*wcosx)
YSh <- sum(y*wsinx)
CCh <- sum(wcosx*cosx)
SSh <- sum(wsinx*sinx)
if (CCh != 0 & SSh != 0){
K <- (C*C*SSh + S*S*CCh - W*CCh*SSh)/(2*CCh*SSh)
L <- (bigY*CCh*SSh - C*YCh*SSh - S*YSh*CCh)/(CCh*SSh)
M <- (YCh*YCh*SSh + YSh*YSh*CCh)/(2*CCh*SSh)
constants[i] <- 1/(sqrt(CCh*SSh*abs(K)))
}
else if (CCh == 0){
K <- (S*S - W*SSh)/(2*SSh)
L <- (bigY*SSh - S*YSh)/(SSh)
M <- (YSh*YSh)/(2*SSh)
constants[i] <- 1/(sqrt(SSh*abs(K)))
}
else if (SSh == 0){
K <- (C*C - W*CCh)/(2*CCh)
L <- (bigY*CCh - C*YCh)/(CCh)
M <- (YCh*YCh)/(2*CCh)
constants[i] <- 1/(sqrt(CCh*abs(K)))
}
if (K > 0){
print("K is positive, This should not happen.")
}
exponents[i] <- (M - ((L*L)/(4*K)))
logp[i] <- log10(constants[i]) + (exponents[i] * log10(exp(1)))
p[i] <- 10^(as.numeric(logp[i]))
if (p[i] < 0.00001){
p[i] <- 0
}
}
norm <- max(p)
p <- p / max(p)
if (plot == T){
plot((1/f), p, type = "l")
}
finfreq <- f[which.max(p)]  # frequency with the highest posterior probability
#print(logp[which.max(p)])
# Pad the light curve with a dense grid of dummy points at the weighted-mean
# magnitude and infinite error (weight 1/Inf = 0), so they do not influence
# the weighted fits but extend the fitted curves over the full time span.
tsin <- matrix(c(seq(from = min(ts[,1]), to = max(ts[,1]), length.out = 15768), rep(weighted.mean(ts[,2], (1 / ts[,3])), times = 15768), rep(1/0, times = 15768)), nrow = 15768)
tsin <- rbind(tsin, ts)
tsin <- tsin[order(tsin[,1]),]
# Weighted least squares: sinusoid at finfreq, intercept-only, and 1-day sinusoid.
SSTlm <- lm(tsin[,2] ~ tsin[,1] + sin(2*pi*finfreq*tsin[,1]) + cos(2*pi*finfreq*tsin[,1]), data = as.data.frame(tsin[,1:2]), weights = 1/tsin[,3])
onemodel <- lm(tsin[,2] ~ 1, data = as.data.frame(tsin[,1:2]), weights = 1/tsin[,3])
daymodel <- lm(tsin[,2] ~ tsin[,1] + sin(2*pi*tsin[,1]) + cos(2*pi*tsin[,1]), data = as.data.frame(tsin[,1:2]), weights = 1/tsin[,3])
Sys.time() - starttime
sp.out <- list(norm = norm, f = f, p = p, logp = log10(p), model = SSTlm, onemodel = onemodel, daymodel = daymodel)
}
# "Stacked" BGLS: fetches a light curve from the database (same lookup as
# BGLS(), including the same hard-coded credentials -- see NOTE there),
# then re-runs BGLS2 on growing prefixes of the light curve
# (obsstart, obsstart+obsstep, ...) and renders the stacked periodograms
# as a grayscale image (period on x, number of observations on y).
SBGLS <- function(data, radii = 0.001, lim = 100, plow = 0.5, phigh = 100, ofac = 1, jit = 0, obsstart = 50, obsstep = 1){
starttime <- Sys.time()
ra <- as.numeric(as.vector(data$RA))
declin <- as.numeric(as.vector(data$DEC))
radra <- radii / abs(cos(declin*pi/180))
channel <- odbcConnect("sktobs", uid = "ross", pwd = "Ccqabw00")
objects <- sqlQuery(channel, paste("SELECT DISTINCT usnoref, entries, RA, DEClin FROM objdat WHERE RA BETWEEN '",
ra-radra, "' AND '", ra+radra, "' AND DEClin BETWEEN '", declin-radii, "' AND '",
declin+radii, "' AND entries > '", lim, "'", sep=""))
geodist <- sqrt((declin - objects$DEClin)^2.0 + (ra - objects$RA)^2.0)
mindist <- which.min(geodist)
objects <- objects[mindist,]
info <- sqlQuery(channel, paste("SELECT MJD, Rcat, Rcaterr FROM obsdat100split WHERE usnoref = '",
objects$usnoref[1], "'", sep=""))
info <- unique(info)
t <- info$MJD
y <- info$Rcat
err <- info$Rcaterr
n <- length(t)
timespan <- max(t) - min(t)
ts <- cbind(t, y, err)
n_steps <- as.integer(ofac*n*(1/plow - 1/phigh))
f <- seq(from = 1/phigh, to = 1/plow, length.out = n_steps)
f <- sort(f)
num <- seq(obsstart, n, by = obsstep)
rownum <- length(num)
logp <- matrix(0, nrow = rownum, ncol = n_steps)
k <- 1
# Evaluate each prefix on the FULL curve's frequency grid by passing
# dt/lent explicitly to BGLS2.
for (i in num){
ans <- BGLS2(ts[1:i,], plow = plow, phigh = phigh, ofac = ofac, dt = timespan, lent = n, spur = F, jit = jit)
logp[k,] <- ans$p
k <- k + 1
}
image(x = sort(1/f), y = num, t(logp)[nrow(t(logp)):1,], axes = T, col = grey(seq(0, 1, length = 256)))
Sys.time() - starttime
}
# Stacked BGLS over an in-memory light curve `ts` (columns: time,
# magnitude, error): runs BGLS2 on growing prefixes of the data and renders
# the stacked periodograms as a grayscale image. Returns the elapsed time.
SBGLS2 <- function(ts, plow = 0.5, phigh = 100, ofac = 1, jit = 0, obsstart = 50, obsstep = 1){
starttime <- Sys.time()
t <- ts[,1]
y <- ts[,2]
err <- ts[,3]
n <- length(t)
timespan <- max(t) - min(t)
ts <- cbind(t, y, err)
n_steps <- as.integer(ofac*n*(1/plow - 1/phigh))
f <- seq(from = 1/phigh, to = 1/plow, length.out = n_steps)
f <- sort(f)
# Prefix lengths to evaluate: obsstart, obsstart+obsstep, ..., up to n.
num <- seq(obsstart, n, by = obsstep)
rownum <- length(num)
logp <- matrix(0, nrow = rownum, ncol = n_steps)
k <- 1
for (i in num){
if (i %% 10 == 0){print(i)}  # coarse progress indicator
# dt/lent pin every prefix to the full curve's frequency grid.
ans <- BGLS2(ts[1:i,], plow = plow, phigh = phigh, ofac = ofac, dt = timespan, lent = n, spur = F, jit = jit)
logp[k,] <- ans$p
k <- k + 1
}
image(x = sort(1/f), y = num, t(logp)[nrow(t(logp)):1,], axes = T, col = grey(seq(0, 1, length = 256)))
Sys.time() - starttime
}
|
227c382fd0ce0b69996abeb5ea081ceaae0e0b81
|
82e54075c63f2368841edfcc3c3ff9a45be213f7
|
/man/utils_rsltsexprob.Rd
|
bcbe588752ae5987cdc3e66c11810e60d3c8fa5f
|
[] |
no_license
|
aawhip/hydrospatial
|
d4ff9b651e65eb36eacfe7ccf2599d609447ba68
|
14bc119df1d4d8be3222fe07e7cd6d2800920205
|
refs/heads/master
| 2021-06-28T10:21:08.621803
| 2020-10-11T19:52:55
| 2020-10-11T19:52:55
| 157,914,278
| 4
| 4
| null | 2020-10-11T19:52:56
| 2018-11-16T19:49:11
|
HTML
|
UTF-8
|
R
| false
| true
| 624
|
rd
|
utils_rsltsexprob.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils_rsltsexprob.R
\name{utils_rsltsexprob}
\alias{utils_rsltsexprob}
\title{Set exceedance probability table from annual time series}
\usage{
utils_rsltsexprob(d, tmsrs, sc)
}
\arguments{
\item{d}{Data frame in format used for 'hsa' functions}
\item{tmsrs}{Data frame of annual time series from 'utils_areaday' function}
\item{sc}{Scenario name to add as a column}
}
\value{
Flows data frame as input for hydrospatial analysis
}
\description{
Prepares the exceedance probability table from the annual time series results
table for use in visualization.
}
|
0f680fdf55b87b292af1a84afc19d540d9081c61
|
b25dc2fbf7c7815a2ab0e0657b6c0355104b1679
|
/R/mylm.R
|
9ce1ae80e7658ad7bc20c8cff9a378eba71ad12b
|
[] |
no_license
|
Anderssorby/mylm
|
61406efff5a21976ce50582ac2464f8bb535f025
|
bdcd712b765ca3e2e61afa80c0020006ef2e50b0
|
refs/heads/master
| 2020-03-28T12:18:57.677918
| 2018-09-28T12:13:22
| 2018-09-28T12:13:22
| 148,287,063
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,833
|
r
|
mylm.R
|
# Select Build, Build and reload to build and lode into the R-session.
# Fit a linear model by ordinary least squares (educational re-implementation
# of lm()).
#
# Args:
#   formula:   model formula, e.g. y ~ x1 + x2.
#   data:      data.frame (or list) holding the model variables.
#   contrasts: optional contrasts specification passed to model.matrix().
#   ...:       ignored; accepted for call-site compatibility.
#
# Returns an object of class "mylm" containing the coefficients (beta),
# fitted values, residuals, covariance of the estimates, R^2 statistics,
# an overall F-type statistic (its p-value computed against a chi-squared
# reference, by design), and per-coefficient z-tests.
mylm <- function(formula,
                 data = list(),
                 contrasts = NULL,
                 ...) {
  # Extract model frame, design matrix and response.
  mf <- model.frame(formula = formula, data = data)
  x <-
    model.matrix(attr(mf, "terms"),
                 data = mf,
                 contrasts.arg = contrasts)
  y <- model.response(mf)
  # BUG FIX: extract the terms object *before* storing it. The original
  # code ran `est <- list(terms = terms, ...)` first, so est$terms captured
  # the base function stats::terms instead of the model's terms attribute.
  terms <- attr(mf, "terms")
  est <- list(terms = terms, model = mf)
  n <- length(y)
  est$p <- p <- ncol(x)
  # H = (X'X)^-1 X' gives both the coefficients and their covariance.
  est$H <- H <- solve(t(x) %*% x) %*% t(x)
  est$beta <- beta <- H %*% y
  est$yhat <- yhat <- x %*% beta
  est$residuals <- residuals <- y - yhat
  # Unbiased estimate of the residual variance.
  sigma2 <- drop(t(residuals) %*% (residuals) / (n - p))
  est$covar <- covar <- solve(t(x) %*% x) * sigma2
  est$dof <- dof <- n - length(beta)
  est$ssr <- ssr <- sum(residuals^2)          # Residual sum of squares
  est$rse <- rse <- sqrt(ssr / dof)           # Residual standard error
  est$sst <- sst <- sum((y - mean(y))^2)      # Total sum of squares
  est$sse <- sse <- sst - ssr
  est$r2 <- r2 <- 1 - ssr / sst               # R^2
  est$r2adj <- r2adj <- 1 - (1 - r2) * (n - 1) / (n - length(beta))
  est$Fstat <- Fstat <- (sse) / (length(beta) - 1) * (n - p) / ssr  # F-statistic
  est$Fpval <- Fpval <- 1 - pchisq(Fstat * (p - 1), df = p - 1)
  # Per-coefficient z-tests, vectorized (equivalent to the old element-wise
  # loop; as.vector keeps the plain-numeric-vector shape callers expect).
  statistics <- as.vector(beta / sqrt(diag(covar)))
  pvalues <- 2 * (1 - pnorm(abs(statistics)))
  # Store test results, call and formula used.
  est$statistics <- statistics
  est$pvalues <- pvalues
  est$call <- match.call()
  est$formula <- formula
  est$sigma <- sqrt(sigma2)
  # Set class name so print/summary/plot dispatch to the mylm methods.
  class(est) <- 'mylm'
  return(est)
}
# print() method for "mylm" objects: shows the model formula and returns
# (visibly) a named numeric vector of the rounded coefficients.
print.mylm <- function(est, ...) {
  cat("Call:\nmylm : formula = ")
  print(est$formula)
  cat("\nCoefficients:\n")
  # Format to at least four decimals, then convert back to numeric so the
  # printed vector shows exactly five significant figures.
  coefs <- format(est$beta, digits = 4, nsmall = 4, trim = TRUE)
  out <- as.vector(as.numeric(coefs))
  names(out) <- rownames(est$beta)
  out
}
# summary() method for "mylm" objects. Prints (in lm-summary style) the
# residual quantiles, a coefficient table with z-tests, the residual
# standard error, R^2 statistics and the overall F-type statistic.
# All output goes to the console via cat/print; nothing is returned.
summary.mylm <- function(est, ...) {
  #df <- as.data.frame(matrix(c(reg$beta,sqrt(diag(reg$covar)),reg$statistics,reg$pvalues),ncol = 4))
  # Code here is used when summary(object) is used on objects of class "mylm"
  # Useful functions include cat, print.default and format
  cat('Summary of mylm\n\n')
  cat("Residuals: \n")
  #max_res <- max(est$residuals)
  #min_res <- min(est$residuals)
  #mean_res <- mean(est$residuals)
  #median_res <- median(est$residuals)
  #cat("Min \t Median \t Max \t \n")
  #cat(sprintf("%.5f\t%.5f\t%.5f\t\n", min_res, median_res, max_res)) # WHAT ABOUT 1Q AND 3Q?
  # Five-number summary of the residuals, labeled like summary.lm's output.
  v = quantile(est$residuals, names = T)
  names(v) = c("Min", "1Q", "Median", "3Q", "Max")
  print(v, digits = 3)
  cat("\nCoefficients:\n")
  # Columns: estimate, standard error, z statistic, two-sided p-value.
  mat = as.matrix(cbind(est$beta, sqrt(diag(est$covar)), est$statistics, est$pvalues))
  colnames(mat) = c("Estimate", "Std. Error", "z value", "Pr(>|z|)")
  print(mat, digits = 4) # how many digits?
  cat("---\n")
  # NOTE(review): the significance-code legend is printed but no stars are
  # actually attached to the coefficient rows above.
  cat("Signif. codes:\t0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1")
  cat("\n\nResidual standard error:", est$rse, "on", est$dof, "degrees of freedom\n")
  cat("Multiple R-squared:", est$r2, "\tAdjusted R-squared:", est$r2adj, "\n")
  cat("F-statistic:", est$Fstat, "on", length(est$beta)-1, "and", est$dof, "DF, p-value: <", est$Fpval, "\n")
}
# plot() method for "mylm" objects: residuals-vs-fitted diagnostic plot
# (open triangles, pch = 2), for eyeballing heteroscedasticity and
# non-linearity.
plot.mylm <- function(est, ...) {
  plot(x = est$yhat, y = est$residuals,
       xlab = "Fitted values", ylab = "Residuals", pch = 2)
}
# This part is optional! You do not have to implement anova
# ANOVA method for "mylm" objects: refits the model sequentially, adding one
# term at a time in formula order, intending to print an analysis-of-variance
# table comparing the nested fits.
# NOTE(review): the table-printing loop at the bottom is a stub — only the
# header is printed, no per-model rows; the list of nested lm() fits is
# returned instead.
anova.mylm <- function(object, ...) {
# Components to test: the term labels of the fitted model's formula.
comp <- attr(object$terms, "term.labels")
# Name of response, recovered from the formula's left-hand side.
response <- deparse(object$terms[[2]])
# Fit the sequence of nested models: y ~ x1, y ~ x1 + x2, ...
txtFormula <- paste(response, "~", sep = "")
model <- list()
for (numComp in 1:length(comp)) {
if (numComp == 1) {
txtFormula <- paste(txtFormula, comp[numComp])
}
else{
txtFormula <- paste(txtFormula, comp[numComp], sep = "+")
}
formula <- formula(txtFormula)
# Each nested model is refitted with base lm() on the data stored in object.
model[[numComp]] <- lm(formula = formula, data = object$model)
}
# Print Analysis of Variance Table (header only; see stub note above).
cat('Analysis of Variance Table\n')
cat(c('Response: ', response, '\n'), sep = '')
cat(' Df Sum sq X2 value Pr(>X2)\n')
for (numComp in 1:length(comp)) {
# Add code to print the line for each model tested
}
return(model)
}
|
5140d85be9c357ca464c6dba1ea4db29c0f44a54
|
77157987168fc6a0827df2ecdd55104813be77b1
|
/palm/inst/testfiles/pbc_distances/libFuzzer_pbc_distances/pbc_distances_valgrind_files/1612988237-test.R
|
5fc7b421c864f2d54d23c7fbb080d186ccdea74c
|
[] |
no_license
|
akhikolla/updatedatatype-list2
|
e8758b374f9a18fd3ef07664f1150e14a2e4c3d8
|
a3a519440e02d89640c75207c73c1456cf86487d
|
refs/heads/master
| 2023-03-21T13:17:13.762823
| 2021-03-20T15:46:49
| 2021-03-20T15:46:49
| 349,766,184
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 495
|
r
|
1612988237-test.R
|
# Auto-generated fuzzer regression input (libFuzzer/valgrind) replaying a
# degenerate case for palm:::pbc_distances: a 1x1 limits matrix and a 3x8
# points matrix containing NaN and subnormal values. Requires the 'palm'
# package; str() prints the structure of whatever the call returns.
testlist <- list(lims = structure(0, .Dim = c(1L, 1L)), points = structure(c(NaN, 1.12512633411741e+224, 3.18758514287482e-241, 1.1125369201759e-308, 1.21357302056518e+132, 4.94065645841247e-324, 4.94065645841247e-324, 4.94065645841247e-324, 4.94065645841247e-324, 7.90667225870809e-310, 6.48706401202575e+174, 2.4567853463798e-305, 7.29290188850906e-304, 1.52135018973407e-309, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(3L, 8L)))
result <- do.call(palm:::pbc_distances,testlist)
str(result)
|
42dd2fbcb2e4b60940bf2fbc2326829df330cbfc
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/PUPAIM/examples/freundlichanalysis.Rd.R
|
d1b2ccd298de5be0547342a3af661e69cdea58c1
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 186
|
r
|
freundlichanalysis.Rd.R
|
# Example extracted from the PUPAIM package's Rd documentation for
# freundlichanalysis(): fits a Freundlich isotherm to a pair of equal-length
# numeric vectors. Requires the 'PUPAIM' package to be installed.
library(PUPAIM)
### Name: freundlichanalysis
### Title: Freundlich Isotherm Analysis
### Aliases: freundlichanalysis
### ** Examples
freundlichanalysis(c(1,2,3,4,5),c(1,2,3,4,5))
|
70e76c78f5daf60278ddbe5de9ca72a974fe23e6
|
02203a5e1487c6bf95647b38038e2428c261aad7
|
/R/nearblack.R
|
3ae56d39c65864f276dd3a7665cf94adb216d93e
|
[] |
no_license
|
cran/gdalUtils
|
8793292640f94d0f8960804f0ba9d4b5099baad7
|
9aa3955becca0970f98513ca20e4bff56be44d81
|
refs/heads/master
| 2021-06-07T07:27:23.972525
| 2020-02-13T19:10:02
| 2020-02-13T19:10:02
| 17,696,298
| 3
| 8
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,334
|
r
|
nearblack.R
|
#' nearblack
#'
#' R wrapper for nearblack: convert nearly black/white borders to black
#'
#' @param infile Character. The input file. Any GDAL supported format, any number of bands, normally 8bit Byte bands.
#' @param o Character. outfile. The name of the output file to be created. Newly created files are created with the HFA driver by default (Erdas Imagine - .img)
#' @param of Character. format. (GDAL 1.8.0 or later) Select the output format. Use the short format name (GTiff for GeoTIFF for example).
#' @param co Character. "NAME=VALUE". (GDAL 1.8.0 or later) Passes a creation option to the output format driver. Multiple -co options may be listed. See format specific documentation for legal creation options for each format. Only valid when creating a new file.
#' @param white Logical. Search for nearly white (255) pixels instead of nearly black pixels.
#' @param color Numeric. c1,c2,c3...cn. (GDAL >= 1.9.0) Search for pixels near the specified color. May be specified multiple times. When -color is specified, the pixels that are considered as the collar are set to 0.
#' @param near Numeric. dist. Select how far from black, white or custom colors the pixel values can be and still considered near black, white or custom color. Defaults to 15.
#' @param nb Numeric. non_black_pixels. number of non-black pixels that can be encountered before the giving up search inwards. Defaults to 2.
#' @param setalpha Logical. (GDAL 1.8.0 or later) Adds an alpha band if the output file is specified and the input file has 3 bands, or sets the alpha band of the output file if it is specified and the input file has 4 bands, or sets the alpha band of the input file if it has 4 bands and no output file is specified. The alpha band is set to 0 in the image collar and to 255 elsewhere.
#' @param setmask Logical. (GDAL 1.8.0 or later) Adds a mask band to the output file, or adds a mask band to the input file if it does not already have one and no output file is specified. The mask band is set to 0 in the image collar and to 255 elsewhere.
#' @param q Logical. (GDAL 1.8.0 or later) Suppress progress monitor and other non-error output.
## @param additional_commands Character. Additional commands to pass directly to gdaladdo.
#' @param output_Raster Logical. Return outfile as a RasterBrick?
#' @param overwrite Logical. If output file exists, OR if output file is not set (which would defualt to overwriting the input file), allow overwriting?
#' @param ignore.full_scan Logical. If FALSE, perform a brute-force scan if other installs are not found. Default is TRUE.
#' @param verbose Logical. Enable verbose execution? Default is FALSE.
## @param ... Other parameters to pass to nearblack.
#'
#' @return NULL
#' @author Jonathan A. Greenberg (\email{gdalUtils@@estarcion.net}) (wrapper) and Frank Warmerdam (GDAL lead developer).
#' @details #' This is an R wrapper for the 'nearblack' function that is part of the
#' Geospatial Data Abstraction Library (GDAL). It follows the parameter naming
#' conventions of the original function, with some modifications to allow for more R-like
#' parameters. For all parameters, the user can use a single character string following,
#' precisely, the gdalinfo format (\url{http://gdal.org/nearblack.html}), or,
#' in some cases, can use R vectors to achieve the same end.
#'
#' This utility will scan an image and try to set all pixels that are nearly or
#' exactly black, white or one or more custom colors around the collar to black
#' or white. This is often used to "fix up" lossy compressed airphotos so that
#' color pixels can be treated as transparent when mosaicking.
#'
#' This function assumes the user has a working GDAL on their system. If the
#' "gdalUtils_gdalPath" option has been set (usually by gdal_setInstallation),
#' the GDAL found in that path will be used. If nothing is found, gdal_setInstallation
#' will be executed to attempt to find a working GDAL.
#'
#' @references \url{http://www.gdal.org/nearblack.html}
#'
#' @examples
#' # None available at present.
#' @export
nearblack <- function(
  infile, o,
  of, co, white, color, near, nb, setalpha, setmask, q,
  # additional_commands,
  output_Raster = FALSE,
  overwrite = FALSE,
  ignore.full_scan = TRUE,
  verbose = FALSE#,
  # ...
)
{
  # Returning a RasterBrick needs both rgdal and raster to be installed.
  if (output_Raster && (!requireNamespace("raster") || !requireNamespace("rgdal")))
  {
    warning("rgdal and/or raster not installed. Please install.packages(c('rgdal','raster')) or set output_Raster=FALSE")
    return(NULL)
  }
  # When no output file 'o' is given, nearblack modifies 'infile' in place,
  # so demand explicit consent via overwrite.
  # BUGFIX: the original guards used the elementwise '&', so file.exists(o)
  # was evaluated even when 'o' was missing, raising "argument 'o' is
  # missing, with no default" whenever overwrite = TRUE and no output file
  # was supplied. '&&' short-circuits, and the !missing(o) guard protects
  # the file.exists() call.
  if (missing(o) && !overwrite) stop("Warning: You are attempting to overwrite your input file. Set an output file 'o' or overwrite=T (to overwrite the input file) to proceed.")
  if (!missing(o) && file.exists(o) && !overwrite) stop("Output file exists. Set overwrite=T or pick another output name.")
  # Snapshot all formals (including still-missing ones) for the command
  # builder; must happen before any locals are created.
  parameter_values <- as.list(environment())
  if (verbose) message("Checking gdal_installation...")
  gdal_setInstallation(ignore.full_scan = ignore.full_scan, verbose = verbose)
  if (is.null(getOption("gdalUtils_gdalPath"))) return()
  # Start nearblack setup: classify each formal by the flag style the
  # command-line utility expects.
  parameter_variables <- list(
    logical = list(
      varnames <- c("white", "setalpha", "setmask", "q")),
    vector = list(
      varnames <- c()),
    scalar = list(
      varnames <- c("near", "nb")),
    character = list(
      varnames <- c("o", "of", "co", "infile")),
    repeatable = list(
      varnames <- c("color"))
  )
  parameter_order <- c("white", "setalpha", "setmask", "q",
    "near", "nb",
    "o", "of", "co",
    "color",
    "infile"
  )
  parameter_noflags <- c("infile")
  parameter_doubledash <- NULL
  parameter_noquotes <- unlist(parameter_variables$vector)
  executable <- "nearblack"
  # End nearblack setup
  cmd <- gdal_cmd_builder(
    executable = executable,
    parameter_variables = parameter_variables,
    parameter_values = parameter_values,
    parameter_order = parameter_order,
    parameter_noflags = parameter_noflags,
    parameter_doubledash = parameter_doubledash,
    parameter_noquotes = parameter_noquotes)
  if (verbose) message(paste("GDAL command being used:", cmd))
  cmd_output <- system(cmd, intern = TRUE)
  if (output_Raster)
  {
    # Return the processed image; when no output file was given the
    # (overwritten) input file holds the result.
    if (missing(o))
    {
      return(brick(infile))
    } else
    {
      return(brick(o))
    }
  } else
  {
    return(NULL)
  }
}
|
cf45f5df640faa4c75f54b5bd1c6435fac4498b1
|
20925a992f5e542366b049232bdea1aa7aca1112
|
/config/census_spec.R
|
fef14b2f9dfed9648a0250eedeefddca696cbd68
|
[] |
no_license
|
r4atlantis/atlantisom
|
608a42edcfa16a4e4ee1cbc449d12f26fd15421a
|
b39a88f268a76014de9844ae8fffa67876c218fd
|
refs/heads/master
| 2023-06-25T13:57:15.235092
| 2022-06-30T22:30:05
| 2022-06-30T22:30:05
| 47,479,048
| 7
| 6
| null | 2021-04-29T14:03:50
| 2015-12-06T01:14:26
|
R
|
UTF-8
|
R
| false
| false
| 1,864
|
r
|
census_spec.R
|
# Survey-specification constants for a "census" (perfect-observation) survey
# over an Atlantis model run.
# NOTE(review): this script expects funct.group.names, funct.groups, d.name,
# box.file and run.prm.file to be defined by the calling environment before it
# is sourced, and load_box()/load_runprm() to be attached — confirm upstream.
# should return a perfectly efficient survey
effic1 <- data.frame(species=funct.group.names,
efficiency=rep(1.0,length(funct.group.names)))
# should return all lengths fully sampled (Atlantis output is 10 age groups per spp)
selex1 <- data.frame(species=rep(funct.group.names, each=10),
agecl=rep(c(1:10),length(funct.group.names)),
selex=rep(1.0,length(funct.group.names)*10))
# should return all model areas
boxpars <- load_box(d.name, box.file)
boxall <- c(0:(boxpars$nbox - 1))
# generalized timesteps all models
runpar <- load_runprm(d.name, run.prm.file)
noutsteps <- runpar$tstop/runpar$outputstep
# NOTE(review): if outputstepunit is not "days" this one-armed `if` yields
# NULL, which breaks the arithmetic below — confirm intended units.
stepperyr <- if(runpar$outputstepunit=="days") 365/runpar$toutinc
midptyr <- round(median(seq(0,stepperyr)))
# a survey that takes place once per year mid year
annualmidyear <- seq(midptyr, noutsteps, stepperyr)
timeall <- c(0:noutsteps)
# learned the hard way this can be different from ecosystem outputs
fstepperyr <- if(runpar$outputstepunit=="days") 365/runpar$toutfinc
# define set of species we expect surveys to sample (e.g. fish only? vertebrates?)
# for ecosystem indicator work test all species, e.g.
survspp <- funct.group.names
# for length and age groups lets just do fish and sharks
# NOBA model has InvertType, changed to GroupType in file, but check Atlantis default
#if(initNOBA) funct.groups <- rename(funct.groups, GroupType = InvertType)
# NOTE(review): the assignment below immediately narrows survspp to switched-on
# FISH and SHARK groups, discarding the all-species value assigned above.
survspp <- funct.groups$Name[funct.groups$IsTurnedOn==1 &
funct.groups$GroupType %in% c("FISH", "SHARK")]
# needed for sample_fish
# this effective N is high but not equal to total for numerous groups
effNhigh <- data.frame(species=survspp, effN=rep(1e+8, length(survspp)))
# needed for sample_survey_xxx
# perfect observation
surv_cv <- data.frame(species=survspp, cv=rep(0.0,length(survspp)))
|
03bcfc1b7fc045d9a1584841bec3a8be4afc83cf
|
9805296e474418dcbe3b3796be3ccf1deda87699
|
/R/na_pad.R
|
736e5a348822cd5ef1fcaf2e19a0b479badaecc7
|
[
"MIT"
] |
permissive
|
gpilgrim2670/SwimmeR
|
0b23f5a940e8e91de0e5f28618921649781a0f73
|
99771b92d5b692a4e09d795e0ab809a844b3c32e
|
refs/heads/master
| 2023-03-31T03:21:05.070693
| 2023-03-24T01:02:25
| 2023-03-24T01:02:25
| 211,669,184
| 2
| 2
|
NOASSERTION
| 2023-03-15T00:04:13
| 2019-09-29T13:42:59
|
R
|
UTF-8
|
R
| false
| false
| 595
|
r
|
na_pad.R
|
#' Pad a list (or vector) with trailing \code{NA}s
#'
#' Appends \code{y} \code{NA} values to the end of \code{x}. Used to bring the
#' shorter sub-lists of a list-of-lists up to the length of the longest one;
#' the longest sub-list is padded with zero \code{NA}s.
#'
#' @param x a list (or atomic vector) to pad
#' @param y the number of \code{NA} values to append to \code{x}
#' @return \code{x} with \code{y} trailing \code{NA} values appended
na_pad <- function(x, y){
  c(x, rep(NA, y))
}
|
37974f9f7ace402a614bde3b382f94c38aa521ae
|
885f74da2928461078ace7d0dedb814797b3efd1
|
/Parte1/scatterPlot.R
|
9eff02f89fb1fd06b6b36f0965f5957aa7c60fa6
|
[] |
no_license
|
t0ny00/Inteligencia-2-Proeycto-1
|
bc5013a2caf12ec1503bc5b6b263588001f1c962
|
bd2cd842a18a7d70e3f7cace8479b57cf1b127b4
|
refs/heads/master
| 2021-01-19T10:58:59.957243
| 2017-02-20T04:32:30
| 2017-02-20T04:32:30
| 82,225,968
| 0
| 0
| null | 2017-02-20T02:08:53
| 2017-02-16T21:04:03
|
Python
|
UTF-8
|
R
| false
| false
| 527
|
r
|
scatterPlot.R
|
# Scatter plot of normalized brain weight vs body weight for mammals, with a
# fitted regression line read from a companion weights file.
# NOTE(review): expects "x01-data-30Iter.txt" and "x01-weights-30Iter.txt" in
# the working directory; columns V2/V3 of the data file are the two normalized
# weights and V1/V2 of the weights file are the regression intercept and
# slope — confirm the file layout before reuse.
data = read.table("x01-data-30Iter.txt")
weights = read.table("x01-weights-30Iter.txt")
plot(data$V2,data$V3,type = "p",pch = 19, col="blue",
xlab = "Brain Weight (normalized, unknown scale)",
ylab = "Body Weight (normalized, unknown scale)",
main = expression(paste("Weight relation between brain and body among mammals")),
panel.first = grid())
# Legend placed at fixed coordinates; the "-" plotting character stands in
# for a line sample.
legend(x = 5, y = 1,
legend = "Regression line",
col = c("red"),
pch="-",
cex = 0.75)
# Regression line: intercept = weights$V1, slope = weights$V2.
abline(a= weights$V1,b= weights$V2, col = "red")
|
26ab7a831f508b124ad2d742a6567ccb20307e55
|
636bffab8f50a57cd20a171f724ecfcdd43e10d1
|
/tests/testthat/tests.R
|
af6de4879e3357ec217ea46a30492d3612d90c38
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
shiandy/methods1proj
|
71e29eee0d256cb9727e946f659c7f879e28accf
|
d4eb49718986b4bf18b3401770297d848593007d
|
refs/heads/master
| 2021-07-06T20:47:39.119688
| 2020-11-05T20:25:53
| 2020-11-05T20:25:53
| 74,291,212
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,006
|
r
|
tests.R
|
# Unit tests (testthat) for the project's simulation helpers: gen_xs_default,
# gen_xs_corr, gen_data, run_sim and train_test_split, all defined elsewhere
# in the package under test.
context("General Unit Tests")
test_that("Generated xs have right dimensions", {
# Both generators are expected to return an n x 1 design matrix.
expect_equal(dim(gen_xs_default(5, 2)), c(5, 1))
expect_equal(dim(gen_xs_corr(5, 2)), c(5, 1))
})
test_that("Generated ys have right dimensions", {
# gen_data returns a list whose $ys component has one response per row.
n <- 100
ys <- gen_data(n, c(1, 2))$ys
expect_equal(length(ys), n)
})
test_that("run_sim stops with wrong parameters", {
# split_prop must lie strictly between 0 and 1; 1.1 should error.
expect_error(run_sim(1, 10, c(1, 2, 3), split_prop = 1.1))
})
test_that("Train/test have unique rows and correct # of rows", {
# NOTE(review): ys has 11 rows (20:30) while xs has 10 — confirm that
# train_test_split is meant to tolerate mismatched row counts.
xs <- data.frame(a = 1:10, b = 1:10)
ys <- data.frame(a = 20:30, b = 20:30)
split_prop <- 0.7
train_test <- train_test_split(xs, ys, split_prop)
xs_train <- train_test$xs_train
xs_test <- train_test$xs_test
# No row may appear in both partitions; train gets floor(split_prop * n) rows.
expect_false(any(xs_train$a %in% xs_test$a))
expect_equal(nrow(xs_train), floor(split_prop * nrow(xs)))
})
test_that("train_test_split stops when split_prop wrong", {
# Boundary values outside (0, 1) must be rejected.
expect_error(train_test_split(1, 1, -1))
expect_error(train_test_split(1, 1, 1))
})
|
a9d82c919b43519d114c9aa8abeab929cd3213ac
|
0d80c27ba1f05646b567709a435661372b7ab131
|
/example_investing_dot_com.R
|
9b9f41e7ee41e950a723d7a666d9c6914b05983a
|
[] |
no_license
|
ByeongheonSong/qae
|
b86edea675afbe3168027f40898e40ad24d80920
|
0edac75981f3baecc1f894779a9b921c53601531
|
refs/heads/master
| 2022-12-10T21:39:02.171515
| 2020-08-22T03:56:42
| 2020-08-22T03:56:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,066
|
r
|
example_investing_dot_com.R
|
## Investing.com
## Scrapes index-quote tables from Investing.com with rvest.
## NOTE(review): the page layout can change; the slice() row indices below are
## hard-coded to the page as it looked when written — verify before reuse.
library(rvest)
library(stringr)
library(dplyr)
## Major indices
URL <- "https://www.investing.com/indices/major-indices"
res <- read_html(URL)
# Table: first table on the page holds the quotes.
tab <- res %>%
html_table() %>%
.[[1]]
# First and last columns are unnamed decoration columns; name, then drop them.
names(tab)[1] <- "v1"
names(tab)[dim(tab)[2]] <- "v.last"
major.indices <- tab %>%
select(-v1, -v.last)
# Hand-picked "core" rows of interest.
major.indices.core <- major.indices %>%
slice(1:5, 9:12, 29, 32, 36, 39, 42)
## Future indices
URL <- "https://www.investing.com/indices/indices-futures"
res <- read_html(URL)
# Table
tab <- res %>%
html_table() %>%
.[[1]]
names(tab)[1] <- "v1"
names(tab)[dim(tab)[2]] <- "v.last"
future.indices <- tab %>%
select(-v1, -v.last)
future.indices.core <- future.indices %>%
slice(1:3, 6:9, 20, 22:24, 28, 30)
## Reference material: rvest usage template (placeholders — not runnable as-is;
## URL and html_nodes() selectors must be filled in first).
URL <- ""
res <- read_html(URL)
# Table
#Sys.setlocale("LC_ALL", "C")
res %>%
html_table()
#Sys.setlocale("LC_ALL", "Korean")
# Element
pattern <- ""
res %>%
html_nodes() %>%
html_text()
# Attribute
pattern <- ""
res %>%
html_nodes() %>%
html_attr("href")
|
c6ebf35712f280fb8a5c482a2838541f18f613ec
|
0a906cf8b1b7da2aea87de958e3662870df49727
|
/diversityForest/inst/testfiles/numSmaller/libFuzzer_numSmaller/numSmaller_valgrind_files/1610037254-test.R
|
c56fc4769a75dca214c3d117a31e3db633599bdc
|
[] |
no_license
|
akhikolla/updated-only-Issues
|
a85c887f0e1aae8a8dc358717d55b21678d04660
|
7d74489dfc7ddfec3955ae7891f15e920cad2e0c
|
refs/heads/master
| 2023-04-13T08:22:15.699449
| 2021-04-21T16:25:35
| 2021-04-21T16:25:35
| 360,232,775
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 425
|
r
|
1610037254-test.R
|
# Auto-generated fuzzer regression input (libFuzzer/valgrind) replaying a
# degenerate case for diversityForest:::numSmaller: an empty reference vector
# and a values vector containing NaN and subnormal numbers. Requires the
# 'diversityForest' package; str() prints the structure of the result.
testlist <- list(reference = numeric(0), values = c(8.81439202903199e-280, NaN, NaN, 3.22723679863502e-319, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
result <- do.call(diversityForest:::numSmaller,testlist)
str(result)
|
25d1c131b12863e1391ee1420b12cecf19314e5f
|
fb04f67adde5c1cb23591b0f7f1c9327b2cb2c0d
|
/R/plot_FDEffect.R
|
97c67e1b7bbe9d2a8533b228085ca05d8a3869be
|
[] |
no_license
|
cc458/postregplots
|
083570ad08456414a62515a5a30aac44e6f8df2a
|
d7622e3a201de582e4bd1d8812ca222be3bb6cd8
|
refs/heads/master
| 2021-03-19T17:06:21.245912
| 2019-08-21T00:48:22
| 2019-08-21T00:48:22
| 118,621,131
| 4
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,197
|
r
|
plot_FDEffect.R
|
#' A function to plot the first difference in the predicted probability of the interaction between a binary and interval variables in Logit model with cluster sd .
#'
#' This function creates a facet plot for the interaction terms in Logit model using simulations
#'
#' @param ModelResults list. Specify model name of the logit model.
#' @param n.sim numeric. Specify the number of simulations
#' @param varname1 character. A binary variable
#' @param varname2 character. An interval vaiable
#' @param data data.frame. The name of the data frame in logit model
#' @param val1 numeric. Specify the min value
#' @param val2 numeric. Specify the max value
#' @param intervals numeric. Specify the intervals for the sequence
#' @param clusterid character. The cluster id
#' @param xlabs character. The x label for the plot
#' @param ylabs character. The y label for the plot
#' @import ggplot2 arm multiwayvcov lmtest gridExtra viridis ggridges dplyr
#' ggplot2 tidyr stringr purrr
#' @export
##function for first difference
plot_FDEffect = function(ModelResults, n.sim = 1000, data, clusterid, varname1, varname2,
val1, val2, intervals, xlabs, ylabs){
# Plots the first difference (with a 90% simulation interval) in the predicted
# probability between varname1 = 1 and varname1 = 0, across a grid of values
# of varname2, for a logit model using cluster-robust standard errors.
# NOTE(review): packages are attached inside the function (require/library),
# a side effect on the search path; the random seed is hard-coded to 12345,
# so n.sim only controls the number of draws; coef_cluster is computed but
# never used afterwards — confirm whether it was meant to feed the simulation.
#get a sim objective
require(arm)
library(multiwayvcov)
library(lmtest)
library(ggplot2)
cluster <- data[,clusterid]
# Cluster-robust covariance matrix of the coefficient estimates.
vcov_cluster <- cluster.vcov(ModelResults, cluster)
coef_cluster <- coeftest(ModelResults, vcov = vcov_cluster)
set.seed(12345)
# Draw n.sim coefficient vectors from MVN(coef, cluster-robust vcov).
sim <- mvrnorm(n= n.sim, coef(ModelResults), vcov_cluster)
##set simulation grid over varname2
varname2_val = seq(val1, val2, by =intervals)
# Result columns: varname2 value, mean first difference, 5% and 95% quantiles.
df <- array(NA, c(length(varname2_val), 4))
for (i in 1:length(varname2_val)){
# Two counterfactual design matrices: varname1 = 0 vs varname1 = 1, with
# varname2 fixed at the current grid value and the interaction term updated
# to match (column name assumed to be "varname1:varname2").
X1 <- model.matrix(ModelResults)
X2 <- model.matrix(ModelResults)
X1[, varname1] = 0
X1[, varname2] = varname2_val[i]
X1[, paste(varname1, varname2, sep = ":")] = 0*varname2_val[i]
X2[,varname1] = 1
X2[,varname2] = varname2_val[i]
X2[, paste(varname1, varname2, sep = ":")] = 1*varname2_val[i]
# First difference in predicted probability, averaged over observations;
# one value per simulated coefficient draw.
fd = apply(apply(X2, 1, function (x) plogis(sim %*% x)) -
apply(X1, 1, function (x) plogis(sim %*% x)), 1, mean)
df[i, 1] <- varname2_val[i]
df[i, 2] <- mean(fd)
df[i, 3:4] <- quantile(fd, probs = c(.05,.95))
}
df_plot <- df
colnames(df_plot) <- c("X", "mean", "lo", "hi")
df_p <- as.data.frame(df_plot)
# Ribbon = 90% simulation interval; dashed horizontal line marks zero effect.
p = ggplot(df_p, aes(X)) +
geom_ribbon(aes(ymin = lo, ymax = hi), fill = "grey70") +
geom_line(aes(y = mean)) + theme_gray() +
geom_hline(yintercept = 0, colour = gray(1/2), lty = 2) +
scale_x_continuous(name = xlabs, breaks = seq(val1, val2, by =2)) +
ylab(paste("First Difference in",ylabs, sep = " ")) +
theme(axis.title.y = element_text(margin = margin(1,1,1,1)),
axis.text = element_text(size=14),
axis.title=element_text(size=14),
strip.text = element_text(size=15))
return(p)
}
|
c640682d0b23c04a2bea4006f21510711b267e3e
|
6467006af3e2c7eeef63f15755a6a307cfc89d62
|
/man/dclust.Rd
|
0ee4fbaf00a69a6e453baa33ac4fa3d259509799
|
[] |
no_license
|
cran/RcppML
|
a73d9b855fdf18b020095fdb6ffda99334a5ca3e
|
10d0c0f9a7706a18036186d50b21b7600b6e08a3
|
refs/heads/master
| 2023-08-06T17:40:58.842650
| 2021-09-21T18:00:02
| 2021-09-21T18:00:02
| 390,405,623
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 4,631
|
rd
|
dclust.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dclust.R
\name{dclust}
\alias{dclust}
\title{Divisive clustering}
\usage{
dclust(
A,
min_samples,
min_dist = 0,
verbose = TRUE,
tol = 1e-05,
maxit = 100,
nonneg = TRUE,
seed = NULL
)
}
\arguments{
\item{A}{matrix of features-by-samples in sparse format (preferred class is "Matrix::dgCMatrix")}
\item{min_samples}{stopping criteria giving the minimum number of samples permitted in a cluster}
\item{min_dist}{stopping criteria giving the minimum cosine distance of samples within a cluster to the center of their assigned vs. unassigned cluster. If \code{0}, neither this distance nor cluster centroids will be calculated.}
\item{verbose}{print number of divisions in each generation}
\item{tol}{in rank-2 NMF, the correlation distance (\eqn{1 - R^2}) between \eqn{w} across consecutive iterations at which to stop factorization}
\item{maxit}{stopping criteria, maximum number of alternating updates of \eqn{w} and \eqn{h}}
\item{nonneg}{in rank-2 NMF, enforce non-negativity}
\item{seed}{random seed for rank-2 NMF model initialization}
}
\value{
A list of lists corresponding to individual clusters:
\itemize{
\item id : character sequence of "0" and "1" giving position of clusters along splitting hierarchy
\item samples : indices of samples in the cluster
\item center : mean feature expression of all samples in the cluster
\item dist : if applicable, relative cosine distance of samples in cluster to assigned/unassigned cluster center.
\item leaf : is cluster a leaf node
}
}
\description{
Recursive bipartitioning by rank-2 matrix factorization with an efficient modularity-approximate stopping criteria
}
\details{
Divisive clustering is a sensitive and fast method for sample classification. Samples are recursively partitioned into two groups until a stopping criteria is satisfied and prevents successful partitioning.
See \code{\link{nmf}} and \code{\link{bipartition}} for technical considerations and optimizations relevant to bipartitioning.
\strong{Stopping criteria}. Two stopping criteria are used to prevent indefinite division of clusters and tune the clustering resolution to a desirable range:
\itemize{
\item \code{min_samples}: Minimum number of samples permitted in a cluster
\item \code{min_dist}: Minimum cosine distance of samples to their cluster center relative to their unassigned cluster center (an approximation of Newman-Girvan modularity)
}
Newman-Girvan modularity (\eqn{Q}) is an interpretable and widely used measure of modularity for a bipartition. However, it requires the calculation of distance between all within-cluster and between-cluster sample pairs. This is computationally intensive, especially for large sample sets.
\code{dclust} uses a measure which linearly approximates Newman-Girvan modularity, and simply requires the calculation of distance between all samples in a cluster and both cluster centers (the assigned and unassigned center), which is orders of magnitude faster to compute. Cosine distance is used instead of Euclidean distance since it handles outliers and sparsity well.
A bipartition is rejected if either of the two clusters contains fewer than \code{min_samples} or if the mean relative cosine distance of the bipartition is less than \code{min_dist}.
A bipartition will only be attempted if there are more than \code{2 * min_samples} samples in the cluster, meaning that \code{dist} may not be calculated for some clusters.
\strong{Reproducibility.} Because rank-2 NMF is approximate and requires random initialization, results may vary slightly across restarts. Therefore, specify a \code{seed} to guarantee absolute reproducibility.
Other than setting the seed, reproducibility may be improved by setting \code{tol} to a smaller number to increase the exactness of each bipartition.
}
\examples{
\dontrun{
library(Matrix)
data(USArrests)
A <- as(as.matrix(t(USArrests)), "dgCMatrix")
clusters <- dclust(A, min_samples = 2, min_dist = 0.001)
str(clusters)
}
}
\references{
Schwartz, G. et al. "TooManyCells identifies and visualizes relationships of single-cell clades". Nature Methods (2020).
Newman, MEJ. "Modularity and community structure in networks". PNAS (2006)
Kuang, D, Park, H. (2013). "Fast rank-2 nonnegative matrix factorization for hierarchical document clustering." Proc. 19th ACM SIGKDD intl. conf. on Knowledge discovery and data mining.
}
\seealso{
\code{\link{bipartition}}, \code{\link{nmf}}
}
\author{
Zach DeBruine
}
|
9e897dc59faf39176dc7d1cc4b29209d67bf2e86
|
ecd22941528dc4486b1df8cc79aacfda31ff8b31
|
/Assignment_4_kawthar.R
|
387b5544ded0ac836430e01f2a0e366609c78d0c
|
[] |
no_license
|
kawbabs/GreyCampus
|
d4c67810efb4cc320ddf1e12a63bea3d555331c5
|
70b3a009cd5a4b9f92f9d0c52778f7537dbeb402
|
refs/heads/main
| 2023-04-17T07:39:07.203617
| 2021-05-01T06:47:15
| 2021-05-01T06:47:15
| 340,961,735
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,940
|
r
|
Assignment_4_kawthar.R
|
# ## Case I
# I. Data: The World Values Survey (77,882 respondents, 57 countries)
# estimates that 36.2% of people agree with the statement "Men should have
# more right to a job than women" (event A), 13.8% have a university degree
# or higher (event B), and 3.6% satisfy both criteria.

# Question 1: are A and B disjoint events?
# No: disjoint would require P(A and B) = 0, but 3.6% of respondents
# satisfy both.
population <- 77882
prob_A <- 0.362        # Event A: agrees with the statement
prob_B <- 0.138        # Event B: has a university degree or higher
prob_A_and_B <- 0.036  # P(A and B); non-zero, so the events are not disjoint

# Question 2: Venn diagram summarising the two events and their overlap.
library(VennDiagram)
grid.newpage()
draw.pairwise.venn(
  area1 = prob_A, area2 = prob_B, cross.area = prob_A_and_B,
  category = c('Agrees with statement', 'Has university degree'),
  lty = rep("blank"), fill = c("light blue", "pink"),
  alpha = rep(0.5, 2), cat.pos = c(0, 0), cat.dist = rep(0.025, 2)
)

# Question 3: P(A or B). BUGFIX: the general addition rule subtracts the
# overlap so it is not counted twice; the original computed
# prob_A + prob_B = 0.5, which double-counts the 3.6% in both groups.
prob_A_or_B <- prob_A + prob_B - prob_A_and_B
prob_A_or_B  # 0.464

# Question 4: percent of the population with neither a degree nor agreement.
# BUGFIX: P(neither) is the complement of P(A or B); the original's
# 1 - P(A) - P(B) - P(A and B) = 0.464 applied the wrong sign to the overlap.
prob_neither <- 1 - prob_A_or_B
prob_neither  # 0.536

# Question 5: independence would require P(A and B) = P(A) * P(B).
prob_A * prob_B  # 0.049956
# 0.049956 differs from P(A and B) = 0.036, so the events are NOT independent
# (BUGFIX: the original drew the opposite conclusion from this comparison).

# Question 6: P(at least 1 of 5 randomly selected people agrees)
# = 1 - P(none of the 5 agrees), assuming independent draws.
prob_one_five <- 1 - (1 - prob_A)^5
prob_one_five  # 0.8942931
# Case II.
#
# Data: As of 2009, Swaziland had the highest HIV prevalence in the world:
# 25.9% of the population infected. The ELISA test is 99.7% accurate for
# carriers (sensitivity) and 92.6% accurate for non-carriers (specificity).

# Question 1: P(carries HIV | tested positive), via Bayes' rule over the
# four branches of the probability tree.
P_hiv <- 0.259                          # P(HIV)
p_not_hiv <- 1 - P_hiv                  # P(no HIV)
p_pos_given_hiv <- 0.997                # P(+ | HIV), sensitivity
p_neg_not_hiv <- 0.926                  # P(- | no HIV), specificity
p_neg_given_hiv <- 1 - p_pos_given_hiv  # P(- | HIV), false negative rate
p_pos_not_hiv <- 1 - p_neg_not_hiv      # P(+ | no HIV), false positive rate
# Joint probabilities of the four tree branches.
p_hiv_and_pos <- P_hiv * p_pos_given_hiv
# BUGFIX: the original multiplied by the undefined `p_hiv` (R is
# case-sensitive; the prevalence variable is `P_hiv`), which stopped the
# script with "object 'p_hiv' not found".
p_hiv_and_neg <- P_hiv * p_neg_given_hiv
p_not_hiv_and_pos <- p_not_hiv * p_pos_not_hiv
p_not_hiv_and_neg <- p_not_hiv * p_neg_not_hiv
# Law of total probability: P(+) summed over both branches.
p_positive <- p_hiv_and_pos + p_not_hiv_and_pos
# Bayes' rule. (Renamed from the original's misleading `p_hiv_given_negative`
# and misspelled `p_positve`: the conditioning event is a POSITIVE test.)
p_hiv_given_positive <- p_hiv_and_pos / p_positive
p_hiv_given_positive  # 0.8248434

# Question 2: P(exactly 8 of 10 employees engaged), Binomial(10, 0.13).
dbinom(8, 10, 0.13)
# Ans = 2.77842e-06

# Question 3: Recent study: "Facebook users get more than they give".
# 25% of users are considered power users; the average user has 245 friends.
# P(70 or more power-user friends) for Binomial(245, 0.25):
sum(dbinom(70:245, size = 245, p = 0.25))

# Question 4: P(exactly 6 of 10 uninsured Americans plan to use a government
# health insurance exchange), Binomial(10, 0.56).
dbinom(6, 10, 0.56)
# ans = 0.2427494
|
d2866b4666db154b1af89177da5888292cf16515
|
f0d5df048c0d5ac4f969a03b477515bd762a446c
|
/R/sysinfo.R
|
76fbf2080267c4b3e619ddddc193b420766a161c
|
[] |
no_license
|
HenrikBengtsson/startup
|
67a01ac529ff0adc8dd0e722bbaccbd80010cb2a
|
abd1be760a8665e7f301129ec97e1d5d1b175a43
|
refs/heads/develop
| 2023-04-07T06:44:59.018075
| 2023-04-06T01:42:50
| 2023-04-06T01:42:50
| 73,848,752
| 163
| 8
| null | 2022-04-03T06:44:22
| 2016-11-15T19:38:47
|
R
|
UTF-8
|
R
| false
| false
| 1,368
|
r
|
sysinfo.R
|
#' Information on the current R session
#'
#' Collects `Sys.info()` fields together with platform details,
#' front-end detection flags, and session-specific settings.
#'
#' @return A named list.
#'
#' @examples
#' startup::sysinfo()
#'
#' @export
sysinfo <- function() {
  ## Start from the built-in system information (character fields)
  info <- as.list(Sys.info())

  ## Platform and front-end details
  info$os <- .Platform$OS.type
  info$gui <- .Platform$GUI
  info$interactive <- interactive()

  ## Front-end detection flags (logical)
  gui <- .Platform$GUI
  info$rapp <- identical(gui, "AQUA")
  info$rgui <- identical(gui, "Rgui")
  info$rstudio <- is_rstudio_console()
  info$rstudioterm <- is_rstudio_terminal()
  info$microsoftr <- is_microsoftr()
  info$ess <- is_ess()
  radian <- is_radian()
  info$radian <- radian
  ## Backward-compatible aliases for older names of 'radian'
  ## (renamed rtichoke -> radian in Dec 2018; rice -> rtichoke in Feb 2018)
  info$rtichoke <- radian
  info$rice <- radian
  info$pqr <- is_pqr()
  info$wine <- is_wine()

  ## Session-specific variables
  args <- r_cli_args()
  info$dirname <- basename(getwd())
  info$quiet <- any(c("-q", "--quiet", "--silent") %in% args)
  ## '--no-save' takes precedence over '--save'; NA when neither was given
  info$save <- if ("--no-save" %in% args) {
    FALSE
  } else if ("--save" %in% args) {
    TRUE
  } else {
    NA
  }

  info
}
# Memoized accessor for the command-line options R was launched with:
# everything in commandArgs() except the trailing user-supplied arguments.
# The result is computed once per session and cached in the closure.
r_cli_args <- local({
  cached <- NULL
  function() {
    if (is.null(cached)) {
      all_args <- commandArgs()
      user_args <- commandArgs(trailingOnly = TRUE)
      cached <<- setdiff(all_args, user_args)
    }
    cached
  }
})
|
bb750ead754ec5d606aaf9fde5cd350196ddd351
|
42616e3030398ffd971a88a3ba57886d7eed7795
|
/man/is_valid.Rd
|
4409c352c2cb367299cb8041fe4bb0c3c1f276ba
|
[] |
no_license
|
jandersonniche/stringe
|
aa38709cabefbe5af873793f20381a17ece1e9e8
|
eca3f18faeeded0a75a583483cc719b527eff189
|
refs/heads/master
| 2020-09-25T03:24:03.149105
| 2019-12-04T16:21:18
| 2019-12-04T16:21:18
| 225,906,940
| 0
| 0
| null | 2019-12-04T16:11:26
| 2019-12-04T16:11:26
| null |
UTF-8
|
R
| false
| true
| 334
|
rd
|
is_valid.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/base.R
\name{is_valid}
\alias{is_valid}
\title{Is Valid Values}
\usage{
is_valid(x)
}
\arguments{
\item{x}{A vector of values to check for validity.}
}
\description{
This function selects only non-null, non-na values, non-blank, and non text 'NA' values.
}
\examples{
is_valid(c(1, NA, "", "NA"))
}
\keyword{valid}
|
49b86fdfeff25630578319210c8fd01e2dc65ff4
|
8fda15f44fe49fdd40d0f9779c2161d25ef119d7
|
/R/filterLocalMaxima.R
|
a5f20d33a33e0cfff0000846f1311f4a766f146c
|
[] |
no_license
|
hansoleorka/myR
|
8316ec286e1bb448991062e291b7178e25985ada
|
93f3a8ec529fe1067287913695b73d9a62e1e84f
|
refs/heads/master
| 2021-01-22T07:18:27.714349
| 2019-01-08T12:30:12
| 2019-01-08T12:30:12
| 23,343,903
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,717
|
r
|
filterLocalMaxima.R
|
#' Local Maxima Filtering
#'
#' Find local maxima in a local neighborhood of a canopy height model (CHM)
#' and return them as points, keeping only maxima that rise at least \code{ov}
#' above the focal-maximum surface.
#'
#' @param CHM a object of class SpatialGridDataFrame
#' @param w matrix of weights e.g. a 3 by 3 matrix; see Details. The matrix can also be expressed as the number of cells in a single direction or in two directions from the focal cell, in which case the weights are all set to 1. I.e. w=3 refers to a 3 by 3 matrix: 2 cells at each side of the focal cell, queen's case, 9 cells in total. This is equivalent to w=c(3,3). You can also specify a rectangular neighborhood, e.g. w=c(3,5); but the sides must be odd numbers. If you need even sides, you can add a column or row with weights of zero.
#' @param ov threshold of local maxima to consider (cells with CHM value <= ov are dropped)
#' @return object of class SpatialPointsDataFrame-class with a \code{dz} column holding the CHM value at each maximum
#' @author Hans Ole Orka \email{hans.ole.orka@@gmail.org}
filterLocalMaxima <- function(CHM,w=3,ov=2){
	# NOTE(review): require() returns FALSE instead of erroring when a package
	# is missing; library() or requireNamespace() would fail louder.
	require("raster")
	require("sp")
	r <- raster(CHM)
	#The extended maxima transformation is the regional maxima computation of the corresponding hmaxima transformation.
	#finds extended maxima where the range of values is not greater than h.
	# Moving-window maximum: each cell gets the max of its w-neighborhood
	LM <- focal(r,w=w,fun=max)
	#ELM <- focal(LM,w=3,fun=max)
	# A cell equal to its neighborhood maximum is a local maximum -> 1, else NA
	LM@data@values <- (r@data@values - LM@data@values)
	LM@data@values <- ifelse(abs(LM@data@values) == 0,1,NA)
	#adj <- adjacent(LM,cell=c(1:length(LM))[LM@data@values==1])
	#Convert to points
	LM <- as(LM,"SpatialPointsDataFrame")
	# Sample the CHM at each maximum to get its height
	# NOTE(review): overlay() for Spatial* objects is deprecated in newer
	# sp/raster releases (over() is the replacement) -- TODO confirm version.
	o <- overlay(CHM,LM)
	#LM@data$dz <- CHM@data[o,1] old update retention okt13
	LM@data$dz <- o@data[,1]
	# Keep only maxima above the height threshold
	LM <- LM[LM@data$dz>ov,]
	# ELM <- CHM.sm*LM
	# ELM@data@values <- ifelse(ELM@data@values >= ov,ELM@data@values,NA)
	# ELM <- as(ELM,"SpatialGridDataFrame")
	return(LM)
}
|
b7fe65f2448fbd0a72be5cf64e3e1ec507795f48
|
c7e9a7fe3ee4239aad068c6c41149a4a09888275
|
/OLD_GALLERY_RSCRIPT/#197_HivePlot.R
|
ee4b3b14a399e877748ab4261977c6f3f9753d93
|
[
"MIT"
] |
permissive
|
holtzy/R-graph-gallery
|
b0dfee965ac398fe73b3841876c6b7f95b4cbae4
|
7d266ad78c8c2d7d39f2730f79230775930e4e0b
|
refs/heads/master
| 2023-08-04T15:10:45.396112
| 2023-07-21T08:37:32
| 2023-07-21T08:37:32
| 31,253,823
| 591
| 219
|
MIT
| 2023-08-30T10:20:37
| 2015-02-24T09:53:50
|
HTML
|
UTF-8
|
R
| false
| false
| 3,659
|
r
|
#197_HivePlot.R
|
# -----------------------------------------------------------------------------------------------------------------------------------------------------
# HIVE PLOT
library(HiveR)
library(dplyr)
library(stringr)
library(RColorBrewer)
library(grid)
# for readability, rows are added via rbind
# generates an edgelist
# In this example we assume, that we have regulator, which are causing proteins to be produced
# These proteins in turn use up substrates, which are in turn manipulating the regulators
# In this case we have one master regulator (Regulator 4), which affects all proteins.
# Protein 1 uses all substrates
# There are hoever only two substrates, which alter our regulators
# Edge list for the toy regulatory network: each row is a directed edge
# source -> target with numeric weight 1.
# Built directly from parallel vectors instead of the original sequence of
# 16 rbind() calls: rbind-growing coerced the numeric weight column to
# character (requiring the as.numeric() repair at the end) and is O(n^2).
sample_data <- data.frame(
  source = c("Regulator 1", "Regulator 1", "Regulator 1", "Regulator 1",
             "Regulator 2", "Regulator 2", "Regulator 3", "Regulator 3",
             "Protein 1",   "Protein 1",   "Protein 1",   "Protein 2",
             "Protein 3",   "Protein 4",   "Substrate 2", "Substrate 2",
             "Substrate 3"),
  target = c("Protein 1",   "Protein 2",   "Protein 3",   "Protein 4",
             "Protein 2",   "Protein 3",   "Protein 4",   "Protein 1",
             "Substrate 1", "Substrate 2", "Substrate 3", "Substrate 2",
             "Substrate 3", "Substrate 1", "Regulator 1", "Regulator 2",
             "Regulator 3"),
  weight = 1,
  stringsAsFactors = FALSE
)
# Convert the edge list into a HiveR plot-data object; the three axis
# colours line up with the three node classes arranged below.
hpd <- edge2HPD(sample_data, axis.cols = c("red", "green", "blue")) # turn edgelist into a workable object
# We need to split the edges onto their own axis. The axis has to be an integer (and NOT a numeric)
# (Regulators stay on the default axis 1.)
hpd$nodes[grep("Protein", hpd$nodes$lab), "axis"] = as.integer(2) # Assign all "Proteins" to axis 2
hpd$nodes[grep("Substrate", hpd$nodes$lab), "axis"] = as.integer(3) # Substrates will be on axis 3
# Now we define the position on the axis (= radius)
hpd$nodes$radius = as.numeric(str_sub(hpd$nodes$lab,-1,-1))
# The radius (position on the axis) is determined by the "number" of the Substrate/Protein/Regulator
# e.g. Protein 1 has a radius of 1, Substrate 3 a radius of 3
color_lookup = c("Protein" = "Greens", "Regulator" = "Reds", "Substrate" = "Blues") # assign each type a unique color palette (will be used with ColorBrewer below)
color_counts = c("Protein" = 4, "Regulator" = 3, "Substrate" = 3) # these numbers are used in ColorBrewer to determine the number of colors in each category
# Helper function to color the edges according to the origin.
# id1 indexes a row of hpd$nodes; the colour is taken from the source node's
# type-specific ColorBrewer palette, indexed by its trailing digit.
determine_line_color <- function(id1)
{
  source = hpd$nodes[id1, "lab"]
  type = str_sub(source, 1, -3) # extract the "Protein", "Regulator" or "Substrate" part
  number = as.numeric(str_sub(source, -1, -1)) # extract the "id"
  color = brewer.pal(color_counts[type], color_lookup[type])[number]
  return(color)
}
hpd$edges$color <- sapply(hpd$edges$id1, determine_line_color) # assign colors to the lines based on the source
# Create the hive plot and write it out as a PNG
png("#197_HivePlot.png" , width = 480, height = 480 )
plotHive(hpd,axLabs = c("Regulator", "Protein", "Substrate"), bkgnd = "black", axLab.gpar = gpar(col = "#bbbbbb"))
dev.off()
|
6fa99654f6c81d8f914671374f92e6380c8289c4
|
18461b6f1bae5ccf97469723d98ae53e4d1aa705
|
/man/simulateDataTtest.Rd
|
5ee4c6868a81d3d4b89181ac3ee628d78b837047
|
[] |
no_license
|
rjbderooij/blindData_Git
|
4f96a1c24c3b1d6ab4893e6edcbaf322de337aff
|
55b12b7ffc852de37a5db1cbc2b70774f3e485cc
|
refs/heads/master
| 2023-01-30T19:00:59.850132
| 2020-12-17T13:47:46
| 2020-12-17T13:47:46
| 290,745,050
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,477
|
rd
|
simulateDataTtest.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/simulateDataTtest.R
\name{simulateDataTtest}
\alias{simulateDataTtest}
\title{Suitable for t-test:
The function "simulateDataTtest" simulates experimental data for a
two-group t-test analysis. The function returns a data.frame
"df_sim_ttest" with two variables: one numeric dependent variable
"score" and one independent factor variable "expert". By default, the
function simulates 50 (cell.size = 50) cases per group; with 2 groups
this yields 100 cases. This simulation is made to resemble a dataset
from a psychological study.}
\usage{
simulateDataTtest(cell.size = 50)
}
\arguments{
\item{cell.size}{# number of cases to simulate per cell (i.e., group
combination)}
}
\value{
df_sim_ttest = cell.size*4 length data.frame with variables in two
colums:
}
\description{
Suitable for t-test:
The function "simulateDataTtest" simulates experimental data for a
two-group t-test analysis. The function returns a data.frame
"df_sim_ttest" with two variables: one numeric dependent variable
"score" and one independent factor variable "expert". By default, the
function simulates 50 (cell.size = 50) cases per group; with 2 groups
this yields 100 cases. This simulation is made to resemble a dataset
from a psychological study.
}
\keyword{Simulate}
\keyword{data,}
\keyword{ttest}
|
27a905e50ee73421f733bae1a75d203369ad92f9
|
40fd023a950315e2a5d916643b9b3f884ab4b02c
|
/week_after_2ndSet.R
|
bf5792f581a1291a67be99679210a94c38c27bf5
|
[] |
no_license
|
MattSnively/capstone
|
43eb6ee6d9ef455c9a75288c0d513d5792cc2b2e
|
63d30056f3d1c3a8053a729242a1c59ef67f7194
|
refs/heads/master
| 2021-04-29T19:20:52.167991
| 2018-04-25T15:39:25
| 2018-04-25T15:39:25
| 121,711,780
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,628
|
r
|
week_after_2ndSet.R
|
# Tokenize and count the most frequent words in tweets collected the week
# after the Super Bowl (second pull). Expects `week_after_sb_2` (an rtweet
# tweet table) to already exist in the workspace. Produces `top5`, a tibble
# of the 200 most common words and their counts, printed in descending order.
library(plyr)
library(tidyverse)
library(rtweet)
#make sure it's a data frame!
df5 = as.data.frame(week_after_sb_2)
#strip down to actual tweet only
s5 <- df5$text
# check check
#summary(s5)
#str(s5)
#remove non-graphical characters
s5 <- iconv(s5, 'UTF-8', 'ASCII')
# lowercase all text (original comment here was a copy-paste of the line above)
s5<- tolower(s5)
#check
#head(s5,2)
#more cleaning: drop @mentions and URLs
s5<- stringr::str_replace_all(s5, "@\\S+", "")
s5 <- stringr::str_replace_all(s5, "https?:[[:graph:]]+", "")
#check
#head(s5,3)
# strip hashtag symbols (keeping the tag text) and word-edge punctuation
s5 <-stringr::str_replace_all(s5, "#", "")
#head(s5,3)
s5 <- stringr::str_replace_all(s5, "[[:punct:]]+\\b|\\b[[:punct:]]+", "")
#head(s5,3)
## MORE CLEANING: newlines, tabs, leading/trailing whitespace
s5 <- stringr::str_replace_all(s5, "\\n+", "")
s5 <- stringr::str_replace_all(s5, "\\t+", "")
s5 <- stringr::str_replace_all(s5, "[[\\s]]+\\A|[[\\s]]+\\Z", "")
#head(s5,3)
#unlist to start tokenization (split on runs of whitespace)
w5 <- unlist(strsplit(s5, "\\s+"), use.names = FALSE)
#head(w5,20)
## use stopwords list from rtweet (high-confidence stopwords only)
stopwords <- rtweet::stopwordslangs$word[rtweet::stopwordslangs$p > .999]
## remove stopwords
w5 <- lapply(w5, function(x) return(x[!tolower(x) %in% c("", stopwords)]))
## remove all non-letter characters and drop empty tokens
w5 <- lapply(w5, function(x) {
  x <- stringr::str_replace_all(x, "\\W", "")
  x[x != ""]
})
# Frequency table of every remaining token; keep the 200 most frequent
wds5 <- table(unlist(w5))
top_wds5 <- names(sort(wds5, decreasing=TRUE)[1:200])
#head(top_wds5, 20)
##Now let's get the count of the top words used ##
t5 <- wds5[names(wds5) %in% c(top_wds5)]
# NOTE(review): data_frame() is deprecated in current tibble releases;
# tibble() is the drop-in replacement (left unchanged here).
top5 <- data_frame(
  count= "Number",
  word=names(t5),
  n=as.integer(t5)
)
arrange(top5, desc(n))
|
ffa5d8b54323e23b4b6cfa9496415d093dccd545
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/plotwidgets/examples/showPals.Rd.R
|
9a651aedb415056f31e15f38f4b31ee78944b183
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 245
|
r
|
showPals.Rd.R
|
# Extracted example code for plotwidgets::showPals (palette demonstration).
library(plotwidgets)
### Name: showPals
### Title: Demonstrate selected palettes
### Aliases: showPals
### ** Examples
## Show all palettes shipped with the plotwidgets package
showPals(plotPals())
## Show just a few colors given as a plain colour vector
showPals(c("red", "green", "blue"))
|
62e1aeb27a6a1b5503f16ef1a3c8f3646e6586ff
|
e313d8481e8d51c1b55b51296b939dafcc78482f
|
/loadfromgsheet.R
|
7832a127b316e55bb34f161c117868de42cfb102
|
[] |
no_license
|
deekshit3991/analytics1
|
3a63654a9726a1145f1ecc64191b436b3f8b6813
|
de38d7957a4d5c5a9b22397a1206be5802b17733
|
refs/heads/master
| 2023-06-22T23:42:38.115132
| 2023-06-11T02:46:14
| 2023-06-11T02:46:14
| 145,305,175
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 438
|
r
|
loadfromgsheet.R
|
# Load the Denco sales data from a public Google Sheet and run quick
# summaries: most loyal customers and revenue by part number.
library(gsheet)
# Bug fix: the original never loaded dplyr, so %>% / group_by / summarise /
# count / arrange below were undefined unless dplyr happened to be attached.
library(dplyr)
url <- 'https://docs.google.com/spreadsheets/d/1h7HU0X_Q4T5h5D1Q36qoK40Tplz94x_HZYHOJJC_edU/edit#gid=216113907'
denco <- as.data.frame(gsheet2tbl(url))
str(denco)
head(denco)
# Loyal customers: number of transactions per customer, descending
denco %>% group_by(custname) %>% summarise(n = n()) %>% arrange(desc(n))
denco %>% count(custname, sort = TRUE)
# Revenue generated by part numbers, descending
denco %>% group_by(partnum) %>% summarise(sum = sum(revenue)) %>% arrange(desc(sum))
|
85aa98223b8e08363bc1b818532e004ecd0c2788
|
03946226e5dd1c00c21d84b9b677eb1a0bd5c738
|
/man/export.Rd
|
ca50f542faa9607efeedba9970fecc0c433a81fe
|
[] |
no_license
|
lgeistlinger/EnrichmentBrowser
|
c9b072a07c9a70cbc940b36cb9973303c7b4ab84
|
81a7d679cec2aeca0feb8eec21cacaf00646e6fa
|
refs/heads/devel
| 2023-08-08T05:02:45.920640
| 2023-07-30T03:43:28
| 2023-07-30T03:43:28
| 102,148,735
| 21
| 14
| null | 2023-07-17T21:24:02
| 2017-09-01T20:00:56
|
R
|
UTF-8
|
R
| false
| true
| 637
|
rd
|
export.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/export.R
\name{export}
\alias{export}
\title{Export results from set- and network-based enrichment analysis}
\usage{
export(res, to = c("enrichplot", "GOPlot"))
}
\arguments{
\item{res}{Enrichment analysis results as returned by \code{\link{sbea}}
or \code{\link{nbea}}.}
\item{to}{Character. Downstream package to which export enrichment analysis
results to. Defaults to \code{"enrichplot"}, currently the only supported
export option.}
}
\description{
This function exports results of set- and network-based enrichment analysis
for visualization with downstream packages such as enrichplot and GOPlot.
}
|
3a0530cd9991580207e28e608fdb36e84b72c204
|
25a907150cf6db44764d436cd9adb83c90f13554
|
/Code/NeuralNetCast01.2.R
|
053f7bd30922fa0be52928ff0b3b900e9eb0a751
|
[] |
no_license
|
jevanilla/NeuralNetCast
|
46ac63d290142e9e76118e38dfac3d6d66204893
|
e7468a777022619c124dbda6f2cbc15346f7d8de
|
refs/heads/master
| 2022-04-05T06:18:17.527232
| 2020-02-07T08:22:50
| 2020-02-07T08:22:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,838
|
r
|
NeuralNetCast01.2.R
|
# Functions for neural network based forecast of species
library(keras)
# TODO
# - Make data cleanup a function
# - Make index selection for train/test a function
# - choose based on year(s), area(s), or percent (random)
# Load a species abundance table from a CSV file.
#
# fname: path to the CSV file (defaults to the community-weighted-means
#        subset shipped with the project).
# Returns a data.frame with one row per observation.
speciesLoad <- function(fname = '../Data/CWM_yearsub.csv')
{
  read.csv(fname)
}
# Standardize every column of 'data' to mean 0 and standard deviation 1.
#
# Bug fix: the original placed return(data) INSIDE the for loop, so only the
# first column was standardized before the function returned (the author had
# marked it "Not working"). The return now happens after all columns are
# processed. NA values are ignored when computing mean/sd, matching the
# behaviour of arrangeImages() below.
#
# data: a data.frame or matrix of numeric columns.
# Returns the same structure with each column centred and scaled.
standardizeData <- function(data = NA)
{
  for (i in seq_len(ncol(data))) # normalize/standardize data by column
  {
    data[, i] <- (data[, i] - mean(data[, i], na.rm = TRUE)) / sd(data[, i], na.rm = TRUE)
  }
  data
}
# Build training "images" for the neural net: for every (area, time) pair,
# stack the previous 'ntimes' rows of standardized species data into one
# flattened row, with the outcome label taken at the target time step.
# Pairs with incomplete history are skipped, so trailing rows of the
# preallocated matrices may remain NA -- callers should filter on idx/NA.
arrangeImages <- function(data = NA, times=NA, areas=NA,
                          labelcol = NA,
                          ntimes = 5)
{
  # data is the input data table (one column per species)
  # times is the time step associated with each row
  # areas is the location associated with each row
  # ntimes is the number of time steps back to include in each image
  # labelcol is the outcome label
  # Standardizes each column to mean zero, standard dev 1
  for (i in 1:dim(data)[2]) # normalize/standardize data by column
  {
    data[,i] <- (data[,i]-mean(data[,i],na.rm = T))/sd(data[,i],na.rm = T)
  }
  subareas <- unique(areas)
  subtimes <- unique(times)
  nspecies <- dim(data)[2]
  # Preallocate for the maximum possible number of images
  images <- matrix(data=NA,
                   nrow=length(subareas)*(length(subtimes)-ntimes),
                   ncol=ntimes*nspecies)
  label <- matrix(data=NA,
                  nrow=length(subareas)*(length(subtimes)-ntimes))
  labelarea <- label
  labeltime <- label
  idx <- 0 # This index counts the images added
  for (i in 1:length(subareas))
  {
    for (j in (ntimes+1):length(subtimes))
    {
      # Row holding the label for this area at the target time
      I <- which(areas==subareas[i] & times==subtimes[j])
      if (length(I)>0)
      {
        # Rows covering the ntimes steps immediately before the target
        J <- which(areas==subareas[i] & times<subtimes[j]
                   & times>=subtimes[j]-ntimes)
        m <- as.vector(as.matrix(data[J,]))
        # Only accept complete histories (exactly ntimes x nspecies values)
        if (length(m)==ntimes*nspecies)
        {
          idx <- idx + 1
          images[idx,] <- m
          label[idx] <- labelcol[I]
          labelarea[idx] <- subareas[i]
          labeltime[idx] <- subtimes[j]
        }
      }
    }
  }
  output <- list(images = images, labels = label,
                 labelarea = labelarea,
                 labeltime = labeltime)
  return(output)
}
# Legacy version of arrangeImages(), explicitly marked NOT WORKING by the
# author and superseded by arrangeImages() above. Kept byte-identical for
# reference only -- do not call; it assumes times form a contiguous integer
# range and pads missing rows with overall column means.
arrangeImagesOld <- function(data = NA, times=NA, areas=NA,
                             labelcol = NA,
                             ntimes = 5)#, nquants = 5)
{
  # -- NOT WORKING ---
  # Standardizes each column to mean zero, standard dev 1
  for (i in 1:dim(data)[2]) # normalize/standardize data by column
  {
    data[,i] <- (data[,i]-mean(data[,i]))/sd(data[,i])
  }
  subareas <- unique(areas)
  subtimes <- unique(times)
  nspecies <- dim(data)[2]
  #qu <- quantile(labelcol,probs=seq(1/nquants,1,1/nquants))
  images <- matrix(data=NA,
                   nrow=length(subareas)*length(min(subtimes):(max(subtimes)-ntimes)),
                   ncol=ntimes*nspecies)
  label <- matrix(data=NA,
                  nrow=length(subareas)*length(min(subtimes):(max(subtimes)-ntimes)))
  labelarea <- label
  labeltime <- label
  idx <- 0 # This index counts the images added
  for (i in 1:length(subareas))
  {
    for (j in min(subtimes):(max(subtimes)-ntimes))
    {
      idx <- idx + 1
      m <- matrix(data=NA, nrow=ntimes, ncol=nspecies)
      for (k in 0:(ntimes-1))
      {
        m[k+1,]=as.double(
          data[areas==subareas[i] & times==j+k,])
        #print(as.double(data[areas==subareas[i] & times==j+k,]))
        if (is.na(m[k+1,1])) # Use overall mean for missing values
        {
          m[k+1,]=as.double(colMeans(data[,]))
        }
      }
      #print(dim(images))
      images[idx,1:(ntimes*nspecies)] <- array_reshape(m,ntimes*nspecies)
      #label[idx] <- max(1,sum(labelcol[areas==subareas[i] & times==j+k+1]<=qu))-1
      print(c(subareas[i],i,j,k,idx))
      print(as.double(labelcol[areas==subareas[i] & times==j+k]))
      label[idx] <- as.double(labelcol[areas==subareas[i] & times==j+k])
      labelarea[idx] <- as.character(areas[i])
      labeltime[idx] <- j+k+1
    }
  }
  output <- list(images = images, labels = label,
                 labelarea = labelarea,
                 labeltime = labeltime)
  return(output)
}
# Run neural network forecast model
#
# Train a dense feed-forward classifier (keras) on flattened image rows and
# evaluate it on the test set. Returns the trained network.
# trainimages/testimages: numeric matrices, one flattened image per row.
# trainlabels/testlabels: integer class labels (one-hot encoded internally).
# nepochs, nunits: number of training epochs and hidden-layer width.
netForecast <- function(trainimages = NA, trainlabels = NA,
                        testimages = NA, testlabels = NA,
                        nepochs = 20, nunits=512)
{
  trainlabels <- to_categorical(trainlabels)
  testlabels <- to_categorical(testlabels)
  # Two hidden ReLU layers; softmax output sized to the number of classes
  network <- keras_model_sequential() %>%
    layer_dense(units = nunits, activation = "relu",
                input_shape = c(dim(trainimages)[2])) %>%
    layer_dense(units = nunits, activation = "relu") %>%
    layer_dense(units = dim(trainlabels)[2], activation = "softmax")
  network %>% compile(
    optimizer = "rmsprop",
    loss = "categorical_crossentropy",
    metrics = c("accuracy")
  )
  # NOTE(review): 'history' and 'metrics' are captured but never returned;
  # evaluation results are only visible in keras' console output.
  history <- network %>% fit(trainimages,
                             trainlabels,
                             epochs = nepochs,
                             batch_size = 128)
  metrics <- network %>% evaluate(testimages, testlabels)
  return(network)
}
# Plot predicted vs. measured classes for the test set (small random jitter
# makes overlapping points visible) and return the raw prediction matrix.
netPlot <- function(network = NA, testimages = NA, testlabels = NA)
{
  testlabels <- to_categorical(testlabels)
  predictions <- network %>% predict(testimages)
  predicted <- matrix(NA,nrow=dim(predictions)[1])
  measured <- matrix(NA,nrow=dim(predictions)[1])
  for (i in 1:dim(predictions)[1])
  {
    # which.max picks the most probable class; runif adds plotting jitter
    predicted[i] <- which.max(predictions[i,])+runif(1)*.4-.4
    measured[i] <- which.max(testlabels[i,])+runif(1)*.4-.4
  }
  plot(predicted,measured)
  return(predictions)
  # Unreachable legacy code retained by the original author:
  #comparison <- list("predicted" = predicted, "measured" = measured)
  #return(comparison)
}
|
8f39838459571612c461d9c06af2377aea852722
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/rLDCP/examples/xml2rldcp.Rd.R
|
55c458280c7bbf27630f6e8e097545d1ba34b85d
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 243
|
r
|
xml2rldcp.Rd.R
|
library(rLDCP)
### Name: xml2rldcp
### Title: XML to rLDCP
### Aliases: xml2rldcp
### ** Examples
## Not run: xml2rldcp('extdata/comfortableroom.xml','comfortableroom.R')
## The xml is valid
## The code has been generated successfully
|
c8871527aaa616da74f0a404b9c618defe5bc42b
|
9ff405bac37b4b84e6651ae628e3ccdf009ef50d
|
/install.R
|
32b013623791d66509a68cc10e145b33732ac298
|
[
"MIT"
] |
permissive
|
haochunchang/Bioinformatics_Course
|
819d94602aee3eae916e26348fbc6370628b5df3
|
c060c1cfe2ccc3c6f0adbe3e414b3c7ce8a5385c
|
refs/heads/master
| 2020-08-31T15:12:32.849074
| 2020-05-29T09:59:45
| 2020-05-29T09:59:45
| 218,719,280
| 0
| 3
|
MIT
| 2020-05-29T09:57:27
| 2019-10-31T08:32:03
|
HTML
|
UTF-8
|
R
| false
| false
| 268
|
r
|
install.R
|
# Install the packages required by the course material.
# Bioconductor packages go through BiocManager; the rest come from CRAN.
if (!requireNamespace("BiocManager", quietly = TRUE)) {
  install.packages("BiocManager")
}
for (pkg in c("limma", "edgeR")) {
  BiocManager::install(pkg)
}
for (pkg in c("RColorBrewer", "ggplot2", "ggrepel", "statmod")) {
  install.packages(pkg)
}
|
840761c6611d73d38c5266c58004f900a87935d9
|
cd4c6064925c9fbb74a68b52ce8614a65926afef
|
/commit.R
|
3e5afd3320be8fcbaeb8902390de625c8c5e029d
|
[] |
no_license
|
nicholasdavies/newcovid
|
53cfe0b6bd70e01606c369a2fa11e16acedd4ece
|
204c35dd4b810f71c90472fbbd61068de1c4c1a4
|
refs/heads/master
| 2023-03-12T05:57:13.246461
| 2021-02-25T19:42:16
| 2021-02-25T19:42:16
| 323,409,362
| 27
| 4
| null | 2021-01-21T10:12:45
| 2020-12-21T17:52:11
|
R
|
UTF-8
|
R
| false
| false
| 9,353
|
r
|
commit.R
|
library(ggplot2)
library(data.table)
library(cowplot)
library(stringr)
# previous_data = existing
# new_data = rlang::duplicate(existing)
# new_data[between(date, "2020-07-01", "2020-07-06"), value := value + 5]
# new_data[between(date, "2020-07-01", "2020-07-06")]
# new_data = rbind(new_data, new_data[date == "2020-07-01", .(date = ymd("2020-07-15"), location, indicator, value, description)])
# new_data = new_data[date > "2020-03-15"]
#
# previous_data = rbind(previous_data, data.table(date = ymd("2020-05-05"), location = "Wales", indicator = "some_old_thing", value = 42, description = "Who cares?"))
# new_data = rbind(new_data, data.table(date = ymd("2020-05-05"), location = "East of England", indicator = "some_new_thing", value = 42, description = "Who knows?"))
# TODO
# 4. add age groups as well!?
# Interactively reconcile a previous and a new data snapshot, one indicator
# at a time. For each indicator a plot of added / removed / updated /
# unchanged points is shown, then the user chooses: y = take new data,
# n (default) = keep old data, u = prefer new values where both exist,
# a = prefer old values where both exist. Returns the reconciled data.table.
# Both inputs must have columns: date, location, indicator, value, description.
commit = function(previous_data, new_data)
{
    # Make previous and new data into data.tables (in place, by reference)
    setDT(previous_data)
    setDT(new_data)
    # TODO check all columns expected and check no value is NA
    # Enumerate all indicators in previous and new data
    indicators = union(previous_data[, unique(indicator)], new_data[, unique(indicator)]);
    # Create final data interactively, one indicator at a time
    final_data = NULL;
    for (ind in indicators) {
        # Extract previous data for this indicator
        previous_indicator = previous_data[indicator == ind];
        setnames(previous_indicator, "value", "previous_value");
        # Extract new data for this indicator
        new_indicator = new_data[indicator == ind];
        setnames(new_indicator, "value", "new_value");
        # Merge data and mark type of change (full outer join on identity cols)
        merged_indicator = merge(previous_indicator, new_indicator, by = c("date", "location", "indicator", "description"), all = T);
        merged_indicator[is.na(previous_value) & !is.na(new_value), change := "added"];
        merged_indicator[!is.na(previous_value) & is.na(new_value), change := "removed"];
        merged_indicator[!is.na(previous_value) & !is.na(new_value) & previous_value == new_value, change := "unchanged"];
        merged_indicator[!is.na(previous_value) & !is.na(new_value) & previous_value != new_value, change := "updated"];
        # Build plot for showing changes. Layers are added only for change
        # types that occur, so manual_colours must grow in the same order --
        # it is reused below in the legend override.
        plot = ggplot();
        manual_colours = character(0);
        if (merged_indicator[change == "added", .N] > 0) {
            plot = plot +
                geom_point(data = merged_indicator[change == "added"], aes(x = date, y = new_value, colour = "added", shape = "added"));
            manual_colours = c(manual_colours, "#009900");
        }
        if (merged_indicator[change == "removed", .N] > 0) {
            plot = plot +
                geom_point(data = merged_indicator[change == "removed"], aes(x = date, y = previous_value, colour = "removed", shape = "removed"));
            manual_colours = c(manual_colours, "#cc0000");
        }
        if (merged_indicator[change == "unchanged", .N] > 0) {
            plot = plot +
                geom_line (data = merged_indicator[change == "unchanged"], aes(x = date, y = new_value, colour = "unchanged")) +
                geom_point(data = merged_indicator[change == "unchanged"], aes(x = date, y = new_value, colour = "unchanged", shape = "unchanged"), size = 0);
            manual_colours = c(manual_colours, "#bbbbbb");
        }
        if (merged_indicator[change == "updated", .N] > 0) {
            plot = plot +
                geom_point(data = merged_indicator[change == "updated"], aes(x = date, y = previous_value, colour = "updated (old)", shape = "updated (old)"), size = 1) +
                geom_point(data = merged_indicator[change == "updated"], aes(x = date, y = new_value, colour = "updated (new)", shape = "updated (new)"), size = 0.5);
            manual_colours = c(manual_colours, "#6666ff", "#6666ff");
        }
        plot = plot +
            facet_wrap(~location, scales = "free_x") +
            scale_x_date(limits = merged_indicator[, c(min(date), max(date))], date_labels = "%b", date_breaks = "1 month") +
            labs(x = "date", y = ind, title = ind, subtitle = str_wrap(merged_indicator[, description[1]])) +
            theme_cowplot(font_size = 8) +
            theme(legend.position = c(0.7, 0.15), strip.background = element_blank()) +
            scale_colour_manual(values = c(added = "#009900", removed = "#cc0000", unchanged = "#bbbbbb", "updated (old)" = "#6666ff", "updated (new)" = "#6666ff"), guide = "none") +
            scale_shape_manual(values = c(added = 3, removed = 4, unchanged = 15, "updated (old)" = 1, "updated (new)" = 16),
                guide = guide_legend(title = NULL, override.aes = list(colour = manual_colours, size = 1)))
        # Show plot and prompt user
        print(plot);
        cat(paste0("\n", "Indicator: ", ind, "\n", merged_indicator[, description[1]], "\n"));
        print(merged_indicator[, table(changes = change)]);
        a = readline(prompt = "Commit changes for this indicator? (y/n/u/a) ");
        if (a %in% c("Y", "y")) {
            # y: take the new snapshot wholesale
            setnames(new_indicator, "new_value", "value");
            final_data = rbind(final_data, new_indicator);
        } else if (a %in% c("U", "u")) {
            # u: prefer new values, fall back to old where the new is missing
            merged_indicator[, value := ifelse(is.na(new_value), previous_value, new_value)];
            final_data = rbind(final_data, merged_indicator[, .(date, location, indicator, value, description)]);
        } else if (a %in% c("A", "a")) {
            # a: prefer old values, fall back to new where the old is missing
            merged_indicator[, value := ifelse(is.na(previous_value), new_value, previous_value)];
            final_data = rbind(final_data, merged_indicator[, .(date, location, indicator, value, description)]);
        } else {
            # anything else: keep the previous snapshot unchanged
            setnames(previous_indicator, "previous_value", "value");
            final_data = rbind(final_data, previous_indicator);
        }
    }
    return (final_data)
}
# Return the full path of the file under 'path' matching 'tag' that carries
# the most recent date stamp in its name. The stamp is a 6-12 digit run
# found after stripping spaces and dashes, parsed with 'format_converter'.
most_recent = function(path, tag, format_converter)
{
    candidates = list.files(path, tag)
    candidates = candidates[!grepl("^~\\$", candidates)] # Exclude "owner lock" files created by MS Office
    stamps = str_extract(str_replace_all(candidates, "[ -]", ""), "[0-9]{6,12}")
    newest = which.max(format_converter(stamps))
    return (file.path(path, candidates[newest]))
}
# Read a tabular data file: Excel workbooks (last four characters of the
# path contain "xls") go through readxl::read_excel, anything else through
# data.table::fread. 'sheet' is only used for Excel files.
read_normal = function(path, sheet)
{
    tail_chars = substr(path, nchar(path) - 3, nchar(path))
    if (grepl("xls", tail_chars)) {
        return (data.table(read_excel(path, sheet)))
    }
    return (fread(path))
}
# Read an Excel sheet whose header spans two rows, collapsing both rows into
# single abbreviated, unique column names, then returning the data below.
read_2_row_header = function(path, sheet)
{
    # Read header, abbreviating names
    header = data.table(read_excel(path, sheet, n_max = 1));
    # Columns with auto-generated "...N" names inherit the name to their left
    # (i.e. merged header cells spanning several columns)
    empty = which(names(header) %like% "\\.\\.\\.[0-9]+");
    names(header)[empty] = names(header)[empty - 1];
    # Append the second header row's text to each column name
    append = unname(unlist(header[1]));
    names(header) = paste0(names(header), ifelse(is.na(append), "", paste0(" ", append)));
    # Strip non-alphanumerics, abbreviate, and force uniqueness
    names(header) = make.unique(abbreviate(str_replace_all(names(header), "[^.0-9A-Za-z]", "")));
    # Read data (skipping the two header rows) and attach the built names
    data = data.table(read_excel(path, sheet, skip = 2, col_names = FALSE));
    names(data) = names(header);
    return (data)
}
# Read an Excel range whose dates run horizontally (one column per date),
# transpose it to long format, and rename the id/value columns.
read_horiz_date = function(path, sheet, range, variable.name, value.name)
{
    # Read and tame data (transpose so dates become rows)
    data = t(data.table(read_excel(path, sheet, range, col_names = FALSE)));
    data = matrix(str_trim(c(data)), nrow = nrow(data), ncol = ncol(data));
    # Cut out summary/note rows at right side of data: keep only the run of
    # rows whose first column increments by exactly 1 (consecutive day serials)
    rows_keep = tail(data[, 1], -1) == as.numeric(head(data[, 1], -1)) + 1;
    rows_keep = sum(rows_keep, na.rm = TRUE) + 1;
    data = data[1:(rows_keep + 1), ];
    # Reread data as data.table and fix date column
    data[1, 1] = "date";
    data_string = paste(apply(data, 1, function(v) paste(v, collapse = "\t")), collapse = "\n");
    dt = fread(data_string);
    # Convert Excel serial day numbers to Dates ("- 2" compensates for
    # Excel's 1-based epoch plus its 1900 leap-year quirk -- TODO confirm)
    dt[, date := ymd("1900-01-01") + date - 2];
    dt = melt(dt, id.vars = "date");
    dt[, value := as.numeric(value)];
    setnames(dt, c("variable", "value"), c(variable.name, value.name));
    return (dt)
}
# Load every Wales "bulk export" CSV found under 'path', combine them,
# drop rows duplicated on the first seven columns, sort, and add a Date
# column derived from the UpdateDateTime timestamp.
read_wales_bulk_export = function(path)
{
    files = list.files(path, "BulkExport|bulk_export")
    tables = lapply(file.path(path, files), fread)
    exports = rbindlist(tables)
    exports = exports[!duplicated(exports, by = 1:7)]
    exports = exports[order(UpdateDateTime, HealthBoard, Hospital, Dataset, Section, Question, Measure)]
    exports[, date := as.Date(ymd_hms(UpdateDateTime))]
    return (exports)
}
# Melt a wide (date, location, indicator...) table to long format and attach
# a human-readable description for the four known indicators; rows with any
# other indicator get NA in the description column.
melt_annotate = function(data)
{
    long = melt(data, id.vars = c("date", "location"), variable.name = "indicator", value.name = "value");
    descriptions = c(
        death_inc_line = "All deaths (by date of death)",
        hospital_inc   = "New and newly confirmed patients in hospital",
        hospital_prev  = "Total beds occupied",
        icu_prev       = "ICU beds occupied");
    long[indicator %in% names(descriptions), description := descriptions[as.character(indicator)]];
    return (long)
}
# Expand 'data' so that 'date_col' covers every day between its minimum and
# maximum, then replace every NA in the result with 'fill_value'.
# Note: this fills NAs in pre-existing rows as well as in the newly added
# blank dates (same as the original behaviour).
blank_fill = function(data, date_col, fill_value)
{
    span = data[, range(get(date_col), na.rm = TRUE)];
    full_dates = data.table(seq(span[1], span[2], by = "1 day"));
    setnames(full_dates, date_col);
    filled = merge(full_dates, data, by = date_col, all = TRUE);
    filled[is.na(filled)] = fill_value;
    return (filled)
}
|
0c54657b0b0372d27b12dd0b55d0c58355b41519
|
d97ac05c04ac282164943b6f2ad1202a96f4f835
|
/sims/2_timeseries/2_timeseries.R
|
6294699526848af21ec14d3260734bb547b6ace6
|
[] |
no_license
|
TomBearpark/ECO518_code
|
d81bbba0bd674b7662ec544e27a96b9d2b1bc6f4
|
6a998f58d63160dace441cca159a406edbf264fd
|
refs/heads/main
| 2023-04-14T16:33:03.008259
| 2021-04-29T19:32:32
| 2021-04-29T19:32:32
| 335,358,433
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,521
|
r
|
2_timeseries.R
|
# Code for PSET 2
# Creates visualizations of the time series and ACFs for each of the
# models.
############################################
# 0 Set up
############################################
# NOTE(review): rm(list = ls()) wipes the caller's workspace and 'out' is a
# machine-specific absolute path; both are worth revisiting before sharing.
rm(list = ls())
pacman::p_load(purrr, dplyr, ggplot2, xtable)
theme_set(theme_minimal())
# Output directory for every figure produced below
out <- paste0(
  "/Users/tombearpark/Documents/princeton/1st_year/term2/",
  "ECO518_Metrics2/sims/exercises/2_AR_ts/"
)
# Fix the RNG so the simulated series are reproducible
set.seed(1)
# Save a time-series plot (mean shown in red) and an ACF plot for one model.
# 'df' must have columns t (time index) and y (value); 'problem' tags the
# output file names p<problem>_ts.png and p<problem>_acf.png under 'out'.
# Relies on ggsave() picking up the most recently created ggplot.
create_plots <- function(df, problem) {
  ggplot(df) +
    geom_line(aes(x = t, y = y)) +
    geom_hline(
      yintercept = mean(df$y, na.rm = TRUE),
      color = "red", alpha = 0.3
    )
  ggsave(paste0(out, "p", problem, "_ts.png"), height = 4, width = 5)
  png(paste0(out, "p", problem, "_acf.png"))
  acf(df$y[!is.na(df$y)], main = "ACF")
  dev.off()
}
############################################
# Problem 2
############################################
# Visualise the model: simulate N draws of the MA(2) process
#   y_t = e_t + 2 e_{t-1} + 0.64 e_{t-2},  e_t ~ N(0, 1).
# The first two y values are NA because their lags are undefined.
N <- 100
df2 <-
  tibble(epsilon = rnorm(N, mean = 0, sd = 1)) %>%
  mutate(
    t = row_number(),
    y = epsilon + 2 * dplyr::lag(epsilon) + 0.64 * dplyr::lag(epsilon, 2)
  )
# Plot time series and ACF
create_plots(df2, 2)
# Function to calculate the coefficient in the lag polynomial:
#   gamma(n, b1, b2) = sum_{i=0}^{n} b1^(n-i) * b2^i.
# Vectorized replacement for the original accumulation loop (same values).
# NOTE(review): this definition masks base::gamma() for the rest of the
# session; the name is kept so prediction_variance() and get_prediction()
# below continue to work.
gamma <- function(n, b1, b2){
  sum(b1 ^ (n:0) * b2 ^ (0:n))
}
# Forecast-error variance of the order-N truncated AR approximation,
# computed from the model autocovariances and the lag-polynomial roots.
# See overleaf write-up for the derivation.
#
# N   : truncation order of the AR approximation (assumed >= 1).
# ACF : autocovariances at lags 0, 1, ...; lags beyond those given are
#       treated as zero via the padding below.
# b   : length-2 vector of roots forwarded to gamma() (defined above).
#
# Returns a one-row data.frame(N, var).
prediction_variance <- function(N, ACF, b){
  # Zero-pad so acf_ext[k + 1] is defined for every lag k used below.
  acf_ext <- c(ACF, rep(0, N))
  # Fix: the original recomputed gamma(n, .) / gamma(m, .) inside the
  # double loop (each call is itself O(n) work); compute each coefficient
  # once up front. seq_len() also avoids the 1:N footgun if N were 0.
  g <- vapply(seq_len(N), function(k) gamma(k, b[1], b[2]), numeric(1))
  total <- acf_ext[1]
  for (n in seq_len(N)) {
    # Linear (cross with lag-0 forecast) term, then the quadratic terms.
    total <- total + 2 * g[n] * acf_ext[n + 1]
    for (m in seq_len(N)) {
      total <- total + g[n] * g[m] * acf_ext[abs(n - m) + 1]
    }
  }
  data.frame(N = N, var = total)
}
# Calculate the forecast-error variance of each truncated-AR predictor
# and plot it as a function of the truncation order N.
# NOTE(review): b and ACF are presumably the inverted MA roots and the
# lag-0..2 autocovariances of the problem-2 MA(2) -- confirm vs write-up.
b <- c(-0.4, -0.625)
ACF <- c(5.4096, 3.28, 0.64)
plot_df2 <- map_dfr(seq(1,10), prediction_variance, ACF = ACF, b = b)
ggplot(plot_df2) +
  geom_point(aes(x = N, y = var)) +
  # Red line marks the reference variance level from the problem set.
  geom_hline(yintercept = 3.2, color = "red") + ylab("Forecast error variance")
ggsave(paste0(out, "p", 2, "_prediction_variance.png"), height = 4, width = 5)
# Print out the coefficients of the selected model to copy into overleaf
for (i in 1:3) print(gamma(i, b[1], b[2]))
# Check the results of the prediction: build the order-`order` AR forecast
# of y_t from its own lags and compare with the realised values.
#
# order : number of lags used in the predictor.
# df    : data frame with a `y` column (e.g. df2).
#
# Returns data.frame(order, pred_N, y, error) with error = pred_N - y
# (the first `order` rows of pred_N are NA because the lags are undefined).
# NOTE(review): reads the AR-coefficient roots from the *global* `b`; this
# function must only be called after `b` is assigned above.
get_prediction <- function(order, df){
  # Initialise the prediction column, then accumulate -gamma_i * y_{t-i}.
  df[paste0('pred_N')] <- 0
  for (i in 1:order){
    df[paste0('pred_N')] <- df[paste0('pred_N')] -
      gamma(i, b[1], b[2]) * dplyr::lag(df$y, i)
  }
  data.frame(order = order, pred = df[paste0('pred_N')], y = df$y) %>%
    mutate(error = pred_N - y)
}
# Scatter predicted vs actual values for differing lag lengths
# (facet per truncation order, orders 1-4 only for readability).
plot_df2 <- map_dfr(1:10, get_prediction, df = df2)
ggplot(plot_df2 %>% filter(order < 5)) +
  geom_point(aes(x = y, y = pred_N)) +
  geom_smooth(aes(x = y, y = pred_N), alpha = 0.1)+
  facet_wrap(~order)
ggsave(paste0(out, "p", 2, "_empirical_prediction_scatter.png"),
       height = 4, width = 5)
# Make a table of empirical forecast-error variances (squared SD of the
# residuals per order) to copy into overleaf.
plot_df2 %>%
  group_by(order) %>%
  summarise(var = sd(error, na.rm = TRUE)^2) %>%
  xtable()
############################################
# Problem 3
############################################
# Generate a time series from the MA(2) model
#   y_t = e_t + 1.5 e_{t-1} + 0.5 e_{t-2},  e_t ~ iid N(0, 1).
# (`lag` resolves to dplyr::lag here since dplyr is attached.)
N <- 100
df3 <-
  tibble(epsilon = rnorm(N, mean = 0, sd = 1)) %>%
  mutate(
    t = row_number(),
    y = epsilon + 1.5 * lag(epsilon) + 0.5 * lag(epsilon, 2)
  )
# Save the series and ACF plots (files p3_ts.png / p3_acf.png).
create_plots(df3, 3)
# Coefficient on lag n of the K-truncated lag polynomial used in problem 3:
#   gammaK_n = sum_{i=0}^{n} (-(1 - n/K))^(n-i) * b1^i.
#
# n  : non-negative integer lag (n = 0 returns 1).
# K  : truncation length that damps the first root toward zero as n -> K.
# b1 : the remaining (scalar) root.
gammaK <- function(n, K, b1){
  # Vectorized form of the original scalar accumulation loop: the i-th
  # term pairs (-(1 - n/K))^(n-i) with b1^i.
  sum((-(1 - n/K))^(n:0) * b1^(0:n))
}
# Forecast-error variance for the problem-3 model, using the K-truncated
# coefficients gammaK() with K equal to the AR truncation order N.
#
# N   : truncation order (assumed >= 1).
# ACF : autocovariances at lags 0, 1, ...; higher lags are zero-padded.
# b   : scalar root forwarded to gammaK() (defined above).
#
# Returns a one-row data.frame(N, var).
prediction_variance2 <- function(N, ACF, b){
  # Zero-pad so acf_ext[k + 1] is defined for every lag k used below.
  acf_ext <- c(ACF, rep(0, N))
  # Fix: the original recomputed gammaK(n, .) / gammaK(m, .) inside the
  # double loop; compute each coefficient once up front. seq_len() also
  # avoids the 1:N footgun if N were 0.
  g <- vapply(seq_len(N), function(k) gammaK(k, K = N, b), numeric(1))
  total <- acf_ext[1]
  for (n in seq_len(N)) {
    total <- total + 2 * g[n] * acf_ext[n + 1]
    for (m in seq_len(N)) {
      total <- total + g[n] * g[m] * acf_ext[abs(n - m) + 1]
    }
  }
  data.frame(N = N, var = total)
}
# Plot prediction variance as a function of N.
# NOTE(review): ACF and b are presumably the lag-0..2 autocovariances and
# the remaining root of the problem-3 MA(2) -- confirm vs write-up.
ACF <- c(3.5, 2.25, 0.5)
b <- -0.5
plot_df3 <- map_dfr(seq(1,30), prediction_variance2, ACF = ACF, b = b)
ggplot(plot_df3) +
  geom_point(aes(x = N, y = var)) +
  # Red line marks the reference variance level from the problem set.
  geom_hline(yintercept = 1.25, color = "red")
ggsave(paste0(out, "p", 3, "_prediction_variance.png"),
       height = 4, width = 5)
# Build the fitted AR(12) predictor as a LaTeX string to paste into the
# write-up; the bare `l` on the last line prints it at top level.
l <- ""
for(n in 1:12)
  l <- paste0(l, " + ",round(gammaK(n, 12, -0.5), 4), "y_{t-", n, "}")
l
|
36fa971f1b0f80214a6a25111517eb4666d3bfd8
|
da02903fddc6257c36b39bd6ebc40bf282d1ea7f
|
/man/tv2taxlist.Rd
|
5d658848e3cadadae3885b1d4d81e29a09771fe4
|
[] |
no_license
|
heibl/taxlist
|
df8115f197e06b71fc2abe9b681541bcc34b10f3
|
0c1a054d0cb80915e20ad73bb6feef7139e3e814
|
refs/heads/master
| 2020-03-29T06:47:04.138313
| 2018-07-09T20:33:35
| 2018-07-09T20:33:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,602
|
rd
|
tv2taxlist.Rd
|
\name{tv2taxlist}
\alias{tv2taxlist}
\title{Import species lists from Turboveg databases.}
\description{
Importing species lists from Turboveg
\url{https://www.synbiosys.alterra.nl/turboveg/} databases into an object of
class \code{\linkS4class{taxlist}}.
}
\usage{
tv2taxlist(taxlist, tv_home=tv.home())
}
\arguments{
\item{taxlist}{The name of a species list in Turboveg as character value.}
\item{tv_home}{Character value indicating the path to the main Turboveg folder.}
}
\details{
This function imports species lists using the function
\code{\link[foreign]{read.dbf}}.
When available, also taxon traits will be imported into the output object
(usually the file \code{'ecodbase.dbf'}).
During import of taxon traits, duplicated entries for a same concept will
be discarded as well as entries for non-existing concepts.
By default \code{tv_home} will be set by the function
\code{\link[vegdata]{tv.home}} from the package
\code{\link[=vegdata-package]{vegdata}}.
By default, the name of the database will be set as concept view for all
concepts included in the species list.
If this is not correct, consider setting it manually by using the functions
\code{\link{taxon_views}} and \code{\link{add_view}}.
}
\value{
An object of class \code{\linkS4class{taxlist}}.
}
\author{
Miguel Alvarez, \email{kamapu78@gmail.com}
}
\seealso{
\code{\linkS4class{taxlist}}.
}
\examples{
library(taxlist)
## Cyperus data set installed as Turboveg species list
Cyperus <- tv2taxlist("cyperus", file.path(path.package("taxlist"), "tv_data"))
summary(Cyperus)
}
|
8d48edc8b1fdef8288edd149561c81b0c432b3ec
|
bbd072b9e809ae264edff70af0c8ece04ffa8ba2
|
/Assigment-3/7.R
|
eddd45e32965242fca60813f2f728f28ff126c66
|
[] |
no_license
|
GislaineCurty/Brasil_2019
|
ef402323cd02aa092ebf7001a6419018835da15b
|
80e15aeab2ea0cc3428f6ff698955a20642b0935
|
refs/heads/master
| 2020-07-07T00:35:10.283612
| 2019-08-21T20:00:02
| 2019-08-21T20:00:02
| 203,187,842
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 75
|
r
|
7.R
|
# Build a length-50 vector: the values 1..5, each repeated 10 times.
x <- c(rep(1, 10), rep(2, 10), rep(3, 10), rep(4, 10), rep(5, 10))
# Echo the vector (auto-printed at top level).
x
# Bug fix: the original called print(length(y)), but no object `y` exists
# anywhere in this script, so it errored; the intended object is `x`.
print(length(x))
|
c8ef6e527f8057dba86bd0841e1d91a0d5a626e9
|
f696d5a4aeccc4e4a9c25824c511bd80c481ba42
|
/Training system (induce models)/00_TRAINING_FROM_FULL_DATASET.R
|
15b65ac4852ea956e16947162786524508c2b0a7
|
[] |
no_license
|
ursusdm/predictingHourlySolarRadiation
|
fd1a13a93418f58f1f752ec34f9e466c909a5cc5
|
5978d460f70703544ee4ff8492c81666b51c24b6
|
refs/heads/master
| 2022-12-17T21:10:26.551012
| 2020-09-17T16:47:32
| 2020-09-17T16:47:32
| 296,289,858
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 237
|
r
|
00_TRAINING_FROM_FULL_DATASET.R
|
# Training pipeline driver: run every stage of the model-induction
# workflow, in order, from the full dataset.
pipeline_steps <- c(
  "./01_cleaning_demo_dataset.R",
  "./02_inducing_RF_for_regression.R",
  "./03_creating_dataset_for_classification.R",
  "./04_inducing_LMT_for_classification.R",
  "./05_creating_dataset_with_type_of_days.R"
)
for (step in pipeline_steps) {
  source(step)
}
|
4eead31580c4cf8b3ec5dd4e3caed905be66c380
|
7917fc0a7108a994bf39359385fb5728d189c182
|
/cran/paws.machine.learning/man/lexruntimeservice_post_text.Rd
|
1d7ec8372906ee563d51f06ebc16d8adb1c030d7
|
[
"Apache-2.0"
] |
permissive
|
TWarczak/paws
|
b59300a5c41e374542a80aba223f84e1e2538bec
|
e70532e3e245286452e97e3286b5decce5c4eb90
|
refs/heads/main
| 2023-07-06T21:51:31.572720
| 2021-08-06T02:08:53
| 2021-08-06T02:08:53
| 396,131,582
| 1
| 0
|
NOASSERTION
| 2021-08-14T21:11:04
| 2021-08-14T21:11:04
| null |
UTF-8
|
R
| false
| true
| 7,078
|
rd
|
lexruntimeservice_post_text.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/lexruntimeservice_operations.R
\name{lexruntimeservice_post_text}
\alias{lexruntimeservice_post_text}
\title{Sends user input to Amazon Lex}
\usage{
lexruntimeservice_post_text(botName, botAlias, userId,
sessionAttributes, requestAttributes, inputText, activeContexts)
}
\arguments{
\item{botName}{[required] The name of the Amazon Lex bot.}
\item{botAlias}{[required] The alias of the Amazon Lex bot.}
\item{userId}{[required] The ID of the client application user. Amazon Lex uses this to identify
a user's conversation with your bot. At runtime, each request must
contain the \code{userID} field.
To decide the user ID to use for your application, consider the
following factors.
\itemize{
\item The \code{userID} field must not contain any personally identifiable
information of the user, for example, name, personal identification
numbers, or other end user personal information.
\item If you want a user to start a conversation on one device and
continue on another device, use a user-specific identifier.
\item If you want the same user to be able to have two independent
conversations on two different devices, choose a device-specific
identifier.
\item A user can't have two independent conversations with two different
versions of the same bot. For example, a user can't have a
conversation with the PROD and BETA versions of the same bot. If you
anticipate that a user will need to have conversation with two
different versions, for example, while testing, include the bot
alias in the user ID to separate the two conversations.
}}
\item{sessionAttributes}{Application-specific information passed between Amazon Lex and a client
application.
For more information, see \href{https://docs.aws.amazon.com/lex/latest/dg/context-mgmt.html#context-mgmt-session-attribs}{Setting Session Attributes}.}
\item{requestAttributes}{Request-specific information passed between Amazon Lex and a client
application.
The namespace \verb{x-amz-lex:} is reserved for special attributes. Don't
create any request attributes with the prefix \verb{x-amz-lex:}.
For more information, see \href{https://docs.aws.amazon.com/lex/latest/dg/context-mgmt.html#context-mgmt-request-attribs}{Setting Request Attributes}.}
\item{inputText}{[required] The text that the user entered (Amazon Lex interprets this text).}
\item{activeContexts}{A list of contexts active for the request. A context can be activated
when a previous intent is fulfilled, or by including the context in the
request,
If you don't specify a list of contexts, Amazon Lex will use the current
list of contexts for the session. If you specify an empty list, all
contexts for the session are cleared.}
}
\value{
A list with the following syntax:\preformatted{list(
intentName = "string",
nluIntentConfidence = list(
score = 123.0
),
alternativeIntents = list(
list(
intentName = "string",
nluIntentConfidence = list(
score = 123.0
),
slots = list(
"string"
)
)
),
slots = list(
"string"
),
sessionAttributes = list(
"string"
),
message = "string",
sentimentResponse = list(
sentimentLabel = "string",
sentimentScore = "string"
),
messageFormat = "PlainText"|"CustomPayload"|"SSML"|"Composite",
dialogState = "ElicitIntent"|"ConfirmIntent"|"ElicitSlot"|"Fulfilled"|"ReadyForFulfillment"|"Failed",
slotToElicit = "string",
responseCard = list(
version = "string",
contentType = "application/vnd.amazonaws.card.generic",
genericAttachments = list(
list(
title = "string",
subTitle = "string",
attachmentLinkUrl = "string",
imageUrl = "string",
buttons = list(
list(
text = "string",
value = "string"
)
)
)
)
),
sessionId = "string",
botVersion = "string",
activeContexts = list(
list(
name = "string",
timeToLive = list(
timeToLiveInSeconds = 123,
turnsToLive = 123
),
parameters = list(
"string"
)
)
)
)
}
}
\description{
Sends user input to Amazon Lex. Client applications can use this API to
send requests to Amazon Lex at runtime. Amazon Lex then interprets the
user input using the machine learning model it built for the bot.
In response, Amazon Lex returns the next \code{message} to convey to the user
an optional \code{responseCard} to display. Consider the following example
messages:
\itemize{
\item For a user input "I would like a pizza", Amazon Lex might return a
response with a message eliciting slot data (for example,
PizzaSize): "What size pizza would you like?"
\item After the user provides all of the pizza order information, Amazon
Lex might return a response with a message to obtain user
confirmation "Proceed with the pizza order?".
\item After the user replies to a confirmation prompt with a "yes", Amazon
Lex might return a conclusion statement: "Thank you, your cheese
pizza has been ordered.".
}
Not all Amazon Lex messages require a user response. For example, a
conclusion statement does not require a response. Some messages require
only a "yes" or "no" user response. In addition to the \code{message}, Amazon
Lex provides additional context about the message in the response that
you might use to enhance client behavior, for example, to display the
appropriate client user interface. These are the \code{slotToElicit},
\code{dialogState}, \code{intentName}, and \code{slots} fields in the response.
Consider the following examples:
\itemize{
\item If the message is to elicit slot data, Amazon Lex returns the
following context information:
\itemize{
\item \code{dialogState} set to ElicitSlot
\item \code{intentName} set to the intent name in the current context
\item \code{slotToElicit} set to the slot name for which the \code{message} is
eliciting information
\item \code{slots} set to a map of slots, configured for the intent, with
currently known values
}
\item If the message is a confirmation prompt, the \code{dialogState} is set to
ConfirmIntent and \code{SlotToElicit} is set to null.
\item If the message is a clarification prompt (configured for the intent)
that indicates that user intent is not understood, the \code{dialogState}
is set to ElicitIntent and \code{slotToElicit} is set to null.
}
In addition, Amazon Lex also returns your application-specific
\code{sessionAttributes}. For more information, see \href{https://docs.aws.amazon.com/lex/latest/dg/context-mgmt.html}{Managing Conversation Context}.
}
\section{Request syntax}{
\preformatted{svc$post_text(
botName = "string",
botAlias = "string",
userId = "string",
sessionAttributes = list(
"string"
),
requestAttributes = list(
"string"
),
inputText = "string",
activeContexts = list(
list(
name = "string",
timeToLive = list(
timeToLiveInSeconds = 123,
turnsToLive = 123
),
parameters = list(
"string"
)
)
)
)
}
}
\keyword{internal}
|
414a3cd5c1613202e35e0fd19ec01fe8aa03f1cb
|
31d601468b54cb3e5ee0d91718c26a1ca23cefc0
|
/latex/21-bootstrap/normal-rejection.R
|
8d235c25e719090e342b1f61a74973129f276e85
|
[] |
no_license
|
echalkpad/sungsoo.github.com
|
0dd8adb60516ce2faec77dde6de250e4d0935228
|
9c47302f0e122ef74b079bde4a4d90faaa861793
|
refs/heads/master
| 2023-03-17T12:38:09.129987
| 2015-09-16T07:34:00
| 2015-09-16T07:34:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 558
|
r
|
normal-rejection.R
|
# generating N(0,1) by rejection method
# using exponential(beta=1) as the majorizing density
# Record the start time so total runtime can be reported at the end.
t0=Sys.time()
# Draw one N(0, 1) variate by rejection sampling, using Exp(1) as the
# majorizing density for the half-normal and then attaching a random sign.
# Returns c(draw, number_of_proposals_used).
one.X <- function() {
  # Envelope constant c = sqrt(2/pi) * e^{1/2}.
  env_const <- (2 / pi)^0.5 * exp(0.5)
  attempts <- 0
  repeat {
    attempts <- attempts + 1
    # Exp(1) proposal via inverse CDF, then an independent uniform.
    y <- -log(runif(1))
    u <- runif(1)
    # Accept when u * c * f_exp(y) falls under the half-normal density.
    if (u * env_const * exp(-y) <= (2 / pi)^0.5 * exp(-0.5 * y^2)) break
  }
  # Random sign turns the half-normal draw into a full normal draw.
  signed <- y * (2 * rbinom(1, 1, 0.5) - 1)
  c(signed, attempts)
}
################
# Draw n.rep independent samples with the rejection sampler and compare
# the empirical density with the exact N(0, 1) density.
set.seed(1235)
n.rep=100000
# replicate() returns a 2 x n.rep matrix: row 1 = draws, row 2 = attempts.
iid.sample=replicate(n.rep, one.X())
plot(density(iid.sample[1,]))
# Average number of proposals per accepted draw.
mean(iid.sample[2,])
# Overlay the exact N(0, 1) density in red for comparison.
x=seq(-4,4,.01)
lines(x,dnorm(x),col="red")
# Total elapsed time (t0 is set at the top of the script).
Sys.time()-t0
|
d2ec3ba35c10efd144e2210dcf18e928d6dfd8c2
|
98adcc451dd266a13fe2ca6a49142010062cfd3e
|
/simple_code/outputdata.R
|
b65bc5beda5edebc0812ac6a61b5c240ee047b42
|
[] |
no_license
|
3mofstudy/R
|
11dde775e05d78783478111483d83ae0db48d5ac
|
69e51c6ad4b13337ac937cf4ccce4a9f24eeeb5f
|
refs/heads/master
| 2021-10-26T18:08:04.969050
| 2021-10-07T06:05:18
| 2021-10-07T06:05:18
| 245,588,448
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 626
|
r
|
outputdata.R
|
# Export a data matrix or data frame to a delimited text file.
# NOTE(review): `Kidney1` is assumed to be defined earlier in the session.
write.table(x = Kidney1, # data matrix or data frame to export
            file ="D:/Kidney1.txt", # destination path
            quote = TRUE, # wrap character fields in double quotes
            sep =",", # field separator
            row.names = FALSE, # whether to write row names
            col.names = TRUE, # whether to write column names
            eol = "\r\n", # line ending: Unix/Windows/Mac = "\n" ,"\r\n", "\r"
            na = ".", # string used for missing values
            append = FALSE) # TRUE appends to an existing file instead of overwriting
|
c97fa055c51caf1b8adac91667e59c85b85be6e7
|
19fb0ebe587de3dff7dfc097706c8cf5de3b5ff3
|
/flashdrive/afternoon/wrtds.R
|
9a84565d52e98a6f4b90847c4784889fce63dbea
|
[] |
no_license
|
fawda123/swmp_workshop_2016
|
8de326e856cd1e8a1793c5b874e9282acfc0341e
|
985917d2563ddf7bab0c29adafb6c01166b41a29
|
refs/heads/master
| 2020-06-23T17:20:21.224440
| 2017-01-17T17:09:49
| 2017-01-17T17:09:49
| 66,284,723
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 953
|
r
|
wrtds.R
|
## TS topics 1: WRTDS
# setwd('mypath')
# Load SWMPr and the nutrient data (object `noczbnut`).
library(SWMPr)
load(file = 'noczbnut.RData')
# Rename, QAQC clean-up (keep flags 0 and 4), subset to the 'chla_n' column.
nut <- noczbnut
nut <- qaqc(nut, qaqc_keep = c(0, 4))
nut <- subset(nut, select = 'chla_n')
head(nut)
# Load the water-quality data (object `noczbwq`).
load(file = 'noczbwq.RData')
# Rename, QAQC clean-up, subset to the 'sal' column.
wq <- noczbwq
wq <- qaqc(wq, qaqc_keep = c(0, 4))
wq <- subset(wq, select = 'sal')
head(wq)
# Combine the nutrient and water-quality series at a weekly time step.
tomod <- comb(nut, wq, timestep = 'weeks')
# Plot both series together.
overplot(tomod, type = c('p', 'l'))
library(WRTDStidal)
# Add an arbitrary detection-limit column and convert datetimestamp to
# Date before building the tidalmean object.
# NOTE(review): lim = -1e6 presumably means "no observation is censored"
# -- confirm against the WRTDStidal documentation.
tomod$lim <- -1e6
tomod$datetimestamp <- as.Date(tomod$datetimestamp)
# Create the tidalmean object; reslog = FALSE since the response is not
# already on the log scale.
tomod <- tidalmean(tomod, reslog = FALSE)
head(tomod)
# Fit the WRTDS model with default settings, then run diagnostics.
mod <- modfit(tomod)
wrtdsperf(mod)
fitplot(mod)
prdnrmplot(mod)
# NOTE(review): prefer TRUE/FALSE over T/F in the call below.
gridplot(mod, logspace = F, month = 'all', floscl = F)
|
d1ac8273383adf4e9a465e9cb6cb1544e9595f1b
|
71ac65903672ab795f4ccc5eefd322a68a634615
|
/man/splt_fsmi.Rd
|
3e762ade4a83bbc4dca436c0f9edac4ffd3b4b8a
|
[] |
no_license
|
jflournoy/probly
|
754b2e213982af304736aac89d785331957f6534
|
a3398f5ba56846dc7be31428aa882ed1c68634b7
|
refs/heads/master
| 2021-08-06T05:18:44.420012
| 2021-02-09T20:05:01
| 2021-02-09T20:05:01
| 125,664,436
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 608
|
rd
|
splt_fsmi.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{splt_fsmi}
\alias{splt_fsmi}
\title{Fundamental Social Motives Inventory Data}
\format{A data frame with M rows and N variables: \describe{
\item{blah}{blah} }}
\usage{
splt_fsmi
}
\description{
College sample only. Questionnaire source is:
}
\details{
Neel, R., Kenrick, D. T., White, A. E., & Neuberg, S. L. (2015). Individual
Differences in Fundamental Social Motives. Journal of Personality and Social
Psychology, No Pagination Specified. https://doi.org/10.1037/pspp0000068
}
\keyword{datasets}
|
ca343aa3bd5214fac8ba55711d14e67ef4399b74
|
781f542f9b84c87af5913de75f884dc397c5beca
|
/moshefiles/rjags_model-moshe.R
|
9cfebc4a207119ef53fb43e79a5c0a2e8369c63e
|
[] |
no_license
|
hezibu/invasionrate
|
827cf642418aae27f9b114a1be03b29fddbe5447
|
8c08061f48da59decb60185a8c7ce4df07fce962
|
refs/heads/master
| 2021-10-10T16:06:58.038104
| 2021-09-28T14:10:37
| 2021-09-28T14:10:37
| 173,117,997
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,444
|
r
|
rjags_model-moshe.R
|
# NOTE(review): setwd() to a user-specific absolute path is not portable;
# prefer project-relative paths.
setwd("C:/Users/mkiflawi/Dropbox/Ideas/Belmaker/")
library(rjags)
# JAGS model definition. Everything between the quotes is JAGS code, not R:
# a binomial model for the number of invasives dI[i] among the dsps[i]
# newly recorded species at time t[i], with detection probability P[i]
# following one of three curves (logistic active; Gompertz and Weibull
# alternatives are commented out inside the string). res/res.new and
# fit/fit.new support a posterior-predictive check.
modelString="#open quote
model {
for ( i in 1:N ) {
logit_P[i]=a+b*t[i]
P[i]=1/(1+exp(-logit_P[i])) # <---------------logistic
#P[i]=exp(-a*exp(-b*t[i])) # <---------------Gompertz
#P[i]=1-exp(-(a*t[i]^b)) # <---------------Weibull
dI[i] ~ dbin( P[i], dsps[i] ) # p first, then n
res[i] = dI[i] - P[i]*dsps[i] # residual, pr*dsps is the mean
dI.new[i] ~ dbin( P[i], dsps[i] )
res.new[i] = dI.new[i] - P[i]*dsps[i]
}
fit <- sum(res[])
fit.new <- sum(res.new[])
a ~ dunif(-10,10)
b ~ dnorm(0, 1) #dunif(-1, 1)
}
" # close quote for modelString
# Write the model to disk so jags.model() can read it back later.
writeLines( modelString , con="lessep_model_pr.txt" ) # write to file
# Nodes to monitor (NOTE(review): coda.samples() below passes its own
# variable.names, so this vector is currently unused).
jags.params <- c("a", "b", "P", 'fit','fit.new')
# The three jags.inits definitions below overwrite one another; run ONLY
# the one matching the curve selected inside the model string (if the
# whole script is sourced, the last definition -- Weibull -- wins).
#initial values for logistic
jags.inits <- function(){
  list("a"=runif(1, -10, 10), "b"=rnorm(1, 0, 1))
}
#initial values for Gompertz
jags.inits <- function(){
  list("a"=runif(1, 1, 10), "b"=runif(1, 0.01, 0.1))
}
#initial values for weibull - need to play with these values
jags.inits <- function(){
  list("a"=runif(1, 0, 0.1), "b"=runif(1, 0.1, 10))
}
###sim.discovery.data=sim.record(M, b0, b1)
# Assemble the data list for JAGS from the simulated discovery record
# (sim.discovery.data and M must already exist in the workspace).
jags.data=list(
  t=sim.discovery.data$t,
  N=dim(sim.discovery.data)[1], #number of observations
  dI=as.integer(sim.discovery.data$n.Inv_t), # Invasives added on t
  dsps=as.integer(sim.discovery.data$n.sps_t) # Overall new species sampled on year t
)
#run using rjags: compile, burn in, then draw posterior samples
jagsModel = jags.model( file="lessep_model_pr.txt" , data=jags.data , inits=jags.inits, n.chains=3) # may need to run several times to initiate
update(jagsModel, n.iter=5000)
samples <- coda.samples(jagsModel,
                        variable.names=c("a","b", "P", 'fit','fit.new'),
                        n.iter=5000)
summary(samples)
# Deviance information criterion for model comparison.
dic.samples(jagsModel, n.iter=1000)
#compare the maximum-likelihood binomial probabilities and those estimated in rjags
sim.discovery.data$P=as.numeric(summary(samples)$quantiles[1:dim(sim.discovery.data)[1],3]) #the median (50th percentile) of P
plot(P~t, sim.discovery.data, ylim=c(0,1))
plot(P~p_max.LL, sim.discovery.data, ylim=c(0,1))
abline(0,1)
# Expected total invasives implied by the posterior-median probabilities.
sim.discovery.data$exp_I.tot=sim.discovery.data$n.Inv+(sim.discovery.data$P/(1-sim.discovery.data$P))*(M-sim.discovery.data$n.Nativ)
plot(n.Nativ~t, sim.discovery.data, col='red', ylim=c(0,M+20))
points(n.Inv~t, sim.discovery.data, col='blue')
lines(I.tot~t, sim.discovery.data)
lines(exp_I.tot~t, sim.discovery.data, col='green')
abline(h=M)
#model fitting the 'actual' (simulation) I.tot <---- need to think of a better function than the log-log
lm.res.sim=lm(log(I.tot+0.5)~log(t), sim.discovery.data)
summary(lm.res.sim)
#model fitting the 'estimated' (max. likelihood) I.tot
lm.res=lm(log(exp_I.tot+0.5)~log(t), sim.discovery.data, weights=t^0.5) #<----note the weights
summary(lm.res)
plot(log(I.tot+0.5)~log(t), sim.discovery.data, ylim=c(0,8))
abline(lm.res.sim)
points(log(exp_I.tot+0.5)~log(t), sim.discovery.data, col='red')
abline(lm.res, col='red')
# Posterior summaries and convergence diagnostics via MCMCvis.
library(MCMCvis)
MCMCsummary(samples, params = 'a', n.eff = TRUE, digits = 2) #2.5% to 97.5% gives the 95% credible interval
MCMCsummary(samples, params = 'b', n.eff = TRUE, digits = 2)
# NOTE(review): prefer TRUE over T for `ind` below.
MCMCtrace(samples,
          params = c('a', 'b'),
          ind = T, #individual chains
          ISB = FALSE,
          pdf = FALSE,
          Rhat = TRUE,
          n.eff = TRUE)
MCMCplot(samples, params = 'P')
MCMCsummary(samples, round = 2)
|
f5debeccaf5d58f4677d3fc7e50512c186d4d7af
|
0dd7ba5c65f37a4674f6c5f57620af3cb4a28e81
|
/apps/shinydisp2/server.R
|
a08b678ee48dd294b441616b7acdb53944db923a
|
[] |
no_license
|
uwban/cvapps
|
2a7d86096c579392b47fb2e57270a7bad4fcb17d
|
efdf0f702c1ee53ccb7db4d4a1a30d13f28cc939
|
refs/heads/master
| 2020-04-16T16:15:43.372554
| 2018-07-05T19:13:34
| 2018-07-05T19:13:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 17,980
|
r
|
server.R
|
server <- function(input, output, session) {
# Relabel rxns dropdown menu based on selected drug
observeEvent(input$search_drug, {
hlt_choices <- drug_PT_HLT
pt_choices <- drug_PT_HLT
if (all("" != input$search_drug)) {
if (length(input$search_drug) == 1) {
hlt_choices %<>% filter(ing == input$search_drug)
pt_choices %<>% filter(ing == input$search_drug)
} else {
hlt_choices %<>% filter(ing %in% c(input$search_drug))
pt_choices %<>% filter(ing %in% c(input$search_drug))
}
}
soc_choices <- pt_choices %>% distinct()
hlt_choices %<>% distinct(HLT_NAME_ENG) %>% as.data.frame() %>% `[[`(1) %>% sort()
pt_choices %<>% distinct(PT_NAME_ENG) %>% as.data.frame() %>% `[[`(1) %>% sort()
updateSelectizeInput(session, "search_hlt",
choices = c("Start typing to search..." = "", hlt_choices))
updateSelectizeInput(session, "search_pt",
choices = c("Start typing to search..." = "", pt_choices))
})
observeEvent(c(input$search_button,input$search_hlt),{
if(is.null(input$search_pt)){
updateTabsetPanel(session, "tabbox",selected ="panel2")
}else {
updateTabsetPanel(session, "tabbox",selected ="panel1")
}
})
observeEvent(input$search_hlt, {
if (input$checkbox_filter_pt) {
pt_choices <- drug_PT_HLT
if (all("" != input$search_hlt) & !is.null(input$search_hlt)) {
if (length(input$search_hlt) == 1) {
pt_choices %<>% filter(HLT_NAME_ENG == input$search_hlt)
} else {
pt_choices %<>% filter(HLT_NAME_ENG %in% c(input$search_hlt))
}
}
if (all("" != input$search_drug) & !is.null(input$search_drug)) {
if (length(input$search_drug) == 1) {
pt_choices %<>% filter(ing == input$search_drug)
} else {
pt_choices %<>% filter(ing %in% c(input$search_drug))
}
}
pt_choices %<>% distinct(PT_NAME_ENG) %>% as.data.frame() %>% `[[`(1) %>% sort()
updateSelectizeInput(session, "search_pt",
choices = c("Start typing to search..." = "", pt_choices))
}}, ignoreNULL = FALSE)
observeEvent(input$checkbox_filter_pt, {
pt_choices <- drug_PT_HLT
print(input$search_drug)
if (input$checkbox_filter_pt) {
if (all("" != input$search_hlt) & !is.null(input$search_hlt)) {
if (length(input$search_hlt) == 1) {
pt_choices %<>% filter(HLT_NAME_ENG == input$search_hlt)
} else {
pt_choices %<>% filter(HLT_NAME_ENG %in% c(input$search_hlt))
}
}
}
if (all("" != input$search_drug) & !is.null(input$search_drug)) {
if (length(input$search_drug) == 1) {
pt_choices %<>% filter(ing == input$search_drug)
} else {
pt_choices %<>% filter(ing %in% c(input$search_drug))
}
}
pt_choices %<>% distinct(PT_NAME_ENG) %>% as.data.frame() %>% `[[`(1) %>% sort()
updateSelectizeInput(session, "search_pt",
choices = c("Start typing to search..." = "", pt_choices))
})
########## Reactive data processing
# Data structure to store current query info
current_search <- reactive({
input$search_button
isolate({
withProgress(message = 'Calculation in progress', value = 0, {
min_count <- as.numeric(input$min_count)
if (is.na(min_count) | min_count < 0) min_count = 0
min_count <- floor(min_count)
updateTextInput(session, "min_count", value = min_count)
incProgress(1/3)
min_exp <- as.numeric(input$min_exp)
if (is.na(min_exp) | min_exp < 0) min_exp = 0
updateTextInput(session, "min_exp", value = min_exp)
incProgress(1/3)
list(min_count = min_count,
min_exp = min_exp,
drug = input$search_drug,
hlt = input$search_hlt,
pt = input$search_pt,
filter_inf = input$inf_filter,
display_total_pt = input$display_total_pt,
display_total_hlt = input$display_total_hlt)
})
})
})
########## Output
# Display what query was searched
output$current_search <- renderTable({
data <- current_search()
print(data)
result <- data.frame(names = c("Generic Name:",
"High-Level Term:",
"Preferred Term:"),
terms = c(paste0(data$drug, collapse = ", "),
paste0(data$hlt, collapse = ", "),
paste0(data$pt, collapse = ", ")),
stringsAsFactors=FALSE)
result["" == result] <- "Not Specified"
result
}, include.colnames = FALSE)
output$pt_data_dl <- downloadHandler(
filename = function() {
current_drug <- current_search()$drug
if (current_drug == "") current_drug <- "all"
current_drug <- gsub(" ", "_", current_drug)
current_drug <- gsub("\\|", "-", current_drug)
paste0('pt_data_', current_drug, '.csv')
},
content = function(file) {
write.csv(table_pt_data(), file, row.names=FALSE)
}
)
output$hlt_data_dl <- downloadHandler(
filename = function() {
current_drug <- current_search()$drug
if (current_drug == "") current_drug <- "all"
current_drug <- gsub(" ", "_", current_drug)
current_drug <- gsub("\\|", "-", current_drug)
paste0('hlt_data_', current_drug, '.csv')
},
content = function(file) {
write.csv(table_hlt_data(), file, row.names=FALSE)
}
)
# PRR tab
# Build the preferred-term (PT) disproportionality table for the current
# query: restrict master_table_pt to the selected drug(s)/PT(s), apply the
# user's count / expected-count / Inf filters, and sort by the IC stats.
# FIX: removed a leftover `print(data)` debug statement that dumped the
# query object to the console on every recomputation.
table_pt_data <- reactive({
  # if(input$table_selection=="All"){
  #   table <- master_table_pt
  # }else if (input$table_selection=="PT seen before"){
  #   table<-master_table_pt%>%filter(count<5)
  # }else {
  #   table<-master_table_pt%>%filter(count>5)
  # }
  table<-master_table_pt
  input$search_button # hacky way to get eventReactive but also initial load
  isolate({
    data <- current_search()
    # Normalise NULL query fields to "" so the filters below are uniform.
    if (is.null(data$drug)) data$drug = ""
    if (is.null(data$pt)) data$pt = ""
    if (is.null(data$hlt)) data$hlt = ""
    # PRR and ROR values of Inf mean no other drugs are associated with that
    # specific adverse reaction, so the denominator is zero.
    #if (all(data$drug != "")) table %<>% filter(drug_code == data$drug %>% as.data.frame())
    drugs <- data$drug %>% as.data.frame()
    names(drugs) <- 'drug_code'
    # semi_join keeps only rows matching the selected drug(s)/PT(s).
    if (all(drugs != "")) table %<>% semi_join(drugs,copy=TRUE)
    pts <- data$pt %>%as.data.frame()
    names(pts) <- 'event_effect'
    if (all(pts != "")) table%<>% semi_join(pts,copy=TRUE)
    if (data$filter_inf) table%<>% filter(PRR != Inf)
    # Apply user thresholds, sort, and round numeric columns to 3 d.p.
    table %>% filter(count >= data$min_count) %>%
      filter(expected_count >= data$min_exp) %>%
      arrange(desc(median_IC), desc(LB95_IC), drug_code, event_effect) %>%
      as.data.table() %>%
      lapply(function(x) {if (is.numeric(x)) round(x,3) else x}) %>%
      as.data.table()
  })
})
# pass either datatable object or data to be turned into one to renderDataTable
# Render the PT table. The 'colvis' button lets the user toggle column
# visibility; the columnDefs targets start hidden by default.
output$table_pt <- DT::renderDataTable(DT::datatable(
  table_pt_data(),
  extensions = 'Buttons',
  options = list(
    scrollX = TRUE,
    dom = 'Bfrtip',
    buttons = list(list(extend = 'colvis',
                        text = 'Columns to display',
                        columns = 5:25)),
    # Hidden on load: secondary statistic columns (indices are 0-based).
    columnDefs = list(list(visible = FALSE,
                           targets = c(5:6, 9:10, 16:17, 19:20, 22:23)))
  )))
#
# Build the high-level-term (HLT) disproportionality table for the current
# query; mirrors table_pt_data but joins on HLT instead of PT.
table_hlt_data <- reactive({
  input$search_button # hacky way to get eventReactive but also initial load
  isolate({
    data <- current_search()
    # Normalise NULL query fields to "" so the filters below are uniform.
    if (is.null(data$drug)) data$drug = ""
    if (is.null(data$pt)) data$pt = ""
    if (is.null(data$hlt)) data$hlt = ""
    # PRR and ROR values of Inf mean no other drugs are associated with that
    # specific adverse reaction, so the denominator is zero.
    table <- master_table_hlt
    drugs <- data$drug %>% as.data.frame()
    names(drugs) <- 'drug_code'
    # semi_join keeps only rows matching the selected drug(s)/HLT(s).
    if (all(drugs != "")) table %<>% semi_join(drugs,copy=TRUE)
    hlts <- data$hlt %>% as.data.frame()
    names(hlts) <- 'event_effect'
    if (all(hlts != "")) table %<>% semi_join(hlts,copy=TRUE)
    if (data$filter_inf) table %<>% filter(PRR != Inf)
    # Apply user thresholds, sort, and round numeric columns to 3 d.p.
    table %<>% filter(count >= data$min_count) %>%
      filter(expected_count >= data$min_exp) %>%
      arrange(desc(median_IC), desc(LB95_IC), drug_code, event_effect) %>%
      as.data.table() %>%
      lapply(function(x) {if (is.numeric(x)) round(x,3) else x}) %>%
      as.data.table()
  })
})
# pass either datatable object or data to be turned into one to renderDataTable
# Render the HLT table; identical DT configuration to output$table_pt.
output$table_hlt <- DT::renderDataTable(DT::datatable(
  table_hlt_data(),
  extensions = 'Buttons',
  options = list(
    scrollX = TRUE,
    dom = 'Bfrtip',
    buttons = list(list(extend = 'colvis',
                        text = 'Columns to display',
                        columns = 5:25)),
    # Hidden on load: secondary statistic columns (indices are 0-based).
    columnDefs = list(list(visible = FALSE,
                           targets = c(5:6, 9:10, 16:17, 19:20, 22:23)))
  )))
# time-series data
# Quarterly report counts for the time plot: takes the top 10 drug/PT pairs
# (by the table's IC ordering), fills in zero-count quarters so lines are
# continuous, and optionally collapses to a single "total for query" series.
time_data_pt <- reactive({
  cur_search <- current_search()
  top_pairs <- table_pt_data() %>% head(10) %>% select(drug_code, event_effect) %>%
    mutate(drug_code = as.character(drug_code), event_effect = as.character(event_effect))
  timeplot_df <- count_quarter_pt %>% semi_join(top_pairs, by = c("ing" = "drug_code", "PT_NAME_ENG" = "event_effect"))
  # Zero-count scaffold: every quarter, every pair, n = 0.
  quarters_df <- quarters %>% cbind(n = 0)
  if (nrow(top_pairs) > 0) {
    pairs_df <- top_pairs %>% rename(ing = drug_code, PT_NAME_ENG = event_effect) %>% cbind(n = 0)
  } else {
    # Nothing matched the query: tell the user and fall through with an
    # all-NA placeholder so downstream joins still produce a frame.
    showModal(modalDialog(
      title = list(icon("exclamation-triangle"), "No results found!"),
      "There were no reports matching your query.",
      size = "s",
      easyClose = TRUE))
    pairs_df <- data.frame(ing = NA, PT_NAME_ENG = NA, n = NA)# top_pairs %>% rename(ing = drug_code, PT_NAME_ENG = event_effect) %>%
  }
  # Cross pairs with quarters (join on the shared n = 0 column), then add
  # the real counts and re-aggregate so every (pair, quarter) has a row.
  filled_time_df <- full_join(pairs_df, quarters_df, by = "n") %>%
    bind_rows(timeplot_df) %>%
    count(ing, PT_NAME_ENG, quarter, wt = n) %>%
    ungroup() %>%
    mutate(label = paste0(ing, "_", PT_NAME_ENG)) %>%
    select(label, quarter, nn)
  # "Display total" mode: one aggregate series instead of per-pair lines.
  if (cur_search$display_total_pt & is.null(cur_search$pt)) {
    total_df <- count_quarter_pt
    if (!is.null(cur_search$drug)) total_df %<>% filter(ing == cur_search$drug)
    if (!is.null(cur_search$pt)) total_df %<>% filter(PT_NAME_ENG == cur_search$pt)
    total_df %<>% count(quarter, wt = n) %>%
      mutate(label = paste0("total for query")) %>%
      select(label, quarter, nn)
    filled_time_df <- total_df
  }
  filled_time_df %<>% rename(n = nn)
  # filled_time_df <- timeplot_df %>%
  #  mutate(label = paste0(ing, "_", PT_NAME_ENG)) %>%
  #  select(-ing, -PT_NAME_ENG) %>%
  #  spread(label, count)
  # filled_time_df[is.na(filled_time_df)] <- 0
  # filled_time_df
})
# Title for the PT time plot, reflecting the current drug/PT query.
output$current_pt_title <- renderText({
  qry <- current_search()
  drug_part <- if (is.null(qry$drug)) "All Drugs" else qry$drug
  pt_part <- if (is.null(qry$pt)) "Top 10 Reactions with Highest IC Estimates" else qry$pt
  paste("Non-Cumulative Report Count Time Plot for:",
        paste0(drug_part, collapse = ", "), "&",
        paste0(pt_part, collapse = ", "))
})
# Pipe the reactive time series into a ggvis line-and-point plot and bind
# it to the "timeplot_pt" output; tooltip shows the pair label and count.
time_data_pt %>% mutate(qtr = as.yearqtr(quarter %>% as.character(), '%Y.%q')) %>%
  ggvis(~qtr, ~n) %>%
  add_axis("x", title = "Quarter", properties = axis_props(
    label = list(angle = 330))) %>%
  add_axis("y", title = "Report Count") %>%
  add_tooltip(function(data){paste0("PT: ", data$label, "<br>",
                                    "Count: ", as.character(data$n))},
              "hover") %>%
  layer_points(fill = ~label, stroke := "black") %>%
  group_by(label) %>%
  layer_paths(stroke = ~label) %>%
  set_options(width = 'auto') %>% bind_shiny("timeplot_pt", "data")
# Time Plot
# output$timeplot_pt <- renderPlot({
# data <- current_search()
#
# current_drug <- ifelse(data$drug == "", "All Drugs", data$drug)
# current_rxn <- ifelse(data$pt == "", "Top 10 Reactions with Highest IC Estimates", data$pt)
# plottitle <- paste("Non-Cumulative Report Count Time Plot for:", current_drug, "&", current_rxn)
#
# df <- time_data_pt() %>%
# # mutate(qtr = (quarter%%1 - 0.1)*2.5 + quarter%/%1)
# # gvisLineChart(df,
# # xvar = "qtr",
# # yvar = names(df)[2:11]
# # options = list(
# # height = 350,
# # vAxis = "{title: 'Number of Reports'}",
# # hAxis = "{title: 'Month'}",
# # chartArea = "{top: 10, height: '80%', left: 120, width: '84%'}")
# # )
# mutate(qtr = as.yearqtr(quarter %>% as.character(), '%Y.%q'))
# # p <- ggplot(df, aes(x = qtr, y = n, label = label)) +
# # scale_x_yearqtr(breaks = seq(min(df$qtr), max(df$qtr), 0.25),
# # format = "%Y Q%q") +
# # geom_line(aes(colour=label)) + geom_point() +
# # ggtitle(plottitle) +
# # xlab("Quarter") +
# # ylab("Report Count") +
# # theme_bw() +
# # theme(plot.title = element_text(face="bold", hjust=0.1, size = rel(0.75)),
# # axis.text.x = element_text(angle=30, vjust=0.9, hjust=1, size=rel(0.75)),
# # axis.title = element_text(size = rel(0.75)),
# # axis.title.x = element_text(vjust = 0))
# # p <- plot_ly(df, x = ~qtr, y = ~n, mode = 'lines+markers') %>%
# # layout(title = plottitle,
# # font = list(size = 8))
#
#
# print(p)
#
# })
# time-series data
# Quarterly report counts for the HLT time plot; mirrors time_data_pt but
# keyed on HLT_NAME_ENG instead of PT_NAME_ENG.
time_data_hlt <- reactive({
  cur_search <- current_search()
  top_pairs <- table_hlt_data() %>% head(10) %>% select(drug_code, event_effect) %>%
    mutate(drug_code = as.character(drug_code), event_effect = as.character(event_effect))
  timeplot_df <- count_quarter_hlt %>% semi_join(top_pairs, by = c("ing" = "drug_code", "HLT_NAME_ENG" = "event_effect"))
  # Zero-count scaffold: every quarter, every pair, n = 0.
  quarters_df <- quarters %>% cbind(n = 0)
  if (nrow(top_pairs) > 0) {
    pairs_df <- top_pairs %>% rename(ing = drug_code, HLT_NAME_ENG = event_effect) %>% cbind(n = 0)
  } else {
    # Nothing matched the query: notify the user; placeholder keeps the
    # downstream joins well-formed.
    showModal(modalDialog(
      title = list(icon("exclamation-triangle"), "No results found!"),
      "There were no reports matching your query.",
      size = "s",
      easyClose = TRUE))
    pairs_df <- data.frame(ing = NA, HLT_NAME_ENG = NA, n = NA)
  }
  # Cross pairs with quarters, add real counts, aggregate per quarter.
  filled_time_df <- full_join(pairs_df, quarters_df, by = "n") %>%
    bind_rows(timeplot_df) %>%
    count(ing, HLT_NAME_ENG, quarter, wt = n) %>%
    ungroup() %>%
    mutate(label = paste0(ing, "_", HLT_NAME_ENG)) %>%
    select(label, quarter, nn)
  # "Display total" mode: one aggregate series instead of per-pair lines.
  if (cur_search$display_total_hlt & is.null(cur_search$hlt)) {
    total_df <- count_quarter_hlt
    if (!is.null(cur_search$drug)) total_df %<>% filter(ing == cur_search$drug)
    if (!is.null(cur_search$hlt)) total_df %<>% filter(HLT_NAME_ENG == cur_search$hlt)
    total_df %<>% count(quarter, wt = n) %>%
      mutate(label = paste0("total for query")) %>%
      select(label, quarter, nn)
    filled_time_df <- total_df
  }
  filled_time_df %<>% rename(n = nn)
})
# output$timeplot_hlt <- renderPlot({
# input$search_button # hacky way to get eventReactive but also initial load
# isolate({
# current_drug <- ifelse(is.null(input$search_drug),"All Drugs",input$search_drug)
# current_rxn <- ifelse(is.null(input$search_hlt),"Top 10 Reactions with Highest IC Estimates",input$search_hlt)
# })
# plottitle <- paste("Non-Cumulative Report Count Time Plot for:", current_drug, "&", current_rxn)
#
# df <- time_data_hlt() %>%
# mutate(qtr = as.yearqtr(quarter %>% as.character(), '%Y.%q'))
#
# p <- ggplot(df, aes(x = qtr, y = n)) +
# scale_x_yearqtr(breaks = seq(min(df$qtr), max(df$qtr), 0.25),
# format = "%Y Q%q") +
# ifelse(sum(df$n) == 0, geom_blank(), geom_line(aes(colour=label)) + geom_point()) +
# ggtitle(plottitle) +
# xlab("Quarter") +
# ylab("Report Count") +
# theme_bw() +
# theme(plot.title = element_text(lineheight=.8, face="bold"), axis.text.x = element_text(angle=30, vjust=0.9, hjust=1))
# print(p)
# })
# Title for the HLT time plot, reflecting the current drug/HLT query.
output$current_hlt_title <- renderText({
  qry <- current_search()
  drug_part <- if (is.null(qry$drug)) "All Drugs" else qry$drug
  hlt_part <- if (is.null(qry$hlt)) "Top 10 Reactions with Highest IC Estimates" else qry$hlt
  paste("Non-Cumulative Report Count Time Plot for:",
        paste0(drug_part, collapse = ", "), "&",
        paste0(hlt_part, collapse = ", "))
})
# Pipe the HLT time series into ggvis and bind it to "timeplot_hlt";
# mirrors the PT plot above.
time_data_hlt %>% mutate(qtr = as.yearqtr(quarter %>% as.character(), '%Y.%q')) %>%
  ggvis(~qtr, ~n) %>%
  add_axis("x", title = "Quarter", properties = axis_props(
    label = list(angle = 330))) %>%
  add_axis("y", title = "Report Count") %>%
  add_tooltip(function(data){paste0("HLT: ", data$label, "<br>",
                                    "Count: ", as.character(data$n))},
              "hover") %>%
  layer_points(fill = ~label, stroke := "black") %>%
  group_by(label) %>%
  layer_paths(stroke = ~label) %>%
  set_options(width = 'auto') %>% bind_shiny("timeplot_hlt", "data")
}
|
59e381e1d2ba67275e9840280bb1a120a4b2fa60
|
3937cb382536a2326bc181bd872778aaf25c8bee
|
/man/french_dataset-class.Rd
|
58698c1a6e0cceb02ea9f0e6f2ce35e262062b97
|
[
"MIT"
] |
permissive
|
nareal/frenchdata
|
030f499bd6f390e368670470ad8be950a1a14aba
|
7918f15f41fadea53255e5bd376f7e945474f6dd
|
refs/heads/master
| 2023-08-21T13:11:36.108309
| 2021-09-10T17:54:13
| 2021-09-10T17:54:13
| 366,717,954
| 10
| 2
|
NOASSERTION
| 2021-09-08T17:13:51
| 2021-05-12T13:04:38
|
R
|
UTF-8
|
R
| false
| true
| 1,030
|
rd
|
french_dataset-class.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/download_french_data.R
\name{french_dataset-class}
\alias{french_dataset-class}
\title{\code{french_dataset} S3 class}
\description{
The \code{french_dataset} class exists to hold the results of reading the file lists of Kenneth French's data library.
It provides a method to print the objects of this class.
}
\section{Properties of \code{french_dataset}}{
Objects of class \code{french_dataset} have:
\itemize{
\item A \code{class} attribute of \code{french_dataset}.
\item A base type of \code{"list"} with the following elements:
\itemize{
\item \code{info} - holds information about when and where the data was retrieved.
\item \code{details_url} - url for the webpage with details on data set.
\item \code{subsets} - a tibble with the subsets contained in the downloaded file; the tibble contains a \code{name} and a \code{data} column.
}
}
}
\section{Behavior of \code{french_dataset}}{
\itemize{
\item Pretty prints the object.
}
}
|
b018dd72609105900a942328c6449bc0ea0a08bf
|
9e3209e4bd9eaa05d9222a80f07ae995ccaea5eb
|
/hw03/code/binomial-functions.R
|
8f13402b74db6bc3ea51d57e5943f0658c5c45e2
|
[] |
no_license
|
lindsey-chung/homework
|
9be7f037f32a9d10178a06b2ff8b14c190cd6f21
|
7a934e879b1ed5cbb782ea4e7858a0a84c6126fe
|
refs/heads/master
| 2021-04-03T08:36:51.555653
| 2018-04-26T22:47:13
| 2018-04-26T22:47:13
| 124,458,131
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,621
|
r
|
binomial-functions.R
|
# ==============================================================
# Title: HW 03 Data Script
# Description: This script writes binomial probability functions
# Inputs
# Outputs: the following functions
# is_integer()
# is_positive()
# is_nonnegative()
# is_positive_integer()
# is_nonneg_integer()
# is_probability()
# bin_factorial()
# bin_combinations()
#    bin_probability()
# bin_distribution()
# Author: Lindsey Chung
# Date due: March 23, 2018
# ==============================================================
#' @title Integer Test
#' @description checks whether the input is a whole number
#' @param x a numeric value
#' @return TRUE if x has no fractional part, FALSE otherwise
is_integer <- function(x) {
  # A whole number leaves no remainder when divided by 1.
  if (x %% 1 != 0) {
    return(FALSE)
  }
  TRUE
}
#' @title Positivity Test
#' @description checks whether the input is strictly greater than zero
#' @param x a numeric value
#' @return TRUE if x > 0, FALSE otherwise
is_positive <- function(x) {
  # Guard-clause form of the original if/else.
  if (x <= 0) {
    return(FALSE)
  }
  TRUE
}
#' @title Non-Negativity Test
#' @description checks whether the input is zero or greater
#' @param x a numeric value
#' @return TRUE if x >= 0, FALSE otherwise
is_nonnegative <- function(x) {
  # Condition inverted relative to the original; same truth table.
  if (x >= 0) {
    return(TRUE)
  }
  FALSE
}
#' @title Positive Integer Test
#' @description checks whether the input is an integer greater than zero
#' @param x a numeric value
#' @return TRUE or FALSE
is_positive_integer <- function(x) {
  # Both predicates are evaluated, mirroring the original `&` semantics.
  both <- is_positive(x) & is_integer(x)
  if (both) {
    return(TRUE)
  }
  FALSE
}
#' @title Non-Negative Integer Test
#' @description checks whether the input is an integer that is >= 0
#' @param x a numeric value
#' @return TRUE or FALSE
is_nonneg_integer <- function(x) {
  # Both predicates are evaluated, mirroring the original `&` semantics.
  both <- is_nonnegative(x) & is_integer(x)
  if (both) {
    return(TRUE)
  }
  FALSE
}
#' @title Probability Test
#' @description checks whether the input lies in the closed interval [0, 1]
#' @param x a numeric value
#' @return TRUE if 0 <= x <= 1, FALSE otherwise
is_probability <- function(x) {
  inside <- (x >= 0) & (x <= 1)
  if (inside) {
    return(TRUE)
  }
  FALSE
}
#' @title Factorial
#' @description computes x! for a non-negative integer x; 0! is 1
#' @param x a non-negative integer
#' @return the factorial of x
bin_factorial = function(x) {
  # Guard against inputs outside the documented domain: the original
  # silently returned 0 for negative x because `1:x` counted downward
  # through zero.
  if (x < 0 || x %% 1 != 0) {
    stop('x must be a non-negative integer')
  }
  # prod() over the empty sequence seq_len(0) is 1, covering the x == 0
  # case without a special branch.
  prod(seq_len(x))
}
#' @title Combinations
#' @description number of ways to choose k successes out of n trials
#' @param n total number of trials
#' @param k number of successes
#' @return n! / (k! * (n - k)!)
bin_combinations <- function(n, k) {
  numerator <- bin_factorial(n)
  denominator <- bin_factorial(k) * bin_factorial(n - k)
  numerator / denominator
}
#' @title Binomial Probability
#' @description probability of observing `success` successes in `trials`
#'   independent trials, each succeeding with probability `prob`
#' @param trials number of trials (non-negative integer)
#' @param success number of successes (non-negative integer)
#' @param prob success probability in [0, 1]
#' @return the binomial probability mass at `success`
bin_probability <- function(trials, success, prob) {
  # Validate each argument in turn; checks run in the same order as the
  # original else-if chain, stopping at the first failure.
  if (!is_nonneg_integer(trials)) {
    stop('number of trials must be a non-negative integer')
  }
  if (!is_nonneg_integer(success)) {
    stop('number of successes must be a non-negative integer')
  }
  if (!is_probability(prob)) {
    stop('probability must be a value in [0, 1]')
  }
  bin_combinations(trials, success) * prob^success * (1 - prob)^(trials - success)
}
#' @title Binomial Distribution
#' @description tabulates P(X = k) for k = 0..trials under
#'   Binomial(trials, prob)
#' @param trials number of trials (non-negative integer)
#' @param prob success probability in [0, 1]
#' @return a two-column matrix with columns `success` and `probability`
bin_distribution = function(trials, prob) {
  success = c(0:trials)
  # Preallocate all trials + 1 slots; the original allocated only `trials`
  # elements and relied on R silently growing the vector for the final
  # assignment.
  probability = rep(0, trials + 1)
  for (i in 1:(trials + 1)) {
    probability[i] = bin_probability(trials, i-1, prob)
  }
  cbind(success, probability)
}
|
29a322e4d1878aeaa8dea9cc58f4fa12a442d89e
|
5a8e7a53bdbdb29dfd64606b959a68abd9503739
|
/man/kde.polys.Rd
|
06ab3c989bd77ff0ec53b4df005e6536f76922d7
|
[] |
no_license
|
lsleezer/GISTools
|
fea72ededcc833a80f60c74f029d96f3ed458704
|
f112202e534df31eb9528a0df21f82016d7b9c10
|
refs/heads/master
| 2023-05-28T03:50:56.591106
| 2014-10-06T00:00:00
| 2014-10-06T00:00:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,096
|
rd
|
kde.polys.Rd
|
\name{Kernel Density Estimates From Points}
\alias{kde.points}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{Kernel Density Estimates}
\description{
Given a set of points, a bandwidth, a grid density and a frame, produce a kernel density estimate}
\usage{
kde.points(pts,h,n=200,lims=NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{pts}{A \code{SpatialPoints} or \code{SpatialPointsDataFrame} object.}
\item{h}{A real number - the bandwidth of the KDE}
\item{n}{An integer, the output grid density - ie result is nxn grid}
\item{lims}{A spatial object - the KDE grid will cover this, if provided}
}
\value{
A \code{SpatialPixelsDataFrame} containing the KDE.
}
\author{Chris Brunsdon}
\examples{
# Data for New Haven to use in example
data(newhaven)
# Do the KDE
breach.dens = kde.points(breach,lims=tracts)
# Plot the result
level.plot(breach.dens)
# Block out the part outside the study area
masker = poly.outer(breach.dens,tracts,extend=100); add.masking(masker)
# Plot census tract boundaries
plot(tracts,add=TRUE)
}
|
f14be8852b63615b3c3b8fc002e982e93142c91e
|
fd33f7df36e0a7fc63a9b707c153b63c78aa037e
|
/homework/homework4/pointwise.R
|
3006f688b7d2c0e88ff396431c9ba49ba50e43d8
|
[] |
no_license
|
BingzheWu/StaticalLearning
|
163de69ee3b4af0b4b4e0b25710045f4b8a6ca07
|
48ad7c564e28eaf3d54026201bcc7202f45bcc82
|
refs/heads/master
| 2016-09-01T05:57:29.053549
| 2015-10-26T02:26:32
| 2015-10-26T02:26:32
| 43,114,728
| 0
| 0
| null | null | null | null |
GB18030
|
R
| false
| false
| 1,393
|
r
|
pointwise.R
|
# Indicator of positivity: returns 1 for each strictly positive entry of
# `num` and 0 otherwise. (Vectorized replacement for the original
# element-by-element loop; the translated original comment read
# "simple sign function: positive -> 1, non-positive -> 0".)
sgn <- function(num) {
  as.numeric(num > 0)
}
# d_k(x) basis helper for the natural cubic spline, with the upper
# boundary knot fixed at 0.9 (matches the d_k(x) in ESL eq. 5.5).
dk <- function(x, knot) {
  left <- sgn(x - knot) * (x - knot)^3
  right <- sgn(x - 0.9) * (x - 0.9)^3
  (left - right) / (0.9 - knot)
}
# Natural cubic spline basis function: difference of d_k terms, with the
# second-to-last knot fixed at 0.74.
Nk <- function(x, knot) {
  dk(x, knot) - dk(x, 0.74)
}
# Pointwise variance of the fitted values (up to sigma^2): the diagonal
# of the hat matrix H (H'H)^{-1} H'.
povar <- function(H) {
  hat_mat <- H %*% solve(crossprod(H)) %*% t(H)
  diag(hat_mat)
}
# Simulate 50 sorted uniform(0, 1) design points.
x<-runif(50)
x<-sort(x)
# Design matrices for the four competing fits.
H1<-matrix(c(rep(1,50),x),50,2)# global linear: intercept + slope
# Global cubic polynomial.
H2<-matrix(c(rep(1,50),x,x^2,x^3),50,4)
# Cubic spline with knots at 0.33 and 0.66.
H3<-matrix(c(rep(1,50),x,x^2,x^3,sgn(x-0.33)*(x-0.33)^3,sgn(x-0.66)*(x-0.66)^3),50,6)
# Natural cubic spline with interior knots 0.1, 0.26, 0.42, 0.58.
H4<-matrix(c(rep(1,50),x,Nk(x,0.1),Nk(x,0.26),Nk(x,0.42),Nk(x,0.58)),50,6)
# Overlay the pointwise variance curves of all four fits on one plot
# (par(new=T) re-plots on the same axes; shared ylim keeps scales aligned).
plot(povar(H1)~x,type="b",col="orange",pch=16,ylim=c(0,0.6),ylab="Pointwise Variances")
mtext(text="orange--Global Linear",side=3,line=-1)
par(new=T)
plot(povar(H2)~x,type="b",col="red",pch=16,ylim=c(0,0.6),ylab="Pointwise Variances")
mtext(text="red--Global Cubic Polynomial",side=3,line=-2)
par(new=T)
plot(povar(H3)~x,type="b",col="green",pch=16,ylim=c(0,0.6),ylab="Pointwise Variances")
mtext(text="green--Cubic Spline - 2 knots",side=3,line=-3)
par(new=T)
plot(povar(H4)~x,type="b",col="blue",pch=16,ylim=c(0,0.6),ylab="Pointwise Variances")
mtext(text="blue--Natural Cubic Spline - 6 knots",side=3,line=-4)
|
6b6d446b2fff6d313844a16e2f8252469664a8a3
|
fe612f81a3118bf3ebef644bae3281bd1c156442
|
/man/h2o.setLevels.Rd
|
9bec48636510a3d9ecf19d1960a815bcdea58fed
|
[] |
no_license
|
cran/h2o
|
da1ba0dff5708b7490b4e97552614815f8d0d95e
|
c54f9b40693ae75577357075bb88f6f1f45c59be
|
refs/heads/master
| 2023-08-18T18:28:26.236789
| 2023-08-09T05:00:02
| 2023-08-09T06:32:17
| 20,941,952
| 3
| 3
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,146
|
rd
|
h2o.setLevels.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/frame.R
\name{h2o.setLevels}
\alias{h2o.setLevels}
\title{Set Levels of H2O Factor Column}
\usage{
h2o.setLevels(x, levels, in.place = TRUE)
}
\arguments{
\item{x}{A single categorical column.}
\item{levels}{A character vector specifying the new levels. The number of new levels must match the number of old levels.}
\item{in.place}{Indicates whether new domain will be directly applied to the column (in place change) or if a copy
of the column will be created with the given domain levels.}
}
\description{
Works on a single categorical vector. New domains must be aligned with the old domains.
This call has SIDE EFFECTS and mutates the column in place (change of the levels will also affect all the frames
that are referencing this column). If you want to make a copy of the column instead, use parameter in.place = FALSE.
}
\examples{
\dontrun{
library(h2o)
h2o.init()
iris_hf <- as.h2o(iris)
new_levels <- c("setosa", "versicolor", "caroliniana")
iris_hf$Species <- h2o.setLevels(iris_hf$Species, new_levels, in.place = FALSE)
h2o.levels(iris_hf$Species)
}
}
|
f327a17d720eb524ba1ab05ac98100ffce7fee3f
|
32ee2368f97561864223cee83a8e8cb24b6eca4e
|
/esf2.R
|
73f581e6b8239b17cdd151aa6f8bbdcc5c610c9d
|
[] |
no_license
|
wangzc1997/estimateDiffusionParameter
|
e557932ab24a010b43871704820fa2f8a803e7be
|
d3e7fc13911f0de5500ebb8cfee5fb68576e0253
|
refs/heads/master
| 2020-06-14T22:45:43.425935
| 2018-03-13T02:27:28
| 2018-03-13T02:27:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,459
|
r
|
esf2.R
|
# Estimating-function vector for the three diffusion parameters
# Y = (Y[1], Y[2], Y[3]), built from the first three eigenfunction
# martingale increments along the observed path X (a global numeric vector
# of length >= 10001, sampled at time step 0.01 — presumably set by the
# simulation script; TODO confirm).
#
# FIX: the original reset its accumulators (g1, g2, g3) to zero at the TOP
# of every loop iteration, so each "sum over t" collapsed to just its final
# term. The accumulators are now initialised once, before the loop.
# The three near-identical loops are also factored into shared helpers.
estfc2 <- function(Y) {
  # Normalising constant of the order-n eigenfunction (same expression the
  # original applied to both p1 and p2).
  norm_const <- function(n) {
    sqrt(gamma(Y[2] + n) * (2 * n + Y[1] + Y[2] - 1) * gamma(Y[2])
         * gamma(Y[1]) / (factorial(n) * gamma(Y[1] + Y[2] + n - 1)
                          * gamma(Y[1] + Y[2]) * gamma(Y[1] + n)))
  }
  # Unnormalised polynomial value at x for order n.
  poly_val <- function(x, n) {
    s <- 0
    for (m in 0:n) {
      s <- s + choose(n, m) * gamma(Y[1] + Y[2] + n + m - 2) /
        gamma(Y[2] + m) * ((x - 1)^m)
    }
    s
  }
  # Sum over the path of the martingale increments
  # phi_n(X[t+1]) - exp(lambda_n * dt) * phi_n(X[t]).
  increment_sum <- function(n) {
    # Eigenvalue times the sampling interval dt = 0.01 (loop-invariant,
    # hoisted out of the original per-t recomputation).
    l <- -Y[3] * n * (1 + (n - 1) / (Y[1] + Y[2])) * 0.01
    cn <- norm_const(n)
    g <- 0
    for (t in 1:10000) {
      p1 <- cn * poly_val(X[t], n)
      p2 <- cn * poly_val(X[t + 1], n)
      g <- g + (p2 - exp(l) * p1)
    }
    g
  }
  # Vector of the three estimating functions, named as before.
  return(c(F1 = increment_sum(1), F2 = increment_sum(2), F3 = increment_sum(3)))
}
# Solve estfc2(Y) = 0 from two different starting points
# (requires the rootSolve package and a global path X in scope).
library(rootSolve)
fs<-multiroot(estfc2,c(1,1,1))
ss<-multiroot(estfc2,c(2,2,0.5))
# Plug the "true" parameter values into the estimating functions and
# report the total deviation from zero as a sanity check.
Y=c(2,2,0.5)
V=estfc2(Y)
sum(V)
|
8fb69fa0a53540287e61943103060b4557b9beb2
|
aa11504f73b327c7c0249c80971ba76a7d9322b7
|
/Data Science Machine/modelingPip.R
|
281e7ba5982f6af17e3b86e14d56e2988646b3b3
|
[] |
no_license
|
hamzafar/dsmachine
|
d13fb1fd9e698ce2ed9028fee5c86a939db6e24f
|
cc8dadc941117892f11ba8ab190c5c753c022c8b
|
refs/heads/master
| 2020-12-26T02:21:45.375895
| 2017-01-11T17:24:13
| 2017-01-11T17:24:13
| 45,959,766
| 0
| 0
| null | 2015-11-11T04:29:43
| 2015-11-11T04:29:43
| null |
UTF-8
|
R
| false
| false
| 54
|
r
|
modelingPip.R
|
testPip <- dbReadTable(mydb, 'outcome')[1:5000,]
|
ba9cbb2eeffabe1ef1abaf942d665a66d7573f2a
|
2986385f34e53fccb02041e33ccf57ba7b6413b9
|
/DESCRIPTION.R
|
6032223d20652b112eccd2604a8045be4286fced
|
[] |
no_license
|
SDS410-Spring2020/CEEDS
|
b6ddfcd5c2e3e934f211f2b7b90f9be4b0892341
|
c64a6b6eb0355504901cbf2bcad81ac14136cde1
|
refs/heads/master
| 2022-04-03T13:40:38.290305
| 2020-02-25T19:12:25
| 2020-02-25T19:12:25
| 237,068,050
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 821
|
r
|
DESCRIPTION.R
|
Package: ceeds
Title: Center for the Environment, Ecological Design, and Sustainability
Version: 0.0.1.9000
Authors@R: c(
person("Benjamin S.", "Baumer", email = "ben.baumer@gmail.com",
role = c("aut", "cre")),
person(given = "Marta", family = "Garcia", role = "aut"),
person(given = "Mirella", family = "Hernandez", role = "aut"),
person(given = "Julia", family = "Lee", role = "aut")
)
Description: The Center for the Environment, Ecological Design, and
    Sustainability at Smith College engages in many different kinds of
programming. These functions facilitate the new CEEDS dashboard.
License: CC0
Depends:
R (>= 3.4.3)
Imports:
dplyr,
etl,
fs,
lubridate,
macleish (>= 0.3.3.9004),
magrittr,
readr,
purrr,
shiny
Encoding: UTF-8
LazyData: true
RoxygenNote: 6.1.1
Suggests:
testthat,
RSQLite
|
d4b16e0eaa734416aa05fbd8708b2e746b45c481
|
c11e775503ad697d157b169317403f0c14c77713
|
/rpackage/R/RcppExports.R
|
8401dca0c67aca96ec469b49215e76acc3fb78c7
|
[] |
no_license
|
magnusmunch/NIG
|
0df8e1fd004f643754a0e2b34503d43e2504428b
|
c92b5c23ff4f299d81813834294d45b34e398e58
|
refs/heads/master
| 2022-11-07T20:21:43.790758
| 2020-06-18T14:12:25
| 2020-06-18T14:12:25
| 161,490,141
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 933
|
r
|
RcppExports.R
|
# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
# NOTE(review): these are auto-generated thin R wrappers that dispatch to
# the compiled NIG C++ routines via .Call(); regenerate them with
# Rcpp::compileAttributes() instead of editing this file directly.
.f.optim.mat <- function(alpha, lambda, nu, zeta, Cmat, Z, n, p, D, idsel, G, H, y, x, yty, Zpres) {
    .Call(`_NIG_f_optim_mat`, alpha, lambda, nu, zeta, Cmat, Z, n, p, D, idsel, G, H, y, x, yty, Zpres)
}
.f.optim.list <- function(alpha, lambda, nu, zeta, Cmat, Z, n, p, D, G, H, y, x, yty, Zpres) {
    .Call(`_NIG_f_optim_list`, alpha, lambda, nu, zeta, Cmat, Z, n, p, D, G, H, y, x, yty, Zpres)
}
.Sigma.unp <- function(aold, bold, xu, xr, u, r) {
    .Call(`_NIG_Sigma_unp`, aold, bold, xu, xr, u, r)
}
.Sigma <- function(aold, bold, x) {
    .Call(`_NIG_Sigma`, aold, bold, x)
}
.aux.var.unp <- function(aold, bold, y, xu, xr, u, r) {
    .Call(`_NIG_aux_var_unp`, aold, bold, y, xu, xr, u, r)
}
.aux.var <- function(aold, bold, y, x, ytx) {
    .Call(`_NIG_aux_var`, aold, bold, y, x, ytx)
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.