blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1f7a6dbb7c97122e2594d2149328b5d6ce1a4505
|
54f90d5a909bc8b969502db1de21098166e928cc
|
/double_fa_analyzer_2.R
|
2141b6c73169f42cb8bc8fe520a532d78bfcb569
|
[] |
no_license
|
GuZase/ddcfDNA
|
f430145c706afb2995255e208fc497ebe1ed42c9
|
4ed5a018d61d5485c3969c7138fbabe0f2d66e96
|
refs/heads/master
| 2023-04-27T21:13:03.297800
| 2019-04-26T17:13:27
| 2019-04-26T17:13:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 15,505
|
r
|
double_fa_analyzer_2.R
|
setwd("/home/delaram/delaram/data/CareDx/CareDx_2019_Feb_23")
library(stringr)
library(data.table)
library(GenomicRanges)
library(IRanges)
library(rlang)
library(ggplot2)
library(Rsamtools)
library(BiocManager)
library(parallel)
library(plyr)
library(UpSetR)
############################################## functions
# Convert the long-format output of Rsamtools::pileup() into a wide table with
# one row per (seqname, position) and one count column per nucleotide level.
# Returns a data.frame: seqnames, start (character), then one numeric column
# per level of pileupres$nucleotide.
pileupFreq <- function(pileupres) {
  base_levels <- levels(pileupres$nucleotide)
  # nest: chromosome -> position -> one summary row of per-base counts
  per_chrom <- lapply(split(pileupres, pileupres$seqnames), function(chrom_df) {
    by_pos <- split(chrom_df, chrom_df$pos)
    pos_rows <- lapply(by_pos, function(pos_df) {
      chrom <- as.character(unique(pos_df$seqnames))
      position <- as.character(unique(pos_df$pos))
      counts <- sapply(base_levels, function(b) sum(pos_df$count[pos_df$nucleotide == b]))
      c(chrom, position, counts)
    })
    chrom_tab <- data.frame(do.call("rbind", pos_rows), stringsAsFactors = FALSE)
    rownames(chrom_tab) <- NULL
    chrom_tab
  })
  out <- data.frame(do.call("rbind", per_chrom), stringsAsFactors = FALSE)
  rownames(out) <- NULL
  colnames(out) <- c("seqnames", "start", base_levels)
  # rbind produced character columns; restore numeric types for the counts
  out[3:ncol(out)] <- apply(out[3:ncol(out)], 2, as.numeric)
  out
}
# Tidy a pileupFreq() table: drop the last three nucleotide-level columns
# (indel/skip symbols at positions 8:10), add total depth over A/C/G/T/N, and
# derive allele-frequency summaries per row:
#   F1       = highest base frequency
#   F2       = second-highest base frequency
#   ALT.freq = combined frequency of every base except the most frequent one
clean.df <- function(df){
  df <- df[, -(8:10)]
  df$depth <- df$A + df$C + df$G + df$T + df$N
  colnames(df) <- c("chr" ,"position", "A" , "C" , "G" , "T", "N", "depth" )
  # per-row ascending sort of the five count columns
  sorted_counts <- t(apply(df[, 3:7], 1, sort))
  df$F1 <- sorted_counts[, 5] / df$depth       # maximum frequency allele
  df$F2 <- sorted_counts[, 4] / df$depth       # second maximum frequency allele
  df$ALT.freq <- rowSums(sorted_counts[, -5]) / df$depth
  df
}
# Variant of clean.df() for tables whose columns 3:6 hold the A/T/C/G counts
# and which have no N column. Adds depth, F1 (top base frequency), F2 (second
# base frequency) and ALT.freq (everything but the top base) per row.
Clean<- function(df){
  df$depth <- df$A + df$C + df$G + df$T
  # row-wise ascending sort of the four base-count columns
  ranked <- t(apply(df[, 3:6], 1, sort))
  df$F1 <- ranked[, 4] / df$depth        # maximum frequency allele
  df$F2 <- ranked[, 3] / df$depth        # second maximum frequency allele
  df$ALT.freq <- rowSums(ranked[, -4]) / df$depth
  df
}
####################### gnomad
# Load a gnomAD allele-frequency table and keep single-base Ref/Alt variants
# whose population AF is near 0.5 (0.35-0.65) -- candidate heterozygous
# marker sites -- then overlap them with the amplicon panel.
gnomad_genome <-fread("/media/pgdrive/apps/annovar/dls/gnom_genome_sub.txt", na.strings = ".", header = T, data.table = F)
colnames(gnomad_genome)[1] <- "Chr"
gnomad_genome2=subset(gnomad_genome, Ref%in%c('A','T','C','G')&Alt%in%c('A','T','C','G')&gnomAD_genome_ALL>0.35&gnomAD_genome_ALL<0.65)
colnames(gnomad_genome2)[1] <- "Chr"
# prefix chromosome names with "chr" so they match the amplicon BED naming
gnomad_genome2$Chr=paste0('chr',gnomad_genome2$Chr)
gnom.GR = GRanges(seqnames=gnomad_genome2$Chr, IRanges(start=gnomad_genome2$Start,end=gnomad_genome2$End),
Ref=gnomad_genome2$Ref,Alt=gnomad_genome2$Alt,AF=gnomad_genome2$gnomAD_genome_ALL)
######################### amplicon info
# Read the amplicon BED and compute which gnomAD marker falls in which amplicon.
amplicons=fread("/home/delaram/delaram/data/CareDx/CareDx_2019_Jan_08/bamsplit/amplicons.bed",na.strings = '.',header=F,data.table = F)
colnames(amplicons)=c('chr','start','end')
amplicons.GR = GRanges(seqnames=amplicons$chr,ranges=IRanges(start=amplicons$start,end=amplicons$end))
amp=as.data.frame(mergeByOverlaps(amplicons.GR,gnom.GR))
# keep amplicon coordinates, overlapping gnomAD position, and Ref/Alt/AF
amp= amp[,c(1,2,3,7,14,15,16)]
colnames(amp)[1:4]=c('chr','amp.start','amp.end','gnom.pos')
ori.amp=amp
############
length(unique(paste0(ori.amp$gnom.pos,sep='_',ori.amp$chr))) #583 pos
# NOTE(review): paste0() has no `sep` argument, so `sep='_'` is treated as a
# regular string to concatenate -- which happens to produce the intended "a_b"
# form. Also, amp$pos does not exist yet at this point (it is created a few
# lines below), so the duplicated() key on the next line is effectively chr
# only -- confirm the "583 again" conclusion was not coincidental.
dim(amp[!duplicated(paste0(amp$pos,sep='_',amp$chr))|amp$AF>0.01,])# 583 again> no duplicates
###############
# unique id per SNP position and per amplicon; pos = SNP offset within amplicon
amp$id=paste0(amp$chr,sep='_',amp$gnom.pos)
amp$amplicon= paste0(amp$chr,sep='_',amp$amp.start, sep='_', amp$amp.end)
amp$pos= amp$gnom.pos- amp$amp.start +1
dim(amp)
dim(subset(amp,!duplicated(id))) #583 unique positions in amplicons > AF cut-off is not enough
dim(subset(amp,!duplicated(amplicon)))
# keep SNPs located away from the amplicon edges
amp2 = subset(amp, pos>150 & pos< 350)
dim(amp2)
### 388 positions- cut-offs >> AF:(0.35, 0.65) - pos:(150,350) - unique amplicon names(one SNP in each position)
amp3 = subset(subset(amp2,!duplicated(amplicon)))
dim(amp3)
length(unique(amp3$amplicon))
############################# pileup section
# Pile up every BAM file in the working directory, build one per-sample
# nucleotide-count table (via pileupFreq/clean.df above), and merge each table
# with the amp3 marker positions. Heavy I/O; parallelised with mclapply.
p_param = PileupParam(max_depth = 50000, min_base_quality=10, min_mapq=5,
min_nucleotide_depth=0, min_minor_allele_depth=0,
distinguish_strands=TRUE, distinguish_nucleotides=TRUE,
ignore_query_Ns=TRUE, include_deletions=TRUE, include_insertions=FALSE,
left_bins=NULL, query_bins=NULL, cycle_bins=NULL)
# FIX: anchor/escape the regexes -- ".bam$" also matches e.g. "xbam", and
# sub(".bam", ...) would strip an arbitrary character plus "bam" anywhere.
# Sample id = file-name part after the last underscore.
l <- list.files(".","\\.bam$")
samples <- sub(".*_","", sub("\\.bam$","",l))
#samples <- c('12707','12708','12709','12710','12711','12712')
bf <- mclapply(l, function(i) BamFile(i, index=paste0(i, ".bai")), mc.cores=detectCores()-2)
names(bf)<-samples
# BUG FIX: the argument was misspelled `mc.core`. mclapply's arguments after
# `...` are not partially matched, so the typo was silently forwarded to
# pileup() instead of limiting the number of workers.
bed.pileup <- mclapply(bf, pileup, pileupParam=p_param, mc.cores=detectCores()-2)
freq <- mclapply(bed.pileup, pileupFreq, mc.cores=detectCores()-2)
names(freq) <- paste(names(bf), ".freq", sep= "")
freq <- mclapply(freq, clean.df, mc.cores = detectCores()-2)
# rename first column to Contig; contig names encode chr_start_end_fasta
sapply(1:length(freq), function(i)colnames(freq[[i]])[1]<<-'Contig')
tmp <- mclapply(freq, function(i) {x=data.frame(str_split_fixed(i$Contig, "_", 3));colnames(x)=c('chr','start','end');return(x)})
freq <- sapply(1:length(freq), function(i)cbind(tmp[[i]],freq[[i]]),simplify =F)
# T.pos = genomic coordinate of the per-contig position
sapply(1:length(freq), function(i)freq[[i]]$T.pos<<-as.numeric(as.character(freq[[i]]$start))+as.numeric(as.character(freq[[i]]$position))-1)
sapply(1:length(freq), function(i)freq[[i]]$chr<<- as.character(freq[[i]]$chr))
# keep every amp3 marker row (all.y=T); samples missing a position get NAs
m <- sapply(1:length(freq), function(i)merge(freq[[i]],amp3,by.x =c('chr', 'T.pos'),by.y = c('chr', 'gnom.pos'),all.x=F,all.y=T,sort=F),simplify=F)
names(m)=samples
m <- sapply(1:length(m),function(i) m[[i]][,!colnames(m[[i]]) %in% c('amp.start','pos')],simplify = F)
# fasta = first two characters of the 4th "_" field of the contig name
# (which of the two reference fastas the contig belongs to)
sapply(1:length(m),function(i){x=as.data.frame(str_split_fixed(m[[i]]$Contig, "_", 4));m[[i]]$fasta<<-x$V4})
sapply(1:length(m),function(i){ m[[i]]$fasta <<-substr(as.character(m[[i]]$fasta),1,2)})
## ~15 positions in amp3 file are not included in freq
lapply(m, function(i) paste(sum(is.na(i$A)),sum(is.na(i$T)),sum(is.na(i$C)),sum(is.na(i$G))))
# NOTE(review): Contig holds contig-name strings; as.numeric() below yields
# NA (replaced by 0 two lines later) -- confirm this is intentional.
lapply(1:length(m), function(i){m[[i]]$start<<-as.numeric(as.character(m[[i]]$start));
m[[i]]$end<<-as.numeric(as.character(m[[i]]$end))
m[[i]]$Contig<<-as.numeric(as.character(m[[i]]$Contig))
m[[i]]$position<<-as.numeric(as.character(m[[i]]$position))})
lapply(m, function(i) paste(sum(is.na(i$A)),sum(is.na(i$T)),sum(is.na(i$C)),sum(is.na(i$G))))
# replace the NAs introduced by the merge with zero counts
m = lapply(m, function(x){replace(x, is.na(x), 0)} )
lapply(m, function(i) paste(sum(is.na(i$A)),sum(is.na(i$T)),sum(is.na(i$C)),sum(is.na(i$G))))
saveRDS(m,'/media/pgdrive/users/delaram/data/DoubleFastaTab_illumina_Feb23.rds')
#m = readRDS('/media/pgdrive/users/delaram/data/DoubleFastaTab2.rds')
############ #pos with more strict cut-offs
m1 = lapply(m, subset, AF>0.4 & AF<0.6) ## filters NA as well
lapply(m1, nrow)
m1 = lapply(m1, subset, position> 180 & position< 220)
lapply(m1, nrow)
test = lapply(1:length(m1), function(i) unique.data.frame(m1[[i]][,c('chr','T.pos')]))
lapply(test, nrow) ## ~350 less but better
################################ ddply + add attribute
# Collapse counts per (chr, genomic position) within each sample, stack the
# samples, re-attach the gnomAD Ref/Alt annotation, and flag error positions.
m.s = lapply(m, subset, select=c('chr','T.pos','A','T','C','G'))
lapply(1:length(m.s), function(i){m.s[[i]]$T.pos<<-as.character(m.s[[i]]$T.pos); m.s[[i]]$chr<<-as.character(m.s[[i]]$chr)})
lapply(m.s, nrow)
# sum counts of rows sharing a position (a position can occur in two contigs)
m.f = lapply(m.s,ddply, .(chr, T.pos), numcolwise(sum))
lapply(m.f, nrow) ## same as unique >> correct > 388
lapply(1:length(m.f), function(i) m.f[[i]]$Sample<<-samples[i])
data=do.call(rbind, m.f)
# before merge: total #pos=2328 after merge: each sample #pos=2328 >> no multiple ALt?
data.f=merge(data,amp3,by.x =c('chr', 'T.pos'),by.y = c('chr', 'gnom.pos'),all.x=T,all.y=F,sort=F)
paste0(nrow(data.f), sep=' ', sum(table(data.f$Alt))) #### NO NA value(all pos in gnomad)
data.f= Clean(data.f)
# m1/m2 = column names of the most / second-most frequent base in each row
# (m2 only set where some non-top-base reads exist, i.e. ALT.freq > 0)
data.f$m1=NA; data.f$m2=NA
sapply(1:nrow(data.f), function(i){
tmp=t(apply(data.f[i,3:6],1,sort))
data.f[i,]$m1<<-colnames(tmp)[4]
ifelse(data.f[i,'ALT.freq']>0, data.f[i,]$m2<<-colnames(tmp)[3] , data.f[i,]$m2<<-NA)})
# alt.C = summed count of bases other than the gnomAD Ref, over depth;
# E = summed count of bases matching neither Ref nor Alt, over depth
data.f$alt.C <- sapply(1:nrow(data.f),function(i){i=data.f[i,];base=c('A','C','G','T'); l =!base%in%c(i$Ref); do.call(sum,i[base[l]]) } ) / data.f$depth
data.f$E <- sapply(1:nrow(data.f),function(i){i=data.f[i,];base=c('A','C','G','T'); l =!base%in%c(i$Alt, i$Ref); do.call(sum,i[base[l]]) } ) / data.f$depth
# ref_error: the two most frequent observed bases do not match the gnomAD
# Ref/Alt pair (in either order; a missing m2 is tolerated)
data.f$ref_error <- !(data.f$m1==data.f$Ref & (data.f$m2==data.f$Alt|is.na(data.f$m2)) )& !(data.f$m1==data.f$Alt & (data.f$m2==data.f$Ref|is.na(data.f$m2) ) )
# c_error: non-Ref fraction falls in the ranges (0.05,0.35) or (0.65,0.95)
data.f$c_error <- (data.f$alt.C>0.05 & data.f$alt.C<0.35)|(data.f$alt.C>0.65 & data.f$alt.C<0.95)
saveRDS(data.f,'/media/pgdrive/users/delaram/data/dFa_finalTAB_illumina_feb23.rds')
#data.f = readRDS('/media/pgdrive/users/delaram/data/dFasta_finalTAB.rds')
## many are removed since we do not have all the 388 positions as a result of
### hard filtering in base calling step
# keep well-covered positions only and label each row by its error type
data.f2 = subset(data.f, depth>400)
data.f2$label=sapply(1:nrow(data.f2), function(i){
if(data.f2[i,'ref_error'] & data.f2[i,'c_error'])return('both')
else if(data.f2[i,'ref_error']) return('ref-alt error')
else if(data.f2[i,'c_error']) return('CareDx error')
else return('non')})
as.data.frame(table(data.f2$label))
# split into rows failing / passing the ref-alt consistency check
data.f.er <- data.f2[ !(data.f2$m1==data.f2$Ref & (data.f2$m2==data.f2$Alt|is.na(data.f2$m2))) & !(data.f2$m1==data.f2$Alt & (data.f2$m2==data.f2$Ref|is.na(data.f2$m2))) ,]
data.f.cor <- data.f2[ (data.f2$m1==data.f2$Ref & (data.f2$m2==data.f2$Alt| is.na(data.f2$m2))) | (data.f2$m1==data.f2$Alt & (data.f2$m2==data.f2$Ref|is.na(data.f2$m2))) ,]
# Open the report PDF; all plots below go into it (dev.off() is called at the
# end of the following section).
pdf('DoubleFastaPlot_illumina_feb23.pdf')
# density / box / violin views of the second-allele frequency F2 and error E
ggplot(data.f2, aes(F2+1e-5, color=Sample))+geom_density()+theme_bw()+ggtitle('total data')
ggplot(data.f2, aes(F2+1e-5, color=Sample))+geom_density()+scale_x_log10()+theme_bw()+ggtitle('total data(log)')
ggplot(data.f.cor, aes(F2+1e-5, color=Sample))+geom_density()+scale_x_log10()+theme_bw()+ggtitle('non.ref.error(log) subset')
#ggplot(data.f.er, aes(F2+1e-5, color=Sample))+geom_density()+scale_x_log10()+theme_bw()+ggtitle('ref.error(log) subset')
ggplot(data.f2, aes(y=F2+1e-5, x=Sample))+scale_y_log10()+geom_violin(aes(fill=Sample))+geom_boxplot(width=0.17)+theme_bw()
ggplot(data.f2, aes(y=F2+1e-5, x=Sample))+scale_y_log10()+geom_boxplot(width=0.7,aes(fill=Sample))+theme_bw()
ggplot(data.f2, aes(E+1e-5, color=Sample))+geom_density()+theme_bw()+ggtitle('total data')
ggplot(data.f2, aes(E+1e-5, color=Sample))+geom_density()+scale_x_log10()+theme_bw()+ggtitle('total data(log)')
# per-sample scatter of alt.C, ordered by alt.C (index = rank within sample)
df.sub = split(data.f2,data.f2$Sample)
lapply(df.sub, nrow) ## number of targeted positions ~ 360 remained
df.sub = lapply(df.sub, function(i) i[order(i$alt.C),])
df.sub = lapply(df.sub, function(i){ i$index=1:nrow(i); i})
#pdf('depth.filter_corrected.pdf')
# NOTE(review): these ggplots are returned inside lapply; whether they are
# auto-printed into the PDF depends on how the script is run (Rscript prints
# the visible top-level result, source() does not) -- confirm output.
lapply(df.sub, function(t){ggplot(t, aes(x=index,y=alt.C, color=label))+geom_point(size=0.9)+
theme_bw()+ggtitle(t[1,'Sample'])+
geom_text(hjust = 0, nudge_x = 0.05,check_overlap = TRUE,size=1.6,colour='black',
aes(label=ifelse(c_error,paste0(chr,sep='_',amp.start,sep='_',amp.end),'')))
})
#dev.off()
data.f3=do.call(rbind,df.sub)
data.f3$Sample = as.character(data.f3$Sample)
ggplot(data.f3, aes(x=index,y=alt.C, color=Sample))+geom_point(size=0.9)+theme_bw()+scale_color_brewer(palette="Accent")+ggtitle('total data')
# build a 0/1 membership table of c_error positions per sample for UpSetR
x = lapply(df.sub, function(i) i[i$c_error,])
x = lapply(x, function(i) sapply(1:nrow(i), function(j) paste(i[j,'chr'],i[j,'T.pos'],sep='_',collapse = NULL)))
uni = unique(unlist(x))
tab = as.data.frame(uni)
tab[,2:7] =sapply(1:length(x),function(j) {sapply(1:nrow(tab), function(i) ifelse( tab[i,'uni']%in% x[[j]],1, 0))})
colnames(tab)[1]=c('posID')
colnames(tab)[2:7]= names(x)
# intersection plot of error positions shared between samples
upset(tab,sets = names(x) ,
matrix.color = "#990000",mainbar.y.label = 'CareDx error intersection',
sets.bar.color = c('darkslategrey','cyan4','cyan3','cyan3','cyan2','cyan1'),
sets.x.label = c('(alt.C>0.05 & alt.C<0.35)|(alt.C>0.65 & alt.C<0.95)'),
keep.order = F,nintersects=NA, point.size = 2.6,line.size = 0.7)
#################
# Build one wide table: rows = positions present in every sample, columns =
# per-sample alt.C / E / ref_error / c_error, plus row sums of the error flags.
data.f3$ref_error=ifelse(data.f3$ref_error,1,0)
data.f3$c_error=ifelse(data.f3$c_error,1,0)
tmp= data.f3[,colnames(data.f3)%in%c('chr','T.pos','alt.C','E','ref_error','Sample','c_error')]
tmp=split.data.frame(tmp,f=tmp$Sample,drop=FALSE)
# suffix the metric columns with the sample id (e.g. alt.C_12707)
n=lapply(tmp, function(i){colnames(i)[4:7]=paste(colnames(i)[4:7],sep='_',i[1,'Sample'],collapse=NULL)})
sapply(1:length(tmp), function(i)colnames(tmp[[i]])[4:7]<<-n[[i]] )
# name = chr_position key used to align rows across samples
sapply(1:length(tmp),function(i) tmp[[i]]$name<<-paste(tmp[[i]]$chr,sep='_',tmp[[i]]$T.pos,collapse=NULL))
sapply(1:length(tmp), function(i) tmp[[i]]<<-tmp[[i]][,-c(1,2,3)])
sapply(1:length(tmp),function(i)rownames(tmp[[i]])<<-tmp[[i]]$name)
name=sapply(1:length(tmp),function(i)tmp[[i]]$name)
ints=Reduce(intersect,name) # 364 positiosn present in all of the samples
sapply(1:length(tmp), function(i) tmp[[i]]<<-tmp[[i]][,colnames(tmp[[i]])!='name'])
# cbind the per-sample tables restricted to the shared positions
tmp=do.call(cbind,lapply(1:length(tmp), function(i) tmp[[i]][rownames(tmp[[i]])%in%ints,]))
tmp$ref_error.Sum=rowSums(tmp[,colnames(tmp)%in%paste0('ref_error_',samples)])
tmp$c_error.Sum=rowSums(tmp[,colnames(tmp)%in%paste0('c_error_',samples)])
#saveRDS(object = tmp, file = '/home/delaram/delaram/data/CareDx/DoubleFastaErrorTab.rds')
e=subset(tmp,ref_error.Sum>0)
ec=subset(tmp,c_error.Sum>0)
#saveRDS(ec,file= '/home/delaram/delaram/data/CareDx/careDxErrors_SingleFa_feb12.rds')
#ec = readRDS('/home/delaram/delaram/data/CareDx/careDxErrors.rds')
#ggplot(tmp, aes(x=ref_error.Sum, y=c_error.Sum))+geom_point(alpha = 1/5,colour='blue')#geom_bin2d(bins=8)
# how often does each (ref_error.Sum, c_error.Sum) combination occur?
tmp.p <- ddply(tmp, .(ref_error.Sum, c_error.Sum), summarise, num=length(ref_error.Sum))
ggplot(tmp.p, aes(ref_error.Sum, c_error.Sum))+geom_point(aes(size=num))+scale_size_continuous(trans="log2")
par(mfrow=c(1,2))
barplot(table(tmp$c_error.Sum),main = 'c_error',col='red')
barplot(table(tmp$ref_error.Sum),main='ref_error',col='blue')
par(mfrow=c(1,1))
# pairwise scatter plots of E and alt.C across samples
e.tmp=tmp[,colnames(tmp)%in%paste0('E_',samples)]
thr=1
#e.tmp=subset(e.tmp,E_12707<thr&E_12708<thr&E_12709<thr&E_12710<thr&E_12711<thr&E_12712<thr)
plot(e.tmp)
alt.c.tmp=tmp[,colnames(tmp)%in%paste0('alt.C_',samples)]
plot(alt.c.tmp,col='dark blue')
dev.off()
####### QC
# Exploratory sanity checks on the intermediate objects (nothing is saved;
# results are inspected interactively).
nrow(amplicons)*2 ## total number of initial Contigs (double fasta)
nrow(amplicons) ## total number of initial Contigs (single fasta)
lapply(1:length(bed.pileup), function(i) length(unique(bed.pileup[[i]]$seqnames))) ## number of unique contigs in bam file
lapply(1:length(bed.pileup), function(i) nrow(amplicons)*2 -length(unique(bed.pileup[[i]]$seqnames))) ##contigs with 0 aligned read(double)
lapply(1:length(bed.pileup), function(i) nrow(amplicons) -length(unique(bed.pileup[[i]]$seqnames))) ##contigs with 0 aligned read (single)
lapply(1:length(freq), function(i) length(unique(freq[[i]]$Contig))) ## same num as above
lapply(1:length(freq), function(i) nrow(freq[[i]])/length(unique(freq[[i]]$Contig))) ## ~90 positions for each contig
lapply(1:length(freq), function(i) table(freq[[i]]$Contig) ) ## exact
lapply(freq, function(i) sum(i$depth==0)) ## num positions with zero depth(not removed)
lapply(1:length(freq), function(i) table(round(freq[[i]]$ALT.freq,2))) ## many ~0 ALT.freq
lapply(1:length(freq), function(i) paste0(nrow(freq[[i]]),sep=' ',nrow(m[[i]]))) # more pos after merge> many ALT positions in gnomad
sapply(1:length(m),function(i) table(as.character(m[[i]]$fasta))/nrow(m[[i]]))#ratio of aligned read to 2 ref files
lapply(1:length(m), function(i) range(as.numeric(m[[i]]$position)) ) #range of snp positions in amplicons
# repeat the strict-filter counts with a wider position window (150, 250)
test = lapply(m, subset, AF>0.4 & AF<0.6)
lapply(test, dim)
test2 = lapply(test, subset, position> 150 & position< 250)
lapply(test2, dim)
test3 = lapply(1:length(test2), function(i) unique.data.frame(test2[[i]][,c('chr','T.pos')]))
lapply(test3, dim)
|
03a850e33ee7e44816838acc5644157d86fef861
|
b9db037ee7bc2ebf9c228ad1f66fecabccfa70be
|
/tests/testthat/test_add_feature_weights.R
|
4ef0f9089d75f4607b645fe9e9a8faf6813882be
|
[] |
no_license
|
IsaakBM/prioritizr
|
924a6d8dcc7c8ff68cd7f5a2077de2fa1f300fe7
|
1488f8062d03e8736de74c9e7803ade57d6fcc29
|
refs/heads/master
| 2020-12-10T06:23:19.437647
| 2019-12-22T00:04:20
| 2019-12-22T00:04:20
| 233,524,401
| 1
| 0
| null | 2020-01-13T06:13:19
| 2020-01-13T06:13:18
| null |
UTF-8
|
R
| false
| false
| 14,578
|
r
|
test_add_feature_weights.R
|
# testthat suite for add_feature_weights(): checks both the compiled
# optimization-problem structure and end-to-end solutions.
context("add_feature_weights")
# Weights seq(10, 14) must appear verbatim in the objective after the scaled
# planning-unit costs, with the expected constraint senses/bounds.
test_that("compile (compressed formulation, single zone)", {
# generate optimization problem
data(sim_pu_raster, sim_features)
b <- floor(raster::cellStats(sim_pu_raster, "sum")) * 0.25
p <- problem(sim_pu_raster, sim_features) %>%
add_max_cover_objective(budget = b) %>%
add_feature_weights(seq(10, 14))
o <- compile(p)
# check that objective has been correctly applied
n_pu <- length(sim_pu_raster[[1]][!is.na(sim_pu_raster)])
n_f <- raster::nlayers(sim_features)
scaled_costs <- p$planning_unit_costs()
scaled_costs <- scaled_costs * (-0.01 / sum(scaled_costs, na.rm = TRUE))
expect_equal(o$modelsense(), "max")
expect_equal(o$obj(), c(scaled_costs, seq(10, 14)))
expect_equal(o$sense(), c(rep(">=", n_f), "<="))
expect_equal(o$rhs(), c(rep(0, n_f), b))
expect_equal(o$col_ids(), c(rep("pu", n_pu), rep("present", n_f)))
expect_equal(o$row_ids(), c(rep("spp_present", n_f), "budget"))
# rij block, budget row, and the -1 identity block for "present" variables
expect_true(all(o$A()[seq_len(n_f),
seq_len(n_pu)] == p$data$rij_matrix[[1]]))
expect_equal(o$A()[n_f + 1, ],
c(p$planning_unit_costs(), rep(0, n_f)))
expect_true(all(o$A()[seq_len(n_f), n_pu + seq_len(n_f)] ==
Matrix::sparseMatrix(i = seq_len(n_f), j = seq_len(n_f),
x = rep(-1, n_f), giveCsparse = FALSE)))
expect_equal(o$lb(), rep(0, n_f + n_pu))
expect_equal(o$ub(), rep(1, n_f + n_pu))
})
# With a weight of 100 on feature 3 (present only in the last cell), the
# solver must pick that cell despite the locked-out first cell.
test_that("solve (compressed formulation, single zone)", {
skip_on_cran()
skip_on_travis()
skip_on_appveyor()
skip_if_not(any_solvers_installed())
# create data
budget <- 4.23
cost <- raster::raster(matrix(c(1, 2, NA, 4), nrow = 1))
locked_out <- 1
features <- raster::stack(raster::raster(matrix(c(2, 1, 1, 0), nrow = 1)),
raster::raster(matrix(c(10, 10, 10, 10), nrow = 1)),
raster::raster(matrix(c(0, 0, 0, 2), nrow = 1)))
# create problem
p <- problem(cost, features) %>%
add_max_utility_objective(budget = budget) %>%
add_feature_weights(c(1, 1, 100)) %>%
add_locked_out_constraints(locked_out) %>%
add_default_solver(gap = 0)
# solve problem
s1 <- solve(p)
s2 <- solve(p)
# test for correct solution (and that repeated solves are deterministic)
expect_equal(raster::values(s1), c(0, 0, NA, 1))
expect_equal(raster::values(s1), raster::values(s2))
})
# Same weight check for the expanded formulation, where each (feature,
# planning unit) pair gets its own pu_ijz variable.
test_that("compile (expanded formulation, single zone)", {
# generate optimization problem
data(sim_pu_raster, sim_features)
b <- floor(raster::cellStats(sim_pu_raster, "sum")) * 0.25
p <- problem(sim_pu_raster, sim_features) %>%
add_max_cover_objective(budget = b) %>%
add_feature_weights(seq(10, 14))
o <- compile(p, compressed_formulation = FALSE)
# check that constraints and metadata have been correctly applied
n_pu <- length(sim_pu_raster[[1]][!is.na(sim_pu_raster)])
n_f <- raster::nlayers(sim_features)
rij <- rij_matrix(sim_pu_raster, sim_features)
scaled_costs <- p$planning_unit_costs()
scaled_costs <- scaled_costs * (-0.01 / sum(scaled_costs, na.rm = TRUE))
expect_equal(o$modelsense(), "max")
expect_equal(o$obj(), c(scaled_costs, rep(0, n_pu * n_f), seq(10, 14)))
expect_equal(o$sense(), c(rep("<=", n_pu * n_f), rep( ">=", n_f), "<="))
expect_equal(o$rhs(), c(rep(0, n_f * n_pu), rep(0, n_f), b))
expect_equal(o$col_ids(), c(rep("pu", n_pu), rep("pu_ijz", n_pu * n_f),
rep("present", n_f)))
expect_equal(o$row_ids(), c(rep("pu_ijz", n_f * n_pu),
rep("spp_present", n_f), "budget"))
expect_equal(o$lb(), rep(0, n_pu + (n_f * n_pu) + n_f))
expect_equal(o$ub(), rep(1, n_pu + (n_f * n_pu) + n_f))
# check that problem matrix is correct
# linking rows: pu_ijz variable can be 1 only if its planning unit is selected
row <- 0
for (i in seq_len(n_f)) {
for (j in seq_len(n_pu)) {
row <- row + 1
curr_row <- rep(0, n_pu + (n_pu * n_f) + n_f)
curr_row[j] <- -1
curr_row[n_pu + ( (i - 1) * n_pu) + j] <- 1
expect_equal(o$A()[row, ], curr_row)
}
}
# spp_present rows: weighted rij sums minus the "present" indicator
for (i in seq_len(n_f)) {
curr_row <- rep(0, n_pu + (n_pu * n_f) + n_f)
curr_row[(i * n_pu) + seq_len(n_pu)] <- rij[i, ]
curr_row[n_pu + (n_f * n_pu) + i] <- -1
expect_equal(o$A()[(n_f * n_pu) + i, ], curr_row)
}
# budget row
expect_equal(o$A()[(n_pu * n_f) + n_f + 1, ], c(p$planning_unit_costs(),
rep(0, n_f * n_pu),
rep(0, n_f)))
})
# Expanded formulation must reach the same weighted solution as compressed.
test_that("solve (expanded formulation, single zone)", {
skip_on_cran()
skip_on_travis()
skip_on_appveyor()
skip_if_not(any_solvers_installed())
# create data
budget <- 4.23
cost <- raster::raster(matrix(c(1, 2, NA, 4), nrow = 1))
locked_out <- 1
features <- raster::stack(raster::raster(matrix(c(2, 1, 1, 0), nrow = 1)),
raster::raster(matrix(c(10, 10, 10, 10), nrow = 1)),
raster::raster(matrix(c(0, 0, 0, 2), nrow = 1)))
# create problem
p <- problem(cost, features) %>%
add_max_utility_objective(budget = budget) %>%
add_feature_weights(c(1, 1, 100)) %>%
add_locked_out_constraints(locked_out) %>%
add_default_solver(gap = 0)
# solve problem
s <- solve(p, compressed_formulation = FALSE)
# test for correct solution
expect_equal(raster::values(s), c(0, 0, NA, 1))
})
# Wrong length, negative, NA, and non-finite weight vectors must all error.
# NOTE(review): relies on sim_pu_raster/sim_features being available without
# an explicit data() call here -- confirm lazy-data loading is intended.
test_that("invalid inputs (single zone)", {
# check that invalid arguments result in errors
expect_error({
problem(sim_pu_raster, sim_features) %>%
add_feature_weights(seq_len(4))
})
expect_error({
problem(sim_pu_raster, sim_features) %>%
add_feature_weights(c(-1, 1, 2, 3, 4))
})
expect_error({
problem(sim_pu_raster, sim_features) %>%
add_feature_weights(c(1, NA, 2, 3, 4))
})
expect_error({
problem(sim_pu_raster, sim_features) %>%
add_feature_weights(c(1, Inf, 2, 3, 4))
})
})
# Multi-zone weights are supplied as a features x zones matrix; compiled
# column-wise into the objective after the scaled costs.
test_that("compile (compressed formulation, multiple zones)", {
# generate optimization problem
data(sim_pu_zones_stack, sim_features_zones)
b <- min(floor(raster::cellStats(sim_pu_zones_stack, "sum")) * 0.25)
p <- problem(sim_pu_zones_stack, sim_features_zones) %>%
add_max_cover_objective(budget = b) %>%
add_feature_weights(matrix(10 + seq_len(15), ncol = 3))
o <- compile(p)
# check that constraints and metadata have been correctly applied
n_pu <- p$number_of_planning_units()
n_f <- p$number_of_features()
n_z <- p$number_of_zones()
scaled_costs <- p$planning_unit_costs()
scaled_costs <- scaled_costs * (-0.01 / sum(scaled_costs, na.rm = TRUE))
expect_equal(o$modelsense(), "max")
expect_equal(o$obj(), c(scaled_costs, seq(11, 25)))
expect_equal(o$sense(), c(rep(">=", n_f * n_z), "<=",
rep("<=", n_pu)))
expect_equal(o$rhs(), c(rep(0, n_f * n_z), b, rep(1, n_pu)))
expect_equal(o$col_ids(), c(rep("pu", n_pu * n_z),
rep("present", n_f * n_z)))
expect_equal(o$row_ids(), c(rep("spp_present", n_f * n_z),
"budget", rep("pu_zone", n_pu)))
expect_equal(o$lb(), rep(0, (n_pu * n_z) + (n_f * n_z)))
expect_equal(o$ub(), rep(1, (n_pu * n_z) + (n_f * n_z)))
# check that problem matrix is correct
# rebuild the expected constraint matrix by hand: spp_present rows per
# (zone, feature), one budget row, then one pu_zone row per planning unit
m <- matrix(0, nrow = (n_f * n_z) + 1 + n_pu,
ncol = (n_pu * n_z) + (n_z * n_f))
counter <- 0
for (z in seq_len(n_z)) {
for (f in seq_len(n_f)) {
counter <- counter + 1
m[counter, ((z - 1) * n_pu) + seq_len(n_pu)] <- p$data$rij[[z]][f, ]
m[counter, (n_z * n_pu) + ((z - 1) * n_f) + f] <- -1
}
}
counter <- counter + 1
m[counter, seq_len(n_pu * n_z)] <- c(p$planning_unit_costs())
for (i in seq_len(n_pu)) {
m[i + counter, i] <- 1
m[i + counter, n_pu + i] <- 1
m[i + counter, (2 * n_pu) + i] <- 1
}
m <- as(m, "Matrix")
expect_true(all(o$A() == m))
})
# End-to-end multi-zone solve: weight 100 on the target that feature 3 can
# meet in either zone forces the expected cell selections.
test_that("solve (compressed formulation, multiple zones)", {
skip_on_cran()
skip_if_not(default_solver_name() != "lpsymphony")
skip_if_not(any_solvers_installed())
# create data
budget <- 20
cost <- raster::stack(
raster::raster(matrix(c(5, 6, 7, 8, NA, NA), nrow = 1)),
raster::raster(matrix(c(11, 12, 13, 14, NA, 15), nrow = 1)))
features <- raster::stack(
# zone 1
raster::raster(matrix(c(1, 1, 1, 1, 1, 1), nrow = 1)),
raster::raster(matrix(c(1, 1, 1, 1, 1, 1), nrow = 1)),
raster::raster(matrix(c(2, 1, 1, 1, 1, 1), nrow = 1)),
# zone 2
raster::raster(matrix(c(1, 1, 1, 1, 1, 1), nrow = 1)),
raster::raster(matrix(c(1, 1, 1, 1, 1, 1), nrow = 1)),
raster::raster(matrix(c(1, 1, 1, 1, 1, 3), nrow = 1)))
targs <- tibble::tibble(feature = c("layer.1", "layer.2", "layer.3"),
zone = list("1", "1", c("1", "2")),
sense = rep(">=", 3),
type = "absolute",
target = c(1, 1, 5),
weight = c(1, 1, 100))
# create problem (drop the weight column from the manual targets table)
p <- problem(cost, zones(features[[1:3]], features[[4:6]],
feature_names = targs$feature)) %>%
add_max_features_objective(budget = budget) %>%
add_manual_targets(targs[, -6]) %>%
add_feature_weights(matrix(targs$weight, ncol = 1)) %>%
add_default_solver(gap = 0)
# solve problem
s <- solve(p)
# test for correct solution
expect_equal(raster::values(s[[1]]), c(1, 0, 0, 0, NA, NA))
expect_equal(raster::values(s[[2]]), c(0, 0, 0, 0, NA, 1))
})
# Expanded multi-zone formulation: pu_ijz linking rows precede the
# spp_present, budget, and pu_zone rows; weights land at the objective tail.
test_that("compile (expanded formulation, multiple zones)", {
# generate optimization problem
data(sim_pu_zones_stack, sim_features_zones)
b <- min(floor(raster::cellStats(sim_pu_zones_stack, "sum")) * 0.25)
p <- problem(sim_pu_zones_stack, sim_features_zones) %>%
add_max_cover_objective(budget = b) %>%
add_feature_weights(matrix(10 + seq_len(15), ncol = 3))
o <- compile(p, FALSE)
# check that constraints and metadata have been correctly applied
n_pu <- p$number_of_planning_units()
n_f <- p$number_of_features()
n_z <- p$number_of_zones()
scaled_costs <- p$planning_unit_costs()
scaled_costs <- scaled_costs * (-0.01 / sum(scaled_costs, na.rm = TRUE))
expect_equal(o$modelsense(), "max")
expect_equal(o$obj(), c(scaled_costs, rep(0, n_pu * n_f * n_z), seq(11, 25)))
expect_equal(o$sense(), c(rep("<=", n_f * n_z * n_pu),
rep(">=", n_f * n_z), "<=",
rep("<=", n_pu)))
expect_equal(o$rhs(), c(rep(0, n_pu * n_z * n_f),
rep(0, n_f * n_z), b, rep(1, n_pu)))
expect_equal(o$col_ids(), c(rep("pu", n_pu * n_z),
rep("pu_ijz", n_pu * n_z * n_f),
rep("present", n_f * n_z)))
expect_equal(o$row_ids(), c(rep("pu_ijz", n_pu * n_z * n_f),
rep("spp_present", n_f * n_z),
"budget", rep("pu_zone", n_pu)))
expect_equal(o$lb(), rep(0, (n_pu * n_z) + (n_pu * n_z * n_f) + (n_f * n_z)))
expect_equal(o$ub(), rep(1, (n_pu * n_z) + (n_pu * n_z * n_f) + (n_f * n_z)))
# check that problem matrix is correct
m <- matrix(0, nrow = (n_pu * n_z * n_f) + (n_f * n_z) + 1 + n_pu,
ncol = (n_pu * n_z) + (n_pu * n_z * n_f) + (n_z * n_f))
counter <- 0
# linking rows: pu_ijz can only be selected when its planning unit is
for (z in seq_len(n_z)) {
for (i in seq_len(n_f)) {
for (j in seq_len(n_pu)) {
counter <- counter + 1
m[counter, ((z - 1) * n_pu) + j] <- -1
m[counter, (n_pu * n_z) +
((z - 1) * n_pu * n_f) +
((i - 1) * n_pu) +
j] <- 1
}
}
}
# spp_present rows: rij coefficients minus the per-(zone, feature) indicator
for (z in seq_len(n_z)) {
for (f in seq_len(n_f)) {
counter <- counter + 1
for (pu in seq_len(n_pu)) {
col <- (n_pu * n_z) + ((z - 1) * n_f * n_pu) +
((f - 1) * n_pu) + pu
m[counter, col] <- p$data$rij[[z]][f, pu]
}
col <- (n_pu * n_z) + (n_pu * n_f * n_z) + ((z - 1) * n_f) + f
m[counter, col] <- -1
}
}
# budget row, then one-zone-per-planning-unit rows
counter <- counter + 1
m[counter, seq_len(n_pu * n_z)] <- c(p$planning_unit_costs())
for (i in seq_len(n_pu)) {
m[i + counter, i] <- 1
m[i + counter, n_pu + i] <- 1
m[i + counter, (2 * n_pu) + i] <- 1
}
m <- as(m, "Matrix")
expect_true(all(o$A() == m))
})
# Same multi-zone scenario as the compressed solve test; the expanded
# formulation must produce the identical selection.
test_that("solve (expanded formulation, multiple zones)", {
skip_on_cran()
skip_on_travis()
skip_on_appveyor()
skip_if_not(any_solvers_installed())
# create data
budget <- 20
cost <- raster::stack(
raster::raster(matrix(c(5, 6, 7, 8, NA, NA), nrow = 1)),
raster::raster(matrix(c(11, 12, 13, 14, NA, 15), nrow = 1)))
features <- raster::stack(
# zone 1
raster::raster(matrix(c(1, 1, 1, 1, 1, 1), nrow = 1)),
raster::raster(matrix(c(1, 1, 1, 1, 1, 1), nrow = 1)),
raster::raster(matrix(c(2, 1, 1, 1, 1, 1), nrow = 1)),
# zone 2
raster::raster(matrix(c(1, 1, 1, 1, 1, 1), nrow = 1)),
raster::raster(matrix(c(1, 1, 1, 1, 1, 1), nrow = 1)),
raster::raster(matrix(c(1, 1, 1, 1, 1, 3), nrow = 1)))
targs <- tibble::tibble(feature = c("layer.1", "layer.2", "layer.3"),
zone = list("1", "1", c("1", "2")),
sense = rep(">=", 3),
type = "absolute",
target = c(1, 1, 5),
weight = c(1, 1, 100))
# create problem (drop the weight column from the manual targets table)
p <- problem(cost, zones(features[[1:3]], features[[4:6]],
feature_names = targs$feature)) %>%
add_max_features_objective(budget = budget) %>%
add_manual_targets(targs[, -6]) %>%
add_feature_weights(matrix(targs$weight, ncol = 1)) %>%
add_default_solver(gap = 0)
# solve problem
s <- solve(p)
# test for correct solution
expect_equal(raster::values(s[[1]]), c(1, 0, 0, 0, NA, NA))
expect_equal(raster::values(s[[2]]), c(0, 0, 0, 0, NA, 1))
})
# Check that invalid multi-zone budget vectors raise errors.
# NOTE(review): this test exercises add_max_cover_objective() budget
# validation rather than add_feature_weights() itself -- confirm intent.
test_that("invalid inputs (multiple zones)", {
data(sim_pu_zones_stack, sim_features_zones)
# negative budget for one zone
expect_error({
problem(sim_pu_zones_stack, sim_features_zones) %>%
add_max_cover_objective(budget = c(1, -5, 1))
})
# NA budget for one zone
expect_error({
problem(sim_pu_zones_stack, sim_features_zones) %>%
add_max_cover_objective(budget = c(1, NA, 1))
})
# all budgets NA
expect_error({
problem(sim_pu_zones_stack, sim_features_zones) %>%
add_max_cover_objective(budget = c(NA, NA, NA))
})
# non-finite budget
# BUG FIX: this expectation appeared twice verbatim in the original file;
# the redundant duplicate has been removed.
expect_error({
problem(sim_pu_zones_stack, sim_features_zones) %>%
add_max_cover_objective(budget = c(1, Inf, 9))
})
})
|
19dbfef965e70f5fb6256b390bcbbb05e3bf3d43
|
dadfa5ac29f15b2ed6387c02a137c8aa5b491e83
|
/R Programming Advanced Analytics in R/Creating a TimeSeries plot.R
|
b82175a51320627999a1d858c0585db2376829f6
|
[] |
no_license
|
guilhermeaugusto9/R-Userful-Scripts
|
70bb21e712d5dad634d1fc79b0274dd24eae1671
|
327ab34138016b991d6dd26a707359c90340cb4a
|
refs/heads/master
| 2020-12-04T02:55:19.420406
| 2020-01-03T12:24:52
| 2020-01-03T12:24:52
| 231,580,679
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,201
|
r
|
Creating a TimeSeries plot.R
|
# This tutorial starts at line 400
# LISTS IN R
# Deliverable: a list with the following components
# Character: Machine list
# Vector: (min, mean, max) utilization for the month (excluding unknown hours)
# Logical: Has utilization ever fallen below 90%? TRUE/FALSE
# Vector: ALL hours where utilization is unknown (NA's)
# Dataframe for this machine
# Plot for all machines
# select the folder containing the input files
getwd()
setwd("C:/Users/guilherme.augusto/Documents/R Programming Material/R Programming Advanced Analytics in R")
getwd()
# read the file into a data frame
util<- read.csv("P3-Machine-Utilization.csv")
# inspect the first rows
head(util,n =15)
# inspect the structure
str(util)
# statistical summary
summary(util)
# create a utilization column (utilization = 1 - idle fraction)
util$Utilization <- 1-util$Percent.Idle
head(util, n = 15)
# first, check whether the timestamp format is dd/MM/YYYY or MM/dd/YYYY
# POSIX is a worldwide computing convention that guarantees compatibility of
# times, dates and other things, so a program runs
# on any computer
# in R there is a function for this: POSIXct
# POSIXct measures the number of seconds elapsed since 1970; it is an INT
# to standardize everything, we convert the dates with as.POSIXct()
as.POSIXct(util$Timestamp, format = "%d/%m/%Y %H:%M")
# store the standardized time in a new column
util$PosixTime <- as.POSIXct(util$Timestamp, format = "%d/%m/%Y %H:%M")
tail(util)
# first, delete the column holding the date in the wrong format
util$Timestamp <- NULL
# to change the column order, reassign the data frame with the new order
util <- util[, c(4,1,2,3)]
head(util, 12)
# what is a List
summary(util)
# subsetting the rows belonging to machine RL1
RL1<- util[util$Machine == "RL1",]
summary(RL1)
str(RL1)
# convert the Machine column back to a factor; this is always needed after
# subsetting (drops the now-unused factor levels)
RL1$Machine <- factor(RL1$Machine)
summary(RL1)
# start building the list
# what we are going to do:
# Character: Machine list
# Vector: (min, mean, max) utilization for the month (excluding unknown hours)
# Logical: Has utilization ever fallen below 90%? TRUE/FALSE
# creating the vector (min, mean, max) utilization for the month; na.rm = TRUE
# takes care of excluding the unknown hours
util_stats_rl1 <- c(min(RL1$Utilization, na.rm = TRUE),
mean(RL1$Utilization, na.rm = TRUE),
max(RL1$Utilization, na.rm = TRUE))
util_stats_rl1
# Logical: Has utilization ever fallen below 90% ? TRUE/FALSE
which(RL1$Utilization <0.90) # this shows the rows where it happens
# to show how many times, just wrap it in length()
length(which(RL1$Utilization <0.90))
# to make the result logical (TRUE/FALSE), just add a comparison
length(which(RL1$Utilization <0.90)) >0
# store it in an object so we can reuse it in the list later
util_under_90_flag <- length(which(RL1$Utilization <0.90)) >0
util_under_90_flag
# building a list is very simple:
# the list() function does the job
list_rl1 <- list("RL1", util_stats_rl1, util_under_90_flag)
list_rl1
# visualização é um pouco diferente, para facilitar o entendimento é como se fosse um vetor vertical , cada topido da lista tem um index [[1]], [[2]] ,[[3]] e abaixo
# um [1] com o preenchimento
# então uma lista de 3 elementos como a nossa fica assim
# [[1]]
# [1] elemento 1 da lista , posição 1
#[[2]]
#[1] elemento 2 da lista, posição 1
#[[3]]
#[1] elemento 3 da lista, poisição 1
# checando os nomes de uma lista
names(list_rl1)
# Para passar os nomes iremos setar, de acordo com a ordem
names(list_rl1) <- c("Machines","Stats", "LowThreshold")
list_rl1
# Another way to name
rm(list_rl1) # apagando para renomear
list_rl1 <- list(Machines = "RL1",Stats = util_stats_rl1,LowThreshold = util_under_90_flag) # criando a lista novamente com os nomes antes
list_rl1 # checando se ela foi criada
# adding and deleting components in a list
list_rl1
list_rl1[4] <- "New Information"
list_rl1
# another way to add a component via $
# we will add
# vector with all houres where utilization is unknow NA's
RL1[is.na(RL1$Utilization),] # primeiro filtramos
# iremos adicionar na lista apenas os horários em que esta NA
RL1[is.na(RL1$Utilization),"PosixTime"]
# agora iremos adicionar um novo elemento a lista
list_rl1$UnknownHours<- RL1[is.na(RL1$Utilization),"PosixTime"]
list_rl1
# removing an component
list_rl1[4] <- NULL
list_rl1
# é importante notar que a numeração na lista muda, é ressetada, ou seja, se reoganiza
# adicionando um data frame na list
list_rl1$data <- RL1
list_rl1
# a função summary irá mostrar um resumo de como é essa lista
summary(list_rl1)
# para mostrar a estrutura da lista
str(list_rl1)
#-------------------------------- THE TUTORIAL STARTS HERE -----------------------------------------------
# existem 3 maneiras principais de acessar os dados em uma lista
# 1 [] irá sempre retornar uma lista
# 2 [[]] irá sempre retornar o objeto ou seja o que esta dentro dessa posição da lista
# 3 $ faz a mesma coisa do [[]] mas de uma maneira mais elegante
list_rl1
# 1 [] irá sempre retornar uma lista
list_rl1[1] # apresentou o nome da posição e o que esta dentro
# 2 [[]] irá sempre retornar o objeto ou seja o que esta dentro dessa posição da lista
list_rl1[[1]] # apresentou apenas o que esta dentro
# 3 $ faz a mesma coisa do [[]] mas de uma maneira mais elegante
list_rl1$Machines # apresentou apenas o que esta dentro
list_rl1[2]
# para comprovar que o list_rl1 é uma lista
typeof(list_rl1)
# para saber o tipo de uma coisa dentro da lista é preciso usar o [[ ]] ou $
typeof(list_rl1[[2]])
# how acess the 3rd elemtnet of the vector in [[2]]
list_rl1[[2]][3] # Primeiro voce acessa a lista com o [[]] e depois acessa o elemento do vetor com []
# ou
list_rl1$Stats[3] # acessando a lista com $ e depois o elemento do vector com []
# para fazer um subsetting podemos utilizar [ ]
list_rl1[1] # ira trazer o primeiro elemento da lista
# mas é possível trazer mais de um
# trazendo as 3 primeiras elementos da lista
list_rl1[1:3]
# trazendo o elemento 1 e 4
list_rl1[c(1,4)]
# criando um subsetting com os elementos nomeados como "Machine" e Stats
sublist <- list_rl1[c("Machines", "Stats")]
sublist
# é muito importante lembrar que o [[ ]] não serve para o subsetting
#-------------------------------- THE TUTORIAL STARTS HERE -----------------------------------------------
library(ggplot2)
w <- ggplot(data = util) # criando um objeto apenas com a base de dados
w + geom_line( aes( x = PosixTime, # criando um gráfico de linhas o eixo X será o tempo
y = Utilization, # o eixo y será a utilização
colour = Machine), # as cores de cada linha são as maquinas
size = 1 # esse será o tamanho da linha
) +
facet_grid(Machine~.) + # para deixar um gráfico de cada maquina em cada linha usamos o facet_grid
geom_hline(yintercept = 0.90, # criando uma linha que marca o ponto de abaixo de 90% de utilização da máquina
colour = "Gray", # a linha será cinza
size = 1.2, # explicitando o tamanho que a lnha ficará
linetype = 3) # optamos por colocar a linha pontilhada ou seja tipo 3
# colocando o gráfico anterior em um objeto
myplot <- w + geom_line( aes( x = PosixTime, # criando um gráfico de linhas o eixo X será o tempo
y = Utilization, # o eixo y será a utilização
colour = Machine), # as cores de cada linha são as maquinas
size = 1 # esse será o tamanho da linha
) +
facet_grid(Machine~.) + # para deixar um gráfico de cada maquina em cada linha usamos o facet_grid
geom_hline(yintercept = 0.90, # criando uma linha que marca o ponto de abaixo de 90% de utilização da máquina
colour = "Gray", # a linha será cinza
size = 1.2, # explicitando o tamanho que a lnha ficará
linetype = 3) # optamos por colocar a linha pontilhada ou seja tipo 3
# é possivel adicionar inclusive o gráfico a lista
list_rl1$Plot <- myplot
list_rl1
str(list_rl1)
summary(list_rl1)
|
33df3513b1e0b2df1d660e09eed3b07c69c018bd
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/Compounding/examples/pgfIthomas.Rd.R
|
e3736bb0852391f86902776e5cc96562917d6ced
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 422
|
r
|
pgfIthomas.Rd.R
|
library(Compounding)
### Name: pgfIthomas
### Title: Function pgfIthomas
### Aliases: pgfIthomas
### ** Examples
params<-c(5,.4)
pgfIthomas(.9,params)
## The function is currently defined as
# Numerically invert the Thomas distribution pgf.
#
# For each element of `s`, solves pgfthomas(x, params) == s[i] for x on
# [0, 1] via uniroot(), i.e. evaluates the inverse pgf at s.
#
# Args:
#   s:      numeric vector of pgf values to invert (each in [0, 1]).
#   params: numeric parameter vector passed through to pgfthomas().
#
# Returns: numeric vector of roots, same length as s. The vector is also
#   printed to the console, matching the original behaviour.
pgfIthomas <- function(s, params) {
  # Preallocate the result; the original initialised xval to length(s)
  # (a scalar) and grew it element-by-element inside the loop.
  xval <- numeric(length(s))
  # seq_along() is safe for zero-length input, unlike 1:length(s).
  for (i in seq_along(s)) {
    # Capture s[i] in a local so the closure does not depend on the loop
    # variable at evaluation time.
    target <- s[i]
    func <- function(x) pgfthomas(x, params) - target
    xval[i] <- uniroot(func, lower = 0, upper = 1)$root
  }
  print(xval)
  xval
}
|
009576e7444dc1abbc077e92eafb12b28a7d9da2
|
3805a17d29ec60517c95ea63cd4a393d530d5b15
|
/man/raw_track_get_similar.Rd
|
01646a748b5e6cada18b5631161a76461d89b29f
|
[] |
no_license
|
bkkkk/lastfmr
|
da73596e853989cfe116f52244c617b3f6ae78da
|
8617f03a7485c4bc2c1b4baa3c46595e6c959a3c
|
refs/heads/master
| 2022-09-05T11:35:18.732279
| 2022-08-31T06:54:54
| 2022-08-31T06:54:54
| 125,021,275
| 0
| 0
| null | 2021-11-20T07:19:08
| 2018-03-13T08:57:09
|
R
|
UTF-8
|
R
| false
| true
| 1,633
|
rd
|
raw_track_get_similar.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/methods-track.R
\name{raw_track_get_similar}
\alias{raw_track_get_similar}
\title{Search for similar tracks}
\usage{
raw_track_get_similar(
track = NULL,
artist = NULL,
mbid = NULL,
autocorrect = TRUE,
lang = NULL
)
}
\arguments{
\item{track}{Required unless mbid is provided. The track name as a string.}
\item{artist}{Required unless mbid is provided. The artist name as a string.}
\item{mbid}{(Optional) MusicBrainz ID for the track as a string. Can be provided instead of artist and track names}
\item{autocorrect}{(Optional) Whether to correct the artist/album/track name names provided in the response. Defaults to TRUE if not provided.}
\item{lang}{(Optional) The biography language.}
}
\value{
A \link{lastfm} API object.
}
\description{
The search is performed by providing a source track by:
}
\details{
\preformatted{1. Artist and track name OR
2. MusicBrainz ID
}
One and only one of the above must be provided. If both are provided this
defaults to using the artist and track name.
}
\section{Language Setting}{
Biographies for albums and artists are returned in English by
default. To get the biography in other languages, provide a \href{https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes}{ISO 639-1 country code}
via the \code{lang} argument of supporting functions.
}
\section{Name Autocorrection}{
Endpoints that take artist/track/album names try to auto-correct misspelled names
in the search query. This behaviour can be overridden by calling the function
with argument \code{autocorrect} set to \code{FALSE}.
}
|
6711ac7d57cd8a502923802c72050abb7d75147d
|
13d258a894224d5836517ce1a61500e64ce3cb0e
|
/scripts/getTopGenres.R
|
6cd46a69ac3c0b7d1a3b6e49352d366c3674ed9e
|
[] |
no_license
|
tomcopple/lastfm
|
ffb478b6651718d362ad3869a2a120b3b1ac6b88
|
9d467cf51ef0e2b2d8b05609f5ced842322d2ce5
|
refs/heads/master
| 2023-09-04T10:21:31.278361
| 2023-08-26T14:26:42
| 2023-08-26T14:26:42
| 72,945,592
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,066
|
r
|
getTopGenres.R
|
# Send list of artists to lastfm, get popular tags, then collate to get summary of genres.
# NB Needs lastfm to exist in the global environment, but doesn't check for it.
# Look up the single most popular last.fm tag ("genre") for each of the
# user's top artists, then plot how the top-10 genres' share of listening
# has changed over time (6-month rolling window).
#
# NB: expects a data frame `lastfm` (one row per scrobble, with at least
# `artist` and `date` columns) to exist in the global environment; this is
# not checked, matching the original behaviour.
#
# Args:
#   howManyArtists: number of most-played artists to query (default 100).
#
# Side effects: sources scripts/setLastfm.R, performs HTTP GETs against
# the last.fm API, prints progress messages, and draws a plotly chart.
getTopGenres <- function(howManyArtists = 100) {
  library(tidyverse); library(lubridate); library(httr); library(jsonlite); library(zoo)

  # Save any data in Dropbox (kept for parity with the original; unused below)
  dirData <- "~/Dropbox/R/lastfm"

  # Set user = username, api = api_key
  source("scripts/setLastfm.R")
  user <- setUser()
  api <- setAPI()

  # The artists to look up, most-played first.
  # Explicit wt = n silences top_n()'s "Selecting by n" guessing message.
  topArtists <- lastfm %>%
    count(artist) %>%
    top_n(howManyArtists, n) %>%
    arrange(desc(n))

  # Collect one single-row tibble per artist and bind once at the end;
  # rbind() inside the loop is O(n^2).
  nArtists <- length(topArtists$artist)
  tagList <- vector("list", nArtists)

  for (i in seq_len(nArtists)) {
    artistName <- topArtists$artist[i]

    # Minimal URL encoding for the characters that break the query string.
    getArtist <- gsub(" ", "+", artistName)
    getArtist <- gsub("&", "%26", getArtist)

    # Put together GET url
    baseurl <- paste0(
      "http://ws.audioscrobbler.com/2.0/?method=artist.gettoptags&artist=", getArtist,
      "&api_key=", api, "&format=json"
    )

    # Courtesy progress message (remaining count in brackets).
    print(paste0("Hang on, just getting info for ", artistName, " [",
                 nArtists - i, "]"))

    # Send GET request
    responseRaw <- httr::GET(baseurl)
    httr::warn_for_status(responseRaw)

    # Convert JSON to text then a nested list / data frame
    responseClean <- responseRaw %>%
      httr::content(., as = "text") %>%
      jsonlite::fromJSON(., simplifyDataFrame = TRUE, flatten = TRUE)

    # Keep only the top tag; skip artists that errored or have no tags.
    # `&&` (scalar, short-circuiting) instead of `&`: when the API returns
    # an error there is no $toptags element worth inspecting.
    if (!is.numeric(responseClean$error) &&
        !is_empty(responseClean$toptags$tag)) {
      tagList[[i]] <- tibble(
        artist = artistName,
        tag = tolower(head(responseClean$toptags$tag$name, 1))
      )
    }
  }
  responseTags <- bind_rows(tagList)

  # Merge scrobbles with tags; drop scrobbles with no tag.
  lastTags <- full_join(
    x = lastfm, y = responseTags
  ) %>%
    na.omit()

  # Top 10 tags overall ("seen live" isn't a genre).
  top10tags <- lastTags %>%
    filter(tag != "seen live") %>%
    count(tag) %>%
    top_n(10, n)

  # Daily plays of each top tag since the beginning, with a rolling
  # 6-month (180-day) sum -- 30 days was too volatile.
  daily <- lastTags %>%
    mutate(date = lubridate::date(date)) %>%
    filter(tag %in% top10tags$tag) %>%
    select(tag, date) %>%
    count(tag, date) %>%
    spread(key = tag, value = n, fill = 0) %>%
    # Reinstate days with no plays at all so the rolling window is gapless.
    right_join(
      x = .,
      y = data.frame(
        date = seq.Date(from = date(min(lastTags$date)),
                        to = date(max(lastTags$date)),
                        by = 1)
      )
    ) %>%
    gather(., -date, key = tag, value = count) %>%
    mutate(count = ifelse(is.na(count), 0, count)) %>%
    group_by(tag) %>%
    mutate(monthlyPlays = rollsum(count, k = 180, fill = NA, align = "right")) %>%
    filter(!is.na(monthlyPlays)) %>%
    ungroup()

  # Stacked share-of-listening chart, rendered interactively via plotly.
  library(plotly)
  top10graph <- ggplot(daily, aes(x = date, y = monthlyPlays, fill = tag)) +
    geom_area(alpha = 0.6, size = 1.2, position = "fill") +
    scale_fill_brewer("", palette = "Spectral") +
    scale_x_date(date_labels = "%Y") +
    labs(x = "", y = "Monthly count",
         title = "Top 10 genres over time")
  top10plotly <- ggplotly(top10graph) %>%
    layout(margin = list(l = 60))
  print(top10plotly)
}
|
e68284d5a3f3f667b082cef3e0f40426bc39fd0e
|
52ac04f7c1918eb52ae4462720f6087c2446e316
|
/process_platelets_final.R
|
e40eadee6686a66903d3cb0ef95f7a3570e60daf
|
[] |
no_license
|
UCSF-DSCOLAB/combes_et_al_COVID_2020
|
fe448c4ecae4a0ccec6b2f054ff9036c41d56f05
|
47ae7eea41bf8e5fc85be91ce1ebaaea378cc8c5
|
refs/heads/master
| 2023-06-12T19:35:32.670793
| 2021-07-07T22:43:24
| 2021-07-07T22:43:24
| 323,254,169
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 15,211
|
r
|
process_platelets_final.R
|
## Author: Nicholas Kuhn
# Call libraries
library(assertthat) # For sanity checks
library(ggplot2) # For pretty plots
library(cowplot) # For pretty plots
library(dittoSeq) # For pretty, colorblind friendly plots
library(dplyr) # For inline modification of matrices
library(grid) # For plotting multiple plots in one frame
library(gridExtra) # For plotting multiple plots in one frame
library(reshape2) # For "melting" dataframesb
library(scales) # To access break formatting functions
library(ggrepel)
library(RColorBrewer)
library(pheatmap)
library(Seurat)
# Load the data
load("~/merged_colossal_paper_final_res0.6_platelets_reHarmony.RData")
platelet_reHarmony <- platelets
platelet_reHarmony <- RunUMAP(platelet_reHarmony, dims = 1:30, n.neighbors = 30, min.dist = 0.3, spread = 1, verbose = FALSE, seed.use = 21212, reduction = 'harmony')
platelet_reHarmony <- FindNeighbors(platelet_reHarmony, dims = 1:30, k.param = 20, verbose = FALSE, reduction = 'harmony')
platelet_reHarmony_0.4 <- FindClusters(platelet_reHarmony, verbose = TRUE, algorithm = 1, resolution = 0.4, random.seed = 21212)
##### Figure 3B
DimPlot(platelet_reHarmony_0.4 , reduction='umap', label = T) + labs(color = "0.4")
platelet_reHarmony_0.4_Markers <- FindAllMarkers(platelet_reHarmony_0.4,
test.use='poisson',
only.pos=TRUE,
min.pct=0.25,
logfc.threshold=0.4,
assay='RNA',
latent.vars = 'LIBRARY'
)
platelet_reHarmony_0.4_Markers_padj0.1 <- platelet_reHarmony_0.4_Markers[which(platelet_reHarmony_0.4_Markers$p_val_adj<0.1),]
platelet_reHarmony_0.4_Markers_padj0.1 <- platelet_reHarmony_0.4_Markers_padj0.1[order(platelet_reHarmony_0.4_Markers_padj0.1$avg_logFC,decreasing = TRUE),]
platelet_reHarmony_0.4_Markers_padj0.1 <- platelet_reHarmony_0.4_Markers_padj0.1[order(platelet_reHarmony_0.4_Markers_padj0.1$cluster,decreasing = FALSE),]
platelet_reHarmony_0.4_Markers_padj0.1_Top10 <- platelet_reHarmony_0.4_Markers_padj0.1 %>% group_by(cluster) %>% top_n(n = 10, wt = avg_logFC)
platelet_reHarmony_0.4_Markers_padj0.1_Top5 <- platelet_reHarmony_0.4_Markers_padj0.1 %>% group_by(cluster) %>% top_n(n = 5, wt = avg_logFC)
Idents(platelet_reHarmony_0.4) <- 'seurat_cluster_final'
DotPlot(platelet_reHarmony_0.4,
features = unique(platelet_reHarmony_0.4_Markers_padj0.1_Top5$gene),
cols = "RdBu", assay = "RNA") + theme(axis.text.x=element_text(angle=45, hjust = 1, size = 10), axis.text.y=element_text(size = 8), text = element_text(size = 14)) + coord_flip()
### add column harmony_cluster_final with annotation
Idents(platelet_reHarmony_0.4) <- 'seurat_clusters'
platelet_reHarmony_0.4$harmony_cluster_final <- platelet_reHarmony_0.4$seurat_clusters
### rename clusters in column "seurat_cluster_final"
annotations <- c(
"Platelet_ACTB", # 0
"Platelet_HIST1H2AC", # 1
"Platelet_PPBP", # 2
"Platelet_H3F3B", # 3
"Platelet_TMSB4X", #4
"Platelet_RGS18" #5
)
names(annotations) <- levels(platelet_reHarmony_0.4)
platelet_reHarmony_0.4 <- RenameIdents(platelet_reHarmony_0.4, annotations)
platelet_reHarmony_0.4@meta.data$harmony_cluster_final <- Idents(platelet_reHarmony_0.4)
table(platelet_reHarmony_0.4@meta.data$harmony_cluster_final)
Idents(platelet_reHarmony_0.4) <- 'harmony_cluster_final'
reorder_clusters <- c('Platelet_H3F3B', 'Platelet_HIST1H2AC', 'Platelet_ACTB', 'Platelet_PPBP', 'Platelet_TMSB4X', 'Platelet_RGS18')
levels(platelet_reHarmony_0.4) <- reorder_clusters
##### Figure 3A
DotPlot(platelet_reHarmony_0.4,
features = unique(platelet_reHarmony_0.4_Markers_padj0.1_Top5$gene),
cols = "RdBu", assay = "RNA") + theme(axis.text.x=element_text(angle=45, hjust = 1, size = 10), axis.text.y=element_text(size = 8), text = element_text(size = 14)) + coord_flip()
##### Figures 3D and S5A
VlnPlot(platelet_reHarmony_0.4, features = c('BCL2L1'), pt.size = 0, assay = 'RNA', ncol = 1) + NoLegend()
VlnPlot(platelet_reHarmony_0.4, features = c('TBXA2R'), pt.size = 0, assay = 'RNA', ncol = 1) + NoLegend()
VlnPlot(platelet_reHarmony_0.4, features = c('P2RX1'), pt.size = 0, assay = 'RNA', ncol = 1) + NoLegend()
VlnPlot(platelet_reHarmony_0.4, features = c('PTGIR'), pt.size = 0, assay = 'RNA', ncol = 1) + NoLegend()
FeaturePlot(platelet_reHarmony_0.4, features = c('BCL2L1'), cols = c("grey", "red"), pt.size = 0.3, min.cutoff = 0.2)
########################################################
##################plot frequencies of clusters by Qualitative score
########################################################
######## add annotation based on clinical scores
COMET_10X_CLINICAL_SCORES_PAPER <- read.csv("~/COMET_10X_CLINICAL_SCORES_PAPER.csv", sep=',', header=T)
platelet_reHarmony_0.4@meta.data$Qualitative_score <- COMET_10X_CLINICAL_SCORES_PAPER$Qualitative_score[match(platelet_reHarmony_0.4@meta.data$SAMPLE.by.SNPs,COMET_10X_CLINICAL_SCORES_PAPER$SAMPLE.by.SNPs)]
#create file 'y' with table containing SAMPLE.by.SNPs as rows and seurat_clusters as columns with corresponding event #s
y <- table(platelet_reHarmony_0.4@meta.data[,c('SAMPLE.by.SNPs', 'harmony_cluster_final')])
#load reshape2
library(reshape2)
#melt table into one column by id='SAMPLE.by.SNPs; call 'ClustersByCovidStatus'
ClustersByQualitativeScore <- melt(y, id='SAMPLE.by.SNPs')
#make data.frame from sobj that contains SAMPLE.by.SNPs event #s; call 'temp'
temp <- table(platelet_reHarmony_0.4@meta.data$SAMPLE.by.SNPs)
temp <- data.frame(temp)
#add a column to ClustersByQualitativeScore called 'total' that contains total # of cells per SAMPLE.by.SNPs pulled from 'temp' data.frame in column 'Freq'
ClustersByQualitativeScore$total <- temp$Freq[match(ClustersByQualitativeScore$SAMPLE.by.SNPs, temp$Var1)]
#add a column to ClustersByQualitativeScore called 'freq' that gives the fraction of cells of each harmony_cluster_final per total # of cells in sample
ClustersByQualitativeScore$freq <- ClustersByQualitativeScore$value/ClustersByQualitativeScore$total
ggplot(ClustersByQualitativeScore, aes(x=harmony_cluster_final, y=freq))+geom_point(stat = 'identity')
#generate temp2 dataframe that has Qualitative_score matched to SAMPLE.by.SNPs
temp2 <- platelet_reHarmony_0.4@meta.data[,c('SAMPLE.by.SNPs', 'Qualitative_score')]
temp2 <- data.frame(temp2)
#add column to ClustersByQualitativeScore called 'Qualitative_score' that adds Ctrl/moderate/severe/critical to ClustersByQualitativeScore matched to SAMPLE.by.SNPs pulled from 'temp2' data.frame in column 'Qualitative_score'
ClustersByQualitativeScore$Qualitative_score <- temp2$Qualitative_score[match(ClustersByQualitativeScore$SAMPLE.by.SNPs, temp2$SAMPLE.by.SNPs)]
#make 'harmony_cluster_final' numbers factors, not values; or else ggplot gets confused
#and reorder clusters with "levels" command
ClustersByQualitativeScore$harmony_cluster_final <- factor(ClustersByQualitativeScore$harmony_cluster_final, levels=c('Platelet_ACTB', 'Platelet_HIST1H2AC', 'Platelet_PPBP', 'Platelet_H3F3B', 'Platelet_TMSB4X', 'Platelet_RGS18'))
write.table(ClustersByQualitativeScore, file="ClustersByQualitativeScore.tsv", row.names=T, col.names=T, quote=F, sep="\t")
#plot
mycolorsseverity <- setNames(c("grey40", "orange", "orangered2"), c('CTRL', 'MILD', 'SEVERE'))
e <- ggplot(ClustersByQualitativeScore, aes(x=harmony_cluster_final, y=freq))
##### Figure 3C
e + geom_boxplot(
aes(fill = Qualitative_score),
outlier.size = 0.5,
position = position_dodge(0.8)) +
labs(title = NULL,x="harmony_cluster_final", y = "frequency") +
theme(axis.text.x=element_text(angle=45, hjust = 1, size = 10), axis.text.y=element_text(size = 10), text = element_text(size = 14), panel.background = element_rect(fill = "white",size = 2, linetype = "solid"), panel.grid.major = element_line(size = 0.5, linetype = 'solid'), axis.line = element_line(colour = "black")) +
theme_classic() +
scale_fill_manual(values=mycolorsseverity)
########################################################
##################plot frequencies of clusters by status
########################################################
#create file 'y' with table containing SAMPLE.by.SNPs as rows and seurat_clusters as columns with corresponding event #s
y <- table(plateletCLEAN_harmony_0.4@meta.data[,c('SAMPLE.by.SNPs', 'seurat_cluster_final')])
#load reshape2
library(reshape2)
#melt table into one column by id='SAMPLE.by.SNPs; call 'ClustersByCovidStatus'
ClustersByCovidStatus <- melt(y, id='SAMPLE.by.SNPs')
#make data.frame from sobj that contains SAMPLE.by.SNPs event #s; call 'temp'
temp <- table(plateletCLEAN_harmony_0.4@meta.data$SAMPLE.by.SNPs)
temp <- data.frame(temp)
#add a column to ClustersByCovidStatus called 'total' that contains total # of cells per SAMPLE.by.SNPs pulled from 'temp' data.frame in column 'Freq'
ClustersByCovidStatus$total <- temp$Freq[match(ClustersByCovidStatus$SAMPLE.by.SNPs, temp$Var1)]
#add a column to ClustersByCovidStatus called 'freq' that gives the fraction of cells of each seurat_cluster_final per total # of cells in sample
ClustersByCovidStatus$freq <- ClustersByCovidStatus$value/ClustersByCovidStatus$total
ggplot(ClustersByCovidStatus, aes(x=seurat_cluster_final, y=freq))+geom_point(stat = 'identity')
#generate temp2 dataframe that has covid_status matched to SAMPLE.by.SNPs
temp2 <- plateletCLEAN_harmony_0.4@meta.data[,c('SAMPLE.by.SNPs', 'covid_status')]
temp2 <- data.frame(temp2)
#add column to ClustersByCovidStatus called 'covid_status' that adds neg/neg/ctrl to ClustersByCovidStatus matched to SAMPLE.by.SNPs pulled from 'temp2' data.frame in column 'covid_status'
ClustersByCovidStatus$covid_status <- temp2$covid_status[match(ClustersByCovidStatus$SAMPLE.by.SNPs, temp2$SAMPLE.by.SNPs)]
#make 'seurat_cluster_final' numbers factors, not values; or else ggplot gets confused
#and reorder clusters with "levels" command
ClustersByCovidStatus$seurat_cluster_final <- factor(ClustersByCovidStatus$seurat_cluster_final, levels=c('H3F3B', 'PPBP', 'ACTB', 'TMSB4X', 'HIST1H2AC', 'RGS18'))
#plot
mycolorsstatus <- setNames(c("grey40", "dodgerblue4", "firebrick3"), c('ctrl', 'neg', 'pos'))
e <- ggplot(ClustersByCovidStatus, aes(x=seurat_cluster_final, y=freq))
e + geom_boxplot(
aes(fill = covid_status),
outlier.size = 0.5,
position = position_dodge(0.8)) +
labs(title = NULL,x="seurat_cluster_final", y = "frequency") +
theme(axis.text.x=element_text(angle=45, hjust = 1, size = 10), axis.text.y=element_text(size = 10), text = element_text(size = 14), panel.background = element_rect(fill = "white",size = 2, linetype = "solid"), panel.grid.major = element_line(size = 0.5, linetype = 'solid'), axis.line = element_line(colour = "black")) +
theme_classic() +
scale_fill_manual(values=mycolorsstatus)
dev.off()
######## ISG Score
### load functions 'get_genes_s3' and 'saturate'
get_genes_s3 <- function(genes, sobj, drop=FALSE) {
###
# DESCRIPTION
# Get a dataframe of gene values per cell from the input seurat version3 object
#
# INPUTS
# genes: A character vector containing gene names
# sobj: The Seurat object
# drop: If TRUE, genes missing from the object are reported and silently
#       dropped; if FALSE (default), the function bails out when any gene
#       is missing.
# OUTPUT
# A dataframe of input genes as rows and all cells as columns
# (or the sentinel value 1 when genes are missing and drop = FALSE --
# callers must check for this; no error condition is signalled)
###
# Locate each requested gene among the RNA assay's rownames; NA = missing.
gene_idx <- sapply(genes, function(x) { match(x, rownames(sobj@assays[["RNA"]]@data)) })
if (sum(is.na(gene_idx)) > 0) {
print("The following genes are not in the gene list.")
print(names(gene_idx[is.na(gene_idx)]))
if (drop){
drop_names <- names(gene_idx[is.na(gene_idx)])
genes <- genes[!genes%in%drop_names]
} else {
return(1)
}
}
# NOTE(review): gene presence is checked against the "RNA" assay above, but
# values are pulled from the currently *active* assay here -- if the active
# assay is not "RNA" these can disagree; confirm this is intended.
# The assignment below is also the function's (invisible) return value.
genes <- as.data.frame(as.matrix(sobj@assays[[sobj@active.assay]]@data[genes,]))
}
# Saturate a vector of scores at a quantile threshold.
#
# All values below the `sat` quantile of `vec` are set to 0; if `binary`
# is TRUE the remaining (>= threshold) values are set to 1.
#
# Args:
#   vec:    numeric vector of scores.
#   sat:    saturation point, either a fraction (0.0-1.0) or a percent
#           (1.0-100.0); percents are converted to fractions.
#   binary: if TRUE, return a 0/1 indicator instead of thresholded scores.
#
# Returns: numeric vector of saturated scores, same length as `vec`.
saturate <- function(vec, sat = 0, binary = FALSE) {
  # Accept either a fraction or a percentage.
  sat <- if (sat > 1.0) sat / 100 else sat
  # Saturation threshold: the `sat` quantile of the scores.
  z <- quantile(vec, sat)
  # Vectorised replacement of the original element-wise loop. The mask is
  # computed *before* mutation so entries zeroed below the threshold cannot
  # subsequently pass the >= z test when z <= 0 (matching the original
  # if/else-if semantics exactly).
  below <- vec < z
  vec[below] <- 0
  if (binary) {
    vec[!below] <- 1
  }
  vec
}
platelet_reHarmony_0.4@meta.data$Qualitative_score <- COMET_10X_CLINICAL_SCORES_PAPER$Qualitative_score[match(platelet_reHarmony_0.4@meta.data$SAMPLE.by.SNPs,COMET_10X_CLINICAL_SCORES_PAPER$SAMPLE.by.SNPs)]
# Adding IF-responsive gene signature scores and plotting
# A list of genes in the signature
ISG_genes = c('MT2A', 'ISG15', 'LY6E', 'IFIT1', 'IFIT2', 'IFIT3', 'IFITM1', 'IFITM3', 'IFI44L', 'IFI6', 'MX1', 'IFI27')
# Get the cell values from the Seurat object
gene_df <- get_genes_s3(ISG_genes, platelet_reHarmony_0.4, drop = T)
# Define the score vector. In this case, we call the score for a cell the mean value of all genes in the vector
score <- colMeans(gene_df)
# Add score columns to the metadata column of the Seurat object
platelet_reHarmony_0.4@meta.data$ISG_Score_0 <- saturate(vec=score, sat=0, binary=FALSE) # saturate at 0%
# Make a featureplot
##### Figure S5C
FeaturePlot(platelet_reHarmony_0.4, features = "ISG_Score_0",
cols =c("light grey", "red"), pt.size = 0.1, min.cutoff = 0.1)
##### Figure 3G
VlnPlot(platelet_reHarmony_0.4, features = "ISG_Score_0", group.by = "covid_status", split.by = 'Qualitative_score', pt.size = 0, col = mycolorsseverity) + NoLegend()
###Wilcoxon rank sum test on ISG score
#subset all groups
Idents(platelet_reHarmony_0.4) <- 'covid_status'
platelet_reHarmony_0.4_pos <- subset(platelet_reHarmony_0.4, idents = 'pos')
platelet_reHarmony_0.4_neg <- subset(platelet_reHarmony_0.4, idents = 'neg')
Idents(platelet_reHarmony_0.4_pos) <- 'Qualitative_score'
platelet_reHarmony_0.4_pos_mild <- subset(platelet_reHarmony_0.4_pos, idents = 'MILD')
platelet_reHarmony_0.4_pos_severe <- subset(platelet_reHarmony_0.4_pos, idents = 'SEVERE')
Idents(platelet_reHarmony_0.4_neg) <- 'Qualitative_score'
platelet_reHarmony_0.4_neg_mild <- subset(platelet_reHarmony_0.4_neg, idents = 'MILD')
platelet_reHarmony_0.4_neg_severe <- subset(platelet_reHarmony_0.4_neg, idents = 'SEVERE')
#stats
NM_ISG <- platelet_reHarmony_0.4_neg_mild@meta.data$ISG_Score_0
PM_ISG <- platelet_reHarmony_0.4_pos_mild@meta.data$ISG_Score_0
my_data <- data.frame(group = c(rep("NEG_MILD",length(NM_ISG)),
rep("POS_MILD",length(PM_ISG))),
ISG_score = c(NM_ISG, PM_ISG))
group_by(my_data, group) %>%
summarise(
count = n(),
median = median(ISG_score, na.rm = TRUE),
IQR = IQR(ISG_score, na.rm = TRUE))
p <- ggplot(my_data, aes(x=group, y=ISG_score)) +
geom_violin()
p
wilcox.test(ISG_score ~ group, data = my_data,
exact = FALSE)
|
af81358cbb992bd6adfbf16f0af7c494ac81bdc3
|
eed3efe915e04af5b5f20acbbc7920f07477d812
|
/BCEL/man/BCEL-package.Rd
|
c0d361357afe550a951407bbb75a7545583b7e43
|
[] |
no_license
|
yanzhong07/BCEL
|
acff2653c48136b7077b6ec494224da630821938
|
c697383f9d9ceeb51697ca2d62bbdf0d0ebf778b
|
refs/heads/master
| 2022-07-21T07:13:42.432591
| 2022-06-29T05:54:37
| 2022-06-29T05:54:37
| 292,409,966
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 614
|
rd
|
BCEL-package.Rd
|
\name{BCEL-package}
\alias{BCEL-package}
\alias{BCEL}
\docType{package}
\title{
\packageTitle{BCEL}
}
\description{
\packageDescription{BCEL}
}
\details{
The DESCRIPTION file:
\packageDESCRIPTION{BCEL}
\packageIndices{BCEL}
Main functions:
exclusivelasso_project(): proximal operator of exclusive lasso.
exclusivelasso(): regression with exclusive lasso penalty.
bcel(): biclustering with exclusive lasso penalty, written with Rcpp.
bcel_stable(): stable method to select models.
}
\author{
\packageAuthor{BCEL}
Maintainer: \packageMaintainer{BCEL}
}
\references{
}
\keyword{ package }
\seealso{
}
\examples{
}
|
2e86a2bc1a5443d98b33fe681953070fade0b1af
|
6dde5e79e31f29db901c81e4286fea4fa6adbc48
|
/R/AmpPhaseDecomp.R
|
cd008bcdcb90f369cd5b207b2bd4a7eff175be03
|
[] |
no_license
|
cran/fda
|
21b10e67f4edd97731a37848d103ccc0ef015f5a
|
68dfa29e2575fb45f84eb34497bb0e2bb795540f
|
refs/heads/master
| 2023-06-08T07:08:07.321404
| 2023-05-23T22:32:07
| 2023-05-23T22:32:07
| 17,696,014
| 23
| 19
| null | 2022-03-13T17:58:28
| 2014-03-13T04:40:29
|
R
|
UTF-8
|
R
| false
| false
| 1,810
|
r
|
AmpPhaseDecomp.R
|
AmpPhaseDecomp <- function(xfd, yfd, hfd, rng=xrng)
{
# Computes the amplitude-phase decomposition for a registration.
# Arguments:
# XFD ... FD object for unregistered functions
# YFD ... FD object for registered functions
# HFD ... FD object for warping functions
# RNG ... interval to integrate over; defaults to the full range of XFD's
#         basis (the default `rng=xrng` is legal only because R evaluates
#         defaults lazily, after `xrng` is assigned in the body below)
# Returns:
# MS.amp ... mean square for amplitude variation
# MS.pha ... mean square for phase variation
# RSQR ... squared correlation measure of prop. phase variation
# C ... constant C
# Last modified 6 January 2020 by Jim Ramsay
# Set up a fine evaluation grid over the requested range.
xbasis <- xfd$basis
nxbasis <- xbasis$nbasis
nfine <- max(201,10*nxbasis)
xrng <- xbasis$rangeval
if (rng[1] < xrng[1] || rng[2] > xrng[2]) stop(
"RNG is not within the range of the other arguments.")
if (rng[1] >= rng[2]) stop("Elements of rng are not increasing.")
tfine <- seq(rng[1],rng[2],len=nfine)
delta <- tfine[2] - tfine[1]
# Evaluate the warp derivative, unregistered and registered curves on the grid.
Dhfine <- eval.fd(tfine, hfd, 1)
xfine <- eval.fd(tfine, xfd, 0)
yfine <- eval.fd(tfine, yfd, 0)
# Cross-sectional means of the unregistered (mu) and registered (eta) curves.
mufine <- apply(xfine, 1, mean)
etafine <- apply(yfine, 1, mean)
N <- dim(xfine)[2]
# Residuals of the registered curves about their cross-sectional mean.
rfine <- yfine - outer(etafine,rep(1,N))
# Integrals (trapezoid rule scaled by grid spacing) of the squared means.
intetasqr <- delta*trapz(etafine^2)
intmusqr <- delta*trapz(mufine^2)
# Pointwise covariance between the warp derivative and the squared
# registered curves, then its integral.
covDhSy <- rep(0,nfine)
for (i in 1:nfine) {
Dhi <- Dhfine[i,]
Syi <- yfine[i,]^2
covDhSy[i] <- cov(Dhi, Syi)
}
intcovDhSy <- delta*trapz(covDhSy)
# Per-curve integrals of squared registered curves and squared residuals.
intysqr <- rep(0,N)
intrsqr <- rep(0,N)
for (i in 1:N) {
intysqr[i] <- delta*trapz(yfine[,i]^2)
intrsqr[i] <- delta*trapz(rfine[,i]^2)
}
# Amplitude/phase mean squares and the proportion-of-phase-variation R^2.
C <- 1 + intcovDhSy/mean(intysqr)
MS.amp <- C*mean(intrsqr)
MS.pha <- C*intetasqr - intmusqr
RSQR <- MS.pha/(MS.amp+MS.pha)
return(list("MS.amp" = MS.amp, "MS.pha" = MS.pha, "RSQR" = RSQR, "C" = C))
}
# Composite trapezoid weight sum on a unit-spaced grid: interior points
# count fully, the two endpoints count half each (i.e. sum(x) minus the
# mean of the first and last values).
trapz <- function(x) {
  endpoints <- x[c(1, length(x))]
  sum(x) - mean(endpoints)
}
|
8dfa1fbf49645bec698735fcc662a94427794dd5
|
ee77fb8b2c8a4de8aad82de953a462ae9b38668e
|
/man/validate_aflelo_model.Rd
|
8213abcd8d37d2675488b57c9a6f8450b95d27ad
|
[] |
no_license
|
lazappi/aflelo
|
700a281c9eb086887128f3ea968de8d0bbbd87a4
|
a5c3d964140fbcd092b13a6672fffd474fb67692
|
refs/heads/master
| 2021-04-12T12:24:49.305211
| 2018-04-19T11:45:22
| 2018-04-19T11:45:22
| 126,563,142
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 392
|
rd
|
validate_aflelo_model.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/model.R
\name{validate_aflelo_model}
\alias{validate_aflelo_model}
\title{Validate AFLELO Model}
\usage{
validate_aflelo_model(model)
}
\arguments{
\item{model}{The aflelo_model to validate}
}
\value{
Raises an error if invalid, otherwise returns the aflelo_model
}
\description{
Validate an AFLELO Model object
}
|
068570301b5c9dad3b3a0359bb7db845867fe90f
|
9d4522c3ddcbd5bb4f4db1de74b94f93268ddf38
|
/plot4.R
|
9bd7cd28c4f8fdb5a7adb7c7220a3d5127a40975
|
[] |
no_license
|
ayoademob/ExData_Plotting1
|
4a06aae577994c2a2cdf5088fdae1e8f29d0a3da
|
85417907196707122f405ecb39e07263db34af60
|
refs/heads/master
| 2021-01-15T12:41:23.896207
| 2015-10-12T23:11:27
| 2015-10-12T23:11:27
| 44,068,618
| 0
| 0
| null | 2015-10-11T20:34:59
| 2015-10-11T20:34:59
| null |
UTF-8
|
R
| false
| false
| 2,321
|
r
|
plot4.R
|
# First run the getConsumptionData() in the loadData.R script
# pass the returned dataset to the plot functions
# must have ggplot2 installed e.g. install.packages("ggplot2")
# Draw the four-panel household power consumption figure and save it as
# plot4.png (480x480) in the working directory.
#
# INPUT
#   rawDT: the dataset returned by getConsumptionData() in loadData.R.
#          NOTE(review): `rawDT[,Date]` / `rawDT[,Time]` is data.table
#          column syntax (bare column names), so rawDT is presumably a
#          data.table rather than a plain data.frame -- confirm against
#          loadData.R.
#
# SIDE EFFECTS: draws on the current graphics device, writes plot4.png.
plot4 <- function(rawDT){
# Load libraries
library(ggplot2)
library(scales) # to access breaks/formatting functions
library(grid)
# Prepend a combined POSIX datetime column built from the Date + Time columns.
rawDT = data.frame(Datetime = strptime(paste(rawDT[,Date],rawDT[,Time]), format="%Y-%m-%d %H:%M:%S")
,rawDT
)
# Divide drawing canvas into 4 (2 rows and 2 cols), set the margin
# Column filled first
par(mfcol = c(2, 2), mar = c(5, 4, 2, 1))
# Plot
with (rawDT,{plot( Datetime,Global_active_power # Plot 1: active power over time
, type='l'
, xlab=''
, ylab='Global Active Power (kilowatts)'
)
plot( c(Datetime,Datetime,Datetime) # Plot 2: empty frame sized to hold all 3 series
, c(Sub_metering_1, Sub_metering_2, Sub_metering_3)
, type='n'
, xlab=''
, ylab='Energy sub metering'
)
# overlay the three sub-metering series
lines(Datetime,Sub_metering_1)
lines(Datetime,Sub_metering_2, col='red')
lines(Datetime,Sub_metering_3, col='blue')
legend( 'topright'
, legend=c('Sub_metering_1','Sub_metering_2','Sub_metering_3')
, col=c('black','red','blue')
, lty=c(1,1,1)
, lwd = 1
, cex = 0.5
, y.intersp = 0.2
)
plot( Datetime,Voltage # Plot 3: voltage over time
, type='l'
, xlab='datetime'
, ylab='Voltage'
)
plot( Datetime,Global_reactive_power # Plot 4: reactive power over time
, type='l'
, xlab='datetime'
, ylab='Global_reactive_power'
)
}
)
# Copy the on-screen figure to a 480x480 PNG, then close the PNG device.
dev.copy(png,'plot4.png', width = 480, height = 480)
dev.off()
#rawDT
}
|
d6e136dcd354fa1400d2d9c69c1bfd84e80a2975
|
9d4e36cab5ff517d4d4138d4031200a5e4b002e1
|
/man/csd.Rd
|
464391aef14d199fa80a61e6edb20a3ea619d116
|
[] |
no_license
|
FranzKrah/ClimInd
|
9a4d248e329c9d57b57fc2669bc242c01fe2ee06
|
0a7be3c7e437201407ec549401d3547ce3496888
|
refs/heads/master
| 2023-04-05T00:27:36.346935
| 2021-04-09T23:00:03
| 2021-04-09T23:00:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 600
|
rd
|
csd.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/indecis_indices.R
\name{csd}
\alias{csd}
\title{Maximum consecutive summer days}
\usage{
csd(data, data_names = NULL, time.scale = YEAR, na.rm = FALSE)
}
\arguments{
\item{data}{daily maximum temperature, Celsius}
\item{data_names}{names of each period of time}
\item{time.scale}{month, season or year}
\item{na.rm}{logical. Should missing values (including NaN) be removed?}
}
\value{
days
}
\description{
Maximum number of consecutive summer days (TX > 25 Celsius)
}
\examples{
data(data_all)
csd(data=data_all$tx)
}
|
7a0e01414d6a26948d271227531967b3e7d2aada
|
9a710be47265c0a9e8654a71b0a32e58bdb0b7c5
|
/R/Visualizer.R
|
3ab86da350f2c7e116b9e8519037b07057030e32
|
[] |
no_license
|
shaowenguang/guidance
|
0c62e3b6152c6a495966c2deee2c21422c61e548
|
31cebf6e03f56ed2d327dfc24ab8b451a4d1a33a
|
refs/heads/master
| 2020-04-21T14:40:16.588236
| 2019-05-16T15:19:29
| 2019-05-16T15:19:29
| 169,642,789
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 19,986
|
r
|
Visualizer.R
|
#' @importFrom ggplot2 ggplot
#' @importFrom ggplot2 aes_string
#' @importFrom ggplot2 geom_density
#' @importFrom ggplot2 theme
#' @importFrom ggplot2 element_text
#' @importFrom ggplot2 element_blank
#' @importFrom ggplot2 element_line
#' @importFrom ggplot2 labs
#' @importFrom ggplot2 ggtitle
# Visualize peptide profiles
#'
#' @description A function to visualize profiles of peptides corresponding to
#' protein of interest. The profiles include log2 transformed peptide intensity,
#' correlation between peptides represented in heatmap, average intensity and
#' probability of being a representative peptide.
#'
#' @param case data table or data frame in wide representation. The data typically
#' contains \code{"PeptideIon"}, \code{"ProteinName"} and sample names in columns and
#' measurements of each peptide or precursor ions in rows. For a clear visualization
#' of peptide profile, recommend to prepare a data table filtered on peptides
#' corresponding to a protein of interest
#'
#' @param cutoff_prob a numeric value denoting posterior probability threshold
#' to keep or remove peptides.
#'
#' @export
#'
#' @examples
#' global_level = "PeptideIon"
#' d_feature <- calc_features(peptideIon_st)
#' d_feature_select <- perform_selection(d_feature)
#'
#' prot_name <- c("1/sp|O00429|DNM1L_HUMAN")
#' test_prot <- d_feature_select[d_feature_select$ProteinName==prot_name, ]
#' p <- plot_protein_profile(test_prot)
#'
plot_protein_profile <- function(case, cutoff_prob=0.3,
                                 bool_isPeptideIon = TRUE) {
  # Combined profile figure: intensity line plot on top, and a row of
  # (correlation heatmap | mean-intensity bars | probability bars) below.
  # NOTE: compute_cor() also adds/updates a Selection column on `case` via
  # data.table reference semantics, which the bar/line plots rely on.
  a_reorder <- compute_cor(case = case, cutoff_prob=cutoff_prob)
  # Heatmap of the clustered pairwise correlation matrix (long format via melt).
  p_heatmap <- ggplot(melt(a_reorder, value.name = "Correlation",
                           varnames=c("peptideIon", "PeptideIon")),
                      aes(x=peptideIon , y=PeptideIon, fill=Correlation)) +
    geom_tile() + scale_fill_gradient2(limits=c(-1, 1), low="red", mid="black",
                                       high="green", space = "Lab", midpoint=0) +
    geom_text(aes(peptideIon , PeptideIon, label=Correlation), col="white", size=2.5) +
    theme(axis.text.x=element_blank(), axis.text.y=element_blank())
  if(bool_isPeptideIon == TRUE) {
    # Precursor-ion level input: key everything on the PeptideIon column and
    # order the factor levels to match the clustered correlation matrix.
    case$PeptideIon <- factor(case$PeptideIon, levels=case[colnames(a_reorder),
                                                           on="PeptideIon"]$PeptideIon)
    # Error-bar limits: mean +/- cv * mean (cv * mean is the standard deviation).
    limits <- aes(ymax = case$feature_mean_intensity_all +
                    case$feature_cv_intensity_all * case$feature_mean_intensity_all,
                  ymin = case$feature_mean_intensity_all -
                    case$feature_cv_intensity_all*case$feature_mean_intensity_all)
    #p_bar_int <- ggplot(case, aes(x=PeptideIon, y=feature_mean_intensity_all, fill=ProteinName)) + geom_bar(stat="identity") + coord_flip() + geom_errorbar(limits, width=0.5) + guides(fill=FALSE)
    p_bar_int <- ggplot(case, aes(x=PeptideIon, y=feature_mean_intensity_all, fill=ProteinName)) +
      geom_bar(stat="identity") + coord_flip() + geom_errorbar(limits, width=0.5)
    #p_bar_prob <- ggplot(case, aes(x=PeptideIon, y=prob, fill=ProteinName)) + geom_bar(stat="identity") + coord_flip()
    #wenguang: please note that coord_flip() and coord_cartesian() are exclusive.
    p_bar_prob <- ggplot(case, aes(x=PeptideIon, y=prob, fill=Selection)) +
      geom_bar(stat="identity") + coord_flip(ylim=c(0,1))
    # Sample names recovered by stripping the "Intensity_" prefix from the
    # wide-format intensity columns.
    tmp_injections <- gsub("Intensity_", "", names(case)[grepl("^Intensity", names(case))])
    # Wide -> long: pair up the Intensity_* and Score_* columns per injection.
    case_long <- melt(case, id.vars=c("PeptideIon", "Selection"),
                      measure.vars = patterns("^Intensity_", "^Score_"),
                      value.name=c("Intensity", "Score"), variable.name="Injections")
    setattr(case_long$Injections, "levels", tmp_injections)
    case_long[, Extraction := "Original"]
    # m-score == 2.0 marks requant (noise re-extraction) measurements.
    case_long[case_long$Score == 2.0, ]$Extraction <- "Requant"
    p_matplot <- ggplot(case_long, aes(x=Injections, y=log2(Intensity))) +
      geom_line(aes(colour=PeptideIon, group=PeptideIon, linetype=Selection)) +
      geom_point(aes(shape=Extraction, solid=F), size=3) +
      geom_text(data=case_long[case_long$Injections==tail(case_long$Injections, n=1), ],
                aes(colour=PeptideIon, label=PeptideIon, vjust=-2, hjust=1), size=3) +
      theme(axis.text.x = element_text(angle = 45))
  } else {
    # Transition level input: same panels, keyed on aggr_Fragment_Annotation
    # and using the aggr_Peak_Area_* measurement columns instead.
    case$aggr_Fragment_Annotation <- factor(case$aggr_Fragment_Annotation,
                                            levels=case[colnames(a_reorder),
                                                        on="aggr_Fragment_Annotation"]$aggr_Fragment_Annotation)
    limits <- aes(ymax = case$feature_mean_intensity_all +
                    case$feature_cv_intensity_all * case$feature_mean_intensity_all,
                  ymin = case$feature_mean_intensity_all -
                    case$feature_cv_intensity_all*case$feature_mean_intensity_all)
    #p_bar_int <- ggplot(case, aes(x=aggr_Fragment_Annotation, y=feature_mean_intensity_all, fill=ProteinName)) + geom_bar(stat="identity") + coord_flip() + geom_errorbar(limits, width=0.5) + guides(fill=FALSE)
    p_bar_int <- ggplot(case, aes(x=aggr_Fragment_Annotation,
                                  y=feature_mean_intensity_all, fill=ProteinName)) +
      geom_bar(stat="identity") + coord_flip() + geom_errorbar(limits, width=0.5)
    #p_bar_prob <- ggplot(case, aes(x=aggr_Fragment_Annotation, y=prob, fill=ProteinName)) + geom_bar(stat="identity") + coord_flip()
    p_bar_prob <- ggplot(case, aes(x=aggr_Fragment_Annotation, y=prob, fill=Selection)) +
      geom_bar(stat="identity") + coord_flip(ylim=c(0,1))
    tmp_injections <- gsub("aggr_Peak_Area_", "",
                           names(case)[grepl("^aggr_Peak_Area", names(case))])
    case_long <- melt(case, id.vars=c("aggr_Fragment_Annotation", "PeptideIon", "Selection"),
                      measure.vars = patterns("^aggr_Peak_Area_", "^Score_"),
                      value.name=c("aggr_Peak_Area", "Score"), variable.name="Injections")
    setattr(case_long$Injections, "levels", tmp_injections)
    case_long[, Extraction := "Original"]
    case_long[case_long$Score == 2.0, ]$Extraction <- "Requant"
    p_matplot <- ggplot(case_long, aes(x=Injections, y=log2(aggr_Peak_Area))) +
      geom_line(aes(colour=PeptideIon, group=aggr_Fragment_Annotation,
                    linetype=Selection)) +
      geom_point(aes(shape=Extraction, solid=F), size=3) +
      geom_text(data=case_long[case_long$Injections==tail(case_long$Injections, n=1), ],
                aes(colour=PeptideIon, label=aggr_Fragment_Annotation,
                    vjust=-2, hjust=1), size=3) +
      theme(axis.text.x = element_text(angle = 45))
  }
  # Assemble: line plot on top, the three summary panels in one row below.
  # grid.arrange() draws the figure as a side effect and returns the gtable.
  p_plot <- grid.arrange( p_matplot,
                          arrangeGrob(p_heatmap, p_bar_int, p_bar_prob, ncol=3), nrow=2 )
  return(p_plot)
}
#' Visualize peptide intensity
#'
#' @description Visualize peptide intensities in different samples by a line graph.
#' Colors denote different peptides, dotted represent removed peptides based on
#' posterior probability of LDA algorithm and triangle dots denote requant
#' measurements (m-score = 2).
#'
#' @param case data table or data frame in wide representation. The data typically
#' contains \code{"PeptideIon"}, \code{"ProteinName"} and sample names in columns and
#' measurements of each peptide or precursor ions in rows. For a clear visualization
#' of peptide profile, recommend to prepare a data table filtered on peptides
#' corresponding to a protein of interest
#'
#' @param cutoff_prob a numeric value denoting posterior probability threshold
#' to keep or remove peptides.
#'
#' @export
#'
#' @examples
#' global_level = "PeptideIon"
#' d_feature <- calc_features(peptideIon_st)
#' d_feature_select <- perform_selection(d_feature)
#'
#' prot_name <- c("1/sp|O00429|DNM1L_HUMAN")
#' test_prot <- d_feature_select[d_feature_select$ProteinName==prot_name, ]
#' p <- plot_peptide_intensity(test_prot)
#'
plot_peptide_intensity <- function(case, cutoff_prob=0.3,
                                   bool_isPeptideIon = TRUE) {
  # Line plot of log2 intensities per injection; colour = peptide, dotted
  # linetype = peptides removed by the probability cutoff, point shape marks
  # requant (m-score == 2) measurements.
  # NOTE: compute_cor() also adds/updates a Selection column on `case` via
  # data.table reference semantics.
  a_reorder <- compute_cor(case = case, cutoff_prob=cutoff_prob)
  if(bool_isPeptideIon == TRUE) {
    # Precursor-ion level: order factor levels by the clustered correlation matrix.
    case$PeptideIon <- factor(case$PeptideIon, levels=case[colnames(a_reorder),
                                                           on="PeptideIon"]$PeptideIon)
    # Injection names come from the Intensity_* column suffixes.
    tmp_injections <- gsub("Intensity_", "", names(case)[grepl("^Intensity", names(case))])
    # Wide -> long: pair Intensity_* with Score_* per injection.
    case_long <- melt(case, id.vars=c("PeptideIon", "Selection"),
                      measure.vars = patterns("^Intensity_", "^Score_"),
                      value.name=c("Intensity", "Score"), variable.name="Injections")
    setattr(case_long$Injections, "levels", tmp_injections)
    case_long[, Extraction := "Original"]
    # m-score == 2.0 marks requant measurements.
    case_long[case_long$Score == 2.0, ]$Extraction <- "Requant"
    p_matplot <- ggplot(case_long, aes(x=Injections, y=log2(Intensity))) +
      geom_line(aes(colour=PeptideIon, group=PeptideIon, linetype=Selection)) +
      geom_point(aes(shape=Extraction, solid=F), size=3) +
      geom_text(data=case_long[case_long$Injections==tail(case_long$Injections, n=1), ],
                aes(colour=PeptideIon, label=PeptideIon, vjust=-2, hjust=1), size=3) +
      theme(axis.text.x = element_text(angle = 45))
  } else {
    # Transition level: same plot keyed on aggr_Fragment_Annotation using the
    # aggr_Peak_Area_* measurement columns.
    case$aggr_Fragment_Annotation <- factor(case$aggr_Fragment_Annotation,
                                            levels=case[colnames(a_reorder),
                                                        on="aggr_Fragment_Annotation"]$aggr_Fragment_Annotation)
    tmp_injections <- gsub("aggr_Peak_Area_", "",
                           names(case)[grepl("^aggr_Peak_Area", names(case))])
    case_long <- melt(case, id.vars=c("aggr_Fragment_Annotation", "PeptideIon", "Selection"),
                      measure.vars = patterns("^aggr_Peak_Area_", "^Score_"),
                      value.name=c("aggr_Peak_Area", "Score"), variable.name="Injections")
    setattr(case_long$Injections, "levels", tmp_injections)
    case_long[, Extraction := "Original"]
    case_long[case_long$Score == 2.0, ]$Extraction <- "Requant"
    p_matplot <- ggplot(case_long, aes(x=Injections, y=log2(aggr_Peak_Area))) +
      geom_line(aes(colour=PeptideIon, group=aggr_Fragment_Annotation,
                    linetype=Selection)) +
      geom_point(aes(shape=Extraction, solid=F), size=3) +
      geom_text(data=case_long[case_long$Injections==tail(case_long$Injections, n=1), ],
                aes(colour=PeptideIon, label=aggr_Fragment_Annotation,
                    vjust=-2, hjust=1), size=3) +
      theme(axis.text.x = element_text(angle = 45))
  }
  return(p_matplot)
}
#' Visualize correlation heatmap
#'
#' @description A function to visualize heatmap representing correlation between
#' different peptides corresponding to either the same or different proteins.
#'
#' @param case data table or data frame in wide representation. The data typically
#' contains \code{"PeptideIon"}, \code{"ProteinName"} and sample names in columns and
#' measurements of each peptide or precursor ions in rows.
#'
#' To visualize profile of peptides corresponding to the same protein,
#' recommend to prepare a data table filtered on peptides
#' corresponding to a protein of interest
#'
#' @param cutoff_prob a numeric value denoting posterior probability threshold
#' to keep or remove peptides.
#'
#' @export
#'
#' @examples
#' global_level = "PeptideIon"
#' d_feature <- calc_features(peptideIon_st)
#' d_feature_select <- perform_selection(d_feature)
#'
#' prot_name <- c("1/sp|O00429|DNM1L_HUMAN")
#' test_prot <- d_feature_select[d_feature_select$ProteinName==prot_name, ]
#' p <- plot_cor_heatmap(test_prot)
#'
plot_cor_heatmap <- function(case, cutoff_prob=0.3) {
  # Clustered pairwise correlation matrix of the peptides in `case`
  # (compute_cor also annotates `case` with a Selection column).
  cor_mat <- compute_cor(case = case, cutoff_prob = cutoff_prob)
  # Long format: one row per (peptideIon, PeptideIon) cell of the matrix.
  cor_long <- melt(cor_mat, value.name = "Correlation",
                   varnames = c("peptideIon", "PeptideIon"))
  # Red-black-green tile map over [-1, 1], each cell labelled with its value.
  heatmap_plot <- ggplot(cor_long,
                         aes(x = peptideIon, y = PeptideIon, fill = Correlation)) +
    geom_tile() +
    scale_fill_gradient2(limits = c(-1, 1), low = "red", mid = "black",
                         high = "green", space = "Lab", midpoint = 0) +
    geom_text(aes(peptideIon, PeptideIon, label = Correlation),
              col = "white", size = 2.5) +
    theme(axis.text.x = element_blank())
  heatmap_plot
}
#' Visualize intensity and posterior probability in barplots
#'
#' @description A function to visualize average peptide intensity and
#' posterior probability of being the representative peptide via barplots.
#'
#' @param case data table or data frame in wide representation. The data typically
#' contains \code{"PeptideIon"}, \code{"ProteinName"} and sample names in columns and
#' measurements of each peptide or precursor ions in rows.
#'
#' To visualize profile of peptides corresponding to the same protein,
#' recommend to prepare a data table filtered on peptides
#' corresponding to a protein of interest
#'
#' @param cutoff_prob a numeric value denoting posterior probability threshold
#' to keep or remove peptides.
#' @param plot_intensity a logical indicating whether to plot intensity barplots
#' @param plot_prob a logical indicating whether to plot posterior probability
#' barplots
#'
#' @export
#'
#' @examples
#' global_level = "PeptideIon"
#' d_feature <- calc_features(peptideIon_st)
#' d_feature_select <- perform_selection(d_feature)
#'
#' prot_name <- c("1/sp|O00429|DNM1L_HUMAN")
#' test_prot <- d_feature_select[d_feature_select$ProteinName==prot_name, ]
#' p <- plot_bar_intensity_n_probability(test_prot)
#'
plot_bar_intensity_n_probability <- function(case, cutoff_prob=0.3,
                                             bool_isPeptideIon = TRUE,
                                             plot_intensity = TRUE,
                                             plot_prob = TRUE) {
  # Barplots of mean peptide intensity (with +/- sd error bars, sd = cv * mean)
  # and/or posterior probability of being a representative peptide.
  # Returns a single ggplot when one of plot_intensity/plot_prob is TRUE, the
  # stacked grid.arrange() result when both are, and NULL when neither is.
  # NOTE: compute_cor() also annotates `case` with the Selection column used
  # to colour the probability bars.
  a_reorder <- compute_cor(case = case, cutoff_prob=cutoff_prob)
  p_plot <- NULL
  if(bool_isPeptideIon == TRUE) {
    # Precursor-ion level: order factor levels by the clustered correlation matrix.
    case$PeptideIon <- factor(case$PeptideIon, levels=case[colnames(a_reorder),
                                                           on="PeptideIon"]$PeptideIon)
    limits <- aes(ymax = case$feature_mean_intensity_all +
                    case$feature_cv_intensity_all * case$feature_mean_intensity_all,
                  ymin = case$feature_mean_intensity_all -
                    case$feature_cv_intensity_all*case$feature_mean_intensity_all)
    if(plot_intensity == TRUE){
      p_bar_int <- ggplot(case, aes(x=PeptideIon, y=feature_mean_intensity_all, fill=ProteinName)) +
        geom_bar(stat="identity") + coord_flip() + geom_errorbar(limits, width=0.5)
      # BUG FIX: the original never assigned p_plot in this branch, so calling
      # with only one of plot_intensity/plot_prob enabled failed on return.
      p_plot <- p_bar_int
    }
    if(plot_prob == TRUE){
      # wenguang: please note that coord_flip() and coord_cartesian() are exclusive.
      p_bar_prob <- ggplot(case, aes(x=PeptideIon, y=prob, fill=Selection)) +
        geom_bar(stat="identity") + coord_flip(ylim=c(0,1))
      p_plot <- p_bar_prob
    }
  } else {
    # Transition level: same bars keyed on aggr_Fragment_Annotation.
    case$aggr_Fragment_Annotation <- factor(case$aggr_Fragment_Annotation,
                                            levels=case[colnames(a_reorder),
                                                        on="aggr_Fragment_Annotation"]$aggr_Fragment_Annotation)
    limits <- aes(ymax = case$feature_mean_intensity_all +
                    case$feature_cv_intensity_all * case$feature_mean_intensity_all,
                  ymin = case$feature_mean_intensity_all -
                    case$feature_cv_intensity_all*case$feature_mean_intensity_all)
    if(plot_intensity == TRUE){
      p_bar_int <- ggplot(case, aes(x=aggr_Fragment_Annotation,
                                    y=feature_mean_intensity_all, fill=ProteinName)) +
        geom_bar(stat="identity") + coord_flip() + geom_errorbar(limits, width=0.5)
      p_plot <- p_bar_int
    }
    if(plot_prob == TRUE){
      p_bar_prob <- ggplot(case, aes(x=aggr_Fragment_Annotation, y=prob, fill=Selection)) +
        geom_bar(stat="identity") + coord_flip(ylim=c(0,1))
      # BUG FIX: the original assigned the logical argument (`p_plot <- plot_prob`)
      # instead of the plot object, so this branch returned TRUE, not a plot.
      p_plot <- p_bar_prob
    }
  }
  if(plot_intensity == TRUE && plot_prob == TRUE){
    # Both requested: stack the two bar charts vertically (draws as a side effect).
    p_plot <- grid.arrange(p_bar_int, p_bar_prob)
  }
  return(p_plot)
}
#' Plot density distribution of peptide features
#'
#' @description A function to visualize distribution of computed features through
#' density plots. These features include average intensity, coefficient of variance
#' and standard deviation.
#'
#' @param data data table or data frame in wide representation. The data typically
#' contains \code{PeptideIon}, \code{ProteinName} and sample names in columns and
#' measurements of each peptide or precursor ions in rows. The function is
#' particularly useful after \code{calc_features()} step to visualize distribution of
#' computed feature statistics.
#'
#' @param feature a character vector denoting a peptide feature for density
#' plots. Examples include \code{"feature_mean_intensity_all"},
#' \code{"feature_cv_intensity_all"}, \code{"scaled_median_PCC"} and etc.
#' @param fill a character vector denoting a column name to color by
#' @param font.size font size of x and y labels
#' @param title title of density plot
#'
#' @export
#'
#' @examples
#' global_level = "PeptideIon"
#' d_feature <- calc_features(peptideIon_st)
#'
#' p <- plot_density(d_feature, feature = "feature_mean_intensity_all")
#'
plot_density <- function(data, feature = "feature_mean_intensity_all",
                         fill = NULL,
                         font.size = 12, title = feature){
  # Density plot of one feature column; raw mean intensities are shown on a
  # log2 scale. `title` defaults (lazily) to the feature name.
  plot_data <- data
  if (feature == "feature_mean_intensity_all") {
    plot_data[[feature]] <- log2(plot_data[[feature]])
  }
  axis_font <- element_text(size = font.size)
  base_plot <- ggplot(plot_data, aes_string(x = feature, fill = fill)) +
    geom_density(alpha = 0.8)
  # Minimal theme: no grid, no panel chrome, no legend; keep plain axis lines.
  base_plot +
    theme(
      axis.text = axis_font,
      axis.title = axis_font,
      legend.position = "none",
      panel.grid.major = element_blank(),
      panel.grid.minor = element_blank(),
      panel.border = element_blank(),
      panel.background = element_blank()
    ) +
    labs(fill = "") +
    theme(axis.line.x = element_line(color = "black"),
          axis.line.y = element_line(color = "black")) +
    ggtitle(title)
}
# Pairwise Pearson correlation matrix between the peptides/transitions in
# `case`, with NA correlations zeroed and rows/columns reordered by
# reorder_cormat() (defined elsewhere in the package).
# SIDE EFFECT: `case[, Selection := ...]` uses data.table assignment by
# reference, so the caller's table gains/overwrites a Selection column
# ("Kept" where prob > cutoff_prob, else "Removed").
compute_cor <- function(case, cutoff_prob=0.3){
  if("aggr_Fragment_Annotation" %in% names(case)) {
    # the input is transition level data
    bool_isPeptideIon <- 0
    index_intensity <- which(grepl("^aggr_Peak_Area_", names(case)))
  } else {
    # the input is peptide level data
    bool_isPeptideIon <- 1
    index_intensity <- which(grepl("^Intensity_", names(case)))
  }
  case[, Selection := "Removed"]
  case[case$prob > cutoff_prob, ]$Selection <- "Kept"
  # Rows = peptides after t(); use="p" (pairwise complete obs), method="p"
  # (Pearson); values rounded to 2 decimals for the heatmap labels.
  a <- round(cor(t(case[, index_intensity, with=F]), use="p", method="p"), 2)
  if(bool_isPeptideIon == TRUE) {
    rownames(a) <- case$PeptideIon
    colnames(a) <- case$PeptideIon
  } else {
    rownames(a) <- case$aggr_Fragment_Annotation
    colnames(a) <- case$aggr_Fragment_Annotation
  }
  # NA correlations (e.g. zero-variance rows) are treated as "uncorrelated".
  a[is.na(a)] <- 0
  a_reorder <- reorder_cormat(a)
  return(a_reorder)
}
|
85b00605ad7d5994f48d96edeba0ed6166fbf70f
|
6e00706c34f29f0aab07719b3fee50eec28abc5c
|
/man/meta_interaction_term.Rd
|
90473e8f63935e8e80ac3b6531d5ebf7b75e97cc
|
[] |
no_license
|
th-zelniker/TIMI
|
21f53786b5ee6e0d2ffefc19cd8dc36c424340e6
|
53ce5a7e195b6646b70424a80829a32734a41a0e
|
refs/heads/master
| 2022-10-13T23:26:24.055586
| 2022-09-23T14:11:35
| 2022-09-23T14:11:35
| 135,347,152
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 886
|
rd
|
meta_interaction_term.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/meta_interaction.R
\name{meta_interaction_term}
\alias{meta_interaction_term}
\title{Calculate the interaction term incl. 95 Percent-CI when the RR or HR for two subgroups are given.}
\usage{
meta_interaction_term(
Group_A_HR,
Group_A_CI_lower,
Group_A_CI_upper,
Group_B_HR,
Group_B_CI_lower,
Group_B_CI_upper
)
}
\arguments{
\item{Group_A_HR}{HR or RR of Group A}
\item{Group_A_CI_lower}{Lower CI of Group A}
\item{Group_A_CI_upper}{Upper CI of Group A}
\item{Group_B_HR}{HR or RR of Group B}
\item{Group_B_CI_lower}{Lower CI of Group B}
\item{Group_B_CI_upper}{Upper CI of Group B}
}
\value{
Interaction term as HR/RR incl. 95 percent -CI
}
\description{
Calculate the interaction term incl. 95 Percent-CI when the RR or HR for two subgroups are given.
}
|
53f16d749239c991bf6cce789a3d92802d4dfa56
|
2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0
|
/fuzzedpackages/PoweR/R/plot.discrepancy.R
|
3823ab473f055a4bc7331667fff18c38b1600911
|
[] |
no_license
|
akhikolla/testpackages
|
62ccaeed866e2194652b65e7360987b3b20df7e7
|
01259c3543febc89955ea5b79f3a08d3afe57e95
|
refs/heads/master
| 2023-02-18T03:50:28.288006
| 2021-01-18T13:23:32
| 2021-01-18T13:23:32
| 329,981,898
| 7
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,361
|
r
|
plot.discrepancy.R
|
plot.discrepancy <- function(x, legend.pos = NULL, ...) {
  # P-value discrepancy plot for an "Fx" object produced by calcFx():
  # one line per statistic showing Fx(x) - x against the nominal size.
  # `legend.pos` may be NULL (default position), a keyword string such as
  # "topright", or a numeric (x, y) pair.
  Fx <- x
  if (!inherits(Fx, "Fx")) stop("Method is only for Fx objects!")
  ## Retrieve informations from Fx computed by calcFx()
  Fxi <- Fx$Fx.mat
  xi <- Fx$x
  law <- Fx$law
  statnames <- Fx$statnames
  N <- Fx$N
  ## Now we plot these (xi,Fxi-xi) on the same graph
  # Empty canvas spanning the full discrepancy range, then the zero line.
  plot(c(0, 1), c(min(Fxi - xi), max(Fxi - xi)), type = "n", main = paste("P value discrepancy plots", "\n N = ", N, " & m = ", length(xi), sep = ""),
       sub = paste("Data: ", law, sep = ""), xlab = "Nominal Size", ylab = "Size Discrepancy", ...)
  lines(x = c(0, 1), y = c(0, 0), lty = "dotted", col = 1)
  # One discrepancy curve per statistic, colours starting at 2.
  for (i in seq_along(statnames)) {
    points(xi, Fxi[, i] - xi, type = "l", col = i + 1, ...)
  }
  # Legend placement.
  # BUG FIX: the original used two independent `if`s, so a character
  # legend.pos drew the legend twice — the second time through the numeric
  # branch, where legend(x = "topright", y = "topright") raises an error.
  legend_cols <- 2:(length(statnames) + 1)
  if (is.character(legend.pos)) {
    # Keyword position such as "topright".
    legend(x = legend.pos, legend = statnames, col = legend_cols, lty = 1)
  } else if (is.null(legend.pos)) {
    # Default: right of centre, near the top of the discrepancy range.
    legend.x <- 0.7
    legend.y <- 0.9 * max(Fxi - xi)
    legend(x = legend.x, y = legend.y, legend = statnames, col = legend_cols, lty = 1)
  } else {
    # Explicit numeric (x, y) coordinates.
    legend(x = legend.pos[1], y = legend.pos[2], legend = statnames, col = legend_cols, lty = 1)
  }
  invisible(NULL)
}
|
91251469ac2175ac7e74a4af244356df3d8762dc
|
1671090de75c79510a1bb0c2af13694e588e899d
|
/kim_Rstudy/postcrawling.R
|
5e4ebe855b813cc332e45702ffa25d36c5b99479
|
[] |
no_license
|
sam351/TIL
|
c8d4dd7088003a22488cdaf8ad2a3b1192bc0b10
|
4c82ae553a4512bcc26658de9803b908a6d0b60a
|
refs/heads/master
| 2022-12-10T17:57:47.003905
| 2020-12-15T13:39:54
| 2020-12-15T13:39:54
| 226,819,223
| 0
| 0
| null | 2022-12-08T03:44:47
| 2019-12-09T08:15:03
|
Jupyter Notebook
|
UTF-8
|
R
| false
| false
| 225
|
r
|
postcrawling.R
|
# Demo of an HTTP POST with form-encoded fields, then scraping the response.
library(httr)
library(rvest)
url <- "http://unico2013.dothome.co.kr/crawling/post.php"
# Send name/age as application/x-www-form-urlencoded body fields.
res <- POST(url, encode = "form", body = list(name = "R", age = 27))
# Parse the returned HTML and select every <h1> node.
nodes <- html_nodes(read_html(res), "h1")
# Print the text content of the matched nodes.
print(html_text(nodes))
|
635e238f01c08b40b213d2111ffe907a4836c3ec
|
4edb70f7ad1b1fb7aff633ec9a2c38e62ff2d9e3
|
/mcb/figures/src/fig1.R
|
0dbc420daa596d894135a40c1c91dc28032cb0cf
|
[] |
no_license
|
zhuchcn/egg_study
|
472a67f3b6f4f086d3c825cde650ecb60ab9aeb0
|
6f420d6aea901c183bd9bcd7f79056231ceab2ad
|
refs/heads/master
| 2020-03-23T04:09:31.828577
| 2019-10-01T22:38:26
| 2019-10-01T22:38:26
| 141,053,362
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,090
|
r
|
fig1.R
|
# Figure 1 assembly script: alpha diversity (A), beta diversity PCoA (B) and
# phylum-level composition pie (C), combined with cowplot and saved as PNG.
# Relies on global.R for helpers/constants (point.size, title.size, %+%,
# theme_boxplot, theme_scatter, my_theme — TODO confirm) and on data.rda
# for the `data` and `tree` objects.
# NOTE: setwd() to the script's own directory only works when run via
# source(), where parent.frame(n = 2)$ofile is set.
setwd(dirname(parent.frame(n = 2)$ofile))
source("global.R")
load("../data/data.rda")
# Figure 1A
# alpha diversity boxplot before and after egg and white
library(Metabase)
library(phyloseq)
otu = as_phyloseq(data$mcb$count$data$otu)
# Shannon diversity per sample, annotated with study metadata for plotting.
alpha_div = estimate_richness(otu, measures = "Shannon")
alpha_div$Timepoint = sample_data(otu)$Timepoint
alpha_div$Treatment = sample_data(otu)$Treatment
alpha_div$Subject = sample_data(otu)$StudyID
# Paired boxplot: each subject's pre/post values connected by a line.
p1A = ggplot(alpha_div, aes(x = Timepoint, y = Shannon))+
    geom_boxplot()+
    geom_point(aes(color = Subject), size = point.size)+
    geom_line(aes(group = Subject, color = Subject))+
    scale_color_manual(values = colorRampPalette(pal_npg()(10))(19))+
    facet_grid(cols = vars(Treatment))+
    labs(title = "Shannon Diversity")+
    theme_boxplot()+
    theme(legend.position = "none")
# Figure 1B
# beta diversity PCoA plot with weighted unifrac
ps = as_phyloseq(data$mcb$percent$data$otu)
# Weighted UniFrac needs the phylogenetic tree merged into the phyloseq object.
ps_tree = merge_phyloseq(ps, tree)
ps_ord = ordinate(ps_tree, "PCoA", "wunifrac")
# First two ordination axes plus metadata; strip the "EG" prefix from IDs.
ps_df = data.frame(ps_ord$vectors[,1:2],
                   StudyID = sample_data(ps_tree)$StudyID,
                   Treatment = sample_data(ps_tree)$Treatment,
                   Timepoint = sample_data(ps_tree)$Timepoint) %>%
    mutate(StudyID = gsub("EG", "", StudyID))
p1B = ggplot(data = ps_df)+
    geom_point(aes(x = Axis.1, y = Axis.2, color = StudyID), size = point.size)+
    scale_color_manual(values = colorRampPalette(pal_npg()(10))(19)) +
    guides(color = guide_legend(nrow = 2))+
    labs(title = "Weighted Unifrac PCoA")+
    theme_scatter()
# Extract the subject legend to place it once under the combined figure.
legend = cowplot::get_legend(p1B)
p1B = p1B + theme(legend.position = "none")
# Figure 1C
# stacked taxonomy bar plot at phylum level before and after egg and white
# Mean relative abundance per phylum; label each with its percentage,
# collapsing values below 0.01% to "<0.01".
row_sums = data.frame(
    phylum = featureNames(data$mcb$percent$data$phylum),
    value = rowMeans(conc_table(data$mcb$percent$data$phylum))
) %>%
    arrange(desc(value)) %>%
    mutate(
        phylum = phylum %+% " [" %+% ifelse(
            value > 10^-4, round(value *100, 2), "<0.01"
        ) %+% "%]"
    ) %>%
    mutate(
        phylum = factor(phylum, levels = phylum)
    )
# Seed fixes the random shuffle of the 12 palette colours below.
set.seed(19)
# Pie chart = stacked single bar in polar coordinates.
p1C = ggplot(row_sums, aes(x = "", y = value, fill = phylum))+
    geom_bar(width = 1, stat = "identity")+
    scale_fill_manual(
        values = colorRampPalette(pal_npg()(10))(12)[sample(1:12,12)]
    )+
    coord_polar("y")+
    guides(fill = guide_legend(title = ""))+
    my_theme()+
    theme(
        axis.line = element_blank(),
        axis.title = element_blank(),
        axis.text = element_blank(),
        axis.ticks = element_blank(),
        panel.grid = element_blank(),
        panel.border = element_blank(),
        legend.text = element_text(size = title.size - 8),
        legend.key.size = unit(0.4, "inches")
    )
p1C
# Combine: 2x2 grid of panels (top-left empty) with the shared legend below.
p = plot_grid(
    plot_grid(
        NULL, p1C, p1A, p1B, labels = c("A", "B", "C", "D"), ncol = 2,
        label_size = title.size
    ),
    legend,
    nrow = 2,
    rel_heights = c(1, 0.1)
)
ggsave("../png/fig1.png", p, width = 18, height = 16, units = "in", dpi = 300)
|
352ffa601c1a086c6351b5cdc19f71b95c264ea2
|
ff71205c6230bc579c380b2b38247934ec44074c
|
/Project 3/Question 1.R
|
97160aa648cfbf82592395b12d0b8871881afd40
|
[] |
no_license
|
lamawmouk/Data-Mining-II
|
a05f01ffc1a30267a7394b4753fb46de19803d94
|
a96231b5be0ba28d5c8393454ebb7f7ae174e9de
|
refs/heads/master
| 2023-01-08T16:58:51.847201
| 2020-11-08T04:43:39
| 2020-11-08T04:43:39
| 310,982,071
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,419
|
r
|
Question 1.R
|
# Homework script: fit a self-organising map (SOM) to the NCI microarray data,
# inspect it with the standard kohonen plots, then cluster the prototype
# (codebook) vectors hierarchically and colour the map by cluster.
# NOTE: x11() calls require an interactive X display.
# Delete all the objects in the workspace
rm(list=ls())
#Problem 1
setwd("C:/Users/Administrator/W_all_pt1/Desktop/hw3")
#load package which has nci data
#install.packages("ElemStatLearn") #package 'ElemStatLearn' is not available (for R version 3.6.3)
#library(ElemStatLearn)
load(("C:/Users/Administrator/W_all_pt1/Desktop/hw3/nci.RData"))
head(nci)
unique(colnames(nci))
#scale the nci data centre and divide by std i.e. z-score of columnns- everthing on same scale for SOM -
# t(nci): samples become rows, genes become columns before z-scoring.
nci.scaled <- scale(t(nci))
#Fit an SOM
#install.packages("kohonen")
library(kohonen)
# Seed makes the SOM initialisation (and hence the map) reproducible.
set.seed(123)
#Create the grid - can create it in the function grid
som_grid <- somgrid(xdim = 5, ydim = 5, topo = "hexagonal") #Set a grid size 5 by 5/ topology(hexqgonal/rectangular)
#fit the SOM using the grid
nci.som <- som(nci.scaled, grid = som_grid, rlen = 3000) # number of times run through iterations
names(nci.som)
codes <- nci.som$codes[[1]] # 5 by 5 have 25 V1 is a prototype/ this is stored in list structure
codes
# unit.classif: index of the winning prototype for each sample.
unit<-nci.som$unit.classif
unit # First one is close to protoype V2=2
changes<-nci.som$changes
changes # give change of the code book vectors as go through the iterations and tell us it we are converging
?plot.kohonen
#Plot the SOM -defauclt is code- 5 by 5 hexogonal grid- all the variables and all code book vectors for rach protoype; scaled data and only magnitude
x11()
plot(nci.som, type="codes", main = "Nci Data")
x11()
plot(nci.som, type = "changes", main = "Nci Data")
x11()
plot(nci.som, type = "count", main = "Nci Data") # count tells us how many different items/cancer map into the different units
#Get impression of the distrubtion of the data points over where they map over the grid
x11() #circles are the prototypes
plot(nci.som, type = "mapping",label=row.names(nci.scaled),cex=0.5) #Like plot with colors ,but can count the dots- gives idea where points sit wrt to prototypes
#Look at a U matrix plot
# Reversed rainbow palette: blue = small distances, red = large.
coolBlueHotRed <- function(n, alpha = 1){rainbow(n, end=4/6, alpha = alpha)[n:1]}
x11() #add palette -not U-matrix it is dimension of SOM-gives impression of distance to nehgiobred- blue are closest ones- the green less/red are highly disimilar
plot(nci.som, type = "dist.neighbours", palette.name = coolBlueHotRed)
#Distane neighbour plot; know class labels=cluster=classes
# component plane plots #visualize orginal data over the SOM we fit
#for (i in 1:64){
#x11()# there are 64 varibales;type is property which is variable;property is codes; how varibales change as look thru map
#plot(nci.som, type = "property", property=codes[,i], main = colnames(codes)[i]) #column names of code matrix
#}
#Cluster the informations
# Cluster the prototypes (not the raw samples): Euclidean distance + hclust.
d <- dist(codes) #create a distance matrix- d dissmialirty btw codes using euldeian distance
#Perform the clustering
hc <- hclust(d)
#Visualize the dendogram- derived from the dissimialrity between the code book vectors
x11()
plot(hc)
#Create an object called som_cluster by cutting the tree
som_cluster <- cutree(hc, h=130) # cut at the height
# plot the SOM with the found clusters
#there are three groups here -know that from dendogram
#create 3 clolors for background
my_pal <- c("red", "blue", "yellow")
#assign colors to the prototypes
my_bhcol <- my_pal[som_cluster]
x11()
plot(nci.som, type = "mapping", label=row.names(nci.scaled),cex= 0.5,col = "black", bgcol = my_bhcol)
#SOM can create boundaries
add.cluster.boundaries(nci.som, som_cluster)
|
5bfce59108234999f8cff9e6fa79cec4ad7c4636
|
bd0bb262fda22a09140f77b8eb6db5b5b951d895
|
/f_sim_pop_dyn.R
|
84bad77cff62e07ee0c0cb8ceddd55d9d46fa0c7
|
[] |
no_license
|
erikbsolbu/ecy22
|
470ae9a26be9f5bf70e92475a484e1d95274c9ce
|
871f18869f7538841ab9da4a1f12cc749cdbfaff
|
refs/heads/main
| 2023-04-08T10:37:06.115137
| 2022-03-24T12:03:04
| 2022-03-24T12:03:04
| 456,653,719
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,424
|
r
|
f_sim_pop_dyn.R
|
# Simulate a discrete-time population dynamic model for a community of species.
#
# Arguments:
#   n_sp:  number of species
#   tmax:  number of time steps simulated after the initial state
#   gamma: strength of density regulation
#   ss2:   variance of the species-specific response to environmental variation
#   sr2:   variance in growth rate among species (heterogeneity)
#   sE2:   variance of the general (shared) response to environmental variation
#   r0:    mean growth rate
#
# Returns: a tibble in long format with columns `species` (factor),
# `log_abundance` (numeric) and `year` (integer 1 .. tmax + 1).
# NOTE: requires tibble/dplyr/tidyr to be attached by the caller (as in the
# original); only the trajectory construction below is base R.
f_sim_pop_dyn <- function(n_sp = 100,
                          tmax = 50,
                          gamma = 0.01,
                          ss2 = 0.06,
                          sr2 = 0.00001,
                          sE2 = 0.04,
                          r0 = 0.1){
  # Draw an intrinsic growth rate for each species.
  r <- rnorm(n_sp, r0, sqrt(sr2))
  # Initial log abundance equals the mean log carrying capacity, r0 / gamma.
  x0 <- rep(r0 / gamma, n_sp)
  # General (community-wide) environmental deviations, one per time step.
  sc <- rnorm(tmax, 0, sqrt(sE2))
  # Preallocate the trajectory matrix (species x time) instead of growing it
  # with cbind() inside the loop (O(n^2) copying in the original).
  x <- matrix(0, nrow = n_sp, ncol = tmax + 1)
  x[, 1] <- x0
  for (i in seq_len(tmax)){
    # Next log abundance = previous value
    #   + density-regulated growth (r - gamma * x)
    #   + species-specific environmental noise
    #   + shared environmental noise for this year.
    x[, i + 1] <- x[, i] +
      r - gamma * x[, i] +
      rnorm(n_sp, 0, sqrt(ss2)) +
      sc[i]
    # Extinction is absorbing: log abundances below zero are clamped to zero
    # and the species cannot re-enter the community.
    x[, i + 1] <- pmax(x[, i + 1], 0)
  }
  # Name each column as a year.
  colnames(x) <- paste("y", 0:tmax, sep = "_")
  # Reshape to long format: one row per species per year.
  x <- as_tibble(x) %>%
    # Each row of the matrix is a species.
    add_column(species = as.factor(seq_len(n_sp))) %>%
    pivot_longer(cols = -species,
                 values_to = "log_abundance",
                 names_to = "year") %>%
    # Drop the character "y_*" labels ...
    dplyr::select(-year) %>%
    # ... and replace them with a numeric 1..(tmax + 1) index per species.
    group_by(species) %>%
    mutate(year = seq_along(log_abundance)) %>%
    ungroup()
  # Return simulated log abundances.
  return(x)
}
|
fc4394d230670cb63af00022e0342c641cba5a7e
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/officer/examples/docx_summary.Rd.R
|
d0c360310862f4872ab35db8a71c27aff1b1d100
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 263
|
r
|
docx_summary.Rd.R
|
library(officer)

### Name: docx_summary
### Title: get Word content in a tidy format
### Aliases: docx_summary

### ** Examples

# Locate the example .docx shipped with the officer package, open it,
# and summarise its content as a tidy data frame.
example_path <- system.file("doc_examples/example.docx", package = "officer")
example_doc <- read_docx(example_path)
docx_summary(example_doc)
|
ffd999013d97e97f4b9da033bcef4243c92f05f8
|
8adaa7c9a5505cb3592846f9ee6c1351483e49df
|
/src/H1/extra.R
|
c5a8c7a2fdcf38a1b0ad8730e95ba1a7bfe50027
|
[] |
no_license
|
yyyliu/crowdsourced_data_analysis
|
c5a0f32d195964b877019c8bf28e0c8dcc98ba9b
|
ded93041cc4c66512ad4175b575c001e3b2db236
|
refs/heads/master
| 2023-01-31T14:28:44.071570
| 2020-12-15T00:22:37
| 2020-12-15T00:22:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 748
|
r
|
extra.R
|
# Template snippet (not directly runnable R): `{{DV}}`, `{{IV}}` and `{{_n}}`
# are mustache-style placeholders substituted by an external code generator
# with the dependent variable, independent variable and analysis number.
# Computes model fit (cross-validated NRMSE) and coefficient uncertainty;
# the author notes it works for lm only.
# Relies on helpers defined elsewhere: cross_validation(), pointwise_predict(),
# sampling_distribution() -- their contracts are not visible here.
# --- (O)
# Cross-validated fit, normalised by the observed range of the DV.
fit <- cross_validation(df, model, '{{DV}}')
# NOTE(review): uses `=` assignment, inconsistent with the `<-` used elsewhere.
nrmse = fit / (max(df${{DV}}) - min(df${{DV}}))
# Tidy the coefficient of interest and attach the fit metric.
result <- tidy(model, conf.int = TRUE) %>%
filter(term == '{{IV}}') %>%
add_column(NRMSE = nrmse)
# Per-observation predictions paired with the observed DV values.
disagg_fit <- pointwise_predict(model, df) %>%
select(
observed = '{{DV}}',
expected = fit
) %>%
# remove rows where model can't make a prediction
drop_na(expected)
# Sampling distribution of the IV coefficient (uncertainty estimate).
uncertainty <- sampling_distribution(model, '{{IV}}') %>%
select(estimate = coef)
# Persist both outputs, keyed by the analysis number placeholder.
write_csv(disagg_fit, '../results/disagg_fit_{{_n}}.csv')
write_csv(uncertainty, '../results/uncertainty_{{_n}}.csv')
|
9abb8de96360434bf165a36938ff665e7c2b0b8c
|
ae364e75fa67793917d78042759ff1c951c2ca8c
|
/MSE_model_2.R
|
52554cd460e08eb143ea1560da04af6443aef469
|
[] |
no_license
|
joewbull/MSE_offsets
|
a89bd11c8abe78f291c9643dd690bcfa6fc43b07
|
d2d168fb2d968683196141048d375dc7bd0e2c28
|
refs/heads/master
| 2020-08-04T08:44:32.418316
| 2019-10-01T11:23:19
| 2019-10-01T11:23:19
| 212,077,512
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 17,993
|
r
|
MSE_model_2.R
|
####################################
## Management Strategy Evaluation ##
####################################
#-----------------------------------------------------------------------------#
#-----------------------------------------------------------------------------#
# NOTE(review): unconditional install.packages() in a script reinstalls on
# every run and should normally be gated or removed; left as-is here.
install.packages("scatterplot3d")
install.packages("plot3D")
install.packages("akima")
install.packages("rgl")
library(scatterplot3d)
library(plot3D)
library(akima)
library(rgl)
#-----------------------------------------------------------------------------#
#-----------------------------------------------------------------------------#
#SENSITIVITY ANALYSIS AKA TESTING OF DIFFERENT 'SCENARIOS'
# One row per sensitivity-analysis iteration. The 12 columns are:
# 5 drawn parameters, 2 continuous outcomes (NNLb, NNLes), 2 binomial
# outcomes (NNLbBIN, NNLesBIN), and 3 cumulative metrics
# (impacts, avoidance, offsets).
iterations <- 1000
sensitivity.mat <- matrix(data=NA, nrow=iterations, ncol=12) #for however many iterations of the simulation run throughs; and on each row, 5 parameters, 2 continuous outcome metrics, and 2 binomial outcome metrics
colnames(sensitivity.mat) <- c("r","lambda","es.threshold","LC","CE","NNLb","NNLes","NNLbBIN","NNLesBIN","impacts","avoidance","offsets")
# Outer sensitivity-analysis loop: draw parameters, run the Monte Carlo /
# strategy-test / time-step simulation stack, and record outcome metrics.
# Structure: i (sensitivity) > k (Monte Carlo) > j (strategy parameter) >
# t (simulation time step).
for(i in 1:iterations){
# Draw parameter values for this sensitivity iteration.
r <- runif(1,0.25,0.75) #this time, running with 50% uncertainty around US values. Originally, taking an approximate range from numbers given in the figure in Pianka (1970), but may need changing
log.lambda <- runif(1,0,2) #essentially means development impacts result in loss of all biology over a time period from between 0 and 100 years...but weighted towards 100 year development impacts
log.lambda <- log.lambda * -1
lambda <- 10^log.lambda
#lambda <- runif(1,0.0015,0.0045) #this is the sensitivity range assuming 50% uncertainty in US wetlands value
#es.threshold <- runif(1,2,98) #so the threshold can be anywhere in the range, from B and ES being directly related to ES almost not depending on B at all
#es.threshold <- round(es.threshold)
CE <- runif(1,5,90) #based on the criteria for Critical Habitat for different components in IFC PS6
#CE <- runif(1,85,95) #arbitrary - avoidance criteria between 5% and 30% of remaining biology
CE <- round(CE)
LC <- runif(1,CE,99) #offsets required for anything from just above avoidance threshold to everything above avoidance threshold
LC <- round(LC)
sensitivity.mat[i,1] <- r
sensitivity.mat[i,2] <- lambda
# NOTE(review): the runif draw for es.threshold above is commented out, so in
# a fresh workspace this read fails on the first iteration -- es.threshold is
# only assigned later, inside the simulation block (hard-coded to 80). Confirm
# which configuration is intended before running.
sensitivity.mat[i,3] <- es.threshold
sensitivity.mat[i,4] <- LC
sensitivity.mat[i,5] <- CE
#-----------------------------------------------------------------------------#
#-----------------------------------------------------------------------------#
#LOOP TO CARRY OUT MONTE CARLO SIMULATION FOR TESTING STRATEGY
monte.carlo <- 50
mc.mat <- matrix(data=NA, nrow=0, ncol=0)
for(k in 1:monte.carlo){
#-----------------------------------------------------------------------------#
#-----------------------------------------------------------------------------#
#EXTRA LOOP TO CREATE 3D PLOTS (BLOCK OUT FOR MONTE CARLO AND SENSITIVITY ANALYSIS)
#extra.loop <- 40
#extra.results.mat <- matrix(data=NA, nrow=extra.loop, ncol=extra.loop)
#lambda <- 0.0001
#log.10.lambda <- log(lambda, base = 10)
#for(k in 1:extra.loop){
#-----------------------------------------------------------------------------#
#-----------------------------------------------------------------------------#
#LOOP TO CAPTURE PARAMETER BEHAVIOUR ACROSS DIFFERENT STRATEGIES
model.runs <- 96 #increase CE from 1 to 97 (keep below LC)
# NOTE(review): this overwrites the CE value drawn for the sensitivity
# analysis above; as written, the recorded CE and the CE actually used in the
# simulation differ.
CE <- 1 #parameter being tested
## alternative tests
#model.runs <- 17
#min.com <- 0.0
#max.com <- 0.2
#model.runs <- 40
#r <- 0.0001
#log.10.r <- log(r, base = 10)
#es.threshold <- 1 #parameter being tested
#m <- 100/es.threshold
results.mat <- matrix(data=NA, nrow=model.runs, ncol=5)
colnames(results.mat) <- c("CE","AbsoluteB","NetB","MagDevOff","AbsoluteDev")
#colnames(results.mat) <- c("AvCom","AbsoluteB","NetB","MagDevOff","AbsoluteDev")
for(j in 1:model.runs){
#-----------------------------------------------------------------------------#
#-----------------------------------------------------------------------------#
#SIMULATION MODEL
#set.seed(33) #for the stochastic version of the model, this function can be used to kee the 'random' number sequence constant
time <- 100 #100 for basic version, 50 for US
# obs.mat holds the full per-time-step trace of the current simulation run.
obs.mat <- matrix(data=NA, nrow=time, ncol=7)
colnames(obs.mat) <- c("time","biology","obs.biology","impacts","compliance","offsets","avoidance")
#PARAMETERS
#initial biology levels and parameters for a logistic growth function
biology <- 47 #47 for the US case study, basic version is 99
# NOTE(review): r and lambda below overwrite the sensitivity draws made in the
# outer loop (the inline comments say "block out when running sensitivity
# analysis", but they are active here) -- confirm intended configuration.
r <- 0.5 #block out when running sensitivity analysis, 0.5 for the US case study, basic value is 0.01 (in scenario 2 is 0.00)
K <- 100
#parameters for exponential biological decay function resulting from development impacts
lambda <- 0.003 #block out when running sensitivity analysis, 0.003 for the US case study, basic value is 0.01
delta.impacts <- 0
#parameter for offset store, and rate at which offsets are implemented
offset.store <- 0.0 #the offsets 'owed' to the system as a result of previous development impacts
offset.impl.rate <- 0.8 #the amount of the remaining offset store implemented every year. Basic value is 0.2, US value is high due to banking (say 0.8)
#parameters for linear relationship between biology and ES, up until a certain threshold value of biology (at which point es = 100)
es.threshold <- 80 #block out when running sensitivity analysis
m <- 100/es.threshold
c <- 0
#sd for the uncertainty distribution, observations made of the state of biology PLUS uncertainty in compliance with the rules
stdev.biology <- 10
min.com <- 0.3 #min degree of compliance with the rules set by the policymaker (0.3 basic case)
max.com <- 0.5 #max degree of compliance with the rules set by the policymaker (0.5 basic case)
av.com <- (min.com + max.com)/2
#rule making parameters
#block out when running sensitivity analysis
LC <- 99 #upper threshold, above which no mitigation is necessary ('Least Concern')
#CE <- 95 #lower threshold, above which offsets are required, below which avoidance is required ('Critically Endangered')
# dummy1 encodes the policy decided last step: 0 = no mitigation,
# 1 = offsets required, 2 = avoidance required.
dummy1 <- 0 #dummy variable - ignore
#model looped for specified number of time steps
for(t in 1:time){
#-----------------------------------------------------------------------------#
# Operating model: biodiversity trends
#logistic growth in biodiversity (or subtract delta biology from biology a few lines down for logistic decline)
delta.biology <- r*biology*(1-(biology/K))
delta.biology <- rnorm(1,mean=delta.biology,sd=(0.2*delta.biology)) #stochasticity in biodiversity change
B1 <- biology #dummy variables to be used in calculation of ecosystem services (see also B2)
biology <- biology + delta.biology
#fix for problems when running logarithmic values of r and l (3D plots)
#if(biology>=100){biology <- 99.99}
#if(biology<=0){biology <- 0.01}
B2 <- biology
#-----------------------------------------------------------------------------#
# Observation model: monitoring of biodiversity and developer compliance
#when there is some uncertainty in observation of biology
obs.biology <- rnorm(1,mean=biology,sd=stdev.biology) #uncertainty in observed diversity
#records actual and observed diversity
obs.mat[t,1] <- t
obs.mat[t,2] <- biology
obs.mat[t,3] <- obs.biology
#measure of the degree of uncertainty in compliance with the rules
degree.compliance <- sample(seq(from = min.com, to = max.com, by = 0.05), size = 1, replace = TRUE) #compliance is somewhere between the chosen value of the parameters 'min.com' and 'max.com' (total compliance = 1)
#-----------------------------------------------------------------------------#
# Implementation model: Developer Impacts and Mitigation
#if no mitigation required, exponential decay of biology as a result of development
if(dummy1==0){
delta.impacts <- -1*lambda*biology
biology <- biology + delta.impacts + (offset.store*offset.impl.rate*degree.compliance) #biology is impacted by development and any required offsets from the store
offset.store <- offset.store - (offset.store*offset.impl.rate) #the store is reduced by the offsets specified (ignoring compliance)
obs.mat[t,6] <- 0
obs.mat[t,7] <- 0
}
#if offsets are required, development occurs but also implement any biodiversity offsets required from previous timestep
if(dummy1==1){
offset.store <- offset.store + (-1 * delta.impacts) #last years impacts are added to the offset store
delta.impacts <- -1*lambda*biology #this years development
biology <- biology + (offset.store*offset.impl.rate*degree.compliance) + delta.impacts
offset.store <- offset.store - (offset.store*offset.impl.rate)
obs.mat[t,6] <- offset.store*offset.impl.rate*degree.compliance #obs.mat records the offset that actually occurs
obs.mat[t,7] <- 0 #avoidance = 0
}
#if avoidance is required, development this year is not allowed to occur
if(dummy1==2){
delta.impacts <- -1*lambda*biology #impacts which would have occurred, but are to be avoided
delta.impacts <- (1-degree.compliance)*delta.impacts #the amount of impacts which do occur because of a lack of compliance
biology <- biology + delta.impacts + (offset.store*offset.impl.rate*degree.compliance) #biology is affected by the impacts caused due to a lack of compliance, and offsets from the store
offset.store <- offset.store - (offset.store*offset.impl.rate)
obs.mat[t,6] <- offset.store*offset.impl.rate*degree.compliance #offsets from the store
obs.mat[t,7] <- -1 * delta.impacts #obs.mat records the impacts that actually occur
}
obs.mat[t,4] <- delta.impacts
obs.mat[t,5] <- degree.compliance
#-----------------------------------------------------------------------------#
# Ecosystem services #NOTE - not being studied in this version of the model, but could be incorporated at a later date
#ecosystem services are tied to biology, and fall linearly once biology crosses a specified threshold
#if(biology>=es.threshold){ecosystem.services <- 100}
#if(biology<es.threshold){ecosystem.services <- 100 - (m * (es.threshold-biology))}
#or we can try a different version in which ES are a deriviative of B (in this case, rate of change of biology from t1 to t2)
if(t==1){ecosystem.services <- 100}
if(t>=2){ecosystem.services <- (B2 - B1)/1}
#obs.mat[t,5] <- ecosystem.services
#-----------------------------------------------------------------------------#
# Decision model: policymaker sets rules for next time step
#rules for implementing offsets or avoidance, based on remaining state of OBSERVED biology
if(obs.biology>LC){dummy1<-0}
if(obs.biology<LC){
if(obs.biology>CE){dummy1<-1}
if(obs.biology<CE){dummy1<-2}
}
#rules based on remaining benefits (ES)
#if(biology>es.threshold){dummy1<-0}
#if(biology<=es.threshold){dummy1<-1} #offset if biology ever falls below the ES threshold
#if(ecosystem.services > 0){dummy1<-0}
#if(ecosystem.services < 0){
#  mag <- ecosystem.services*ecosystem.services
#  if(mag<1){dummy1<-1}
#  if(mag>1){dummy1<-2}
#}
#rules based on pressures (impacts)
#rules based on actions (avoidance and offsets)
#-----------------------------------------------------------------------------#
} #end of the time loop, main model
#outcomes at the end of the simulation, for the different types of metric. This enables sensitivity analysis
#state i.e. state of biology (NG = +ve)
biology.dev.cf <- (100*100*exp(r*time))/(K+(100*exp(r*time)-1)) #final value of biology if no development counterfactual had occurred, assuming the logistic growth function
nnl.outcome.biology <- obs.mat[time,2] - biology.dev.cf
#biology.dev.cf <- 100*exp(-1*lambda*time) #final value of biology if development only counterfactual had occurred, assuming the exponential decay function. We do not use this as this is not a sensible counterfactual (see Bull et al., 2014; Maron et al., in prep.)
#nnl.outcome.biology <- obs.mat[time,2] - biology.dev.cf
#benefit i.e. difference in es provision (NG = +ve)
#if(biology.dev.cf>=es.threshold){es.dev.cf <- 100}else{es.dev.cf <- m*biology.dev.cf}
#nnl.outcome.es <- obs.mat[time,5] - es.dev.cf
#mean.nnl.outcome.es <- mean(obs.mat[,5]) #may need to consider these too, because es provision is as much about provision during each time step as about the final provision
#cumulative.nnl.outcome.es <- sum(obs.mat[,5])
#compliance
average.compliance <- mean(obs.mat[,5])
#pressure i.e. total cumulative impacts on biodiversity
cumulative.impacts <- sum(obs.mat[,4])
#actions
cumulative.avoidance <- sum(obs.mat[,7])
cumulative.offsets <- sum(obs.mat[,6])
#-----------------------------------------------------------------------------#
#-----------------------------------------------------------------------------#
# Record this strategy run's outcomes, then step CE for the next run.
results.mat[j,1] <- CE
results.mat[j,2] <- obs.mat[time,2]
results.mat[j,3] <- nnl.outcome.biology
results.mat[j,4] <- cumulative.offsets #for US case study only
#results.mat[j,4] <- (-1 * cumulative.impacts) + cumulative.offsets
results.mat[j,5] <- (-1 * cumulative.impacts) #basic case
#extra.results.mat[j,k] <- nnl.outcome.biology
CE <- CE + 1
#min.com <- min.com + 0.05
#max.com <- max.com + 0.05
#log.10.r <- log.10.r + 0.1
#r <- 10^log.10.r
#es.threshold <- es.threshold + 1
#m <- 100/es.threshold
} #end of parameter test loop
#-----------------------------------------------------------------------------#
#-----------------------------------------------------------------------------#
#r <- 0.0001
#log.10.r <- log(r, base = 10)
#log.10.lambda <- log.10.lambda + 0.1
#lambda <- 10^log.10.lambda
#} #end of extra (3D plot) loop (BLOCK OUT IF RUNNING MONTE CARLO OR SENSITIVITY ANALYSIS)
#write.csv(extra.results.mat, file="MSE_output_3Dplot.csv")
#-----------------------------------------------------------------------------#
#-----------------------------------------------------------------------------#
# Accumulate Monte Carlo replicates column-wise (5 columns per replicate).
if(k==1){mc.mat <- results.mat}
if(k!=1){mc.mat <- cbind(mc.mat,results.mat)}
} #end of monte carlo loop
#process monte carlo simulation data
#process.mat <- matrix(data=0, nrow = model.runs, ncol = 4)
#colnames(process.mat) <- c("CE","AbsoluteB","NetB","MagDevOff")
#process.mat[,1] <- c(1:model.runs)
#process.mat[1,2] <- mean(mc.mat[1,2],mc.mat[1,6],mc.mat[1,10])
#testing <- as.matrix(subset(mc.mat, "AbsoluteB"))
#testing <- rowMeans(mc.mat, select="AbsoluteB")
#include.list <- "AbsoluteB"
#testing <- subset(mc.mat, select=c(AbsoluteB,AbsoluteB))
#testing <- apply(mc.mat, MARGIN="AbsoluteB", FUN=mean)
#testing <- dimnames(mc.mat)
# NOTE(review): this file is rewritten on every sensitivity iteration, so only
# the final iteration's Monte Carlo output survives -- confirm intent.
write.csv(mc.mat, file="MSE_output_USoutcomes.csv")
#-----------------------------------------------------------------------------#
#-----------------------------------------------------------------------------#
#place dependent variable results into matrix for sensitivity analysis
sensitivity.mat[i,6] <- nnl.outcome.biology
# NOTE(review): nnl.outcome.es is never computed in this configuration (its
# calculation above is commented out), so this read fails in a fresh workspace.
sensitivity.mat[i,7] <- nnl.outcome.es
sensitivity.mat[i,8] <- ifelse(nnl.outcome.biology>0, 1, 0)
sensitivity.mat[i,9] <- ifelse(nnl.outcome.es>0, 1, 0)
sensitivity.mat[i,10] <- cumulative.impacts
sensitivity.mat[i,11] <- cumulative.avoidance
sensitivity.mat[i,12] <- cumulative.offsets
} #end of loop, sensitivity analysis
#-----------------------------------------------------------------------------#
#-----------------------------------------------------------------------------#
#PLOTS
# Time series of the LAST simulation run only (obs.mat is overwritten each run).
plot(obs.mat[,1],obs.mat[,2],type="l", xlab="time", ylab="biology", xlim=c(0,100), ylim=c(0,100))
plot(obs.mat[,1],obs.mat[,4],type="l", xlab="time", ylab="impacts")
plot(obs.mat[,1],obs.mat[,5],type="l", xlab="time", ylab="compliance")
plot(obs.mat[,1],obs.mat[,6],type="l", xlab="time", ylab="offsets")
plot(obs.mat[,1],obs.mat[,7],type="l", xlab="time", ylab="avoidance")
#plot out relationships between key parameters and dependent variables, where:
#1=r, 2=lambda, 3=es.threshold, 4=LC, 5=CE
#6=NNLb, 7=NNLes
#10=impacts, 11=avoidance, 12=offsets
plot(sensitivity.mat[,5],sensitivity.mat[,12])
# NOTE(review): the plot has x = column 5 and y = column 12, but the fitted
# line below is lm(col5 ~ col12), i.e. x regressed on y -- the formula looks
# reversed relative to the plot; confirm and likely swap the operands.
abline(lm(sensitivity.mat[,5]~sensitivity.mat[,12]), col="red")
# Results of the strategy-parameter sweep (last Monte Carlo replicate).
plot(results.mat[,1], results.mat[,2], type="l", xlab="CE", ylab="Absolute Biodiversity")
plot(results.mat[,1], results.mat[,3], type="l", xlab="CE", ylab="Net Biodiversity")
plot(results.mat[,1], results.mat[,4], type="l", xlab="CE", ylab="Magnitude Development and Offsets")
#3 dimensional plots
#biology vs development vs avoidance threshold
scatterplot3d(sensitivity.mat[,1],sensitivity.mat[,2],sensitivity.mat[,5], xlab="logistic growth rate", ylab="exponential decay rate", zlab="avoidance threshold", angle = 45, grid=TRUE, pch=1)
scatterplot3d(sensitivity.mat[,1],sensitivity.mat[,2],sensitivity.mat[,6], xlab="logistic growth rate", ylab="exponential decay rate", zlab="NNLb", angle = 45, grid=TRUE, pch=1)
#surf3d(sensitivity.mat[,1],sensitivity.mat[,2],sensitivity.mat[,5])
#X <- sensitivity.mat[,1]
#Y <- sensitivity.mat[,2]
#Z <- sensitivity.mat[,5]
#surf3D(X,Y,Z)
#-----------------------------------------------------------------------------#
#Write out to CSV file
write.csv(results.mat, "offsets_CE_lpoint7.csv")
#-----------------------------------------------------------------------------#
#SENSITIVITY ANALYSES
sensitivity.DF <- as.data.frame(sensitivity.mat)
# Continuous-outcome sensitivity analyses.
# NOTE(review): NNLb/NNLes are differences that can be negative and
# non-integer; a Poisson GLM assumes a non-negative count response -- confirm
# this model family is appropriate (a Gaussian lm may be intended).
sens.b.results <- glm(NNLb ~ r + lambda + es.threshold + LC + CE, family=poisson(link="log"), data=sensitivity.DF)
summary(sens.b.results)
sens.es.results <- glm(NNLes ~ r + lambda + es.threshold + LC + CE, family=poisson(link="log"), data=sensitivity.DF)
summary(sens.es.results)
# Binomial sensitivity analyses on the 0/1 NNL indicators.
sens.b.results <- glm(NNLbBIN ~ r + lambda + es.threshold + LC + CE, family=binomial(link="logit"), data=sensitivity.DF)
summary(sens.b.results)
sens.es.results <- glm(NNLesBIN ~ r + lambda + es.threshold + LC + CE, family=binomial(link="logit"), data=sensitivity.DF)
summary(sens.es.results)
#-----------------------------------------------------------------------------#
#-----------------------------------------------------------------------------#
#END
|
bf2f6e3d2940afae9c128fa3073b8826b3689fd0
|
6296202cdd3cefaa5f4ebebfcbe8feebe80a0308
|
/run_analysis.r
|
58f66813b8f8eff2942d9cca25a914d1d939644a
|
[] |
no_license
|
arousos/samsungData
|
62ede3ac7fb35ea32d7ae989d021da88e24c2dab
|
90c6f5ec8be8023c7c69f2033c743c236923b478
|
refs/heads/master
| 2020-04-06T04:14:55.209289
| 2015-04-26T17:19:50
| 2015-04-26T17:19:50
| 34,621,385
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,673
|
r
|
run_analysis.r
|
# Download the UCI HAR ("Samsung") dataset and build a tidy summary of the
# mean/std measurements per subject and activity (course project criteria 1-5).
zip_url <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
download.file(zip_url, destfile = "UCI HAR Dataset.zip", mode="wb")

# Helper: extract one file from the downloaded archive and read it as a table.
read_zipped <- function(path) {
  read.table(unzip("UCI HAR Dataset.zip", path))
}

x_test          <- read_zipped("UCI HAR Dataset/test/X_test.txt")
y_test          <- read_zipped("UCI HAR Dataset/test/y_test.txt")
subject_test    <- read_zipped("UCI HAR Dataset/test/subject_test.txt")
x_train         <- read_zipped("UCI HAR Dataset/train/X_train.txt")
y_train         <- read_zipped("UCI HAR Dataset/train/y_train.txt")
subject_train   <- read_zipped("UCI HAR Dataset/train/subject_train.txt")
activity_labels <- read_zipped("UCI HAR Dataset/activity_labels.txt")
feature_labels  <- read_zipped("UCI HAR Dataset/features.txt")

# Apply the original feature names to the measurement columns.
colnames(x_test) <- feature_labels$V2
colnames(x_train) <- feature_labels$V2

# Attach descriptive activity names to the numeric activity codes.
library(plyr)
activities_test <- join(y_test, activity_labels)
activities_train <- join(y_train, activity_labels)

# Combine subject, activity name and measurements into one frame per split.
test_df <- cbind(subject_test, activities_test$V2, x_test)
colnames(test_df)[1] <- "subject"
colnames(test_df)[2] <- "activity"
train_df <- cbind(subject_train, activities_train$V2, x_train)
colnames(train_df)[1] <- "subject"
colnames(train_df)[2] <- "activity"

# Criterion 1: merge test and train into a single data set.
full_df <- rbind(test_df, train_df)

# Criterion 2: keep only the mean and standard deviation measurements.
keep_idx <- grep("mean|std", names(full_df))
subset_df <- full_df[, keep_idx]

# Criterion 3: re-attach the subject and activity columns.
subset_df <- cbind(full_df$subject, full_df$activity, subset_df)
colnames(subset_df)[1] <- "subject"
colnames(subset_df)[2] <- "activity"

# Criterion 4: descriptive variable names. The replacements are applied in
# the same order as the original script (order matters for overlapping
# patterns such as "Mag" inside already-expanded names).
renames <- c(
  "mean\\(\\)"        = "Mean",
  "std\\(\\)"         = "StandardDeviation",
  "mad\\(\\)"         = "MedianAbsoluteDeviation",
  "max\\(\\)"         = "MaximumValue",
  "min\\(\\)"         = "MinimumValue",
  "sma\\(\\)"         = "SignalMagnitudeArea",
  "energy\\(\\)"      = "EnergyMeasure",
  "iqr\\(\\)"         = "InterquartileRange",
  "entropy\\(\\)"     = "SignalEntropy",
  "arCoeff\\(\\)"     = "AutorregresionCoefficients",
  "correlation\\(\\)" = "Correlation",
  "maxInds"           = "MaximumFrequencyMagnitudeIndex",
  "meanFreq\\(\\)"    = "MeanFrequency",
  "skewness\\(\\)"    = "DomainSignalSkewness",
  "kurtosis\\(\\)"    = "DomainSignalKurtosis",
  "bandsEnergy\\(\\)" = "FrequencyIntervalEnergy",
  "angle"             = "Angle",
  "Gyro"              = "Gyroscope",
  "Acc"               = "Accelerometer",
  "Mag"               = "Magnitude",
  "-"                 = ""
)
for (pat in names(renames)) {
  names(subset_df) <- gsub(pat, renames[[pat]], names(subset_df))
}

# Criterion 5: melt then cast to obtain wide-form tidy data (per-subject,
# per-activity means of every metric).
library(reshape2)
long_df <- melt(subset_df, id=names(subset_df[,1:2]), measure.vars=names(subset_df[,3:ncol(subset_df)]))
colnames(long_df)[3] <- "metric"
colnames(long_df)[4] <- "metricValue"
wide_df <- dcast(long_df, subject+activity~metric, mean)

# Re-melt for the more compact narrow-form tidy data that gets exported.
tidy_df <- melt(wide_df, id=names(wide_df[,1:2]), measure.vars=names(wide_df[,3:ncol(wide_df)]))
colnames(tidy_df)[3] <- "metric"
colnames(tidy_df)[4] <- "metricValue"

# Order by subject then activity for readability.
library(dplyr)
tidy_df <- arrange(tidy_df, subject, activity)

# Export the tidy data for use.
write.table(tidy_df, file="samsungDataTidy.txt", sep = " ", row.names=FALSE)
|
e3df06c203326e8eab4a9afd272c38a8afbb7cb7
|
433947a5e18c57628d86ee9182fdba44f5ee6748
|
/assignment5/facebook/facebook.R
|
dce4e3060027d1b7b72a72e7f4d38695fb46edb9
|
[
"MIT"
] |
permissive
|
maturban/cs595-f13
|
e4210eed78a3fbdd30a8d56ae66f417620a82461
|
ba4f396bfb2412712c9d90d5015f1717e2725477
|
refs/heads/master
| 2021-01-16T20:35:55.697415
| 2013-12-06T05:17:00
| 2013-12-06T05:17:00
| 12,801,710
| 0
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,801
|
r
|
facebook.R
|
# Scatter/line plot of Facebook friend counts: each point is a friend, ranked
# by how many friends they have (log-scaled y axis), with my own count marked.

# Empty frame sized to the data range (y is log-scaled).
frame_x <- c(1, 160)
frame_y <- c(6, 4000)
plot(frame_x, frame_y, type="n", xlab="Friends", ylab="Number of Friends" ,log="y")

# Friends ranked 1..152 by ascending friend count.
friend_rank <- as.numeric(1:152)
friend_count <- c(
  6, 12, 14, 17, 20, 21, 23, 24, 26, 27, 30, 31, 31, 32, 32, 34, 34, 37, 37, 40,
  43, 44, 45, 45, 48, 51, 53, 57, 58, 58, 60, 64, 66, 66, 67, 68, 68, 69, 75, 78,
  78, 79, 80, 84, 85, 85, 86, 89, 90, 92, 93, 93, 93, 93, 94, 95, 99, 99, 103, 103,
  108, 108, 109, 109, 113, 114, 115, 119, 120, 122, 123, 124, 128, 130, 132, 132,
  134, 135, 135, 137, 139, 139, 140, 141, 142, 142, 143, 147, 150, 158, 159, 160,
  160, 161, 162, 164, 165, 168, 169, 171, 171, 173, 174, 176, 177, 177, 184, 184,
  186, 193, 196, 200, 202, 203, 205, 217, 219, 224, 225, 226, 227, 239, 239, 249,
  251, 253, 269, 270, 276, 283, 295, 310, 319, 327, 328, 332, 332, 334, 364, 393,
  496, 505, 541, 547, 568, 575, 619, 695, 831, 861, 1037, 3929)

# Connect the ranked counts and mark each friend with a blue diamond.
lines(friend_rank, friend_count, col='gray')
points(friend_rank, friend_count, col='blue',pch=5,lwd=1)

# My own data point, highlighted in red.
my_rank <- c(114)
my_count <- c(203)
points(my_rank, my_count, type="b", lwd=5, lty=5, col='red', pch=3)
legend(1,1000, c("Number of My Friends"), lwd=6, lty=8, col='red', pch=3)

# Summary statistics of the friends' counts.
mean(friend_count)
sd(friend_count)
median(friend_count)
|
1850801e04cafc581ac9594cad401f52820a897e
|
d58294d8a3635f8cfacd8b2755fda5fe3a189bc5
|
/Letter Recognition Using Support Vector Machines/DDA1710199_main.R
|
50619cd570148beae2755179a40d1615a046c5ca
|
[] |
no_license
|
sauravramuk/DA.Projects
|
eadbd7dc8f45db5075756dc976fd46d3d33d8f3a
|
8020aa0395b8fe854add087cb428989b7eb74bd8
|
refs/heads/master
| 2021-06-30T00:50:56.912278
| 2017-09-20T03:27:15
| 2017-09-20T03:27:15
| 101,907,958
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,838
|
r
|
DDA1710199_main.R
|
# Load modelling libraries and the MNIST digit data (CSV files expected in the
# working directory).
library(caret)
library(caTools)
library("kernlab")
library(dplyr)
# First column of each CSV is the digit label; remaining 784 columns are
# presumably the 28x28 pixel intensities -- confirm against the data source.
digits.train <- read.csv("mnist_train.csv")
digits.test <- read.csv("mnist_test.csv")
#There are no NA or non numeric values for the records
# Give the label column a descriptive name in both splits.
colnames(digits.train)[1] <- "Digit"
colnames(digits.test)[1] <- "Digit"
#Extract crude features from raw 28x28 pixel data (one row per image).
#
#For each image the function records:
#  * 28 column-wise counts of pixels darker than the threshold (H1..H28),
#  * 28 row-wise counts of pixels darker than the threshold (V28..V1),
#  * the digit's bounding-box Width, Height and Height/Width ratio.
#
#df     : data frame whose first column is the digit label ("Digit") and
#         whose remaining 784 columns are the 28x28 pixel values in
#         row-major order.
#param1 : darkness threshold; only pixels with value > param1 are counted
#         (higher values mean only darker pixels count).
#Returns a data frame with columns Digit, H1..H28, V28..V1, Width,
#Height and H.W.Ratio, one row per input image.
generateFeatures <- function(df, param1){
  # Rebuild the raw data as a plain matrix: one row per image, 785 columns.
  temp.df.mat <- sapply(X = df, FUN = function(x) return(matrix(x, byrow = T, nrow = nrow(df), ncol = 1)))
  # Compute the 59-element feature vector for every image (row).
  temp.featureSet <- apply(X = temp.df.mat,
                           MARGIN = 1,
                           FUN = function(x){
                             # x[1] is the label; x[2:785] are the pixels, filled row-major.
                             temp.mat <- matrix(as.vector(x[2:785]), nrow = 28, ncol = 28, byrow = T)
                             # Slots 1-28  : per-column dark-pixel counts
                             # Slots 29-56 : per-row dark-pixel counts
                             # Slots 57-59 : Width, Height, Height/Width ratio
                             x.y.axis.counter <- integer(59)
                             for(i in 1:28){
                               for(j in 1:28){
                                 if(temp.mat[i, j] > param1){
                                   x.y.axis.counter[j] <- x.y.axis.counter[j] + 1            # column j
                                   x.y.axis.counter[28 + i] <- x.y.axis.counter[28 + i] + 1  # row i
                                 }
                               }
                             }
                             # Locate the digit's bounding box from the count vectors.
                             a <- length(x.y.axis.counter)  # 59
                             b <- a - 56                    # 3 trailing summary slots
                             # Flags flip to FALSE once the first non-empty column/row is
                             # found from each of the four scan directions.
                             x.beg <- T
                             x.end <- T
                             y.beg <- T
                             y.end <- T
                             x.beg.v <- integer(0)
                             x.end.v <- integer(0)
                             y.beg.v <- integer(0)
                             y.end.v <- integer(0)
                             for(i in 1:14){
                               if(x.beg && x.y.axis.counter[i] > 0){       # leftmost dark column (slots 1..14)
                                 x.beg.v <- i
                                 x.beg <- F
                               }
                               if(x.end && x.y.axis.counter[29 - i] > 0){  # rightmost dark column (slots 28..15)
                                 x.end.v <- 29 - i
                                 x.end <- F
                               }
                               if(y.beg && x.y.axis.counter[28 + i] > 0){  # topmost dark row (slots 29..42)
                                 y.beg.v <- 28 + i
                                 y.beg <- F
                               }
                               # BUGFIX: this scan used `a - b - i` (slots 55..42), which
                               # skipped the bottom row (slot 56) and re-checked slot 42.
                               # `a - b - i + 1` scans slots 56..43 so the two row scans
                               # exactly partition slots 29..56, mirroring the column scans.
                               if(y.end && x.y.axis.counter[a - b - i + 1] > 0){  # bottommost dark row
                                 y.end.v <- a - b - i + 1
                                 y.end <- F
                               }
                               if(!x.beg && !x.end && !y.beg && !y.end)
                                 break()
                               # Crude fallback kept from the original: if a direction never
                               # registered a dark pixel (possible for thin strokes at high
                               # thresholds), pin its index to 14 so the arithmetic below
                               # stays defined.
                               if(i == 14){
                                 if(x.beg)
                                   x.beg.v <- i
                                 if(x.end)
                                   x.end.v <- i
                                 if(y.beg)
                                   y.beg.v <- i
                                 if(y.end)
                                   y.end.v <- i
                               }
                             }
                             # Bounding-box summaries (in pixels / ratio).
                             x.y.axis.counter[a - 1] <- (y.end.v - y.beg.v)  # Height
                             x.y.axis.counter[a - 2] <- (x.end.v - x.beg.v)  # Width
                             x.y.axis.counter[a] <- (y.end.v - y.beg.v)/(x.end.v - x.beg.v)  # Height/Width
                             return(x.y.axis.counter)
                           }
  )
  # apply() returns one feature vector per column; transpose to one row per image.
  temp.featureSet <- t(temp.featureSet)
  final.feature.df <- as.data.frame(temp.featureSet)
  # Re-attach the digit label and assign standard column names.
  final.feature.df <- cbind(Digit = df$Digit, final.feature.df)
  colnames(final.feature.df) <- c("Digit",
    "H1","H2","H3","H4","H5","H6","H7","H8","H9","H10","H11","H12","H13","H14",
    "H15","H16","H17","H18","H19","H20","H21","H22","H23","H24","H25","H26","H27","H28",
    "V28","V27","V26","V25","V24","V23","V22","V21","V20","V19","V18","V17","V16","V15",
    "V14","V13","V12","V11","V10","V9","V8","V7","V6","V5","V4","V3","V2","V1",
    "Width", "Height", "H.W.Ratio")
  return(final.feature.df)
}
#Using the above function we try to extract features from the data sets
train.small <- generateFeatures(digits.train,150)
train.small$Digit <- as.character(train.small$Digit)
test.small <- generateFeatures(digits.test,150)
test.small$Digit <- as.character(test.small$Digit)
#Principal Component analysis to reduce the dimensionality
#using the bas prcomp function
# Columns 1, 2 and 30 (Digit, H1 and V28) are dropped before PCA;
# presumably H1/V28 are border counts with (near-)zero variance that
# would break scale. = TRUE -- TODO confirm against the feature data.
train.small.pca <- prcomp(train.small[c(-1,-2,-30)], scale. = T)
names(train.small.pca)
#the rotation matrix generated
train.small.pca$rotation
#to see for the first 2 Principal components
biplot(train.small.pca, scale = 0)
#SD
train.small.pca$sdev
# Convert component variances to percentage of total variance.
var_trn <- train.small.pca$sdev^2
var_trn <- var_trn/sum(var_trn)
var_trn <- var_trn*100
#plot to see what percentage of variance does each principal components explain
plot(var_trn, xlab = "Principal Component",
ylab = "Percentage of Variance Explained",
type = "b")
#plot to see the cumulative percentage explained by th PCs
plot(cumsum(var_trn), xlab = "Principal Component",
ylab = "Cumulative Percentage of Variance Explained",
type = "b")
#The first 45 PC explains 98.5 % of the variance in the data set
sum(var_trn[1:45])
#We will go ahead and select 45 PCs
#Train data
# cbind() of the character Digit with the numeric scores coerces the whole
# matrix to character, hence the as.numeric(as.character(.)) round-trip below.
train.small <- as.data.frame(cbind(Digit = train.small$Digit, train.small.pca$x))
#train.small <- as.data.frame(train.small)
# Keep Digit plus the first 45 principal components.
train.small <- train.small[,1:46]
temp. <- as.data.frame(sapply(X = train.small[,-1], FUN = function(x){
x <- as.numeric(as.character(x))
return(round(x,6))
}
))
train.small <- cbind(Digit = train.small$Digit, temp.)
#Testdata
# Project the test set into the PCA space fitted on the training set.
test.small1 <- as.data.frame(predict(train.small.pca, newdata = test.small[,c(-1,-2,-30)]))
test.small1 <- test.small1[,1:45]
temp. <- as.data.frame(sapply(X = test.small1, FUN = function(x){
x <- as.numeric(as.character(x))
return(round(x,6))
}
))
test.small1 <- cbind(Digit = test.small$Digit, temp.)
#Need to sample a smaller dataset due to computational constraints
# Fixed seeds make the 10k/5k train/test subsamples reproducible.
set.seed(10)
indices.train <- sample(1:nrow(train.small), 10000)
digit.small.train <- train.small[indices.train,]
set.seed(100)
indices.test <- sample(1:nrow(test.small1), 5000)
digit.small.test <- test.small1[indices.test,]
#Running a radial svm on a smaller data set
#Baseline: radial-kernel SVM with default hyperparameters, fit on the
#10k-row training sample and evaluated on the 5k-row test sample.
model1 <- ksvm(Digit~.,
data = digit.small.train,
scaled = F,
kernel = "rbfdot"
)
#BUGFIX: this originally called predict(object = svm.fit, ...), but
#svm.fit is only created by the tuning step further below, so running
#the script top-to-bottom errored here. Predict with the baseline model
#fitted just above instead.
pr <- predict(object = model1, digit.small.test)
conf.mat <- confusionMatrix(pr, digit.small.test$Digit)
#We need to find an optimised C and sigma value
#performing hypertuning and crossvalidation
#creating grid of sigma = 0.01 to 0.1 (10 values)
# C = 1 to 5 (5 values)
#folds = 6
#5000 records
#Need to sample a smaller dataset due to computational constraints
# Re-sample smaller train/test subsets (5k each) for the grid search.
set.seed(10)
indices.train <- sample(1:nrow(train.small), 5000)
digit.small.train <- train.small[indices.train,]
set.seed(100)
indices.test <- sample(1:nrow(test.small1), 5000)
digit.small.test <- test.small1[indices.test,]
# NOTE(review): `metrc` is assigned but never used below; the metric is
# passed literally as metric = "Accuracy" in the train() call.
metrc <- "Accuracy"
# 6-fold cross-validation over a 10x5 (sigma x C) grid.
trn.ctrl <- trainControl(method = "cv",number = 6)
tune.grd <- expand.grid(.sigma = seq(0.01, 0.1, 0.01), .C = seq(1,5,1))
svm.fit <- train(Digit~.,
data = digit.small.train,
method = "svmRadial",
tuneGrid = tune.grd,
trControl = trn.ctrl,
metric = "Accuracy"
)
#getting an overall accuracy of 86%, can be done better
plot(svm.fit)
print(svm.fit)
##RESULT(Best c and sigma):
# sigma C Accuracy Kappa
# 0.01 5 0.8594031 0.8437216
# Evaluate the tuned model on the held-out sample.
pr <- predict(object = svm.fit, digit.small.test)
conf.mat <- confusionMatrix(pr, digit.small.test$Digit)
#30000 Records
#Need to sample a smaller dataset due to computational constraints
# Second tuning round: larger sample (30k train / ~10k test) with a
# narrowed grid around the best values found above.
set.seed(10)
indices.train <- sample(1:nrow(train.small), 30000)
digit.small.train <- train.small[indices.train,]
set.seed(100)
indices.test <- sample(1:nrow(test.small1), 9999)
digit.small.test <- test.small1[indices.test,]
#30000 records, c = 6,7,8,9, and c = 0.01, 0.02
metrc <- "Accuracy"
trn.ctrl <- trainControl(method = "cv",number = 6)
tune.grd <- expand.grid(.sigma = seq(0.01, 0.02, 0.01), .C = seq(7,9,1))
svm.fit <- train(Digit~.,
data = digit.small.train,
method = "svmRadial",
tuneGrid = tune.grd,
trControl = trn.ctrl,
metric = "Accuracy"
)
plot(svm.fit)
print(svm.fit)
pr <- predict(object = svm.fit, digit.small.test)
conf.mat <- confusionMatrix(pr, digit.small.test$Digit)
#trying with sigma = 0.01 and C = 9 with the whole datasset
# Final fit: fixed hyperparameters (sigma = 0.01, C = 9) on (almost) the
# full training set, evaluated on ~10k test rows.
set.seed(10)
indices.train <- sample(1:nrow(train.small), 59999)
digit.small.train <- train.small[indices.train,]
set.seed(100)
indices.test <- sample(1:nrow(test.small1), 9999)
digit.small.test <- test.small1[indices.test,]
#30000 records, c = 6,7,8,9, and c = 0.01, 0.02
metrc <- "Accuracy"
trn.ctrl <- trainControl(method = "cv",number = 6)
tune.grd <- expand.grid(.sigma = c(0.01), .C = c(9))
svm.fit <- train(Digit~.,
data = digit.small.train,
method = "svmRadial",
tuneGrid = tune.grd,
trControl = trn.ctrl,
metric = "Accuracy"
)
plot(svm.fit)
print(svm.fit)
pr <- predict(object = svm.fit, digit.small.test)
conf.mat <- confusionMatrix(pr, digit.small.test$Digit)
#using crude feature extraction, Principal Component Analysis and
#Support vector model an Accuracy of 91.15 % was achieved.
#sigma = 0.01 and C = 9
# Accuracy : 0.9115
# 95% CI : (0.9058, 0.917)
# No Information Rate : 0.1135
# P-Value [Acc > NIR] : < 2.2e-16
#
# Kappa : 0.9016
|
1b2fda07a14ecd2f53363fd1991061e40bf2803b
|
4e180e6b0753c9bf344891866eb3b8e26b1ce56d
|
/code_old_reference/regen_analysis_zscore_lme_3.R
|
d62fbe9961a4cda00b1ccb9a52a5b41f97e4e238
|
[] |
no_license
|
youngdjn/postfire-regen
|
631b1a85df9d449e918475360166d599766d7992
|
7e561bfdf4003a04b3ae3654ca0f2962cf91a1fe
|
refs/heads/master
| 2023-03-18T01:37:18.722791
| 2023-03-08T05:13:48
| 2023-03-08T05:13:48
| 75,105,347
| 0
| 0
| null | 2017-07-25T20:23:47
| 2016-11-29T17:29:47
|
R
|
UTF-8
|
R
| false
| false
| 25,772
|
r
|
regen_analysis_zscore_lme_3.R
|
# --- Setup: load utilities/libraries and filter the post-fire regen data ---
# NOTE(review): hard-coded setwd() makes this script machine-specific.
setwd("C:/Users/DYoung/Dropbox/Research projects/Post-fire regen shared")
# Provides project helpers used below: deg2rad, center.df, mean.df, sd.df, ...
source("R/regen_utils_2_zscore.R")
library(arm)
library(plyr)
library(MuMIn)
library(snow)
library(lme4)
library(ggplot2)
library(gridExtra)
d <- read.csv("Data/regen_clim_full.csv",header=TRUE)
d$FORB <- d$FORBE
d$firesev <- d$FIRE_SEV # use field-assessed fire sev
d <- d[d$firesev %in% c(4,5),] # only medium-high and high severity
d$firesev <- as.factor(d$firesev)
d <- d[!(d$Fire %in% c("ELK","CHINABACK","ZACAM","SHOWERS")),] #,"DEEP","STRAYLOR","HARDING","MOONLIGHT","ANTELOPE")),] # rich has some uncertainty in fire and sampling year could be added later; just removed because of uncertainty in fire and sampling year
# kevin says deep, straylor, harding, showers were anomalous (showers was already removed, not sure why)
# also removing moonlight, antelope because so dry
d$Fire <- as.factor(d$Fire)
# ## remove plots with over 2000 mm precip (most BTU plots)
#d <- d[d$ppt.normal < 2000,]
# Combined seed-tree distance: fall back to dist.to.low when the direct
# measurement is missing or capped at >= 150 m.
d$seedtr.comb <- ifelse((is.na(d$seedtr.dist) | (d$seedtr.dist >= 150)),d$dist.to.low,d$seedtr.dist)
d$seedtr.any.comb <- ifelse(is.na(d$seed.tree.any) | (d$seed.tree.any >= 150),d$dist.to.low,d$seed.tree.any)
d$regen.presab <- ifelse(d$regen.count > 0,TRUE,FALSE)
## calculate northness and eastness
d$northness <- cos(deg2rad(d$aspect))
d$eastness <- sin(deg2rad(d$aspect))
# Quadratic climate terms for the models below.
d$ppt.normal.sq <- d$ppt.normal^2
d$tmean.normal.sq <- d$tmean.normal^2
#(optional) thin to Sierra fires
sierra.fires <- c("STRAYLOR","CUB","RICH","DEEP","MOONLIGHT","ANTELOPE","BTU LIGHTENING","HARDING","BASSETTS","PENDOLA","AMERICAN RIVER","RALSTON","FREDS","SHOWERS","POWER")
d$in.sierra <- ifelse(d$Fire %in% sierra.fires,TRUE,FALSE)
d <- d[d$in.sierra,]
##!!!!!!!!! need to thin to 5-year post plots
d <- d[d$survey.years.post == 5,]
# d.a <- d[d$species == "PSME",]
# write.csv(d.a,"welch_plots.csv")
#early vs. late average precip: plots that were dry during the first three years were wet during the last two years
ggplot(d,aes(x=diff.norm.ppt.z,y=diff.norm.ppt.z.late,color=Fire)) +
geom_point(size=5) +
theme_bw(20) +
labs(x="Average precip anomaly first 3 years",y="Average precip anomaly last 2 years")
#early vs. late minimum precip: not very much correlation here
ggplot(d,aes(x=diff.norm.ppt.min.z,y=diff.norm.ppt.min.z.late,color=Fire)) +
geom_point(size=5) +
theme_bw(20) +
labs(x="Minimum precipitation anomaly first 3 years",y="Minimum precip anomaly last 2 years")
#early minimum precip vs. normal climate: #!may want to leave out Rich
ggplot(d,aes(x=diff.norm.ppt.min.z,y=ppt.normal,color=Fire)) +
geom_point(size=5) +
theme_bw(20) +
labs(x="Minimum precipitation anomaly first 3 years (SD from mean)",y="Normal precipitation (mm)")
#early normal temp vs. normal precip: #!may want to leave out Rich
ggplot(d,aes(x=tmean.normal,y=ppt.normal,color=Fire,shape=Fire)) +
geom_point(size=5) +
theme_bw(20) +
scale_shape_manual(values=LETTERS[1:13]) +
labs(x="Normal tmean",y="Normal precipitation (mm)")
#elevation vs. normal precip: #!may want to leave out Rich
ggplot(d,aes(x=elev.m,y=ppt.normal,color=Fire,shape=Fire)) +
geom_point(size=5) +
theme_bw(20) +
scale_shape_manual(values=LETTERS[1:13]) +
labs(x="Elev (m)",y="Normal precipitation (mm)")
#early average precip vs. normal
ggplot(d,aes(x=diff.norm.ppt.z,y=ppt.normal,color=Fire)) +
geom_point(size=5) +
theme_bw(20) +
labs(x="Average precip anomaly first 3 years",y="Normal precip")
summary(d$ppt.normal)
summary(d$diff.norm.ppt.min.z)
#early minimum precip vs. early average precip
ggplot(d,aes(x=diff.norm.ppt.min.z,y=diff.norm.ppt.z,color=Fire)) +
geom_point(size=5) +
theme_bw(20) +
labs(x="Minimum precip anomaly first 3 years",y="Average precip anomaly first 3 years")
#early minimum precip vs. late average precip: no correlatio here but #! may want to leave out Rich
ggplot(d,aes(x=diff.norm.ppt.min.z,y=diff.norm.ppt.z.late,color=Fire)) +
geom_point(size=5) +
theme_bw(20) +
labs(x="Minimum precipitation anomaly first 3 years (SD from mean)",y="Average precipitation anomaly in the last 2 years (SD from mean)")
### Calc percentage of plots with hardwoods by fire
# Per-fire proportion of plots with hardwood (and conifer) regeneration;
# mean of the logical regen.presab is the fraction of TRUE plots.
d.hw <- d[d$species=="HARDWOOD",]
prop.hw <- aggregate(d.hw$regen.presab,by=list(d.hw$Fire),FUN=mean)
names(prop.hw) <- c("Fire","Prop.hw")
d.conif <- d[d$species=="CONIFER",]
prop.conif <- aggregate(d.conif$regen.presab,by=list(d.conif$Fire),FUN=mean)
names(prop.conif) <- c("Fire","Prop.conif")
# NOTE(review): cbind assumes both aggregates return fires in the same
# order (aggregate sorts by group, so this holds when both subsets contain
# the same set of fires) -- a merge() on Fire would be safer.
prop <- cbind(prop.hw,prop.conif$Prop.conif)
names(prop) <- c("Fire","prop.hw","prop.conif")
#### fire-level summaries ####
# Build a per-fire summary table: mean with (25th,75th) quantile bounds
# for the focal variables, conifer and hardwood regen columns combined,
# formatted as "mean (low,high)" strings and written to fire_summary.csv.
library(reshape2)
focal.vars <- c("GRASS","SHRUB","diff.norm.ppt.min.z","diff.norm.ppt.z","northness","seedtr.any.comb","ppt.normal","regen.count","regen.presab")
fire.mean <- aggregate(d[,focal.vars],by=list(d$Fire,d$species),FUN=mean,na.rm=TRUE)
fire.high <- aggregate(d[,focal.vars],by=list(d$Fire,d$species),quantile,probs=.75,na.rm=TRUE)
fire.low <- aggregate(d[,focal.vars],by=list(d$Fire,d$species),quantile,probs=.25,na.rm=TRUE)
names(fire.mean)[1:2] <- c("Fire","species")
names(fire.high)[1:2] <- c("Fire","species")
names(fire.low)[1:2] <- c("Fire","species")
# Conifer rows carry the site covariates; hardwood rows contribute only
# their regen columns (suffixed ".hw").
fire.mean.conif <- fire.mean[fire.mean$species=="CONIFER",]
fire.high.conif <- fire.high[fire.high$species=="CONIFER",]
fire.low.conif <- fire.low[fire.low$species=="CONIFER",]
fire.mean.hw <- fire.mean[fire.mean$species=="HARDWOOD",c("regen.count","regen.presab")]
fire.high.hw <- fire.high[fire.high$species=="HARDWOOD",c("regen.count","regen.presab")]
fire.low.hw <- fire.low[fire.low$species=="HARDWOOD",c("regen.count","regen.presab")]
names(fire.mean.hw) <- paste(names(fire.mean.hw),"hw",sep=".")
names(fire.high.hw) <- paste(names(fire.high.hw),"hw",sep=".")
names(fire.low.hw) <- paste(names(fire.low.hw),"hw",sep=".")
# Plot count per fire (taken from an arbitrary single species, ABCO).
fire.nplots <- aggregate(d[,focal.vars[1]],by=list(d$Fire,d$species),FUN=length)
fire.nplots <- fire.nplots[fire.nplots$Group.2=="ABCO","x"]
fire.mean.full <- cbind(fire.mean.conif,fire.mean.hw)
fire.low.full <- cbind(fire.low.conif,fire.low.hw)
fire.high.full <- cbind(fire.high.conif,fire.high.hw)
firenames <- fire.mean.full$Fire
mean.q <- fire.mean.full[,3:ncol(fire.mean.full)]
low.q <- fire.low.full[,3:ncol(fire.low.full)]
high.q <- fire.high.full[,3:ncol(fire.high.full)]
# mean.q <- signif(mean.q,4)
# low.q <- signif(low.q,4)
# high.q <- signif(high.q,4)
mean.q <- as.matrix(round(mean.q,2))
low.q <- as.matrix(round(low.q,2))
high.q <- as.matrix(round(high.q,2))
mean.q <- (signif(mean.q,4))
low.q <- (signif(low.q,4))
high.q <- (signif(high.q,4))
# Assemble "mean (low,high)" strings, one cell per fire x variable.
bounds <- paste("(",low.q,",",high.q,")",sep="")
full <- paste(mean.q,bounds)
full <- matrix(full,nrow=nrow(mean.q))
rownames(full) <- firenames
colnames(full) <- colnames(mean.q)
full <- as.data.frame(full)
full$nplots <- fire.nplots
write.csv(full,"fire_summary.csv")
# ## optional: remove BTU, AmRiv, Cub, Rich as potential confounders
# remove.fires <- c("BTU LIGHTENING","AMERICAN RIVER","CUB","RICH")
# d$keep.fires <- ifelse(!(d$Fire %in% remove.fires),TRUE,FALSE)
# d <- d[d$keep.fires,]
#### Prepare to fit models and make counterfactual plots ####
#rescale parameters
d$regen.presab <- ifelse(d$regen.count > 0,TRUE,FALSE)
d$ppt.post.sq <- d$ppt.post^2
d$ppt.post.min.sq <- d$ppt.post.min^2
## center and standardize
# Columns listed here are excluded from centering/standardization.
# NOTE(review): "regen_presab"/"regen_count" use underscores while the data
# columns use dots (regen.presab/regen.count) -- verify those columns are
# actually being left out by center.df() as intended.
leave.cols <- c("regen_presab","regen_count","Regen_Plot","Fire","Year","survey.years.post","SHRUB","FORB","GRASS","diff.norm.ppt.min.z","diff.norm.ppt.z","diff.norm.ppt.z.late")
# center.df / mean.df / sd.df come from the sourced regen_utils script;
# the *_mean/*_sd frames record the standardization parameters.
d.c <- center.df(d,leave.cols=leave.cols)
stan_d1_mean <- mean.df(d,leave.cols=leave.cols)
stan_d1_sd <- sd.df(d,leave.cols=leave.cols)
d.c.focal <- d.c #alternate
# # aggregate by fire and look for correlation among predictors (potential for confounding)
# d.agg <- aggregate(d.c.focal,by=list(d.c.focal$Fire),FUN=mean)
# d.year <- data.frame(Fire=d$Fire,Year=d$Year)
# d.year <- unique(d.year)
# d.agg <- merge(d.agg,d.year,by.x="Group.1",by.y="Fire")
# d.agg$in.sierra <- ifelse(d.agg$Group.1 %in% sierra.fires,1,2)
# plot(diff.norm.ppt.z_c~diff.norm.tmean.z_c,col=in.sierra,d=d.agg)
# plot(diff.norm.ppt.min.z_c~Year,col=in.sierra,d=d.agg)
# d.agg$fire.year <- paste(d.agg$Group.1,d.agg$Year.x-5)
# library(ggplot2)
# library(directlabels)
# p <- ggplot(d.agg, aes(x=diff.norm.ppt.z_c,y=diff.norm.ppt.min.z_c,label=fire.year))+
# geom_point(size=5) +
# geom_text(check_overlap=TRUE) +
# scale_x_continuous(limits=c(-2.5,1.5))
# p + direct.label(p, 'last.points')
# m <- lm(diff.norm.ppt.z_c~diff.norm.ppt.min.z_c,d=d.agg)
#
#
#
# ### figure out precip counterfactual bins
# d.c.btu <- d.c[d.c$Fire == "BTU LIGHTENING",]
# b1.upper <- max(d.c.btu$ppt.normal_c)
#
# b1.b2.boundary <- max(d.c.btu$ppt.normal_c)
#
# d.c.rich <- d.c[d.c$Fire == "RICH",]
# gap.upper <- min(d.c.rich$ppt.normal_c)
#
# d.c.moon <- d.c[d.c$Fire == "MOONLIGHT",]
# gap.lower <- max(d.c.moon$ppt.normal_c)
#
# b2.b3.boundary <- mean(c(gap.upper,gap.lower))
#
# d.c.str <- d.c[d.c$Fire == "STRAYLOR",]
# b3.bottom <- min(d.c.str$ppt.normal_c)
#
# ## bounds are -1.59, -0.5, 1.252, 3.06
#
# ppt.level1 <- mean(c(b1.upper,b1.b2.boundary))
# ppt.level2 <- mean(c(b2.b3.boundary,b1.b2.boundary))
# ppt.level3 <- mean(c(b2.b3.boundary,b3.bottom))
#
## alternative defs
# Hand-chosen normal-precip (mm) bin boundaries and midpoints used for the
# low/mid counterfactual predictions below.
b1.low <- 900
b1.b2.boundary <- 1500
b2.high <- 2000
b1.mid <- 1200
b2.mid <- 1800
#Plot the two climatic (weather) predictors on the transformed scale
ggplot(d.c,aes(x=diff.norm.ppt.min.z,y=ppt.normal_c,color=Fire)) +
geom_point(size=5) +
theme_bw(20) +
labs(x="Minimum precip anomaly first 3 years",y="Average precip anomaly first 3 years")
#### Fit model and make counterfactual plots ####
p <- list() #for holding counterfactual plots for each vegetation type
coef.df <- data.frame() # for holding coefficient summaries
types <- c("CONIFER","HARDWOOD","SHRUB","GRASS")
# Main analysis loop: for each vegetation type fit a mixed model
# (presence/absence for trees, logit-% cover for shrub/grass), select the
# best model by dredge, simulate coefficient uncertainty, and build
# counterfactual prediction plots for low vs. mid normal precipitation.
for(type in types) {
# Trees get a presence/absence (binomial) response; shrub/grass get a
# continuous cover response, so keep one row per plot via species=="ALL".
if(type=="HARDWOOD" | type=="CONIFER") {
pres.ab <- TRUE
d.sp <- d[d$species==type,]
} else {
pres.ab <- FALSE
d.sp <- d[d$species=="ALL",] #species is irrelevant here; just need to reduce to one row per plot
}
#center and standardize
# NOTE(review): underscore vs. dot column-name mismatch as in the
# file-level leave.cols -- confirm against center.df().
leave.cols <- c("regen_presab","regen_count","Regen_Plot","Fire","Year","survey.years.post","SHRUB","FORB","GRASS","diff.norm.ppt.z.late","diff.norm.ppt.z","diff.norm.ppt.min.z")
d.c.sp <- center.df(d.sp,leave.cols=leave.cols)
# Keep only the model variables; drop rows with any missing value.
d.c.sp.focal <- with(d.c.sp,data.frame(regen.presab,northness_c,ppt.normal_c,ppt.normal.sq_c,ppt.post_c,diff.norm.ppt_c,ppt.post.min_c,ppt.post.sq_c,ppt.post.min.sq_c,tmean.normal_c,tmean.normal.sq_c,diff.norm.ppt.z,diff.norm.ppt.min.z,Fire,seedtr.any.comb_c,eastness_c,diff.norm.tmean.JJA.mean.z_c,diff.norm.tmean.DJF.mean.z_c,diff.norm.tmean.z_c,SHRUB,FORB,GRASS,Year,diff.norm.ppt.z.late))
d.c.sp.incomplete <- d.c.sp.focal[!complete.cases(d.c.sp.focal),]
d.c.sp.focal <- d.c.sp.focal[complete.cases(d.c.sp.focal),]
## get normal precip standardization params
ppt.norm.mean <- mean(d.sp$ppt.normal)
ppt.norm.sd <- sd(d.sp$ppt.normal)
#center and standardize precip boundaries
# Put the mm precip bin boundaries on the same centered/scaled axis as
# ppt.normal_c for this subset.
b1.low.c <- (b1.low-ppt.norm.mean)/ppt.norm.sd
b1.b2.boundary.c <- (b1.b2.boundary-ppt.norm.mean)/ppt.norm.sd
b2.high.c <- (b2.high-ppt.norm.mean)/ppt.norm.sd
b1.mid.c <- (b1.mid-ppt.norm.mean)/ppt.norm.sd
b2.mid.c <- (b2.mid-ppt.norm.mean)/ppt.norm.sd
### Prepare percent cover data ###
hist(d.c.sp.focal$SHRUB,nclass=30)
hist(d.c.sp.focal$FORB)
hist(d.c.sp.focal$GRASS)
# Convert % cover to proportions and clamp away from 0/1 so logit() is
# finite.
d.c.sp.focal$shrub.prop <- d.c.sp.focal$SHRUB/100
d.c.sp.focal$grass.prop <- d.c.sp.focal$GRASS/100 # could do same for FORB
d.c.sp.focal$shrub.prop <- ifelse(d.c.sp.focal$shrub.prop==0,0.005,d.c.sp.focal$shrub.prop)
d.c.sp.focal$shrub.prop <- ifelse(d.c.sp.focal$shrub.prop>=1,0.995,d.c.sp.focal$shrub.prop)
d.c.sp.focal$grass.prop <- ifelse(d.c.sp.focal$grass.prop==0,0.005,d.c.sp.focal$grass.prop)
# NOTE(review): grass uses ==1 where shrub uses >=1; a grass cover above
# 100% (if present in the data) would not be clamped -- confirm intended.
d.c.sp.focal$grass.prop <- ifelse(d.c.sp.focal$grass.prop==1,0.995,d.c.sp.focal$grass.prop)
d.c.sp.focal$SHRUB <- ifelse(d.c.sp.focal$SHRUB == 0,0.5,d.c.sp.focal$SHRUB)
d.c.sp.focal$GRASS <- ifelse(d.c.sp.focal$GRASS == 0,0.5,d.c.sp.focal$GRASS)
d.c.sp.focal$shrub.l <- logit(d.c.sp.focal$shrub.prop)
d.c.sp.focal$grass.l <- logit(d.c.sp.focal$grass.prop)
# calculate what proportion of plots have the focal species
#sum(d.c.sp.focal$regen.presab)/nrow(d.c.sp.focal)
# Full (global) model per type; dredge() below searches its submodels.
m.full <- NULL
if(pres.ab) {
if(type=="CONIFER") {
m.full <- glmer(regen.presab ~
seedtr.any.comb_c +
#seedtr.comb_c +
northness_c +
#ppt.normal_c * diff.norm.ppt.z +
ppt.normal_c * diff.norm.ppt.min.z +
ppt.normal.sq_c +
(1|Fire),
data=d.c.sp.focal, family=binomial, na.action="na.fail")
} else if(type=="HARDWOOD") {
m.full <- glmer(regen.presab ~
#seedtr.any.comb_c +
#seedtr.comb_c +
northness_c +
#ppt.normal_c * diff.norm.ppt.z +
ppt.normal_c * diff.norm.ppt.min.z +
ppt.normal.sq_c +
(1|Fire),
data=d.c.sp.focal, family=binomial, na.action="na.fail")
}
} else { #this is a % cover model
if(type=="SHRUB") {
m.full <-lmer(shrub.l ~
northness_c +
ppt.normal_c * diff.norm.ppt.z +
ppt.normal_c * diff.norm.ppt.min.z +
ppt.normal.sq_c +
#tmean.normal_c +
(1|Fire),
data=d.c.sp.focal, na.action="na.fail", REML=FALSE)
} else if(type=="GRASS") {
m.full <-lmer(grass.l ~
northness_c +
ppt.normal_c * diff.norm.ppt.z +
ppt.normal_c * diff.norm.ppt.min.z +
ppt.normal.sq_c +
#tmean.normal_c +
(1|Fire),
data=d.c.sp.focal, na.action="na.fail", REML=FALSE)
}
}
# Parallel all-subsets model selection; the focal climate terms are forced
# into every candidate, and the quadratic precip term requires the linear.
cl <- makeCluster(4, type = "SOCK")
clusterEvalQ(cl, library(lme4))
clusterExport(cl,"d.c.sp.focal")
a <- pdredge(m.full,cluster=cl, fixed=c("ppt.normal_c","diff.norm.ppt.min.z"),
subset=
#(!(`seedtr.any.comb_c`&`seedtr.comb_c`)) &
#(!(`diff.norm.tmean.z_c` & `diff.norm.tmean.JJA.mean.z_c`)) &
#(!(`diff.norm.tmean.z_c` & `diff.norm.tmean.DJF.mean.z_c`)) &
(!(`ppt.normal.sq_c` & !`ppt.normal_c`))
#!(`tmean.normal.sq_c` & !`tmean.normal_c`))
)
stopCluster(cl)
# Variable importance and the single best (lowest-AICc) model.
imp <- data.frame(var=names(importance(a)),imp=importance(a))
best.model.num <- as.character(row.names(a[1,]))
best.model <- get.models(a,best.model.num)[[1]]
#best.model <- m.full
print(summary(best.model))
#best.model.tree <- best.model
best.coef <- fixef(best.model)
best.coef <- data.frame(var=names(best.coef),coef=best.coef)
#imp.coef <- merge(imp,best.coef,by="var",all.x=TRUE)
#write.csv(imp.coef,"mumin_out.csv",row.names=FALSE)
# mod.names <-row.names(a)
# mod.weights <- a[,"weight"]
N <- 1000 #number of samples to take
nsims <- N
coef.sim <- data.frame()
# # get all fit models (this section for multi-model inference only)
# models <- list()
#
# for(i in 1:length(mod.names)) {
#
# mod.name <- mod.names[i]
# models[[mod.name]] <- get.models(a,mod.name)[[1]]
#
# }
# this section for multi-model averaging
# for(i in 1:N) {
#
# mod.name <- sample(mod.names,prob=mod.weights,replace=TRUE,size=1)
# mod.name <- mod.names[1] # if using only the best model (otherwise comment out for multi-model averaging)
# model <- models[[mod.name]]
# coef.sim.row <- fixef(sim(model,n.sims=1)) # change to get just one simulation, store in matrix
# coef.sim.row <- as.data.frame(coef.sim.row)
# coef.sim <- rbind.fill(coef.sim,coef.sim.row)
#
# }
# simulate coef for best model. this section for using best-model only
# arm::sim draws from the sampling distribution of the fixed effects.
coef.sim <- fixef(sim(best.model,n.sims=nsims))
coef.sim[is.na(coef.sim)] <- 0
# Summarize simulated coefficients as "mean (2.5%,97.5%)" strings and
# accumulate one row per vegetation type into coef.df.
coef.mean <- apply(coef.sim,2,mean)
coef.low <- apply(coef.sim,2,quantile,prob=0.025)
coef.high <- apply(coef.sim,2,quantile,prob=0.975)
coef.mean <- signif(coef.mean,3)
coef.low <- signif(coef.low,3)
coef.high <- signif(coef.high,3)
coef.summary <- rbind(coef.mean,coef.low,coef.high)
coef.bounds <- paste("(",coef.low,",",coef.high,")",sep="")
coef.full <- paste(coef.mean,coef.bounds)
names(coef.full) <- names(coef.mean)
coef.full <- as.data.frame(t(coef.full))
coef.full$type <- type
coef.df <- rbind.fill(coef.df,coef.full)
# could adapt for plotting coef plots (would require standardized coefs to interpret)
# bin.full.num <- as.data.frame(rbind(bin.median,bin.lwr,bin.upr))
# bin.full.num$val <- c("median","lower","upper")
# bin.full.num$year <- year
# bin.coef.num.df <- rbind(bin.coef.num.df,bin.full.num)
################################
#### Make predictions ##########
################################
#link ppt.normal to ppt.sq
# Regress the centered quadratic term on the centered linear term so
# counterfactual ppt.normal.sq_c values stay consistent with ppt.normal_c.
ppt.link <- data.frame(ppt.normal_c=d.c.sp.focal$ppt.normal_c,ppt.normal.sq_c=d.c.sp.focal$ppt.normal.sq_c)
m <- lm(ppt.normal.sq_c ~ ppt.normal_c + I(ppt.normal_c^2),ppt.link)
ppt.link.a <- coef(m)[1]
ppt.link.b <- coef(m)[2]
ppt.link.b2 <- coef(m)[3]
# -0.1721 + 0.8811x + 0.1722x^2
# prediction matrix (newdata)
#determine appropriate "counterfactual" values for predictors
summary(d.c.sp.focal$diff.norm.ppt.min.z) # -1.475 to 2.015
summary(d.c.sp.focal$ppt.normal_c) # -1.5 to 3
summary(d.c.sp.focal$seedtr.any.comb_c) #-1 to 4.9
hist(d.c.sp.focal$seedtr.any.comb_c) #-1 to 4.9
hist(d.c.sp.focal$northness_c) #-1 to 4.9
# set "counterfactual" values for predictors #-1.045, 0.376, 2.156
ppt.normal.c.low <- b1.mid.c
ppt.normal.c.mid <- b2.mid.c
plot(ppt.normal_c~diff.norm.ppt.min.z,col=Fire,data=d.c.sp.focal)
#get range of min precip anomaly for each ppt value
# Restrict each counterfactual curve to the anomaly range actually
# observed within its precip bin (avoid extrapolation).
d.c.sp.focal.lowprecip <- d.c.sp.focal[(d.c.sp.focal$ppt.normal_c > b1.low.c) & (d.c.sp.focal$ppt.normal_c < b1.b2.boundary.c),]
low.min <- min(d.c.sp.focal.lowprecip$diff.norm.ppt.min.z)
low.max <- max(d.c.sp.focal.lowprecip$diff.norm.ppt.min.z)
d.c.sp.focal.midprecip <- d.c.sp.focal[(d.c.sp.focal$ppt.normal_c < b2.high.c) & (d.c.sp.focal$ppt.normal_c > b1.b2.boundary.c),]
mid.min <- min(d.c.sp.focal.midprecip$diff.norm.ppt.min.z)
mid.max <- max(d.c.sp.focal.midprecip$diff.norm.ppt.min.z)
#early minimum precip vs. normal climate: #!may want to leave out Rich
ggplot(d.c.sp.focal,aes(x=diff.norm.ppt.min.z,y=ppt.normal_c,color=Fire)) +
geom_point(size=5) +
theme_bw(20) +
labs(x="Minimum precipitation anomaly first 3 years (SD from mean)",y="Normal precipitation (mm)")
# Counterfactual prediction frames: sweep the min-precip anomaly while
# holding everything else at 0 (the centered mean).
diff.norm.min.seq <- seq(from=low.min,to=low.max,length.out=100)
pred.df.lowprecip <- data.frame(
diff.norm.ppt.min.z = diff.norm.min.seq,
ppt.normal_c =ppt.normal.c.low,
ppt.normal.sq_c = ppt.link.a + ppt.link.b*ppt.normal.c.low + ppt.link.b2*(ppt.normal.c.low^2),
northness_c= 0,
diff.norm.ppt.z = 0,
seedtr.comb_c = 0,
seedtr.any.comb_c = 0,
tmean.normal_c = 0
)
diff.norm.min.seq <- seq(from=mid.min,to=mid.max,length.out=100)
pred.df.midprecip <- data.frame(
diff.norm.ppt.min.z = diff.norm.min.seq,
ppt.normal_c =ppt.normal.c.mid,
ppt.normal.sq_c = ppt.link.a + ppt.link.b*ppt.normal.c.mid + ppt.link.b2*(ppt.normal.c.mid^2),
northness_c= 0,
diff.norm.ppt.z = 0,
seedtr.comb_c = 0,
seedtr.any.comb_c = 0,
tmean.normal_c = 0
)
# diff.norm.min.seq <- seq(from=high.min,to=high.max,length.out=100)
#
# pred.df.highprecip <- data.frame(
#
# diff.norm.ppt.min.z = diff.norm.min.seq,
# ppt.normal_c =ppt.normal.c.high,
# ppt.normal.sq_c = -0.1721 + 0.8811*ppt.normal.c.high + 0.1722*ppt.normal.c.high^2,
# northness_c= 0,
# diff.norm.ppt.z = 0,
# seedtr.comb_c = 0,
# seedtr.any.comb_c = 0,
# tmean.normal_c = 0
#
# )
pred.df <- rbind(pred.df.lowprecip,pred.df.midprecip)#,pred.df.highprecip)
#
# #### alternative pred df for 3-d plotting: all potential combinations of min and average (need to restrict z.seq ranges)
# pairwise <- expand.grid(z.seq,z.seq)
# pred.df <- data.frame(
# diff.norm.ppt.min.z = pairwise[,1],
# ppt.normal_c = 0,
# ppt.normal.sq_c = -0.186 + 0.846*0 + 0.186*0^2,
# northness_c= 0,
# diff.norm.ppt.z = pairwise[,2]
# )
# ### end alternative pred df ####
# interact.df <- matrix(data=NA,nrow=nsims,ncol=ncol(pred.df)^2)
# names.df <- vector(length=ncol(pred.df)^2)
#### add interaction prediction terms to the df (all pairsiwe interactions)
# Manually add every pairwise product column so any interaction the best
# model selected has a matching "predictor" column below.
interact.df <- data.frame("Fake"=rep(NA,nrow(pred.df)))
for(i in 1:ncol(pred.df)) { # for each col
for(j in 1:ncol(pred.df)) {
name.i <- names(pred.df)[i]
name.j <- names(pred.df)[j]
name.inter <- paste(name.i,name.j,sep=":")
val.inter <- pred.df[,i] * pred.df[,j]
interact.df <- cbind(interact.df,val.inter)
names(interact.df)[ncol(interact.df)] <- name.inter
}
}
pred.df <- cbind(pred.df,interact.df)
pred.df$"(Intercept)" <- 1 # this is the "predictor" value to multiple the intercept by
# ### predict for all tree seedlings
# library(merTools)
#
# predFun <- function(x)predict(x, pred.df, re.form=NA)
#
# p <- predict(best.model.tree,newdata=pred.df,type="response")
# p2 <- bootMer(best.model.tree,nsim=100,FUN=predFun)
# #predMat <- p2$t
#
# Manual prediction: for each counterfactual row, multiply every simulated
# coefficient draw by the predictor values, back-transform from the logit
# scale, and summarize across draws (mean + 95% interval).
coef.names <- c("(Intercept)",colnames(coef.sim)[-1])
pred.order <- pred.df[,coef.names]
#pred.terms <- coef.sim * pred.order
pred.fit <- data.frame(mean=rep(NA,nrow(pred.df)),lwr=NA,upr=NA)
for (i in 1:nrow(pred.df)) {
# for each row in the prediction DF, calculate the distribution of fitted response
pred.mat <- matrix(rep(as.matrix(pred.order)[i,],each=nrow(coef.sim)),nrow=nrow(coef.sim))
a <- as.matrix(coef.sim)
b <- as.matrix(pred.mat)
pred.terms <- a * b
pred.y <- rowSums(pred.terms)
pred.y.resp <- invlogit(pred.y) #both cover and probability were logit-transformed, so transform back to response scale
pred.y.resp.mean <- mean(pred.y.resp)
pred.y.resp.lwr <- quantile(pred.y.resp,probs=0.025)
pred.y.resp.upr <- quantile(pred.y.resp,probs=0.975)
ret <- data.frame(mean=pred.y.resp.mean,lwr=pred.y.resp.lwr,upr=pred.y.resp.upr)
pred.fit[i,] <- ret
}
pred.fit <- pred.fit*100 # transform to percent
pred.fit <- cbind(pred.df,pred.fit) # merge predictors and simulated responses
# Axis label depends on response type (probability vs. % cover).
ylab <- NULL
if(pres.ab) {
ylab <- "Probability of regeneration"
ylim <- 60
} else {
ylab <- "Percent cover"
ylim <- 60
}
#### plot counterfactual predictions ####
title <- paste(type,"- Low mean precip")
# low normal precip
d.plot <- pred.fit[pred.fit$ppt.normal_c==ppt.normal.c.low,]
p.low <- ggplot(d.plot,aes(x=diff.norm.ppt.min.z,y=mean)) +
geom_line(color="darkgreen") +
geom_ribbon(aes(ymin=lwr,ymax=upr),alpha=0.5,fill="darkgreen") +
coord_cartesian(ylim=c(0, ylim)) +
scale_x_continuous(limits = c(-1.25,0)) +
xlab("Postfire precipitation (3-yr minimum)") +
ylab(ylab) +
theme_bw(13) +
ggtitle(title)
title <- paste(type,"- High mean precip")
# mid normal precip
d.plot <- pred.fit[pred.fit$ppt.normal_c==ppt.normal.c.mid,]
p.mid <- ggplot(d.plot,aes(x=diff.norm.ppt.min.z,y=mean)) +
geom_line(color="darkgreen") +
geom_ribbon(aes(ymin=lwr,ymax=upr),alpha=0.5,fill="darkgreen") +
coord_cartesian(ylim=c(0, ylim)) +
scale_x_continuous(limits = c(-1.25,0)) +
xlab("Postfire precipitation (3-yr minimum)") +
ylab(ylab) +
theme_bw(13) +
ggtitle(title)
# # high normal precip
# d.plot <- pred.fit[pred.fit$ppt.normal_c==ppt.normal.c.high,]
# p.high <- ggplot(d.plot,aes(x=diff.norm.ppt.min.z,y=mean)) +
# geom_line(color="darkgreen") +
# geom_ribbon(aes(ymin=lwr,ymax=upr),alpha=0.5,fill="darkgreen") +
# scale_x_continuous(limits = c(-1.25,0)) +
# coord_cartesian(ylim=c(0, 50)) +
# xlab("Postfire precipitation (3-yr minimum)") +
# ylab(ylab) +
# theme_bw(13)
# Store the side-by-side low/mid panel for this vegetation type.
p[[type]] <- grid.arrange(p.low,p.mid,nrow=1)
}
# Combine the four per-type panels into one figure, save the coefficient
# table, and write the figure to PNG via Cairo.
p.full <- grid.arrange(p[["CONIFER"]],p[["HARDWOOD"]],p[["SHRUB"]],p[["GRASS"]],nrow=4)
coef.df
write.csv(coef.df,"model_coefs.csv",row.names=FALSE)
library(Cairo)
Cairo(file="Fig2_moderate_sev.png",typ="png",width=600,height=1000)
p.full <- grid.arrange(p[["CONIFER"]],p[["HARDWOOD"]],p[["SHRUB"]],p[["GRASS"]],nrow=4)
dev.off()
#### 3-d plot no longer using ####
#
# #plot 3-d
# library(ggplot2)
# library(viridis)
#
# p <- ggplot(pred.fit,aes(x=diff.norm.ppt.z,y=diff.norm.ppt.z_c)) +
# geom_tile(aes(fill=mean)) +
# #geom_ribbon(aes(ymin=lwr,ymax=upr),alpha=0.5,fill="darkgreen") +
# #scale_y_continuous(limits = c(-6,100)) +
# #xlab("Postfire precipitation (3-yr average)") +
# #ylab("Percent cover") +
# theme_bw(13) +
# scale_fill_viridis(limits=c(0,100))
#
#
# library(Cairo)
# Cairo(file="test2.png",typ="png",width=400,height=300)
# p
# dev.off()
|
73913f565f485fa640527f01f9f6e85d70aedfbe
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/ifultools/examples/scaleData.Rd.R
|
a89454ef49f0a7614f5f6b3916b166e00a51b12a
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 178
|
r
|
scaleData.Rd.R
|
# Auto-extracted example from the ifultools help page for scaleData():
# rescales the vector to decibel ("db") scale.
library(ifultools)
### Name: scaleData
### Title: Scale numeric data
### Aliases: scaleData
### Keywords: utilities
### ** Examples
scaleData(c(1,10,100,1000),scale="db")
|
1d118e95327d5d98aa08cb7151ce6603bbc488c2
|
f4cc3754e8d6de28625650ed2d93cf7b71e3ff19
|
/Lectures/Lecture_W06_Discrete_Probability/main.R
|
5ce1178228baac5c2f9d66c2f81b80512e796412
|
[] |
no_license
|
xujianzi/GEOG533
|
2bc3cacee25a02c51dbd07f8db3364e54cfbe926
|
a141022819d6ceb2e03c1bdba4610630f5fc4d3e
|
refs/heads/master
| 2021-05-15T06:19:05.409487
| 2017-12-14T22:11:22
| 2017-12-14T22:11:22
| 114,300,878
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,109
|
r
|
main.R
|
# Lecture examples: discrete probability in R.
### Counting the number of combinations
#choose(n,k) = n!/k!(n-k)!
# Each choose() call is verified against the factorial formula.
choose(3,2)
factorial(3)/(factorial(2)*factorial(3-2))
choose(5,3)
factorial(5)/(factorial(3)*factorial(5-3))
choose(50,3)
factorial(50)/(factorial(3)*factorial(50-3))
choose(4,2)
### Generating combinations
#combn(items, k)
# combn() enumerates the combinations themselves (one per column).
combn(1:5,3)
combn(c("T1","T2","T3","T4","T5"),3)
### Generate random numbers
# Uniform and normal draws, with and without explicit parameters.
runif(1)
runif(10)
runif(10,min = -3,max = 3)
rnorm(10)
rnorm(10,mean = 100,sd = 15)
# mean is recycled: one draw from each of N(-10,1), N(0,1), N(10,1).
rnorm(3,mean = c(-10,0,10),sd = 1)
### Generating reproducible random numbers
# Same seed => identical draw sequence.
set.seed(100)
runif(10)
set.seed(100)
runif(10)
#### Generating a random sample
library(MASS)
df <- Cars93
sample(df$Price, 10)
sample(df$Manufacturer,10)
?sample
# Sampling with vs. without replacement.
sample(1:10,size = 10,replace = FALSE)
sample(1:10,size = 10,replace = TRUE)
?set.seed
set.seed(100)
sample(1:10)
sample(1:10,size = 10,replace = FALSE)
sample(c("H","T"),size = 10,replace = TRUE)
sample(c("blue","yellow","red","green","purple"),size = 5,replace = FALSE)
# Weighted sampling: simulate births with P(boy)=0.513.
births <- sample(c("boy","girl"),size = 10,replace = TRUE,prob = c(0.513,0.487))
births
table(births)
manybirths <- sample(c("boy","girl"),size = 100000,replace = TRUE,prob = c(0.513,0.487))
table(manybirths)
?dbinom
dbinom(5,size = 10,prob = 0.5)
dbinom(7,size = 10,prob = 0.5)
pbinom(7,size = 10,prob = 0.5)
pbinom(7,size = 10,prob = 0.5,lower.tail = FALSE)
dbinom(6,size = 10,prob = 0.5) + dbinom(7,size = 10,prob = 0.5)
pbinom(7,size = 10,prob = 0.5) - pbinom(5,size = 10,prob = 0.5)
pbinom(c(5,7),size = 10,prob = 0.5)
diff(pbinom(c(5,7),size = 10,prob = 0.5))
n <- 20
x <- 0:n
y <- dbinom(x,size = n,prob = 0.5)
plot(x,y)
plot(x,y,type = "b",pch = 16,col="black")
n <- 20
x <- 0:n
y <- pbinom(x,size = n,prob = 0.5)
plot(x,y)
plot(x,y,type = "b",pch = 16,col="black")
x <- seq(from = -3,to = 3,length.out = 100)
y <- dnorm(x)
plot(x,y)
plot(x,y,type = "l",pch = 16,col="black")
### plot multiple graphs
x <- seq(from=0,to = 6,length.out = 100)
ylim <- c(0,0.6)
par(mfrow=c(2,2))
plot(x,dunif(x,min=2,max=4),main="Uniform",type="l",ylim=ylim)
plot(x,dnorm(x,mean=3,sd=1),main="Normal",type="l",ylim=ylim)
plot(x,dexp(x,rate=1/2),main="Exponential",type="l",ylim=ylim)
plot(x,dgamma(x,shape=2,rate = 1),main="Gamma",type="l",ylim=ylim)
### shading a region
par(mfrow=c(1,1))
x <- seq(from=-3,to = 3,length.out = 100)
y <- dnorm(x)
plot(x,y,main="Standard Normal Distribution",type="l",ylab = "Density",xlab="Quantile")
abline(h = 0)
region.x <- x[1 <= x & x <= 2]
region.y <- y[1 <= x & x <= 2]
region.x <- c(region.x[1],region.x,tail(region.x,1))
region.y <- c(0,region.y,0)
polygon(region.x,region.y,density = 10)
polygon(region.x,region.y,density = -1,col="red")
### http://www.alisonsinclair.ca/2011/03/shading-between-curves-in-r/
x <- seq(-3,3,0.01)
y1 <- dnorm(x,0,1)
y2 <- 0.5*dnorm(x,0,1)
plot(x,y1,type="l",bty="L",xlab="X",ylab="dnorm(X)")
points(x,y2,type="l",col="red")
polygon(c(x,rev(x)),c(y2,rev(y1)),col="skyblue")
### Geometric distribution
dgeom(0,prob = 0.5)
dgeom(1,prob = 0.5)
n <- 10
x <- 0:n
y <- dgeom(x,prob = 0.5)
plot(x,y)
plot(x,y,type = "b",pch = 16,col="black",ylim = c(0,1))
y2 <- dgeom(x, prob=0.8)
lines(x,y2,type = "b",pch = 16,col="red")
y3 <- dgeom(x, prob=0.2)
lines(x,y3,type = "b",pch = 16,col="blue")
### Poisson distribution
### During the last 60 days, there were 3 accidents. What’s the probability that there will be more than 3 accidents during the next month?
dpois(0,lambda = 1.5)
dpois(1,lambda = 1.5)
dpois(2,lambda = 1.5)
dpois(3,lambda = 1.5)
1- dpois(0,1.5)-dpois(1,1.5)-dpois(2,1.5)-dpois(3,1.5)
ppois(3,lambda = 1.5,lower.tail = FALSE)
n <- 20
x <- 0:n
y <- dpois(x,lambda = 1)
plot(x,y)
plot(x,y,type = "b",pch = 16,col="black")
y2 <- dpois(x,lambda = 4)
lines(x,y2,type = "b",pch = 16,col="red")
y3 <- dpois(x,lambda = 10)
lines(x,y3,type = "b",pch = 16,col="blue")
dhyper(1,3,2,2)
x
rev(x)
c(x,rev(x))
c(y2,rev(y1))
age <- 1:10
y.low <- rnorm(length(age), 150, 25) + 10*age
y.high <- rnorm(length(age), 250, 25) + 10*age
plot(age,y.high,type = 'n', ylim = c(100, 400), ylab = 'Y Range', xlab = 'Age (years)')
lines(age, y.low, col = 'grey')
lines(age, y.high, col = 'grey')
polygon(c(age, rev(age)), c(y.high, rev(y.low)),col = "grey30", border = NA)
prob <- dbinom(0:n,size = n,prob = 0.5)
plot(0:n,prob,pch = 16,col="black")
n <- 20
prob <- dbinom(0:n,size = n,prob = 0.7)
plot(0:n,prob,col="green",pch=19)
numbirths <- 10
numboys <- 0:numbirths
binom.prob <- dbinom(numboys,size = numbirths,prob = 0.513)
plot(numboys,binom.prob)
plot(numboys,binom.prob,type = "b")
?plot
plot(numboys,binom.prob,type="b",xlab = "number of boys babies in 10 births", ylab = "probability that this occurs")
pbinom(3,size = 10,prob = 0.513)
dbinom(3,size = 10,prob = 0.513)
1 - pbinom(7,size = 10,prob = 0.513)
numbirths <- 10
numboys <- 0:numbirths
binom.prob <- pbinom(numboys,size = numbirths,prob = 0.513)
plot(numboys,binom.prob)
plot(numboys,binom.prob,type = "b")
plot(numboys,binom.prob,type="b",xlab = "number of boys babies in 10 births", ylab = "probability of this many boy births or fewer")
dbinom(5,10,0.5)
|
8d7e6d93932cd83bd94be457c0f26c157114bb03
|
c438e401cbc856aeb77707846260f0525734b997
|
/data-raw/06-process_events_data.R
|
b9e7c7da369ed46b9b95f1315f13c7682773ffd5
|
[] |
no_license
|
geanders/hurricaneexposuredata
|
6794f93831b7ee3dec19ea83975e1b2b738a0014
|
b42fe54788ba8ade5e6aab614c75eea41d51a80c
|
refs/heads/master
| 2022-06-02T05:47:11.387854
| 2022-05-16T01:39:35
| 2022-05-16T01:39:35
| 61,568,076
| 8
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,136
|
r
|
06-process_events_data.R
|
## Be sure to re-build package after running 02 before running this script
# Pull NOAA storm-event listings (counties within 500 km of each tracked
# hurricane) and save them as the package dataset `storm_events`.
library(noaastormevents)
library(dplyr)
data(hurr_tracks, package = "hurricaneexposuredata")
# One row per storm: package storm id plus its ATCF id.
storm_id_table <- hurr_tracks %>%
select(storm_id, usa_atcf_id) %>%
distinct()
# Storm year is the text after the "-" in ids such as "Katrina-2005".
storm_years <- gsub(".+-", "", storm_id_table$storm_id)
storms <- storm_id_table$storm_id
# Preallocate the result list, one element per storm, keyed by storm id.
storm_events <- vector("list", length(storms))
names(storm_events) <- storms
for(storm_year in unique(storm_years)){
print(storm_year)
yearly_storms <- storms[storm_years == storm_year]
for(storm in yearly_storms){
print(storm)
i <- which(storms == storm)
# County-level events: zero-pad FIPS to 5 digits, then collapse the event
# types for each county into a list-column.
this_storm_events <- find_events(storm = storm, dist_limit = 500) %>%
dplyr::rename(type = event_type) %>%
dplyr::mutate(fips = stringr::str_pad(fips, width = 5, side = "left", pad = "0")) %>%
dplyr::select(fips, type) %>%
dplyr::group_by(fips) %>%
dplyr::summarize(events = list(type))
storm_events[[i]] <- this_storm_events
}
# Remove data after each year (otherwise, this is lots of data)
# NOTE(review): presumably `noaastormevents_package_env` is a cache object
# created by find_events() in this environment -- confirm against the
# noaastormevents version in use.
rm(noaastormevents_package_env)
}
usethis::use_data(storm_events, overwrite = TRUE)
|
8e8e38a0a164ba1ca49dde70b32bb7a3f453946c
|
9489320a4fc68aed3e900830a32a25703639a0d7
|
/code/disturb-events-co13/nCLIMDIV-Palmer-Drought-In-R.R
|
a83aa44b80ef056425ba0fb090d4f1a827bf9dd0
|
[] |
no_license
|
mperignon/NEON-Lesson-Building-Data-Skills
|
bf79c792ba098b5b6b863539acc39205f212a3fe
|
7619414db6453a042566cd02bbaf1356cb89ad32
|
refs/heads/gh-pages
| 2020-12-24T11:18:07.939012
| 2016-11-07T05:27:18
| 2016-11-07T05:27:18
| 73,037,865
| 0
| 0
| null | 2016-11-07T03:10:09
| 2016-11-07T03:10:09
| null |
UTF-8
|
R
| false
| false
| 9,794
|
r
|
nCLIMDIV-Palmer-Drought-In-R.R
|
## ----load-libraries------------------------------------------------------
library(ggplot2) # create efficient, professional plots
library(plotly) # create interactive plots
## ----load-libraries-hidden, echo=FALSE, results="hide"-------------------
# this library is only added to get the webpage derived from this code to render
# the plotly graphs. It is NOT needed for any of the analysis or data
# visualizations.
# install.packages("webshot")
# webshot::install_phantomjs()
library(webshot) # embed the plotly plots
## ----import-drought-data-------------------------------------------------
# Set working directory to the data directory
# setwd("YourFullPathToDataDirectory")
# Import CO state-wide nCLIMDIV data
nCLIMDIV <- read.csv("drought/CDODiv8506877122044_CO.txt", header = TRUE)
# view data structure
str(nCLIMDIV)
## ----convert-year-month--------------------------------------------------
# convert to date, and create a new Date column
nCLIMDIV$Date <- as.Date(nCLIMDIV$YearMonth, format="%Y%m")
## ----convert-date--------------------------------------------------------
#add a day of the month to each year-month combination
nCLIMDIV$Date <- paste0(nCLIMDIV$YearMonth,"01")
#convert to date
nCLIMDIV$Date <- as.Date(nCLIMDIV$Date, format="%Y%m%d")
# check to see it works
str(nCLIMDIV$Date)
## ----create-quick-palmer-plot--------------------------------------------
# plot the Palmer Drought Index (PDSI)
palmer.drought <- ggplot(data=nCLIMDIV,
aes(Date,PDSI)) + # x is Date & y is drought index
geom_bar(stat="identity",position = "identity") + # bar plot
xlab("Date") + ylab("Palmer Drought Severity Index") + # axis labels
ggtitle("Palmer Drought Severity Index - Colorado \n 1991-2015") # title on 2 lines (\n)
# view the plot
palmer.drought
## ----summary-stats-------------------------------------------------------
#view summary stats of the Palmer Drought Severity Index
summary(nCLIMDIV$PDSI)
#view histogram of the data
hist(nCLIMDIV$PDSI, # the date we want to use
main="Histogram of PDSI", # title
xlab="Palmer Drought Severity Index (PDSI)", # x-axis label
col="wheat3") # the color of the bars
## ----set-plotly-creds, eval=FALSE----------------------------------------
## # set plotly user name
## Sys.setenv("plotly_username"="YOUR_plotly_username")
##
## # set plotly API key
## Sys.setenv("plotly_api_key"="YOUR_api_key")
##
## ----create-ggplotly-drought---------------------------------------------
# Use exisitng ggplot plot & view as plotly plot in R
palmer.drought_ggplotly <- ggplotly(palmer.drought)
palmer.drought_ggplotly
## ----create-plotly-drought-plot------------------------------------------
# use plotly function to create plot
palmer.drought_plotly <- plot_ly(nCLIMDIV, # the R object dataset
type= "bar", # the type of graph desired
x=Date, # our x data
y=PDSI, # our y data
orientation="v", # for bars to orient vertically ("h" for horizontal)
title=("Palmer Drought Severity Index - Colorado 1991-2015"))
palmer.drought_plotly
## ----post-plotly, message=FALSE------------------------------------------
# publish plotly plot to your plot.ly online account when you are happy with it
# skip this step if you haven't connected a Plotly account
plotly_POST(palmer.drought_plotly)
## ----challenge-Div04, include=FALSE, echo=FALSE, results="hide"----------
# Import CO state-wide nCLIMDIV data
nCLIMDIV.co04 <- read.csv("drought/CDODiv8868227122048_COdiv04.txt", header = TRUE)
# view data structure
str(nCLIMDIV.co04)
#add a day of the month to each year-month combination
nCLIMDIV.co04$Date <- paste0(nCLIMDIV.co04$YearMonth,"01")
#convert to date
nCLIMDIV.co04$Date <- as.Date(nCLIMDIV.co04$Date, format="%Y%m%d")
# check to see it works
str(nCLIMDIV.co04$Date)
# plot the Palmer Drought Index (PDSI) w/ ggplot
palmer.drought.co04 <- ggplot(data=nCLIMDIV.co04,
aes(Date,PDSI)) + # x is Date & y is drought index
geom_bar(stat="identity",position = "identity") + # bar plot
xlab("Date") + ylab("Palmer Drought Severity Index") + # axis labels
ggtitle("Palmer Drought Severity Index - Platte River Drainage") # title on 2 lines (\n)
# view the plot
palmer.drought.co04
# --OR-- we could use plotly
# use plotly function to create plot
palmer.drought.co04_plotly <- plot_ly(nCLIMDIV.co04, # the R object dataset
type= "bar", # the type of graph desired
x=Date, # our x data
y=PDSI, # our y data
orientation="v", # for bars to orient vertically ("h" for horizontal)
title=("Palmer Drought Severity Index - Platte River Drainage"))
palmer.drought.co04_plotly
## ----challenge-PHSI, echo=FALSE, results="hide", include=FALSE-----------
# our example code uses the Palmer Hydrological Drought Index (PHDI)
# Palmer Hydrological Drought Index (PHDI) -- quoted from the metadata
# "This is the monthly value (index) generated monthly that indicates the
# severity of a wet or dry spell. This index is based on the principles of a
# balance between moisture supply and demand. Man-made changes such as
# increased irrigation, new reservoirs, and added industrial water use were not
# included in the computation of this index. The index generally ranges from -
# 6 to +6, with negative values denoting dry spells, and positive values
# indicating wet spells. There are a few values in the magnitude of +7 or -7.
# PHDI values 0 to -0.5 = normal; -0.5 to -1.0 = incipient drought; -1.0 to -
# 2.0 = mild drought; -2.0 to -3.0 = moderate drought; -3.0 to -4.0 = severe
# drought; and greater than -4.0 = extreme drought. Similar adjectives are
# attached to positive values of wet spells. This is a hydrological drought
# index used to assess long-term moisture supply.
# check format of PHDI
str(nCLIMDIV)
# plot PHDI using ggplot
palmer.hydro <- ggplot(data=nCLIMDIV,
aes(Date,PHDI)) + # x is Date & y is drought index
geom_bar(stat="identity",position = "identity") + # bar plot
xlab("Date") + ylab("Palmer Hydrological Drought Index") + # axis labels
ggtitle("Palmer Hydrological Drought Index - Colorado") # title on 2 lines (\n)
# view the plot
palmer.hydro
## ----palmer-NDV-plot-only, echo=FALSE, results="hide"--------------------
# NoData Value in the nCLIMDIV data from 1990-199 US spatial scale
nCLIMDIV_US <- read.csv("drought/CDODiv5138116888828_US.txt", header = TRUE)
#add a day of the month to each year-month combination
nCLIMDIV_US$Date <- paste0(nCLIMDIV_US$YearMonth,"01")
#convert to date
nCLIMDIV_US$Date <- as.Date(nCLIMDIV_US$Date, format="%Y%m%d")
# check to see it works
str(nCLIMDIV_US$Date)
palmer.droughtUS <- ggplot(data=nCLIMDIV_US,
aes(Date,PDSI)) + # x is Date & y is drought index
geom_bar(stat="identity",position = "identity") + # bar plot
xlab("Date") + ylab("Palmer Drought Severity Index") + # axis labels
ggtitle("Palmer Drought Severity Index - United States") # title
palmer.droughtUS
## ----palmer-no-data-values-----------------------------------------------
# NoData Value in the nCLIMDIV data from 1990-2015 US spatial scale
nCLIMDIV_US <- read.csv("drought/CDODiv5138116888828_US.txt", header = TRUE)
# add a day of the month to each year-month combination
nCLIMDIV_US$Date <- paste0(nCLIMDIV_US$YearMonth,"01")
# convert to date
nCLIMDIV_US$Date <- as.Date(nCLIMDIV_US$Date, format="%Y%m%d")
# check to see it works
str(nCLIMDIV_US$Date)
# view histogram of data -- great way to check the data range
hist(nCLIMDIV_US$PDSI,
main="Histogram of PDSI values",
col="springgreen4")
# easy to see the "wrong" values near 100
# check for these values using min() - what is the minimum value?
min(nCLIMDIV_US$PDSI)
# assign -99.99 to NA in the PDSI column
# Note: you may want to do this across the entire data.frame or with other columns
# but check out the metadata -- there are different NoData Values for each column!
nCLIMDIV_US[nCLIMDIV_US$PDSI==-99.99,] <- NA # == is the short hand for "it is"
#view the histogram again - does the range look reasonable?
hist(nCLIMDIV_US$PDSI,
main="Histogram of PDSI value with NA value assigned",
col="springgreen4")
# that looks better!
#plot Palmer Drought Index data
ggplot(data=nCLIMDIV_US,
aes(Date,PDSI)) +
geom_bar(stat="identity",position = "identity") +
xlab("Year") + ylab("Palmer Drought Severity Index") +
ggtitle("Palmer Drought Severity Index - Colorado\n1991 thru 2015")
# The warning message lets you know that two "entries" will be missing from the
# graph -- these are the ones we assigned NA.
## ----subset-decade-------------------------------------------------------
# subset out data between 2005 and 2015
nCLIMDIV2005.2015 <- subset(nCLIMDIV, # our R object dataset
Date >= as.Date('2005-01-01') & # start date
Date <= as.Date('2015-12-31')) # end date
# check to make sure it worked
head(nCLIMDIV2005.2015$Date) # head() shows first 6 lines
tail(nCLIMDIV2005.2015$Date) # tail() shows last 6 lines
## ----plotly-decade-------------------------------------------------------
# use plotly function to create plot
palmer_plotly0515 <- plot_ly(nCLIMDIV2005.2015, # the R object dataset
type= "bar", # the type of graph desired
x=Date, # our x data
y=PDSI, # our y data
orientation="v", # for bars to orient vertically ("h" for horizontal)
title=("Palmer Drought Severity Index - Colorado 2005-2015"))
palmer_plotly0515
# publish plotly plot to your plot.ly online account when you are happy with it
# skip this step if you haven't connected a Plotly account
plotly_POST(palmer_plotly0515)
|
490308a55795ef2e8fd6a46cfc60dad0c877954c
|
b4f4751efb5a7b380039c6c312bf4c31125fdff7
|
/man/TF_robotarm.Rd
|
92e1ee4cd7316889f93c6cb5c6145eef07dd2bfc
|
[] |
no_license
|
CollinErickson/TestFunctions
|
df948bfa4194284c01bc30d139c56ff54ba8f1f4
|
f7755de107c453fccbefabdddbe8c2c9d68d1200
|
refs/heads/master
| 2023-02-22T16:09:08.483163
| 2023-02-13T02:26:17
| 2023-02-13T02:26:17
| 63,456,817
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 463
|
rd
|
TF_robotarm.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/functions1.R
\name{TF_robotarm}
\alias{TF_robotarm}
\title{TF_robotarm: Robot arm function for evaluating a single point.}
\usage{
TF_robotarm(x)
}
\arguments{
\item{x}{Input vector at which to evaluate.}
}
\value{
Function output evaluated at x.
}
\description{
TF_robotarm: Robot arm function for evaluating a single point.
}
\examples{
TF_robotarm(rep(0,8))
TF_robotarm(rep(1,8))
}
|
80ef385fdbd59799b9b0eaeb0a0552f0cb87bf15
|
59d0ef049e63b38cb5a8b84cad8cd45fcaad8974
|
/R/calcLambda.R
|
6233b243d19d11d64c3294847dd0dc8e721a885c
|
[] |
no_license
|
guhjy/lavaPenalty
|
e6a9452992c34c555e7de8957b34e79b7338fc7f
|
9bbaaa25517c12b81ad282b0b05b3029f4a8e6f5
|
refs/heads/master
| 2020-04-08T14:24:05.235200
| 2018-02-26T09:43:01
| 2018-02-26T09:43:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,379
|
r
|
calcLambda.R
|
# {{{ doc
#' @title Estimate the regularization parameter
#' @name calcLambda
#' @description Find the optimal regularization parameter according to a fit criterion
#'
#' @param x a plvmfit object
#' @param seq_lambda1 the sequence of penalisation paramaters to be used to define the sub model
#' @param data.fit the data used to fit the model
#' @param data.test the data used to test the model
#' @param warmUp should the new model be initialized with the solution of the previous model (i.e. estimated for a different penalization parameter)
#' @param fit criterion to decide of the optimal model to retain among the penalized models.
#' @param trace shoud the execution of the function be traced
#' @param ... additional arguments - e.g. control argument for estimate.lvm
#'
#' @examples
#'
#' set.seed(10)
#' n <- 300
#' formula.lvm <- as.formula(paste0("Y~",paste(paste0("X",1:4), collapse = "+")))
#' mSim <- lvm(formula.lvm)
#' df.data <- sim(mSim,n)
#'
#' lvm.model <- lvm(formula.lvm)
#' plvm.model <- penalize(lvm.model)
#'
#' res <- estimate(plvm.model, data = df.data, increasing = FALSE, regularizationPath = TRUE)
#'
#' perf <- calcLambda(res$regularizationPath, res$x, data.fit = df.data)
#' perf
#' @rdname calcLambda
#' @export
calcLambda <- function(x, ...) {
  ## S3 generic: dispatches on the class of `x` (see calcLambda.plvmfit).
  UseMethod("calcLambda")
}
# }}}
# {{{ calcLambda.plvm
#' @rdname calcLambda
#' @export
## S3 method for plvmfit objects.  Walks the stored lasso regularization
## path, refits the *unpenalized* reduced model implied by each breakpoint
## (knot) of the path, scores every refit with the requested criterion
## (AIC / BIC / prediction error), and returns the best refit decorated
## with the path and penalty information.
## NOTE(review): the `seq_lambda1` argument is immediately overwritten from
## the stored path below, and `warmUp` is never used in this body --
## confirm whether these arguments are still meant to have an effect.
calcLambda.plvmfit <- function(x,
seq_lambda1,
data.fit = x$data$model.frame,
data.test = x$data$model.frame,
warmUp = lava.options()$calcLambda$warmUp,
fit = lava.options()$calcLambda$fit,
trace = TRUE,
...){
# {{{ test
# Validate the input: a regularization path must have been computed and the
# fit criterion must be one of the supported choices.
if(is.path(x)==FALSE){
stop("missing regularization path in object \n")
}
if(fit %in% c("AIC","BIC","P_error") == FALSE){
stop("fit must be in AIC BIC P_error \n",
"proposed value: ",fit,"\n")
}
# }}}
# {{{ extract path
# `constrain`: matrix mapping penalized coefficients (rows) to lasso
# constraints (columns); `regPath`: one row per breakpoint of the path.
constrain <- penalty(x, type = "Vlasso")$Vlasso
regPath <- getPath(x, path.constrain = TRUE, only.breakpoints=TRUE)
seq_lambda1.abs <- regPath$lambda1.abs
seq_lambda1 <- regPath$lambda1
n.lambda1 <- length(seq_lambda1.abs)
# }}}
# {{{ initialization
# Forward a user-supplied `control` list to estimate(); silence its own
# tracing unless explicitly requested.
if("control" %in% names(list(...))){
control <- list(...)$control
}else{
control <- list()
}
if("trace" %in% names(control) == FALSE){
control$trace <- FALSE
}
n.knot <- NROW(regPath)
n.constrain <- NCOL(penalty(x, type = "Vlasso")$Vlasso)
n.coef <- length(coef(x))
# Per-knot bookkeeping: convergence flag, criterion value, running best.
cv.lvm <- rep(NA,n.knot)
seq.criterion <- rep(NA,n.knot)
best.res <- Inf
best.lambda1 <- NA
best.lambda1.abs <- NULL
# pathCI: long data.table (coefficient x knot), filled by reference with
# the estimate and Wald 95% confidence limits of each refit.
pathCI <- data.table(expand.grid(coef = names(coef(x)), knot = 1:n.knot))
pathCI[, estimate := as.numeric(NA)]
pathCI[, lower := as.numeric(NA)]
pathCI[, upper := as.numeric(NA)]
pathCI[, p.value := as.numeric(NA)]
pathCI[, lambda1 := regPath$lambda1[knot]]
pathCI[, lambda2 := regPath$lambda2[knot]]
pathCI[, lambda1.abs := regPath$lambda1.abs[knot]]
pathCI[, lambda2.abs := regPath$lambda2.abs[knot]]
setkeyv(pathCI, c("coef","knot"))
store.resCoef <- setNames(rep(NA, n.coef), names(coef(x)))
## attribute a name to each coefficient
# Label every penalized regression coefficient ("beta<i>") so equality
# constraints between coefficients can later be expressed by formula.
model0 <- x$model
coefWithPenalty <- names(Matrix::which(Matrix::rowSums(abs(constrain))>0))
index.coefWithPenalty <- setNames(match(coefWithPenalty, rownames(constrain)),coefWithPenalty)
for(iCoef in coefWithPenalty){ # iCoef <- coefWithPenalty[1]
regression(model0, as.formula(iCoef)) <- as.formula(paste0("~beta", index.coefWithPenalty[iCoef]))
}
# }}}
if(trace){
cat("Estimation of the optimum lambda according to the ",fit," criterion \n")
pb <- txtProgressBar(min = 0, max = n.lambda1, style = 3)
}
# Main loop: one unpenalized refit per breakpoint of the path.
for(iKnot in 1:n.knot){ #
## define the constrains
ils.constrain <- regPath$constrain0[[iKnot]]
# {{{ form the reduced model
# Reduced model: drop the "plvm" class and apply the active constraints --
# a coefficient alone in a constraint is fixed to 0; a group of
# coefficients is tied together via a constrain() relation.
Imodel0 <- model0
class(Imodel0) <- setdiff(class(model0),"plvm")
if(!is.null(ils.constrain)){
for(iCon in ils.constrain){ # iCon <- 1
iConstrain <- constrain[,iCon]
iName <- names(which(iConstrain!=0))
iCoef <- iConstrain[iConstrain!=0]
if(length(iName)==1){
regression(Imodel0, as.formula(iName)) <- 0
}else{
iF <- as.formula(paste0(index.coefWithPenalty[iName[1]],"~",
index.coefWithPenalty[iName[-1]]))
constrain(Imodel0, iF) <- function(x){}
}
}
}
# }}}
# {{{ fit the reduced model and extract gof
fitTempo2 <- estimate(Imodel0, data = data.fit,
control = control)
## estimates
# Record the estimate and Wald 95% CI of every coefficient of this refit.
tableTempo2 <- summary(fitTempo2)$coef
store.resCoef[] <- NA
store.resCoef[rownames(tableTempo2)] <- tableTempo2[,"Estimate"]
pathCI[knot == iKnot, estimate := store.resCoef]
store.resCoef[] <- NA
store.resCoef[rownames(tableTempo2)] <- tableTempo2[,"Estimate"] + qnorm(0.025)*tableTempo2[,"Std. Error"]
pathCI[knot == iKnot, lower := store.resCoef]
store.resCoef[] <- NA
store.resCoef[rownames(tableTempo2)] <- tableTempo2[,"Estimate"] + qnorm(0.975)*tableTempo2[,"Std. Error"]
pathCI[knot == iKnot, upper := store.resCoef]
## gof criteria
cv.lvm[iKnot] <- fitTempo2$opt$convergence==0
if(fit %in% c("AIC","BIC")){
seq.criterion[iKnot] <- gof(fitTempo2)[[fit]]
}else{
# P_error: mean squared prediction error on data.test.
# NOTE(review): `n.endogeneous` is not defined anywhere in this function;
# this branch only works if it exists in the calling environment --
# probably intended to be length(endogenous(fitTempo2)).
predY <- as.data.frame(predict(fitTempo2,
data = data.test[,exogenous(fitTempo2), drop = FALSE]))
seq.criterion[iKnot] <- 1/(n.endogeneous*NROW(data.test)) * sum((data.test[,endogenous(fitTempo2)] - predY)^2)
}
## look for the best LVM
# Keep the refit with the smallest criterion among those that converged.
if(cv.lvm[iKnot] && best.res>seq.criterion[iKnot]){
best.res <- seq.criterion[iKnot]
best.lvm <- fitTempo2
best.lambda1 <- seq_lambda1[iKnot]
best.lambda1.abs <-seq_lambda1.abs[iKnot]
}
# }}}
if(trace){setTxtProgressBar(pb, iKnot)}
}
if(trace){close(pb)}
#### export
# Decorate the winning refit so it looks like a penalized fit again.
# NOTE(review): if no refit converged, `best.lvm` was never assigned and
# the lines below fail; consider raising an explicit error in that case.
best.lvm$control <- x$control
best.lvm$call <- x$call
best.lvm$regularizationPath <- x$regularizationPath
best.lvm$penalty <- x$penalty
best.lvm$penalty$lambda1 <- best.lambda1
best.lvm$penalty$lambda1.abs <- best.lambda1.abs
best.lvm$nuclearPenalty <- x$nuclearPenalty
# Store the per-knot criterion/convergence/optimum flags back into the
# path (data.table modified by reference; `index` is resolved inside the
# data.table's scope).
best.lvm$regularizationPath$path[match(index,regPath$index), (fit) := seq.criterion]
best.lvm$regularizationPath$path[match(index,regPath$index), cv := cv.lvm]
best.lvm$regularizationPath$path[match(index,regPath$index), optimum := seq.criterion==best.res]
best.lvm$regularizationPath$criterion <- fit
class(best.lvm) <- class(x)
return(best.lvm)
}
# }}}
|
3cf4791a36bdb81ff1d0299765bdf4ebf6a5a960
|
dd559bdf01b7e9f10040f158f98629c92f29fa06
|
/tests/testthat.R
|
c4065c91a54dc8b9dce1e8c5dfcb514905c7a5b6
|
[] |
no_license
|
sunqihui1221/MyPackage
|
7a06f7309c5fa404faa63eaf906a9e1a92c87165
|
b49d300d3adbbe52fd81ab2d8da3babe0d5f3b21
|
refs/heads/master
| 2020-08-21T00:06:49.570422
| 2019-10-18T18:22:55
| 2019-10-18T18:22:55
| 216,080,357
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 79
|
r
|
testthat.R
|
# Standard testthat entry point: runs every test under tests/testthat/.
library(testthat)
library(MyPackage)
test_check("MyPackage")
# NOTE(review): stray interactive call left in the test runner -- it runs
# after the suite on every R CMD check; consider moving it into a
# test_that() block or deleting it.
plus10(5, TRUE)
|
abcbc07541c0d4b9cd11409b5b3e86dd18f1c057
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/LSD/examples/heatscatterpoints.Rd.R
|
c5b50041f0b8391aa9e916d8686907a1fc3363ce
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 535
|
r
|
heatscatterpoints.Rd.R
|
# Auto-extracted example for LSD::heatscatterpoints (from the package Rd).
library(LSD)
### Name: heatscatterpoints
### Title: A colored scatterplot based on a two-dimensional Kernel Density
### Estimation (add to an existing plot)
### Aliases: LSD.heatscatterpoints heatscatterpoints
### Keywords: heatcolors scatterplot,
### ** Examples
# Bimodal sample: two Gaussian clusters, y correlated with x, x skewed.
# NOTE(review): `points` here masks graphics::points for the session.
points = 10^4
x = c(rnorm(points/2),rnorm(points/2)+4)
y = x + rnorm(points,sd=0.8)
x = sign(x)*abs(x)^1.3
# Open an empty plot region, add density-colored points, then axes/box.
plot.new()
plot.window(xlim = c(-5,15),ylim = c(-4,8))
heatscatterpoints(x,y,add.contour=TRUE,color.contour="green",greyscale=TRUE)
axis(1)
axis(2)
box()
|
c1b345d27bc4b109e4b397a421f3f72181d06086
|
a47ce80aa269ff4f7c0c54e0df97815b0bb783a7
|
/plot4.R
|
4adcf680fe9a941f88e51f519347b7af793a9382
|
[] |
no_license
|
rajathithan/ExData_Plotting1
|
176f9287b78ad195d184d460535ef0caed3faab3
|
095cd8437d1fd408d9a1d410fea79e19b3c2cc8d
|
refs/heads/master
| 2021-01-18T18:58:50.418531
| 2016-02-14T13:31:58
| 2016-02-14T13:31:58
| 48,317,156
| 0
| 0
| null | 2015-12-20T11:12:41
| 2015-12-20T11:12:39
| null |
UTF-8
|
R
| false
| false
| 791
|
r
|
plot4.R
|
# Plot 4: 2x2 panel of household power-consumption series, saved as a PNG.
# Reads the preprocessed data set produced earlier in this pipeline.
data <- "ec/newecdata.rds"
ecdata <- readRDS(data)
png("ec/plot4.png", width = 480, height = 480)
par(mfrow=c(2,2))
# Top row: global active power and voltage over time.
plot(ecdata$Time, ecdata$Global_active_power, type = "l", xlab = "", ylab = "Global Active Power")
plot(ecdata$Time, ecdata$Voltage, type = "l", xlab = "datetime", ylab = "Voltage")
# Bottom-left: the three sub-metering series overlaid, with a legend.
plot(ecdata$Time, ecdata$Sub_metering_1, type = "l", col = "black", xlab = "", ylab = "Energy sub metering")
lines(ecdata$Time, ecdata$Sub_metering_2, type="l", col="red")
lines(ecdata$Time, ecdata$Sub_metering_3, type="l", col="blue")
legend("topright", col=c("black", "red", "blue"), lty=1, lwd=2, bty="n",legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
# Bottom-right: global reactive power.
plot(ecdata$Time, ecdata$Global_reactive_power, type = "l", xlab = "datetime", ylab = "Global_reactive_power")
dev.off()
|
27a9b13af058ba6d1b39e51bd137f8d4cb4a64f6
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/Claddis/examples/ReadMorphNexus.Rd.R
|
9c08d7705b212e8802c5ed3d2bdb42cbc70ea60b
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 859
|
r
|
ReadMorphNexus.Rd.R
|
# Auto-extracted example for Claddis::ReadMorphNexus (from the package Rd).
# Side effects: writes, reads, then deletes "morphmatrix.nex" in the
# current working directory.
library(Claddis)
### Name: ReadMorphNexus
### Title: Reads in a morphological #NEXUS data file
### Aliases: ReadMorphNexus
### Keywords: NEXUS
### ** Examples
# Create example matrix (NB: creates a file in the current
# working directory called "morphmatrix.nex"):
cat("#NEXUS\n\nBEGIN DATA;\n\tDIMENSIONS NTAX=5 NCHAR=5;\n\t
FORMAT SYMBOLS= \" 0 1 2\" MISSING=? GAP=- ;\nMATRIX\n\n
Taxon_1 010?0\nTaxon_2 021?0\nTaxon_3 02111\nTaxon_4 011-1
\nTaxon_5 001-1\n;\nEND;\n\nBEGIN ASSUMPTIONS;\n\t
OPTIONS DEFTYPE=unord PolyTcount=MINSTEPS ;\n\t
TYPESET * UNTITLED = unord: 1 3-5, ord: 2;\n\t
WTSET * UNTITLED = 1: 2, 2: 1 3-5;\nEND;", file = "morphmatrix.nex")
# Read in example matrix:
morph.matrix <- ReadMorphNexus("morphmatrix.nex")
# View example matrix in R:
morph.matrix
# Remove the generated data set:
file.remove("morphmatrix.nex")
|
8fad7a3c16feb95c922aa3bd17c860a869be01b3
|
7fc89fddba6496ed4a7541a4acaca0f3bbc84816
|
/features2.R
|
9638449d8d7aff7818b82162dd4690b9e7d88a47
|
[
"MIT"
] |
permissive
|
nathanjchan/ice-on-mars
|
bb9b3a4f0f0188fbd42e030daee870a487a045d7
|
e28671df241c286f825b84f3eded695eff530b6c
|
refs/heads/master
| 2023-07-21T00:46:29.469174
| 2023-07-10T03:30:17
| 2023-07-10T03:30:17
| 279,992,372
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,709
|
r
|
features2.R
|
# Libraries ----
library(parallel)
# library(fractaldim)
# library(radiomics)
# Project helpers: sampleTifClassification(), relevantColumns(),
# extractFeatures2(), getStatistics() are expected to come from here.
source("functions.R")
# Global Variables ----
# radar.csv indexes the radar image files; drop the row-name column "X".
df = read.csv("radar.csv", stringsAsFactors = FALSE)
df = df[, !(colnames(df) %in% "X")]
# Sample ----
n = 500
global_i = 0
global_n = n
global_start_time = Sys.time()
set.seed(23*3)
# NOTE(review): `sample` masks base::sample from here on.
sample = sampleTifClassification(n)
#sample = sampleTifRegression(n)
half1 = 1:(n/2)
half2 = (n/2 + 1):n
# Parallelization
# One worker per physical core; worker stdout goes to output.txt.
num_cores = detectCores()
cl = makeCluster(num_cores, outfile = "output.txt")
clusterExport(cl, varlist = c("relevantColumns", "getStatistics", "global_start_time"))
clusterEvalQ(cl, {
library(raster)
library(e1071)
library(fractaldim)
rasterOptions(tmpdir = "D:/Mars_Data/__raster__/")
})
features2 = parLapply(cl, sample$tif, extractFeatures2)
stopCluster(cl)
# Assemble the per-image feature rows into one data frame.
features_df2 = as.data.frame(do.call(rbind, features2))
# NOTE(review): "colot_hist" looks like a typo for "color_hist"; left as-is
# because downstream files may already rely on these column names.
feature_names2 = c(paste0("colot_hist", 1:32))
num_features2 = length(feature_names2)
if (num_features2 != ncol(features_df2)) {
stop("Number of feature names and number of features don't match!")
}
colnames(features_df2) = feature_names2
big2 = features_df2
write.csv(big2, "bigClassificationColorhist.csv")
# Testing ----
# Interactive scratch section: times feature extraction on one image.
# NOTE(review): calc_features/glcm/glrlm/glszm/mglszm come from the
# radiomics package, whose library() call is commented out above -- this
# section fails unless radiomics is attached.
start.time <- Sys.time()
radar = raster(sample$tif[1])
# crop by selecting middle 3000
relevant = relevantColumns(ncol(radar), 3000)
e = extent(relevant[1] - 1, relevant[3000], 0, nrow(radar))
radar_crop = crop(radar, e)
radar_mat = as.matrix(radar_crop)
# radiomics
test = calc_features(radar_mat)
calc_features(glcm(radar_mat))
calc_features(glrlm(radar_mat))
calc_features(glszm(radar_mat))
calc_features(mglszm(radar_mat))
end.time <- Sys.time()
time.taken <- end.time - start.time
time.taken
|
aa95ea5c50307a2f36b7754b1840fbbf59a9e425
|
1444cdcb441dc2ce139819c81ca60b775fc5270d
|
/run_analysis.R
|
620042fb3bf232c87f6c619880cad7b82217511c
|
[] |
no_license
|
dvfriedheim/Getting-and-Cleaning-Data-Course-Project
|
f04d27325b255e1231f8721246a075071e60a248
|
940865c3df82a46230339ea8a7f16481959f9ac0
|
refs/heads/master
| 2021-01-23T04:19:53.158390
| 2017-03-25T20:24:12
| 2017-03-25T20:24:12
| 86,186,098
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,162
|
r
|
run_analysis.R
|
## Preliminaries: download data, read in and combine subject, activity and raw data files
## NOTE(review): the zip is re-downloaded on every run; consider guarding
## the download with file.exists("./data/Dataset.zip").
if(!file.exists("./data")){dir.create("./data")}
URL<-"https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
download.file(URL,"./data/Dataset.zip")
unzip(zipfile="./data/Dataset.zip",exdir="./data")
library(readr)
# Activity id -> label lookup (labels lower-cased for readability).
activityLabels<-read_table("./data/UCI HAR Dataset/activity_labels.txt",col_names=c("#","label"))
activityLabels$label<-tolower(activityLabels$label)
# Feature names (column V2) used to label the measurement columns.
features<-read.table("./data/UCI HAR Dataset/features.txt",header=FALSE)
features$V2<-as.character(features$V2)
# Training set: subject ids + activity codes + measurements, column-bound.
subjectTrain<-read_table("./data/UCI HAR Dataset/train/subject_train.txt",col_names="subject")
yTrain<-read_table("./data/UCI HAR Dataset/train/y_train.txt",col_names="activity")
subjectActivityTrain<-cbind(subjectTrain,yTrain)
xTrain<-read_table("./data/UCI HAR Dataset/train/X_train.txt",
col_names=features$V2)
fullTrain<-cbind(subjectActivityTrain,xTrain)
# Test set, assembled the same way.
subjectTest<-read_table("./data/UCI HAR Dataset/test/subject_test.txt",col_names="subject")
yTest<-read_table("./data/UCI HAR Dataset/test/y_test.txt",col_names="activity")
subjectActivityTest<-cbind(subjectTest,yTest)
xTest<-read_table("./data/UCI HAR Dataset/test/X_test.txt",
col_names=features$V2)
fullTest<-cbind(subjectActivityTest,xTest)
## Step 1. Merges the training and the test sets to create one data set.
fullTrainAndTest<-rbind(fullTrain,fullTest)
## Step 2. Extracts only the measurements on the mean and standard deviation for each measurement.
# Keeps the id columns plus columns whose name contains mean() or std().
library(plyr)
library(dplyr)
finalTrainAndTest<-select(fullTrainAndTest,grep("subject|activity|mean\\(\\)|std\\(\\)",
colnames(fullTrainAndTest)))
## Step 3. Uses descriptive activity names to name the activities in the data set
finalTrainAndTest$activity<-factor(finalTrainAndTest$activity,labels=activityLabels$label)
## Step 4. Appropriately labels the data set with descriptive variable names.
names(finalTrainAndTest)<-gsub("^t","time",names(finalTrainAndTest))
names(finalTrainAndTest)<-gsub("^f","frequency",names(finalTrainAndTest))
names(finalTrainAndTest)<-gsub("Acc","Acceleration",names(finalTrainAndTest))
names(finalTrainAndTest)<-gsub("Gyro","Velocity",names(finalTrainAndTest))
names(finalTrainAndTest)<-gsub("Mag","Magnitude",names(finalTrainAndTest))
names(finalTrainAndTest)<-gsub("Mean\\(\\)","Mean",names(finalTrainAndTest))
names(finalTrainAndTest)<-gsub("-std\\(\\)","StandardDeviation",names(finalTrainAndTest))
names(finalTrainAndTest)<-gsub("X$","X axis",names(finalTrainAndTest))
names(finalTrainAndTest)<-gsub("Y$","Y axis",names(finalTrainAndTest))
names(finalTrainAndTest)<-gsub("Z$","Z axis",names(finalTrainAndTest))
names(finalTrainAndTest)<-gsub("BodyBody","Body",names(finalTrainAndTest))
## Step 5. From the data set in step 4, creates a second, independent tidy data set with
## the average of each variable for each activity and each subject.
tidyAverages<-aggregate(. ~subject + activity, finalTrainAndTest, mean)
write.table(tidyAverages, "tidyAverages.txt", row.names = FALSE, quote = FALSE)
|
229d18419b93c56461e0bf4e696c21e7c5196b8d
|
60938027552d345a7afe52d4800abccacb18abf1
|
/Functions_equilibrium.R
|
d33892439006f47059126ba20c2d04113e3375b0
|
[] |
no_license
|
matthewhholden/Management_Targets_For_Anti_African_Elephant_Poaching
|
07a23d7827b8a8ac8e81674bd3c4fb22e73d38ae
|
a12dadc8ce82bb39bff31fd8410798e91e711812
|
refs/heads/master
| 2021-01-21T20:37:59.569767
| 2017-05-25T02:35:47
| 2017-05-25T02:35:47
| 92,258,701
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,300
|
r
|
Functions_equilibrium.R
|
# Functions ####
dEdt.nulcline = function(pars, N){
  ## dE/dt = 0 nullcline: the effort level E, as a function of abundance N,
  ## at which dE/dt vanishes.  `pars` is the 16-element parameter vector
  ## used throughout this file; only the entries unpacked below are needed.
  ## Vectorized over N.
  q0 <- pars[3];  cp <- pars[4];  s0 <- pars[5]
  b  <- pars[6];  s1 <- pars[7];  mu <- pars[8]
  pc <- pars[9];  cc <- pars[10]; p0 <- pars[11]
  area <- pars[12]; m <- pars[15]
  numer <- -cp - b*m*mu*N*q0 + mu*N*p0*q0 - cc*pc*s0 + b*cc*m^2*pc*s1 -
    cc*m*p0*pc*s1
  denom <- area*b*N*q0*(mu*N*q0 - cc*m*pc*s1)
  numer / denom
}
stability = function(eigvals){
  ## Classify the local stability of a 2-D equilibrium from the two
  ## eigenvalues of its Jacobian:
  ##   1 : stable (node or spiral) -- both real parts negative
  ##   2 : saddle                  -- real parts of opposite sign
  ##   3 : unstable                -- both real parts positive
  ##   NA: indeterminate           -- at least one real part is zero
  ##       (linearization is inconclusive, e.g. a center)
  ## Fixes vs. original: the saddle test used eigvals[1]*eigvals[2] < 0,
  ## which errors for complex eigenvalues (comparison is undefined for
  ## complex values in R); comparing the product of the *real* parts is
  ## equivalent for real eigenvalues and always well defined.  The result
  ## is also returned explicitly rather than via the value of the last
  ## assignment (which was returned invisibly).
  re = Re(eigvals)
  if( sum( re < 0 ) == 2 ){
    stab = 1
  } else if ( sum( re > 0 ) == 2 ){
    stab = 3
  } else if ( re[1]*re[2] < 0 ) {
    stab = 2
  } else { stab = NA }
  return(stab)
}
# Population nullcline E(N): the effort level at which logistic growth
# exactly balances harvest (dN/dt = 0). Only r (growth rate),
# k (carrying capacity) and q0 (catchability) of the 16-parameter
# vector are used.
dNdt.nulcline <- function(pars, N){
  growth.rate  <- pars[1]
  carrying.cap <- pars[2]
  catchability <- pars[3]
  (growth.rate / catchability) * (1 - N / carrying.cap)
}
# 2x2 Jacobian of the population/effort system evaluated at the state
# x = (N, E). Row 1 holds the partials of dN/dt, row 2 those of dE/dt.
# pars is the 16-element parameter vector (r, k, q0, cp, s0, b, s1, mu,
# pc, cc, p0, area, eta, z, m, delta); eta, z and delta are unused.
Jacobian <- function(pars, x){
  p <- setNames(as.list(pars[1:16]),
                c("r", "k", "q0", "cp", "s0", "b", "s1", "mu",
                  "pc", "cc", "p0", "area", "eta", "z", "m", "delta"))
  n <- x[1]
  e <- x[2]
  with(p, {
    dfdn <- (-e)*q0 - (n*r)/k + (1 - n/k)*r
    dfde <- (-n)*q0
    dgdn <- e*((-area)*b*e*mu*n*q0^2 + mu*q0*(p0 - b*(m + area*e*n*q0)) +
                 area*b*cc*e*m*pc*q0*s1)
    dgde <- -cp + mu*n*q0*(p0 - b*(m + area*e*n*q0)) +
      e*((-area)*b*mu*n^2*q0^2 + area*b*cc*m*n*pc*q0*s1) -
      cc*pc*(s0 + m*(p0 - b*(m + area*e*n*q0))*s1)
    matrix(c(dfdn, dfde, dgdn, dgde), 2, 2, byrow = TRUE)
  })
}
# Coefficients (constant term first) of dE/dt viewed as a cubic
# polynomial in N; the result feeds directly into polyroot() to locate
# interior equilibria of the effort equation.
dEdtCoefsN <- function(pars){
  r <- pars[1];  k <- pars[2];   q0 <- pars[3]
  cp <- pars[4]; s0 <- pars[5];  b <- pars[6]
  s1 <- pars[7]; mu <- pars[8];  pc <- pars[9]
  cc <- pars[10]; p0 <- pars[11]; area <- pars[12]
  m <- pars[15]
  c(
    -cp - cc*pc*s0 + b*cc*m^2*pc*s1 - cc*m*p0*pc*s1,  # N^0
    -b*m*mu*q0 + mu*p0*q0 + area*b*cc*m*pc*r*s1,      # N^1
    -area*b*mu*q0*r - (area*b*cc*m*pc*r*s1)/k,        # N^2
    (area*b*mu*q0*r)/k                                # N^3
  )
}
# Locate the model's candidate population equilibria and decide which
# one the system settles on.
#
# pars : 16-element parameter vector (r, k, q0, cp, s0, b, s1, mu, pc,
#        cc, p0, area, eta, z, m, delta); eta, z and delta are unpacked
#        but unused here.
# N0   : initial population; used to pick the relevant attractor when
#        the system is bistable (two stable interior roots).
#
# Returns list(N.eq, rl, beta, N.roots, N.stab):
#   N.eq    - predicted equilibrium population (NA if no recognised
#             stable configuration is found)
#   rl      - number of candidate roots, including the trivial 0 and k
#   beta    - the composite quantity Beta(pars)
#   N.roots - all candidate equilibrium populations
#   N.stab  - stability class of each root (see stability())
#
# Depends on the sibling helpers Beta, dEdtCoefsN, dNdt.nulcline,
# Jacobian and stability defined earlier in this file.
calculate.equilibria = function(pars, N0){
  r = pars[1]; k = pars[2]; q0 = pars[3];
  cp = pars[4]; s0 = pars[5]; b = pars[6];
  s1 = pars[7]; mu = pars[8]; pc = pars[9];
  cc = pars[10]; p0 = pars[11]; area = pars[12];
  eta = pars[13]; z = pars[14];
  m = pars[15]; delta = pars[16];
  beta = Beta(pars)
  # Interior equilibria are the real roots of the cubic dE/dt polynomial
  # that lie strictly between 0 and the carrying capacity k.
  root = polyroot( dEdtCoefsN(pars) )
  N.eq = NA;
  N.nonTrivRoots = sort( Re( root[
    which( abs(Im(root)) < 1e-6 &
             Re(root) > 0 & Re(root) < k ) ] ))
  # Effort at each interior root read off the population nullcline.
  E.nonTrivRoots = dNdt.nulcline(pars, N.nonTrivRoots)
  # Bracket the interior roots with the trivial equilibria N = 0 and N = k
  # (both with zero effort).
  N.roots = c(0, N.nonTrivRoots, k);
  E.roots = c(0, E.nonTrivRoots, 0);
  rl = length(N.roots)
  #compute stability of all roots
  N.stab = rep(NA, rl)
  for(i in 1:rl){
    N.stab[i] = stability(eigen( Jacobian(pars, c(N.roots[i], E.roots[i])) )$values )
  }
  # print(rl); print(N.stab); #print( (N.stab==1) == c(0,1,0,1,0) ) ;
  # Determine the stability of the roots.
  # rl==2/3: at most one interior root -> take the second root if stable.
  # rl==5 with pattern (0,1,0,1,0): bistable; N0 relative to the middle
  #   (unstable) root decides which attractor is reached.
  # rl==5 with pattern (0,0,0,1,0) and rl==4 with only the last root
  #   stable: single attractor at the largest root.
  if( (rl == 2 || rl == 3) && N.stab[2] == 1){
    N.eq = N.roots[2]
  } else if (rl == 5 && sum( (N.stab==1) == c(0,1,0,1,0)) ==5 ){
    N.eq = ifelse(N0 > N.roots[3], N.roots[4], N.roots[2])
  } else if (rl == 5 && sum( (N.stab==1) == c(0,0,0,1,0)) ==5 ){
    N.eq = N.roots[4]
  } else if (rl == 4 && N.stab[4] == 1 &&
             N.stab[3]>1 && N.stab[2]>1 && N.stab[1]>1){
    N.eq = N.roots[4]
  }
  return(list(N.eq, rl, beta, N.roots, N.stab))
}
# Composite bifurcation quantity for the model: combines growth (r),
# patrol cost (cp), consumer cost/price terms (cc, pc, s0, s1) and the
# net price-minus-stockpile margin (p0 - b*m).
# NOTE: the original source also computed a variant without the pc
# factor ("beta") but discarded it; only the pc-weighted form (the
# original beta2) was ever returned, and that behavior is kept here.
Beta <- function(pars){
  r <- pars[1];  k <- pars[2];  q0 <- pars[3]
  cp <- pars[4]; s0 <- pars[5]; b <- pars[6]
  s1 <- pars[7]; mu <- pars[8]; pc <- pars[9]
  cc <- pars[10]; p0 <- pars[11]
  m <- pars[15]
  r + cp + cc*pc*s0 + (p0 - b*m)*(m*cc*pc*s1 - mu*q0*k)
}
#################### Function to plot the nullclines for several examples
# Draw a 4x4 grid of nullcline plots for up to `l` parameter sets.
#
# l     : number of (i, j) index pairs from `ind` to plot
# ind   : two-column matrix; column 1 indexes the sampled parameter
#         vectors, column 2 indexes the b-sequence (bSeq)
# ty    : solution "type" label, only used to split the N grid around
#         the asymptote under the 'FE' strategy and in the no-solution
#         message
# blim  : inclusive range of acceptable j (b-index) values
# strat : management strategy: 'FE' (funded enforcement), 'M', or 'SQ'
#         (status quo)
#
# NOTE(review): this function reads many objects from the calling
# environment (stockDrip, numSims, s1v, ccv, pcv, muv, qv, kv, rv, cpv,
# s0v, bSeq, p0v, area, delta, numb, roots.FE/roots.M/roots.SQ) —
# it only works after the surrounding simulation script has run.
Plot.all.types = function(l, ind, ty=NA, blim = c(1,numb), strat='SQ'){
  par(mfrow=c(4,4))
  sDrip = stockDrip
  s.new = rep(0, numSims)
  # Strategy switches: 'FE' funds enforcement from sales (s.new = s1v);
  # 'SQ' has no stockpile drip.
  if(strat == 'FE'){ s.new = s1v} else if ( strat == 'SQ') {sDrip = 0}
  for(counter in 1:l){
    if(dim(ind)[1]>0){
      i = ind[counter,1]
      j = ind[counter,2]
      if(j>=blim[1] && j<=blim[2]){
        if( strat == 'FE'){
          # ass: N value where the effort nullcline's denominator vanishes
          # (vertical asymptote); skip a small buffer around it when ty == 4.
          ass = s.new[i]*ccv[i]*pcv[i]*sDrip/(muv[i]*qv[i])
          buffer = .00002
          if (ass > 0 && ty == 4){
            N = c(seq(buffer, ass-buffer, by = min(.001,ass)), seq(ass+buffer, kv[i], by = .001))
          } else { N = c(seq(0, kv[i], by = .001)) }
        } else {
          N = c(seq(0, kv[i], by = .0002))
        }
        #nullcline plots for legal trade funding enforcement
        pars = c(rv[i], kv[i], qv[i], cpv[i], s0v[i], bSeq[j], s.new[i], muv[i],
                 pcv[i], ccv[i], p0v[i,j], area, 1, 1, sDrip, delta)
        E.E = dEdt.nulcline(pars, N)
        E.N = dNdt.nulcline(pars, N)
        plot(N, E.E,
             xlim = c(0,max(1.05*N)), ylim = c(0,max(2*E.N)),
             type = 'l', main = paste(i,', ',j) )
        lines(N, E.N, col='blue')
        # Overlay the precomputed equilibria for the chosen strategy
        # (green points); 'FE' also marks the asymptote in red.
        if(strat =='FE'){
          points( roots.FE[[j]][[i]], c(0, dNdt.nulcline(pars, roots.FE[[j]][[i]][-1])),
                  lwd=2, col='green' )
          abline(v=ass, col = 'red', lty = 2)
        } else if(strat == 'M'){
          points( roots.M[[j]][[i]], c(0, dNdt.nulcline(pars, roots.M[[j]][[i]][-1])),
                  lwd=2, col='green' )
        } else {
          points( roots.SQ[[j]][[i]], c(0, dNdt.nulcline(pars, roots.SQ[[j]][[i]][-1])),
                  lwd=2, col='green' )
        }
      }
    } else { print(paste('no solutions of type ', ty, ' for strategy', strat)) }
  }
}
#plot typical phase plot ####
# Draw the (N, E) phase plane for one parameter set: effort nullcline
# (dE/dt = 0, green), population nullcline (dN/dt = 0, pink) and the
# candidate fixed points `fp` (filled diamonds where stab == 1, open
# circles otherwise).
#
# pars     : 16-element parameter vector (see dEdt.nulcline)
# strat    : strategy label; 'FE' triggers the two-piece drawing of the
#            effort nullcline around its vertical asymptote
# fp       : vector of fixed-point N values (first entry assumed 0)
# stab     : stability class per fixed point, from stability()
# title, lab.axis, cexp, ca, lw : plot annotation/size options
#            (NOTE(review): `ca` is accepted but never used)
PhasePlot = function(pars, strat, fp, stab,
                     title='', lab.axis = c('',''),
                     cexp=3, ca=1.2, lw=3){
  r = pars[1]; k = pars[2]; q0 = pars[3];
  cp = pars[4]; s0 = pars[5]; b = pars[6];
  s1 = pars[7]; mu = pars[8]; pc = pars[9];
  cc = pars[10]; p0 = pars[11]; area = pars[12];
  eta = pars[13]; z = pars[14];
  m = pars[15]; delta = pars[16];
  mult.k = 1.1;
  col.null.e = 'mediumseagreen'
  col.null.n = 'lightpink3'
  col.fp = 'black'
  N = seq(0, mult.k*k, by = .0001)
  # pch 19 (18+1) for stable points, 1 (open circle) otherwise.
  symbols = 18*(stab == 1) + 1
  ylimit = c(0,.0022)
  xlimit = c(0,mult.k*k)
  if(strat=='FE'){
    # ass: N value of the effort nullcline's vertical asymptote; the
    # curve must be drawn separately on each side of it.
    ass = s1*cc*pc*m/(mu*q0)
    if(ass>0){
      buffer = 0.0001;
      N1 = c(seq(buffer, ass-buffer, by = min(buffer, ass)))
      if(ass<mult.k*k) N2 = seq(ass+buffer, mult.k*k, by = buffer)#, seq(ass+buffer, k, by = buffer))
    }
  }
  if(strat=='FE' && ass>0 ){
    # Two-piece plot: left branch first, then (if the asymptote falls
    # inside the plot window) the right branch.
    E.E1 = dEdt.nulcline(pars, N1)
    E.N1 = dNdt.nulcline(pars, N1)
    plot(N1, E.E1,
         xlim = xlimit, ylim = ylimit,
         type = 'l', main = title,
         xlab = lab.axis[1], ylab=lab.axis[2],
         xaxs="i", yaxs="i", lwd=lw, col=col.null.e)
    lines(N1, E.N1, col=col.null.n , lwd=lw)
    # The axes themselves are part of the nullclines (E = 0 and N = 0).
    lines(xlimit, c(0,0), col=col.null.e, lwd=lw, xpd=TRUE)
    lines(c(0,0), ylimit, col=col.null.n, lwd=lw, xpd=TRUE)
    if( ass<mult.k*k ){
      E.E2 = dEdt.nulcline(pars, N2)
      E.N2 = dNdt.nulcline(pars, N2)
      lines(N2, E.E2,
            type = 'l', main = paste(i,', ',j),
            xaxs="i", yaxs="i", lwd=lw, col=col.null.e)
      lines(N2, E.N2, col=col.null.n , lwd=lw)
    }
    # Fixed points: white fill first, then symbol outline on top.
    points( fp, c(0, dNdt.nulcline(pars, fp[-1])),
            cex = cexp, lwd=lw , pch = 16, col = 'white', xpd=TRUE)
    points( fp, c(0, dNdt.nulcline(pars, fp[-1])),
            cex = cexp, lwd=lw , pch = symbols, col = col.fp, xpd=TRUE)
  } else {
    #print('got here')
    # No asymptote: draw both nullclines over the full N range.
    E.E = dEdt.nulcline(pars, N)
    E.N = dNdt.nulcline(pars, N)
    ylimit = c(0, max(E.N,E.E)+.0001)
    plot(N, E.E,
         xlim = xlimit, ylim = ylimit,
         type = 'l', main = title,
         xlab = lab.axis[1], ylab=lab.axis[2],
         xaxs="i", yaxs="i", lwd=lw, col=col.null.e)
    lines(N, E.N, col=col.null.n , lwd=lw)
    lines(xlimit, c(0,0), col=col.null.e, lwd=lw, xpd=TRUE)
    lines(c(0,0), ylimit, col=col.null.n, lwd=lw, xpd=TRUE)
    points( fp, c(0, dNdt.nulcline(pars, fp[-1])),
            cex = cexp, lwd=lw, pch = 16, col = 'white', xpd=TRUE )
    points( fp, c(0, dNdt.nulcline(pars, fp[-1])),
            cex = cexp, lwd=lw, pch = symbols, col = col.fp, xpd=TRUE )
    #if( strat=='FE' && ass>0 ){abline(v=ass, col = 'grey', lty = 2, lwd=lw)}
  }
}
|
3baf4c28123642a5936cff62971167c4953b17c8
|
9334d2ce11d1c587b9233cd727e0649c34957cef
|
/Scripts/complete.R
|
74f950935e33b4bd0e1dc740bcaf8a709557bc97
|
[] |
no_license
|
sanketdave88/R-Programming-Notes
|
61ee0e983e40044d78cfd196795ecfd7ec3f7c3e
|
21d0a91e54f80540ff6c5c8dc51e38d91b08a660
|
refs/heads/master
| 2022-11-26T19:22:13.974622
| 2020-08-09T03:13:04
| 2020-08-09T03:13:04
| 284,391,689
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 531
|
r
|
complete.R
|
# Count complete (no-NA) observations in each monitor CSV file.
#
# directory : folder containing the monitor CSV files. Any path that
#             mentions "specdata" is redirected to the project-local
#             copy (preserves the original script's shortcut).
# id        : monitor numbers; each is used as a *position* into the
#             sorted file listing, so files must be named such that
#             list.files() returns them in monitor-id order
#             (e.g. "001.csv", "002.csv", ...).
#
# Returns a data.frame with columns `id` and `nobs`, where nobs is the
# number of rows with no missing values in the corresponding file.
complete <- function(directory, id = 1:332){
  if (grepl("specdata", directory)) {
    directory <- "./Data/rprog_data_specdata/specdata"
  }
  filepaths <- file.path(directory, list.files(directory))
  # vapply replaces the original manual counter loop; integer counts
  # are coerced to double, matching the original numeric() accumulator.
  nobs <- vapply(id, function(i) {
    monitor <- read.csv(filepaths[i], header = TRUE, sep = ",")
    sum(complete.cases(monitor))
  }, numeric(1))
  data.frame(id = id, nobs = nobs)
}
|
7b5fa410b40d193eff0c268c3a032a17a586f8f1
|
7f4ebcf527bea646b1a0a74c3e2e50fb035f4b3d
|
/comparison chart.R
|
4b82725569208a2a3a8492c5d6e4225dc0f3483a
|
[] |
no_license
|
hsmohammed/COVID-19
|
c37ac52e54c2eb55176d1b1d6cca8ec83d4a6728
|
d608cc10a1f3dadf3ff33b987241b3196513750d
|
refs/heads/master
| 2021-04-04T01:39:57.027576
| 2020-04-15T03:46:16
| 2020-04-15T03:46:16
| 248,413,576
| 1
| 1
| null | 2020-03-19T04:52:57
| 2020-03-19T04:52:57
| null |
UTF-8
|
R
| false
| false
| 2,077
|
r
|
comparison chart.R
|
# Build comparison series: four reference countries (solid lines)
# versus South Korea (dashed) for confirmed / deaths / recovered counts.
# The original script repeated the same filter pipeline six times and
# the same ggplot construction three times; both are factored into
# helpers here, producing the exact same variables and plots.
# NOTE(review): dates are compared as "0020-..." strings — presumably
# 2020 parsed with two-digit years; confirm upstream date parsing.

# Restrict a COVID time-series table to the given countries from a
# start date onward, grouped and ordered by country and date.
filter_covid_countries <- function(data, countries, start_date) {
  data %>%
    dplyr::filter(country %in% countries) %>%
    group_by(country) %>%
    arrange(country, date) %>%
    dplyr::filter(date >= start_date)
}

comparison_countries <- c("Iran", "US", "Italy", "Spain")

COVID_confirmed_viz <- filter_covid_countries(COVID_confirmed, comparison_countries, "0020-02-15")
COVID_deaths_viz    <- filter_covid_countries(COVID_deaths,    comparison_countries, "0020-02-15")
COVID_recovered_viz <- filter_covid_countries(COVID_recovered, comparison_countries, "0020-02-20")

COVID_confirmed_viz1 <- filter_covid_countries(COVID_confirmed, "Korea, South", "0020-02-20")
COVID_deaths_viz1    <- filter_covid_countries(COVID_deaths,    "Korea, South", "0020-02-20")
COVID_recovered_viz1 <- filter_covid_countries(COVID_recovered, "Korea, South", "0020-02-20")

# One panel: reference countries as thin solid lines, South Korea as a
# thick dashed line.
covid_panel <- function(solid_data, dashed_data, title) {
  ggplot() +
    geom_line(data = solid_data, aes(date, n_cases, group = country, color = country), size = 1) +
    geom_line(data = dashed_data, aes(date, n_cases, group = country, color = country), size = 3, linetype = "dashed") +
    ylab("n") +
    theme_bw() +
    ggtitle(title)
}

p1 <- covid_panel(COVID_confirmed_viz, COVID_confirmed_viz1, "Confirmed cases")
p2 <- covid_panel(COVID_deaths_viz,    COVID_deaths_viz1,    "Deaths")
p3 <- covid_panel(COVID_recovered_viz, COVID_recovered_viz1, "Recovered")

grid.arrange(p1, p2, p3)
|
768ef104bbfa630516280c75fd02da2cb28f5cfc
|
29d34e3302b71d41d77af715727e963aea119392
|
/R/cols2list.R
|
10797202c3426a276262613e9e32748f4c1ae353
|
[] |
no_license
|
bakaibaiazbekov/rtemis
|
1f5721990d31ec5000b38354cb7768bd625e185f
|
a0c47e5f7fed297af5ad20ae821274b328696e5e
|
refs/heads/master
| 2020-05-14T20:21:40.137680
| 2019-04-17T15:42:33
| 2019-04-17T15:42:33
| 181,943,092
| 1
| 0
| null | 2019-04-17T18:00:09
| 2019-04-17T18:00:09
| null |
UTF-8
|
R
| false
| false
| 520
|
r
|
cols2list.R
|
# cols2list.R
# ::rtemis::
# 2018 Efstathios D. Gennatas egenn.github.io

#' Convert data frame columns to list elements
#'
#' Convenience function that turns each column of a data frame into one
#' element of a named list.
#'
#' @param x Input: coerced to data.frame; each column becomes a list element
#' @author Efstathios D. Gennatas
#' @export

cols2list <- function(x) {
  df <- as.data.frame(x)
  out <- lapply(seq_along(df), function(j) df[, j])
  nm <- colnames(df)
  if (!is.null(nm)) names(out) <- nm
  out
} # rtemis::cols2list
|
af7978e38e24a789068b58179e911afba1b07f62
|
b7bbe4cc327717b7f20cbe80eeb4bbd3ae18d1ac
|
/R/Modelling/DLtune.R
|
1f5f2d5045e120331799752a445474e9ddc16ce7
|
[] |
no_license
|
guillaumelf/Challenge2018
|
dd29fd918475e88da932ebeb256592c7b1383d37
|
1c403a6105c9fe4a7c1f1a3020b56c706ab5a9c0
|
refs/heads/master
| 2021-05-08T07:33:59.598679
| 2018-01-07T14:24:38
| 2018-01-07T14:24:38
| 106,869,111
| 0
| 1
| null | 2017-10-14T12:13:05
| 2017-10-13T20:13:51
| null |
UTF-8
|
R
| false
| false
| 3,319
|
r
|
DLtune.R
|
#ALLEAU 20/11/2017
# h2o deep learning with coordinates, without NAs (original French note:
# "avec coord sans na sans")
library(h2o)
library(readr)
library(dplyr)
source(file="R/fonction.R", encoding ="UTF-8")
### Data import
# NOTE(review): relies on local CSV files under R/modif/ and on fac()
# from R/fonction.R — run from the project root.
test=read.csv('R/modif/test_WNADL.csv',sep=',',dec='.',header=T,stringsAsFactors = FALSE,encoding = "UTF-8")
test$ech <- as.factor(test$ech)
test$insee <- as.factor(test$insee)
test$ddH10_rose4 <- as.factor(test$ddH10_rose4)
dftest=test%>%select(-c(X,X.1))
data_agg=read.csv('R/modif/data_agg_gui.csv',sep=',',dec='.',header=T,stringsAsFactors = FALSE,encoding = "UTF-8")
data_agg=fac(data_agg)
databig=data_agg%>%select(-c(X,date,mois))
### PART 1
# Load packages
library(h2o)
library('stringr')
h2oServer <-h2o.init(nthreads = 1) # adjust per machine !!!
h2o.removeAll() # clear h2o memory, just in case
# Load data into the h2o cluster and split 95/5 into train/validation.
df <- as.h2o(databig)
splits <- h2o.splitFrame(df, 0.95, seed=1234)
train_hex <- h2o.assign(splits[[1]], "train_hex") # 95%
test_hex <- h2o.assign(splits[[2]], "test_hex")
### PART 2
# Globals consumed by run() below: target column, predictor columns,
# output directory and whether to score the test set.
response <- "tH2_obs"
predictors <- setdiff(names(df), response)
workdir="/R/DL_resu" # REMEMBER TO CHANGE THIS !!!
score_test_set=T
# Train one h2o deep-learning model with the given extra parameters and
# report training throughput plus (optionally) a test-set error.
#
# extra_params : named list merged (via modifyList) into the fixed
#                h2o.deeplearning() arguments x/y/training_frame/model_id.
# Reads the globals `predictors`, `response`, `train_hex` and
# `score_test_set` defined above.
# Returns a character vector:
#   (parameter string, training samples, time, samples/second, test error).
run <- function(extra_params) {
  str(extra_params)
  print("Training.")
  model <- do.call(h2o.deeplearning, modifyList(list(x=predictors, y=response,
                                                     training_frame=train_hex, model_id="dlmodel"), extra_params))
  # Last entry of the scoring history = total samples consumed.
  sampleshist <- model@model$scoring_history$samples
  samples <- sampleshist[length(sampleshist)]
  time <- model@model$run_time/1000
  print(paste0("training samples: ", samples))
  print(paste0("training time : ", time, " seconds"))
  print(paste0("training speed : ", samples/time, " samples/second"))
  if (score_test_set) {
    print("Scoring on test set.")
    # NOTE(review): h2o.rmse(model) with no extra arguments reports the
    # model's own stored metric, not a fresh score on test_hex — confirm
    # this is the intended "test set error".
    test_error <- h2o.rmse(model)
    print(paste0("test set error : ", test_error))
  } else {
    test_error <- 1.0
  }
  # Free the model from the h2o cluster before the next run.
  h2o.rm("dlmodel")
  c(paste(names(extra_params), extra_params, sep = "=", collapse=" "),
    samples, sprintf("%.3f", time),
    sprintf("%.3f", samples/time), sprintf("%.3f", test_error))
}
# Flatten a list of per-run result vectors (5 fields each, as produced
# by run()) into a table and write it as a semicolon-separated CSV into
# the global `workdir` directory.
writecsv <- function(results, file) {
  out <- matrix(unlist(results), ncol = 5, byrow = TRUE)
  colnames(out) <- c("parameters", "training samples",
                     "training time", "training speed", "test set error")
  write.csv2(out, file.path(workdir, file))
}
### FIRST STUDY
# Grid of network architectures to compare; each list entry is one set
# of extra h2o.deeplearning() parameters passed to run().
EPOCHS=200
args <- list(
  list(epochs=5*EPOCHS, hidden=c(512,512,512),
       activation="RectifierWithDropout", input_dropout_ratio=0.2, l1=1e-5,validation_frame = test_hex),
  list(epochs=5*EPOCHS, hidden=c(256,256,256,256),
       activation="RectifierWithDropout",validation_frame = test_hex),
  list(epochs=5*EPOCHS, hidden=c(256,256),
       activation="RectifierWithDropout", input_dropout_ratio=0.2, l1=1e-5,validation_frame = test_hex),
  list(epochs=5*EPOCHS, hidden=c(1024,1024,1024),
       activation="RectifierWithDropout", input_dropout_ratio=0.2, l1=1e-5,validation_frame = test_hex),
  list(epochs=5*EPOCHS, hidden=c(1024,512,1024,1024),
       activation="RectifierWithDropout", input_dropout_ratio=0.2, l1=1e-5,validation_frame = test_hex)
)
# Train every configuration and persist the comparison table.
writecsv(lapply(args, run), "22_11.csv")
## Shut down the h2o server
h2o.shutdown()
|
d48a33468b44647e8680261a04906ac6cc1b6cce
|
330a27c197664d05c9592b21d0bf2682b6ae094a
|
/MLOpsMonitoring/man/add_selected_but_no_bought.Rd
|
72b991dd6094731a4e322c26fd87d0a3ee392dbd
|
[] |
no_license
|
datastorm-open/demo_webinar_mlops
|
091140efe460c98b85789b8c70c11f81bd37e371
|
1dbd231478b84939460294464131b525b3d015f4
|
refs/heads/master
| 2023-08-03T10:34:59.647526
| 2023-07-20T09:21:35
| 2023-07-20T09:21:35
| 321,422,579
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,298
|
rd
|
add_selected_but_no_bought.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/modeling.R
\name{add_selected_but_no_bought}
\alias{add_selected_but_no_bought}
\title{add_selected_but_no_bought}
\usage{
add_selected_but_no_bought(data, from, to, by, min_customer = 500,
max_customer = 1000, odd_new_customer = 5, mu_sku = 30,
sigma_sku = 20, mu_qtty = 30, sigma_qtty = 20)
}
\arguments{
\item{data}{: a dataset in which we will add fake lines}
\item{from}{: same as from in ?seq.Date}
\item{to}{: same as to in ?seq.Date}
\item{by}{: same as by in ?seq.Date}
\item{min_customer}{: min number of customers that can be altered each month}
\item{max_customer}{: max number of customers that can be altered each month}
\item{odd_new_customer}{: inverse of probability to add a new customer rather than altering an existing one}
\item{mu_sku}{: number of SKU by customer to be added follows rnorm(mu_sku, sigma_sku)}
\item{sigma_sku}{: number of SKU by customer to be added follows rnorm(mu_sku, sigma_sku)}
\item{mu_qtty}{: quantity added for each of SKU added follows rnorm(mu_qtty, sigma_qtty)}
\item{sigma_qtty}{: quantity added for each of SKU added follows rnorm(mu_qtty, sigma_qtty)}
}
\value{
a new dataset
}
\description{
add_selected_but_no_bought
}
\examples{
\dontrun{
#TODO
}
}
|
3e85a79e56254c26446d56bb21e51eebcc7d3beb
|
8de55d619d716b78e4f532e72811fb229a285841
|
/R/graphReport.R
|
847550bf2435f24848a5f4f5aa49ccc898d46585
|
[] |
no_license
|
tbendsen/VIAreports
|
e9b99373586a2fe8bdd05585f76ee44fd73eccaf
|
2997d35368e03c803affaf7d12c69f10cf22279e
|
refs/heads/master
| 2020-09-10T17:10:39.841646
| 2019-11-14T19:39:05
| 2019-11-14T19:39:05
| 221,772,877
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,127
|
r
|
graphReport.R
|
#run this file to generate the graph-reports

# Root folder that all reports and spreadsheets are written under.
# NOTE(review): machine-specific absolute path — consider making this
# configurable instead of hard-coding a user's desktop.
defaultPath <- "C:/Users/THBE/Desktop/rapporter/"
#' createGraphReports
#'
#' Creates a graphical report illustrating room utilisation in several
#' ways: one report per campus, written to defaultPath/<name><suffix>.pdf
#' (default name is the room group). Also creates one xlsx file with
#' week-based counts and one with room-based counts, each with one sheet
#' per campus, written to defaultPath/<suffix>/<type>_<suffix>.xlsx.
#'
#' @param campuses A list of one or more campuses (created with createCampus)
#' @param dateSelection A SQL text string, created with createDateSelection
#'
#' @param names A name for each report (default: the room group)
#' @param suffix A common suffix for all reports
#' @param ... arguments passed to RoomUse (max- and minCapacity)
#'
#' @import dplyr
#' @import tidyr
#' @import ggplot2
#' @import gridExtra
#'
#' @return nothing
#' @export
#'
#' @examples createGraphReports(campus("AarhusN_UV"),createDateSelection(2019,5,10,maxTime=960))
createGraphReports <- function(campuses, dateSelection, names = NULL, suffix = "x", ...) {
  # inherits() is the robust class test (class(x) may have length > 1).
  if (!inherits(dateSelection, "DateSelection")) {
    warning("dateselection skal være af klassen DateSelection")
  }
  if (!inherits(campuses[[1]], "list")) {
    message("variabel campuses skal være en liste med et eller flere campusser")
  }
  warning("xlsxfil hardcodet - ligger på skrivebordet")
  # Delete spreadsheets from a previous run, then (re)create the folder.
  unlink(paste0(defaultPath, suffix, "/*.xlsx"))
  dir.create(paste0(defaultPath, suffix))
  for (i in seq_along(campuses)) {
    name <- if (is.null(names)) NA else names[i]
    createReport(campus = campuses[[i]], dateSelection = dateSelection,
                 name = name, suffix = suffix, ...)
  }
}
# Append one sheet of lesson counts to an xlsx file.
#
# data      : room-use data (needs columns title, year, week, weekday,
#             timefrom, timeto, roomname)
# weight    : column whose distinct-value count divides the lesson count
#             (e.g. "week" -> lessons per week)
# factor    : column that becomes the rows of the sheet; "roomname" or
#             "week" get Danish display names, anything else keeps ""
# suffix    : report suffix, part of both folder and file name
# sheetName : sheet appended to <defaultPath>/<suffix>/<factor>_<suffix>.xlsx
#
# NOTE(review): relies on the global `defaultPath` and on dplyr/tidyr/
# xlsx being attached by the caller.
writexlsx<-function(data,weight,factor,suffix,sheetName) {
  fileName<-paste(defaultPath,suffix,"/",factor,"_",suffix,".xlsx",sep="")
  #save data in xlsxfile
  xlsxData<-data[,c("title","year","week","weekday","timefrom","timeto","roomname")]
  # Zero-pad week numbers and prefix the year ("2019-05") so sheets sort.
  x<-xlsxData %>% mutate(week=if_else(week<10,paste("0",week,sep=""),as.character(week))) %>% mutate(week=paste(year,"-",week,sep=""))
  #renames columns with names weight and factor
  x<-x%>%rename(factor=factor,weight=weight)
  weightCount<-length(unique(x$weight))
  # Lessons per weight unit (rounded to one decimal), one row per
  # (title, factor) pair, then spread titles out as columns.
  x<-x %>%group_by(title,factor) %>% summarize(lessonPrWeight=round(n()/weightCount,1))
  x<-x %>% spread(key=title,value=lessonPrWeight)
  x<-as.data.frame(x)
  #x<-x[,-2]
  f<-""
  if (factor=="roomname") {
    f<-"Lokalenavn"
  }
  else if (factor=="week") {
    f<-"Ugenummer"
  }
  # Tidy-eval rename: turn the string f into a symbol so the temporary
  # "factor" column gets its display name back.
  f<-sym(f)
  x<-x%>%rename(!!f:=factor)
  write.xlsx(x,file=fileName,sheetName = sheetName,row.names = FALSE,showNA=FALSE,append=TRUE)
}
# Build the full utilisation report for one campus: writes two xlsx
# sheets (per-room and per-week counts), prints a text summary, and
# saves a PDF with four plots (start time, week, weekday, room).
#
# campus        : campus object with $lessons, $roomgroup, $extraLimits,
#                 $cleanupFunction and $comment
# dateSelection : SQL date-selection string (see createDateSelection)
# name          : report name; NA falls back to the room-group string
# suffix        : appended to the PDF file name and xlsx folder
# ...           : passed through to RoomUse
#
# NOTE(review): depends on the global `defaultPath` and on the helpers
# RoomUse, lessonUsage, dayUsage, ringetid, createGraph and the `dage`
# weekday-name vector defined elsewhere in the package.
createReport<-function(campus,dateSelection,name,suffix,...) {
  #cat("\n---",campus$roomgroup,"---\n")
  s<-campus
  #I can't remember what this does
  doFill<-1
  # Human-readable lesson-range labels, e.g. "1." or "1.-3.".
  texts<-c()
  for (i in 1:length(s$lessons)) {
    l<-s$lessons[[i]]
    mi<-min(l)
    ma<-max(l)
    if (mi==ma) {
      texts[i]<-paste(ma,".",sep="")
    }
    else {
      texts[i]<-paste(mi,".-",ma,".",sep="")
    }
  }
  s$texts<-texts
  roomgroup<-s$roomgroup
  extraLimits<-s$extraLimits
  texts<-s$texts
  les<-s$lessons
  cleanup<-s$cleanupFunction
  comment<-s$comment
  plots<-list()
  plotNames<-c()
  roomuse<-RoomUse(roomgroup=roomgroup,dateSelection=dateSelection,
                   extraLimits=extraLimits,cleanupFunction=cleanup,...)
  sheetName=paste(roomgroup,collapse="_")
  roomData<-roomuse@data
  writexlsx(data = roomData,weight = "week",factor = "roomname",suffix = suffix,sheetName=sheetName)
  writexlsx(data = roomData,weight = "roomname",factor = "week",suffix=suffix,sheetName=sheetName)
  # Generate utilisation summary (Danish report text) ====
  myText<-"";
  myText<-paste(myText,"Lokaleudnyttelse: ",name,"\n\n",sep="")
  myText<-paste(myText,"Lokalegrupperne: ",paste(roomgroup,collapse =" og "),"\n" ,sep="")
  myText<-paste(myText,("Data opdateret: "),min(roomData$updatedate),"\n",sep="")
  myText<-paste(myText,length(unique(roomData$roomname))," lokaler \n",sep="")
  myText<-paste(myText,lessonUsage(roomuse=roomData,texts = texts,lessons = les),sep="")
  # #find rooms with lowest usage
  # lessonCountRoom<-lapply(X = split(x=roomuse,f = interaction(roomuse$roomname)),FUN = nrow)
  # weekCount<-length(unique(roomuse[,"week"]))
  #calculate effective timetableusage
  # Fraction of 08:00-18:00 (minutes 480-1080) not covered by lessons.
  sort(unique(roomData[,"timefrom"]))->a
  sort(unique(roomData[,"timeto"]))->b
  n<-length(b)
  rep(1080,n)->m
  pmin(m,b)-a->l
  sum(l[which(l>0)])->mySum
  u<-round(100-mySum/(1080-480)*100)
  myText<-paste(myText,u,"% af tiden mellem 8 og 18 er pauser\n",sep="")
  myText<-paste(myText,"Der er ",length(l[which(l>0)])," lektioner med start mellem 8 og 18\n",sep="")
  #weekdays
  # NOTE: crashes if the data set contains no lessons in the max lesson number
  myText<-paste(myText,"Benyttelse mandag og fre i ",texts[3],"lektion\n",sep="")
  myText<-paste(myText,dayUsage(roomuse = roomData,day=1,dayLength=length(les[[3]])),sep="")
  myText<-paste(myText,dayUsage(roomuse=roomData,day=5,dayLength=length(les[[3]])),sep="")
  if (!comment=="") {
    myText<-paste(myText,"\nBemærk: ",comment,"\n",sep="")
  }
  # Check bell times
  myText<-paste(myText,"\nRingetider (minutter til pause - minutter til næste lektionstart)\n",sep="")
  lessonLength<-list()
  unique(roomData[,c("timefrom","timeto")])->r
  r[order(r[,1]),]->r
  # Compute lesson length (i.e. up to the next lesson start)
  for (i in 1:nrow(r)) {
    diff<-r[i,"timeto"]-r[i,"timefrom"]
    index<-as.character(r[i,"timefrom"])
    if (i<nrow(r)) {
      lessonLength[[index]]<-r[i+1,"timefrom"]-r[i,"timefrom"]
    }
    else {
      lessonLength[[index]]<-r[i,"timeto"]-r[i,"timefrom"]
    }
    myText<-paste(myText,i," (",diff," - ",lessonLength[[index]],")",": ",ringetid(r[i,"timefrom"])," - ",ringetid(r[i,"timeto"]),"\n",sep="")
  }
  for (i in 1:nrow(roomData)) {
    roomData[i,"effectiveLength"]<-
      lessonLength[[as.character(roomData[i,"timefrom"])]]
  }
  cat(myText)
  #writeClipboard(myText)
  # Plot lessons by start time ====
  myPlot<-createGraph(roomuse=roomData,weights=c("week","roomname"),
                      myFactor="timefrom",
                      xlabel="starttidspunkt",
                      ylabel="lektioner pr. uge pr. lokale",
                      labels=function(xvalues) {
                        ringetid(xvalues)
                      },
                      doFill=doFill,
                      doRotate=1
  )
  i<-length(plots)+1
  plotNames[i]<-paste(paste(roomgroup,collapse=","),"_starttid",".png",sep="")
  plots[[i]]<-myPlot
  #plot week====
  myPlot<-createGraph(roomuse=roomData,weights=c("roomname"),
                      myFactor="week",
                      xlabel="ugenummer",
                      ylabel="lektioner pr. lokale",
                      labels=function(xvalues) {
                        xvalues
                      },
                      doFill=doFill,doRotate=1
  )
  plotRoomgroup<-paste(roomgroup,collapse="_")
  i<-length(plots)+1
  plotNames[i]<-paste(plotRoomgroup,"_uge",".png",sep="")
  plots[[i]]<-myPlot
  # Plot weekdays ====
  labels<-function(xvalues) {
    dage[as.integer(xvalues)]
  }
  myPlot<-createGraph(roomuse=roomData,weights=c("roomname","week"),
                      myFactor="weekday",
                      xlabel="ugedag",
                      ylabel="lektioner pr. lokale pr. uge",
                      labels=labels,
                      doFill=doFill
  )
  i<-length(plots)+1
  plotNames[i]<-paste(plotRoomgroup,"_ugedag",".png",sep="")
  plots[[i]]<-myPlot
  #plot rooms====
  # rooms
  labels<-function(xvalues) {
    xvalues
  }
  myPlot<-createGraph(roomuse=roomData,weights=c("week"),
                      myFactor="roomname",
                      xlabel="lokale",
                      ylabel="lektioner pr. uge",
                      labels=labels,
                      doFill=doFill,doRotate=1
  )
  i<-length(plots)+1
  plotNames[i]<-paste(plotRoomgroup,"_lokaler",".png",sep="")
  plots[[i]]<-myPlot
  #plot and save
  grid.arrange(plots[[1]],plots[[2]],plots[[3]],plots[[4]],ncol=2)
  if (is.na(name)) {
    name<-plotRoomgroup
  }
  file<-paste(defaultPath,name,suffix,".pdf",sep="")
  pdf(file=file,paper="a4")
  textplot(myText)
  grid.arrange(plots[[1]],ncol=1)
  grid.arrange(plots[[2]],ncol=1)
  grid.arrange(plots[[3]],ncol=1)
  grid.arrange(plots[[4]],ncol=1)
  dev.off()
  #save individual plots
  # NOTE(review): the ggsave call is commented out, so this loop
  # currently only reassigns `name`/`plot` without saving anything.
  for (i in 1:length(plots)) {
    name=plotNames[i]
    plot=plots[[i]]
    #ggsave(filename=name,path="lokaler/speciel/",plot=plot)
  }
}
|
3585cf3c472651ae7dc07ad0e31bb785fa807335
|
274c11c96f4976067674e3c76b7f490aba020123
|
/man/commit.Rd
|
0c865a4dec2d2a0c02fa86d376c2296069ce2d73
|
[] |
no_license
|
FvD/gert
|
2b169acfd21a1179a946bedc042e43f7d7d904f9
|
5c2de88c1b07a140b774d1240c07b06a94e6dae1
|
refs/heads/master
| 2020-06-06T16:04:26.407104
| 2019-06-18T14:05:28
| 2019-06-18T14:05:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,153
|
rd
|
commit.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/commit.R
\name{commit}
\alias{commit}
\alias{git_commit}
\alias{git_commit_all}
\alias{git_add}
\alias{git_rm}
\alias{git_status}
\alias{git_ls}
\alias{git_log}
\alias{git_reset}
\title{Stage and commit changes}
\usage{
git_commit(message, author = NULL, committer = author, repo = ".")
git_commit_all(message, author = NULL, committer = author,
repo = ".")
git_add(files, force = FALSE, repo = ".")
git_rm(files, repo = ".")
git_status(repo = ".")
git_ls(repo = ".")
git_log(ref = "HEAD", max = 100, repo = ".")
git_reset(type = c("soft", "hard", "mixed"), ref = "HEAD",
repo = ".")
}
\arguments{
\item{message}{a commit message}
\item{author}{A \link{git_signature} value, default is \link{git_signature_default}.}
\item{committer}{A \link{git_signature} value.}
\item{repo}{a path to an existing repository, or a \code{git_repository} object as
returned by \link{git_open}, \link{git_init} or \link{git_clone}.}
\item{files}{vector of paths relative to the git root directory.
Use \code{"."} to stage all changed files.}
\item{force}{add files even if in gitignore}
\item{ref}{string with a branch/tag/commit}
\item{max}{lookup at most latest n parent commits}
\item{type}{must be one of \code{"soft"}, \code{"hard"}, or \code{"mixed"}}
}
\description{
To commit changes, start with \emph{staging} the files to be included
in the commit using \code{\link[=git_add]{git_add()}} or \code{\link[=git_rm]{git_rm()}}. Use \code{\link[=git_status]{git_status()}} to see an
overview of staged and unstaged changes, and finally \code{\link[=git_commit]{git_commit()}} creates
a new commit with currently staged files.
\link{git_commit_all} is a shorthand that will automatically stage all new and
modified files and then commit.
Also \code{\link[=git_log]{git_log()}} shows the most recent commits and \code{\link[=git_ls]{git_ls()}} lists
all the files that are being tracked in the repository.
}
\seealso{
Other git: \code{\link{branch}}, \code{\link{fetch}},
\code{\link{git_config}}, \code{\link{repository}},
\code{\link{signature}}
}
\concept{git}
|
785b670288ac01c5a6f1e7ad86e8970b0c591f97
|
9ede8acadd3134305ac53c221f589764c9a4b694
|
/10_VectorData.R
|
d1fd23dffa06a2e61026d60b3423128f75863828
|
[] |
no_license
|
ColinChisholm/SpatialR
|
a98644cc5f6be31250f2b55e70d5fa620c3acb72
|
7f12ee62fab9888777a60a09883b6c1f6fbb564d
|
refs/heads/master
| 2020-04-03T04:29:36.584028
| 2019-06-24T15:59:13
| 2019-06-24T15:59:13
| 155,015,594
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,760
|
r
|
10_VectorData.R
|
# Title: Spatial R
# Section: 10 Vector Data
# Purposes: Present Basic Vector Data usage including:
#           - Data Import
#           - Examination of data structure and attributes
#           - Basic Visualization (Lines, Polygons, Points)
##### Libraries essential for spatial data ####
# #List of libraries (thx to Tristan Goodbody for providing me a number of these)
# library(dplyr)        #Data Manipulation
# library(tidyr)        #Data Manupulation
# library(ggplot2)      #Graphing
# library(foreign)      #Import Foriegn to R data types. Generally used to directly read .dbf files (attributes of Shapefiles)
#
# #spatial data specific libraries
# library(sp)           #Essential Spatial Data
library(rgdal)        #GDAL available to R
# library(rgeos)        #for topology operations on geometries
# library(geosphere)    #Spherical trigonometry for geographic applications. That is, compute distances and re-lated measures for angular (longitude/latitude) locations
library(raster)       #Raster import and analysis
# library(rts)          #For comparing time rasters in time series
#
# #For Point Cloud Data Analysis.
# library(lidR)         #https://github.com/Jean-Romain/lidR/wiki
# library(EBImage)      #Needed for several functions in lidR
#
# #Spatial Visualization
# library(viridis)      #Color Palletes for cartography / styling -- also useful to ensure visible to color-blind
# library(rasterVis)    #Methods for enhanced visualization and interaction with raster data
# library(RColorBrewer) #Creates nice looking color palettes especially for thematic map
##### DATA IMPORT #####################################################################################################
# Scripting generated following the instructions from the NEON turorial
# A. Set working environment
# NOTE(review): machine-specific absolute path; the rest of the script
# assumes the NEON sample data lives under this directory.
setwd("E:/workspace/SpatialR/SpatialR/data/NEON_Vector")
##### Importing and Exploring Data ####################################################################################
# B1. Import Spatial Vector Data -- here I am Using the NEON example dataset
AOI <- readOGR("HARV", "HarClip_UTMZ18")
# readOGR("PATH", "LayerFile")  #For your own data specify the PATH then the LayerName
# Adding some more layers:
Roads <- readOGR("HARV", "HARV_roads")         #Sample data contains line features
Tower <- readOGR("HARV", "HARVtower_UTM18N")   #Sample data contains point feature
# B2. Explore the data
summary(AOI)  # summary provides meta-data on the file.
head(Roads)   # displays the first ten entries in the attribute table
class(AOI)    # Type of vector data -- in this case a polygon for to be more precise a: SpatialPolygonDataFrame
length(Roads) # How many features are in the data. As with Other dataframes returns the length of the attribute table
crs(Tower)    # Projection information
extent(AOI)   # Spatial extent of the data: (xmin, xmax, ymin, ymax)
Tower         # Calling the variable directly will display the meta-data
names(Tower)  # Lists all the attribute fields
Roads$TYPE    # Lists all of the entries under the attribute (LAYER$Attribute)
table(Roads$TYPE) # provides a list of all the unique entries under the field and lists how many there are of each
#Subset a feature based on an attribute here I save it to a new variable
footpaths <- Roads[Roads$TYPE == "footpath",]  # alternate: subset(Roads, TYPE == "footpath")
length(footpaths)
# B3. Set projection ... if needed -- initial spatial data is declared
# Todo
##### Plot a Single Layer (Line Data) #################################################################################
# C1. Plot a single variable
plot(footpaths,             #Layer
     col = c("red", "blue"),  #Color of the lines (2 features here and 2 colors)
     lwd = 6,               #Line Weight
     main = "NEON Harvard Forest Field Site \n Footpaths"  #Title
)
# C2. Plot a Single layer based on the attribute type. Note the attributes need to be loaded as factors (this should happen by default)
# syntax for coloring by factor: col = c("colorOne", "colorTwo","colorThree")[object$factor]
class(Roads$TYPE)    #Returns that this is a factor type variable
levels(Roads$TYPE)   #Returns the order of the factors
#"boardwalk"  "footpath"   "stone wall" "woods road"
roadPalette <- c("blue","green","grey","purple")  #Create a color pallete for the roads (this is a new variable)
roadWidths <- c(2,4,3,8)
plot(Roads,
     lwd = roadWidths,
     col = roadPalette[Roads$TYPE],
     main = "Harvard Lines")
# C3. Add a Legend
legend("bottomright",               #Position
       legend = levels(Roads$TYPE), #Specify the entries
       fill = roadPalette)          #Specifies the color -- uses the color variable from C2
##### Plot Polygon Data ###############################################################################################
States <- readOGR("US-Boundary-Layers", "US-State-Boundaries-Census-2014")  #Load the data
length(States)          #List number of attributes
levels(States$region)   #Lists unique attributes (also confirms this is a factor)
colStates <- c("purple", "green", "yellow", "red", "grey")  #Create a color pallete
plot(States,                          #Plot State
     col = colStates[States$region], #Color using the pallete above and the 'region' attreibute as a factor
     main = "Regions of the United States")  #Title
legend("bottom", #Add Legend
legend = levels(States$region), #Legend Text
fill = colStates) #Legend colors
##### Plot Point Data #################################################################################################
Plots <- readOGR("HARV", "PlotLocations_HARV") #Load the data
names(Plots) #List all the attributes (field names)
table(Plots$soilTypeOr) #Lists the levels and number in each
x <- length(levels(Plots$soilTypeOr)) #X receives the length of the number of levels
#soilCol <- palette(terrain.colors(x)) #palette() specifies the colors based x
soilCol <- palette(rainbow(x)) #Alternate Color Palette
symLeg <- c(15,17) #2 symbols used to represent the legend entry
symbols <- c(symLeg)[Plots$soilTypeOr] #This will be the symbol types used google image: "R pch" for a summary of each
plot(Plots,
col = soilCol[Plots$soilTypeOr],
pch = symbols,
main = "Soils Plots at Harvard")
legend("bottomright",
legend = levels(Plots$soilTypeOr),
pch = symLeg,
col = soilCol,
bty = "n", #No box around legend
cex = 2 #Magnify the text
)
|
3bfe94a63e4aff83a535016a1070169a4e01f446
|
4a2cc98530bca183b9a88fbfc96675dbd3677075
|
/man/statistics.classNRI.Rd
|
428d2984fb6d91b8d8820fc18fd876d0985e3046
|
[] |
no_license
|
JohnPickering/rap
|
c09648ef716a68ee4e1557882d6110740830b896
|
cca6dfaa31fbad726f952d552cf8a36594815a91
|
refs/heads/master
| 2023-07-10T10:35:04.541536
| 2023-06-30T05:13:54
| 2023-06-30T05:13:54
| 37,507,613
| 5
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,181
|
rd
|
statistics.classNRI.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rap.R
\name{statistics.classNRI}
\alias{statistics.classNRI}
\title{Reclassification metrics with classes (ordinals) as inputs}
\usage{
statistics.classNRI(c1, c2, y, s1 = NULL, s2 = NULL)
}
\arguments{
\item{c1}{Risk class of Reference model (ordinal factor).}
\item{c2}{Risk class of New model (ordinal factor)}
\item{y}{Binary of outcome of interest. Must be 0 or 1.}
\item{s1}{The savings or benefit when an event is reclassified to a higher group by the new model. i.e instead of counting as 1 an event classified to a higher group, it is counted as s1.}
\item{s2}{The benefit when a non-event is reclassified to a lower group. i.e instead of counting as 1 an event classified to a lower group, it is counted as s2.}
}
\value{
A matrix of metrics for use within CI.classNRI
}
\description{
The function statistics.classNRI calculates the NRI metrics for reclassification of data already in classes. For use by CI.classNRI.
}
\examples{
\dontrun{
data(data_class)
y <- data_class$outcome
c1 <- data_class$base_class
c2 <- data_class$new_class
#e.g.
output<-statistics.classNRI(c1, c2, y)
}
}
|
2dcfff6315e267bb1a570f3a40d218f8bee08995
|
141f0b24bc5bb5e50c1e71af6bad133d9886b33e
|
/regresion_polinomica.R
|
2e886c08876dffdc29f99e52ca93a80a58a2183c
|
[] |
no_license
|
lauralpezb/ML-con-R
|
6470bb9187852e72a40f7ed64a81740c676f635e
|
b9b3fc1045fa2512a238f4f90275e9903a09d2f2
|
refs/heads/master
| 2022-12-19T15:01:00.938523
| 2020-09-10T04:54:58
| 2020-09-10T04:54:58
| 294,294,484
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,388
|
r
|
regresion_polinomica.R
|
# Polynomial regression on the Boston housing data (MASS::Boston).
# Goal: model the median home value (medv) as a quadratic function of the
# percentage of lower-status population (lstat).
# (install.packages("tidyverse"), ("MASS"), ("caTools"), ("ggplot2") once if missing)

# Libraries
library(tidyverse) # Data manipulation / visualization
library(MASS)      # Boston dataset
library(caTools)   # sample.split() for the train/test partition
library(ggplot2)   # Plotting

# Data
data("Boston")
head(Boston, 7)

# Prepare data: reproducible 75/25 train/test split, stratified on medv
set.seed(1234)
split <- sample.split(Boston$medv, SplitRatio = 0.75)
training <- subset(Boston, split == TRUE)
test <- subset(Boston, split == FALSE)

# Explore data
summary(training)
nrow(training)
nrow(test)

# Scatter plot with a smooth trend: the medv-vs-lstat relationship is clearly
# non-linear, motivating the quadratic fit below.
ggplot(training, aes(lstat, medv)) +
  stat_smooth() +
  geom_point(colour = "red") +
  theme_minimal() +
  xlab("Población en situación desfavorable") +
  # BUG FIX: the original label contained "\ ", an invalid escape sequence
  # that is a parse error in R.
  ylab("Valor medio de viviendas en $1000s.")

# Fit a degree-2 polynomial regression.
# BUG FIX: the original fitted lm(lstat ~ poly(medv, 2)) on attach()ed data,
# i.e. it predicted lstat from medv -- the reverse of what the plot above,
# the comments, and the later predictions imply.  Model medv as a function of
# lstat, passing the data explicitly instead of using attach().
model <- lm(medv ~ poly(lstat, 2), data = training)
model
summary(model) # *** marks highly significant terms
# Residual standard error (RSE): typical size of the model's errors.

# Predict on the held-out test set
prediccion <- predict(model, newdata = test)
prediccion

# Evaluate the model by inspecting the standardized residuals
residuos <- rstandard(model)
par(mfrow = c(1, 3))
hist(residuos)
boxplot(residuos, main = "Diagrama de cajas")
qqnorm(residuos)
qqline(residuos, col = "red")

# Constant variance check: residuals vs fitted values should show no pattern.
# BUG FIX: the original called par(mfrom=...), a misspelled argument name.
par(mfrow = c(1, 1))
plot(fitted.values(model), rstandard(model),
     xlab = "valores ajustados", ylab = "Residuos estandarizados")
abline(h = 0, col = "red")
b468ef2c4e8dd8b50dc5d597c6086916881d65af
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/Devore7/examples/ex11.53.Rd.R
|
ad3474b9f840e09b4c6244f16e31af38f333c798
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 160
|
r
|
ex11.53.Rd.R
|
# Example script for the 'ex11.53' dataset shipped with the Devore7 package.
library(Devore7)

### Name: ex11.53
### Title: R Data set: ex11.53
### Aliases: ex11.53
### Keywords: datasets

### ** Examples

# Load the dataset into the workspace, then display its structure.
data("ex11.53")
utils::str(ex11.53)
|
5776046d5c886613099d4b0a9368749ae468459d
|
ac95f4a5c652a9a328068edf009ab67cc8f62bff
|
/man/plot_iphc_map.Rd
|
71c17edf14b85480b9c47e5d0be33a024f2e98ef
|
[] |
no_license
|
pbs-assess/gfiphc
|
8f200dcef7e36de6ecaef09cdb0383166f9307da
|
90ad0b6d4ac6e1b5f52a3e55fc0b7ce0fe0069d3
|
refs/heads/master
| 2023-06-25T09:32:58.894375
| 2023-06-19T17:42:20
| 2023-06-19T17:42:20
| 177,639,706
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 4,580
|
rd
|
plot_iphc_map.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/iphc-maps.R
\name{plot_iphc_map}
\alias{plot_iphc_map}
\title{Plot the locations of the IPHC stations for a given year, with various}
\usage{
plot_iphc_map(
set_counts_of_sp,
sp_short_name = NULL,
years,
main_title = NULL,
mar_val = c(1.8, 2, 1, 0.5),
mgp_val = c(1.6, 0.5, 0),
lat_cut_off = 50.6,
lon_cut_off_1 = -130,
lon_cut_off_2 = -128.25,
pch_pos_count = 19,
pch_zero_count = 1,
pch_non_standard = 4,
cex_val = 1,
add_to_existing = FALSE,
indicate_in_area = FALSE,
indicate_standard = TRUE,
include_legend = TRUE,
...
)
}
\arguments{
\item{set_counts_of_sp}{input tibble of the species of interest, likely generated from
\code{cache_pbs_data_iphc} (which seems to save a list with the first element
the desired tibble), with column names \code{year}, \code{station}, \code{lat}, \code{lon},
\code{E_it}, \code{N_it}, \code{C_it}, \code{E_it20}, \code{N_it20}, \code{C_it20}, \code{usable}, and
\code{in_area} if \code{indicate_in_area} is TRUE}
\item{sp_short_name}{short name species of interest to have in legend,
if NULL then plots all stations labelled as
usable and unusable, but with no catch rates. Still needs
\code{set_counts_of_sp} input tibble (can be any species, as they all have all
stations documented).}
\item{years}{year of interest. See vignettes for movie code.}
\item{main_title}{title for map, if NULL then is All \code{years} stations}
\item{mar_val}{mar values to reduce whitespace around maps}
\item{mgp_val}{mgp values to reduce whitespace around maps}
\item{lat_cut_off}{latitudinal cut off near top of Vancouver Island, below
which stations are excluded when constructing Series A and B; this is just
below all stations from 1995-1998, so that Series A and B include all
stations for 1995-1998. If NULL then nothing plotted}
\item{lon_cut_off_1}{left-hand end of line (longitude) to show cut off}
\item{lon_cut_off_2}{right-hand end of line (longitude) to show cut off}
\item{pch_pos_count}{pch for positive counts (or sets inside area if
\code{indicate_in_area} is TRUE)}
\item{pch_zero_count}{pch for zero counts (or sets outside the area if
\code{indicate_in_area} is TRUE)}
\item{pch_non_standard}{pch for showing non-standard stations for that year}
\item{cex_val}{cex size of plotted circles (or whatever is chosen using
above \code{pch} options}
\item{add_to_existing}{if TRUE then add to an existing plot, if FALSE then
create new map using \code{plot_BC()}}
\item{indicate_in_area}{indicate whether or not each station is within a
specified area, as indicated by TRUE/FALSE in \code{set_counts_of_sp$in_area}}
\item{indicate_standard}{indicate whether or not station is part of the
standard stations or an expansion (in 2018, 2020, and later), as
indicated by Y/N in \code{set_counts_of_sp$standard}}
\item{include_legend}{whether to include legend or not (TRUE/FALSE)}
\item{...}{extra arguments to \code{par()}}
}
\value{
A map of the IPHC survey stations for that year and species, with a
legend describing the points.
}
\description{
Options include whether or not a given species was caught in that year, or
indicating showing stations within a given area, or not referencing a
particular species (just showing stations), or showing non-standard stations.
}
\details{
If possible, also show whether the species was caught in the first 20 hooks
and only caught after the first 20 hooks. Uses \code{PBSmapping} style of plot,
as used in the Redbanded and Yelloweye Rockfish stock assessments.
}
\examples{
\dontrun{
# See vignette `data_for_one_species`
sp <- "yelloweye rockfish"
sp_set_counts <- readRDS(sp_hyphenate(sp)) # Needs the .rds saved already
plot_iphc_map(sp_set_counts$set_counts, sp_short_name = "Yelloweye", years =
2003) # one year
# many years (matches maps in Yamanka et al. (2018) Yelloweye assessment,
# except stations 14A, 19IC, and 3D that are now marked as unusable):
for(i in unique(sp_set_counts$set_counts$year)){
plot_iphc_map(sp_set_counts$set_counts,
sp_short_name = "Yelloweye",
years = i)
invisible(readline(prompt="Press [enter] to continue"))
}
# Hooks with no bait, showing non-standard as crosses :
plot_iphc_map(hooks_with_bait$set_counts,
sp = "Hooks with bait",
years = 2018,
indicate_standard = TRUE)
# Just the stations, with no species information:
plot_iphc_map(sp_set_counts$set_counts,
sp_short_name = NULL,
years = 2008)
}
}
\author{
Andrew Edwards
}
|
30a3f2ce4bc3facfd030ae7589b301deb33e75b0
|
2d4bdf7c3b1ecdc0aec813bf59795db3a2050145
|
/inst/doc/nll_functions_modifying.R
|
b92be6ac0663d15877990966a6d6ac80b8b715f6
|
[] |
no_license
|
cran/anovir
|
ebb268bade210c8869f9bd0c638265ea51d28399
|
80ffca0c6bebcc536d21d0982cd49c83f2226486
|
refs/heads/master
| 2023-01-03T22:26:35.098839
| 2020-10-24T07:50:05
| 2020-10-24T07:50:05
| 307,946,785
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,218
|
r
|
nll_functions_modifying.R
|
## ---- include = FALSE---------------------------------------------------------
# knitr-extracted R code from the 'anovir' package vignette on modifying
# negative-log-likelihood (nll) functions.  The '## ----' lines are the
# original knitr chunk headers, kept verbatim.
knitr::opts_chunk$set(
  collapse = TRUE,
  comment = "#>"
)

## ----setup, message = FALSE---------------------------------------------------
library(anovir)

## -----------------------------------------------------------------------------
# Show the argument list of the packaged nll function.
utils::str(nll_basic)

## -----------------------------------------------------------------------------
# Show the first few expressions of its body -- these are the targets of the
# body()[[i]] surgery performed further below.
head(body(nll_basic), 5)

## ----message = FALSE, warning = FALSE-----------------------------------------
head(data_lorenz, 2)
# step #1: wrap nll_basic so mle2 only sees the free parameters
m01_prep_function <- function(a1 = a1, b1 = b1, a2 = a2, b2 = b2){
  nll_basic(a1 = a1, b1 = b1, a2 = a2, b2 = b2,
    data = data_lorenz, time = t, censor = censored,
    infected_treatment = g, d1 = 'Gumbel', d2 = 'Weibull')
  }
# step #2: maximum-likelihood fit (bbmle::mle2) from the given start values
m01 <- mle2(m01_prep_function,
  start = list(a1 = 23, b1 = 5, a2 = 3, b2 = 0.2)
  )
coef(m01)

## ----message = FALSE, warning = FALSE-----------------------------------------
# copy/rename 'nll_function' (not obligatory, but recommended)
nll_basic2 <- nll_basic
# find/check location of code to be replaced. NB double '[['
# NOTE(review): [[4]] assumes pfa2 is the 4th expression of nll_basic's body;
# verify this against the installed anovir version before reusing.
body(nll_basic2)[[4]]
# replace default code with new code for function: pfa2 becomes a linear
# function of log(dose) with new coefficients c1, c2
body(nll_basic2)[[4]] <- substitute(pfa2 <- c1 + c2 * log(data$Infectious.dose))
# check code
head(body(nll_basic2), 5)
# replace argument 'a2' with those for 'c1', 'c2'. NB use of 'alist'
# (alist keeps the arguments without evaluating their defaults)
formals(nll_basic2) <- alist(a1 = a1, b1 = b1, c1 = c1, c2 = c2, b2 = b2,
  data = data, time = time, censor = censor,
  infected_treatment = infected_treatment, d1 = "", d2 = "")
# new analysis: step #1
m02_prep_function <- function(a1 = a1, b1 = b1, c1 = c1, c2 = c2, b2 = b2){
  nll_basic2(a1 = a1, b1 = b1, c1 = c1, c2 = c2, b2 = b2,
    data = data_lorenz, time = t, censor = censored,
    infected_treatment = g, d1 = 'Gumbel', d2 = 'Weibull')
  }
# step #2
m02 <- mle2(m02_prep_function,
  start = list(a1 = 23, b1 = 5, c1 = 4, c2 = -0.1, b2 = 0.2)
  )
coef(m02)
# compare results
AICc(m01, m02, nobs = 256)
# according to AICc m02 is better than m01

## ----message = FALSE, warning = FALSE-----------------------------------------
# Same modification again, but this time the data/censor/distribution
# arguments are baked into the formals as defaults, so the wrapper can call
# the nll function with the parameters alone.
# copy/rename nll_function
nll_basic3 <- nll_basic
body(nll_basic3)[[4]] <- substitute(pfa2 <- c1 + c2 * log(data$Infectious.dose))
# replace argument 'a2' with those for 'c1', 'c2', and assign column names
formals(nll_basic3) <- alist(a1 = a1, b1 = b1, c1 = c1, c2 = c2, b2 = b2,
  data = data_lorenz, time = t, censor = censored,
  infected_treatment = g, d1 = "Gumbel", d2 = "Weibull")
# new analysis: step #1
m03_prep_function <- function(a1 = a1, b1 = b1, c1 = c1, c2 = c2, b2 = b2){
  nll_basic3(a1 = a1, b1 = b1, c1 = c1, c2 = c2, b2 = b2)
  }
# step #2
m03 <- mle2(m03_prep_function,
  start = list(a1 = 23, b1 = 5, c1 = 4, c2 = -0.1, b2 = 0.2)
  )
coef(m03)
# Both routes should give identical fits.
identical(coef(m02), coef(m03))
|
5921ac8712a22d9e23b6351f9b88fa5013c2043f
|
c7c9a9753e068d93b32f90a9bafff236c0c85c11
|
/Expt1_Sensicality Study/SensicalityPops.R
|
8d4ba40bcbff17e94d00773d198150ce2fc752a6
|
[] |
no_license
|
hughrabagliati/CFS_Compositionality
|
bc1af660df540d43fc73414a029a1a0f941521ca
|
364270a3a61289f54dffa4e1468b3c3afc158dca
|
refs/heads/master
| 2021-01-17T15:18:20.755866
| 2017-07-04T10:31:45
| 2017-07-04T10:31:45
| 52,354,314
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,838
|
r
|
SensicalityPops.R
|
library(plyr)
library(lme4)
library(doBy)
library(ggplot2)
library(bootstrap)
# From Mike Frank
# Bootstrap-CI helpers (adapted from Mike Frank).
# theta: the statistic handed to bootstrap::bootstrap -- the mean of the
# resampled values xdata[x], where x is a vector of resampled indices.
theta <- function(x, xdata, na.rm = TRUE) {
  mean(xdata[x], na.rm = na.rm)
}
# ci.low / ci.high: distance from the sample mean down to the 2.5% quantile,
# and up to the 97.5% quantile, of 1000 bootstrap means (requires the
# 'bootstrap' package loaded above).
# Fixed: TRUE instead of T; seq_along(x) instead of 1:length(x).
ci.low <- function(x, na.rm = TRUE) {
  mean(x, na.rm = na.rm) -
    quantile(bootstrap(seq_along(x), 1000, theta, x, na.rm = na.rm)$thetastar,
             .025, na.rm = na.rm)
}
ci.high <- function(x, na.rm = TRUE) {
  quantile(bootstrap(seq_along(x), 1000, theta, x, na.rm = na.rm)$thetastar,
           .975, na.rm = na.rm) -
    mean(x, na.rm = na.rm)
}
# Read every per-subject CSV in 'path_name' and row-bind them into one data
# frame, normalizing the per-file schemas so the bind succeeds.
#
# path_name -- directory containing the CSV exports.
# Returns a single data frame (NULL if the directory has no CSVs).
read_data <- function(path_name) {
  # NB: 'pattern' is a regex, so ".csv" matches "csv" preceded by any
  # character; kept as-is for backward-compatible file selection.
  file_list <- list.files(path = path_name, full.names = TRUE, pattern = ".csv")
  # Read each file into its own frame, then bind all at once -- avoids the
  # original O(n^2) rbind()-in-a-loop growth pattern.
  pieces <- lapply(file_list, function(f) {
    data <- read.csv(f, header = TRUE)
    # Some exports carry an extra column; drop it so all schemas line up.
    if ("perceptual.rating.reactiontime" %in% colnames(data)) {
      data <- subset(data, select = -perceptual.rating.reactiontime)
    }
    # Files with a leading index column "X" also get rt coerced to character
    # (matches the original behavior; rt is re-parsed downstream).
    if ("X" %in% colnames(data)) {
      data <- subset(data, select = -X)
      data$rt <- as.character(data$rt)
    }
    data
  })
  do.call(rbind, pieces)
}
# ---------------------------------------------------------------------------
# Load all subjects' CSVs and derive the analysis columns.
# ---------------------------------------------------------------------------
sense.pop <- read_data("./data/")
# Make RTs numeric [need to remove timeout "none" responses to 8s]
sense.pop <- subset(sense.pop, rt != "None")
sense.pop$rt <- as.numeric(sense.pop$rt)
sense.pop$Length <- nchar(as.character(sense.pop$prime), allowNA = TRUE)
# Collapse the two Sklar control sub-conditions into one "Sklar_control" level.
sense.pop$Condition <- as.character(sense.pop$prime_semantics)
sense.pop[sense.pop$prime_semantics %in% c("Sklar_control_A", "Sklar_control_B"), ]$Condition <- "Sklar_control"
sense.pop$Condition <- as.factor(sense.pop$Condition)
# Note that this analysis includes all of the inclusion criteria discussed by Sklar et al.

# Helper (refactored from five copy-pasted ddply blocks): within each group,
# drop trials whose RT exceeds the group mean + 3 SD.  Upper tail only --
# this is deliberately NOT a symmetric exclusion criterion.
trim_upper_3sd <- function(df, groups) {
  ddply(df, groups, function(d) {
    upper <- mean(d$rt, na.rm = TRUE) + 3 * sd(d$rt, na.rm = TRUE)
    subset(d, rt < upper)
  })
}

##########################################################################################################################
#
# Let's first analyze for the Sklar trials
sense.pop.sklar <- subset(sense.pop, Condition %in% c("Sklar_control", "Sklar_violation"))
# Remove outlier subjects by Accuracy and RT (mean acc must be > 0.9, mean rt must be < 3sd above group mean)
Acc <- summaryBy(match. + rt ~ SubjNo, data = subset(sense.pop.sklar), keep.names = TRUE)
sense.pop.sklar <- subset(sense.pop.sklar, SubjNo %in% Acc[Acc$match. > 0.9, ]$SubjNo)
sense.pop.sklar <- subset(sense.pop.sklar, SubjNo %in% Acc[Acc$rt < (mean(Acc$rt) + (3 * sd(Acc$rt))), ]$SubjNo)
# Remove incorrect trials
sense.pop.sklar <- subset(sense.pop.sklar, match. == 1)
# Remove outliers by subject, then [for Expt 1 only] by condition (mean + 3 SD)
sense.pop.sklar <- trim_upper_3sd(sense.pop.sklar, .(SubjNo))
sense.pop.sklar <- trim_upper_3sd(sense.pop.sklar, .(Condition))
# Remove RTs < 200ms
sense.pop.sklar <- subset(sense.pop.sklar, rt > 0.2)
# T test (Sklar style): one mean per subject x condition, then paired t-tests
sense.pop.sklar$log.rt <- log(sense.pop.sklar$rt)
sense.pop.sklar.summary <- summaryBy(rt + log.rt ~ SubjNo + Condition,
  data = subset(sense.pop.sklar, Condition %in% c("Sklar_violation", "Sklar_control")),
  keep.names = TRUE)
t.test(rt ~ Condition, data = sense.pop.sklar.summary, paired = TRUE)
t.test(log.rt ~ Condition, data = sense.pop.sklar.summary, paired = TRUE)
# Bayes factor -- minimum effect of 0.01, maximum of 0.06, our effect = -0.03519684 and our SE = -0.03/-1.7874= 0.01678416
# lmer (Rabag style): mixed model, by-subject slopes and by-item intercepts
sense.pop.sklar.raw <- summary(lmer(rt ~ Condition + (1 + Condition | SubjNo) + (1 | prime),
  data = subset(sense.pop.sklar, Condition %in% c("Sklar_violation", "Sklar_control"))))
print(sense.pop.sklar.raw)
print(paste("p value = ", 2 * pnorm(-abs(coef(sense.pop.sklar.raw)[, 3]))))
sense.pop.sklar.log <- summary(lmer(log.rt ~ Condition + (1 + Condition | SubjNo) + (1 | prime),
  data = subset(sense.pop.sklar, Condition %in% c("Sklar_violation", "Sklar_control"))))
print(sense.pop.sklar.log)
print(paste("p value = ", 2 * pnorm(-abs(coef(sense.pop.sklar.log)[, 3]))))

##########################################################################################################################
#
# Let's now analyze for the new trials
sense.pop.new <- subset(sense.pop, Condition %in% c("Sensible", "Non-sensible"))
# Remove outlier subjects by Accuracy and RT (mean acc must be > 0.9, mean rt must be < 3sd from group mean)
Acc <- summaryBy(match. + rt ~ SubjNo, data = subset(sense.pop.new), keep.names = TRUE)
sense.pop.new <- subset(sense.pop.new, SubjNo %in% Acc[Acc$match. > 0.9, ]$SubjNo)
sense.pop.new <- subset(sense.pop.new, SubjNo %in% Acc[Acc$rt < (mean(Acc$rt) + (3 * sd(Acc$rt))), ]$SubjNo)
# Remove incorrect trials
sense.pop.new <- subset(sense.pop.new, match. == 1)
# Remove outliers by subject, then [for Expt 1 only] by condition (mean + 3 SD)
sense.pop.new <- trim_upper_3sd(sense.pop.new, .(SubjNo))
sense.pop.new <- trim_upper_3sd(sense.pop.new, .(Condition))
# Remove RTs < 200ms
sense.pop.new <- subset(sense.pop.new, rt > 0.2)
# T test (Sklar style)
sense.pop.new$log.rt <- log(sense.pop.new$rt)
sense.pop.new.summary <- summaryBy(rt + log.rt ~ SubjNo + Condition,
  data = subset(sense.pop.new, Condition %in% c("Non-sensible", "Sensible")),
  keep.names = TRUE)
t.test(rt ~ Condition, data = sense.pop.new.summary, paired = TRUE)
t.test(log.rt ~ Condition, data = sense.pop.new.summary, paired = TRUE)
# Bayes factor -- minimum effect of 0.01, maximum of 0.06, our effect = -0.0002327283 and our SE = -0.03/-0.02217= 0.01049744
# lmer (rabag style)
sense.pop.new.raw <- summary(lmer(rt ~ Condition + (1 + Condition | SubjNo) + (1 | prime),
  data = subset(sense.pop.new, Condition %in% c("Non-sensible", "Sensible"))))
print(sense.pop.new.raw)
print(paste("p value = ", 2 * pnorm(-abs(coef(sense.pop.new.raw)[, 3]))))
sense.pop.new.log <- summary(lmer(log.rt ~ Condition + (1 + Condition | SubjNo) + (1 | prime),
  data = subset(sense.pop.new, Condition %in% c("Non-sensible", "Sensible"))))
print(sense.pop.new.log)
print(paste("p value = ", 2 * pnorm(-abs(coef(sense.pop.new.log)[, 3]))))

###########################################################################################################################
#
# Finally -- a quick test if longer stims are perceived faster than shorter
sense.pop.length <- sense.pop
# Remove outlier subjects by Accuracy and RT (same criteria as above)
Acc <- summaryBy(match. + rt ~ SubjNo, data = subset(sense.pop.length), keep.names = TRUE)
sense.pop.length <- subset(sense.pop.length, SubjNo %in% Acc[Acc$match. > 0.9, ]$SubjNo)
sense.pop.length <- subset(sense.pop.length, SubjNo %in% Acc[Acc$rt < (mean(Acc$rt) + (3 * sd(Acc$rt))), ]$SubjNo)
# Remove incorrect trials
sense.pop.length <- subset(sense.pop.length, match. == 1)
# Remove outliers by subject (mean + 3 SD; by-subject only here)
sense.pop.length <- trim_upper_3sd(sense.pop.length, .(SubjNo))
# Remove RTs < 200ms
sense.pop.length <- subset(sense.pop.length, rt > 0.2)
# Standardize length (z-score) before entering it as a continuous predictor
sense.pop.length$Length <- (sense.pop.length$Length - mean(sense.pop.length$Length, na.rm = TRUE)) / sd(sense.pop.length$Length, na.rm = TRUE)
sense.pop.length.raw <- summary(lmer(rt ~ Length + (1 + Length | SubjNo), data = sense.pop.length))
print(sense.pop.length.raw)
print(paste("p value = ", 2 * pnorm(-abs(coef(sense.pop.length.raw)[, 3]))))

###########################################################################################################################
#
# Graphs: condition means with bootstrapped 95% CIs (ci.low/ci.high above)
sense.sklar.graph <- summaryBy(rt ~ Condition + SubjNo, data = sense.pop.sklar, keep.names = TRUE)
sense.sklar.graph <- summaryBy(rt ~ Condition, data = sense.sklar.graph, FUN = c(mean, ci.low, ci.high, sd))
sense.sklar.graph$SE <- sense.sklar.graph$rt.sd / sqrt(length(unique(sense.pop.sklar$SubjNo)))
sense.sklar.graph$Experiment <- "Experiment 1a \nIncongruent Phrases"
sense.new.graph <- summaryBy(rt ~ Condition + SubjNo, data = sense.pop.new, keep.names = TRUE)
sense.new.graph <- summaryBy(rt ~ Condition, data = sense.new.graph, FUN = c(mean, ci.low, ci.high, sd))
sense.new.graph$SE <- sense.new.graph$rt.sd / sqrt(length(unique(sense.pop.new$SubjNo)))
sense.new.graph$Experiment <- "Experiment 1b \nReversible Sentences"
sense.graph <- rbind(sense.sklar.graph, sense.new.graph)
sense.graph$Cond_Graph <- "Violation"
sense.graph[sense.graph$Condition %in% c("Sklar_control", "Sensible"), ]$Cond_Graph <- "Control"
sense.graph$Cond_Graph <- ordered(sense.graph$Cond_Graph, levels = c("Violation", "Control"))
# Convert seconds to milliseconds for plotting
sense.graph$rt.mean <- sense.graph$rt.mean * 1000
sense.graph$SE <- sense.graph$SE * 1000
sense.graph$rt.ci.high <- sense.graph$rt.ci.high * 1000
sense.graph$rt.ci.low <- sense.graph$rt.ci.low * 1000
dodge <- position_dodge(width = 0.9)
ggplot(sense.graph, aes(Experiment, rt.mean, fill = Cond_Graph)) +
  geom_bar(stat = "identity", position = dodge) +
  # Columns referenced directly in aes() (the original used sense.graph$...,
  # which bypasses ggplot's data mapping); same values, idiomatic form.
  geom_errorbar(aes(ymax = rt.mean + rt.ci.high, ymin = rt.mean - rt.ci.low),
                width = 0.25, position = dodge) +
  labs(fill = "Sentence Type") +
  theme(axis.text.x = element_text(colour = "black", size = 12)) +
  ylab("Response Time (ms)") +
  xlab("") +
  ylim(c(0, 1750))
0908a0413d3016ac8ab7b594d9d2480b7eee3fdd
|
3812d02a0b7a2b17ffae371fc2d41076fe2ad868
|
/man/PSM.Rd
|
77e2e6c969c2f15f669bc3ddffc7a4f777d461bf
|
[] |
no_license
|
reealpeppe/PSM
|
eef24128396d06bfa1ff1203bc35b6ba99b17463
|
027a8440461c5d3e698ab42620599fdbb2c96d5c
|
refs/heads/main
| 2023-07-24T05:44:06.384114
| 2021-09-06T20:36:13
| 2021-09-06T20:36:13
| 401,394,193
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,241
|
rd
|
PSM.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/PSM.R
\docType{package}
\name{PSM}
\alias{PSM}
\title{PSM: A package to estimate Partial Splines model}
\description{
The PSM package provides the main function "PartialSplines", which allows the user to estimate a model in which the response variable is explained by its linear relationship with a group of explanatory variables, together with a spline variable whose functional relationship is unknown. The package includes some method functions to simplify the user's work.
Unfortunately, the first version of PSM does not permit specifying the linear part through a formula; therefore, the only way to add interactions between variables is to create them manually inside the input dataframe 'x'. This is one of the first improvements that will be made.
}
\section{PSM functions}{
\itemize{
\item PartialSplines - to estimate the model
\item psm.cv - to optimize parameters through cross validation
\item coef - method function
\item predict - method function
\item fitted - method function
\item print - method function
\item plot - method function
\item other functions for internal use
}
}
\author{
Giuseppe Reale, Salvatore Mirlocca
}
|
d7c48dca06e62aef4d6aa736e4a933f7be15c7a2
|
613b46174610a2284afb644d43f95f47f5e02e9f
|
/R/diffExp/diffExpFunctions.R
|
85db9e9511ea3b6b29d8485c0efc235bfb9a58bf
|
[] |
no_license
|
leipzig/retinoblastoma_mirna_analysis
|
c18c368ce1d6ee2e7f00ebd00b2454da10f4dc0f
|
c3d2756cb4dfdd9adce9b54b72579de20e25a41e
|
refs/heads/master
| 2020-03-21T21:23:03.949913
| 2012-07-19T01:46:43
| 2012-07-19T01:46:43
| 139,060,482
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,126
|
r
|
diffExpFunctions.R
|
library(rtracklayer)
library(stringr)
# Build the BAM directory path for the given alignment configuration and
# publish it, then source the shared directory-configuration script.
#
# SIDE EFFECTS: writes the global variable 'bamDirectory' (via assign into
# .GlobalEnv) and sources an external config file into the global workspace.
# NOTE(review): concat() is a project helper (presumably paste-like with a
# 'sep' argument) -- verify its definition before reuse.
loadConfig <- function (aligner, alignmentStringency, reference, alignmentStrategy) {
  #bamDirectory<-concat("/bam",aligner,alignmentStringency,reference,alignmentStrategy,"/",sep="/")
  # Publish the path globally so downstream scripts can locate the BAM files.
  assign("bamDirectory", concat("/bam",aligner,alignmentStringency,reference,alignmentStrategy,"/",sep="/"), envir=.GlobalEnv)
  # Hard-coded path to the shared configuration script on the NAS.
  configFile<-'/nas/is1/leipzig/src/R/dirConfig.R'
  source(configFile)
}
# Convert a named 'seqlengths' vector (e.g. from a BSgenome or a UCSC browser
# session) into a GRanges object with one range spanning each sequence
# end-to-end (1..length).
#
# x      -- named integer vector of sequence lengths (names become seqnames)
# strand -- strand assigned to every range (default "*", unstranded)
seqlengths2gr <- function(x, strand="*"){
  ## convert 'seqlengths' of BSgenome to GRanges
  GRanges(names(x), IRanges(1, x), strand=strand)
}
# Build a GRanges of whole-chromosome ranges for hg19 by querying a UCSC
# browser session for the sequence lengths (requires network access).
# Returns one range per canonical chromosome with metadata columns
# id (1..25) and name (the chromosome name).
getGRFromGenome<-function(){
  session <- browserSession()
  genome(session) <- "hg19"
  GR <- seqlengths2gr(seqlengths(session))
  # Keep only the canonical chromosomes (1-22, X, Y, M); drops scaffolds.
  # NOTE(review): concat() is a project helper, presumably paste0-like.
  seqlevels(GR,force=TRUE)<-c(concat("chr",1:22),"chrX","chrY","chrM")
  # Replace existing metadata with a simple id/name scheme.
  elementMetadata(GR)<-NULL
  elementMetadata(GR)$id<-1:25
  elementMetadata(GR)$name<-c(concat("chr",1:22),"chrX","chrY","chrM")
  GR
}
# Build a GRanges of transcripts from the UCSC hg19 refGene table
# (downloads the table; requires network access).  Returns transcript
# ranges restricted to the canonical chromosomes, with the two metadata
# columns renamed to id/name to match the other getGRFrom* helpers.
getGRFromRefGene<-function(){
  knownGenes<-makeTranscriptDbFromUCSC(genome="hg19",
    tablename="refGene",
    transcript_ids=NULL,
    circ_seqs=DEFAULT_CIRC_SEQS,
    url="http://genome.ucsc.edu/cgi-bin/",
    goldenPath_url="http://hgdownload.cse.ucsc.edu/goldenPath"
    )
  GR <- transcripts(knownGenes)
  # Rename tx_id/tx_name -> id/name for consistency with the other helpers.
  names(elementMetadata(GR))<-c("id","name")
  # Keep only the canonical chromosomes (1-22, X, Y, M).
  seqlevels(GR,force=TRUE)<-c(concat("chr",1:22),"chrX","chrY","chrM")
  GR
}
# Build a GRanges of miRNA loci from a miRBase GFF file (e.g. hsa.chr.gff).
# The numeric miRBase accession (MIxxxxxxx) and the hsa-* miRNA name are
# parsed out of the GFF 'group' attribute into metadata columns id and name.
#
# gffFile -- path to the GFF file to import
getGRFromGFF<-function(gffFile){
  #concat(refsDir,"/hsa.chr.gff")
  gffRangedData<-import.gff(gffFile)
  #GRanges with 1523 ranges and 5 elementMetadata values:
  # seqnames ranges strand | type source phase group tx_id
  #<Rle> <IRanges> <Rle> | <factor> <factor> <factor> <character> <character>
  # NA chr1 [ 30366, 30503] + | miRNA <NA> <NA> ACC="MI0006363"; ID="hsa-mir-1302-2"; hsa-mir-1302-2
  GR <- as(gffRangedData, "GRanges")
  # Extract the digits of the MI accession from the 'group' attribute...
  accessions<-as.integer(str_match(as.character(elementMetadata(GR)$group),"MI([0-9]+)")[,2])
  # ...and the hsa-* miRNA identifier.
  mirnaNames<-as.character(str_match(as.character(elementMetadata(GR)$group),"hsa[a-zA-Z0-9-]+"))
  # Replace the GFF metadata with a simple id/name scheme.
  elementMetadata(GR)<-NULL
  elementMetadata(GR)$id<-accessions
  elementMetadata(GR)$name<-mirnaNames
  GR
}
# Count reads overlapping each range in GR across the study's BAM files,
# merge technical replicates, and build a DESeq CountDataSet contrasting
# normal (N) vs tumor (T) samples.
#
# GR         -- GRanges of features to count over
# bamPaths   -- paths to the BAM files (one per lane/sample)
# bamSamples -- sample annotation for BamViews
# samples    -- column names to assign to the raw count table; must match
#               the hard-coded lane names used in the replicate merging below
# minCount   -- minimum total count (across paired lanes) for a feature to
#               be kept
# Returns list(bamcounts, counts, cds, countsMatrix).
#
# NOTE(review): the sample names, replicate sums, and the pdata design below
# are hard-coded to this retinoblastoma study's 11 samples -- the function is
# not reusable with other sample sets without editing them.
getCounts <- function (GR,bamPaths, bamSamples, samples,minCount) {
  bamView<-BamViews(bamPaths=bamPaths,bamSamples=bamSamples,bamRanges=GR)
  # Per-range raw BAM record counts (returned for reference alongside counts).
  bamcounts<-countBam(bamView)
  bfl <- BamFileList(bamPaths)
  olap <- summarizeOverlaps(GR, bfl)
  counts<-as.data.frame(assays(olap)$counts)
  colnames(counts)<-samples
  #technical replicates: sum lanes belonging to the same biological sample
  counts$WERI<-counts[,'31s8_WERI']+counts[,'36s2_WERI']+counts[,'WERI01_PGM']+counts[,'WERI02_PGM']+counts[,'WERI03_PGM']
  counts$Y79<-counts[,'36s1_Y79']+counts[,'36s2_Y79']
  counts$RB525T<-counts[,'RB525T']+counts[,'RB525T01_PGM']+counts[,'RB525T02_PGM']+counts[,'RB525T03_PGM']
  # Drop the now-merged per-lane columns.
  counts<-counts[,!str_detect(colnames(counts),'^3')]
  counts<-counts[,!str_detect(colnames(counts),'WERI.*PGM')]
  counts<-counts[,!str_detect(colnames(counts),'RB525T.*PGM')]
  #this messes up the id.var so matrix doesn't work
  # Sample design for the 11 merged samples:
  #42s1_nrml RB494N RB494T RB495N RB495T RB498N RB498T RB517T RB525T WERI Y79 (11 groups)
  conds<-c('N','N','T','N','T','N','T','T','T','T','T')
  individual<-c(1,2,2,3,3,4,4,5,6,7,8)
  replicate<-c(1,1,2,1,2,1,2,2,2,2,2)
  type<-c('IIfx','HS','HS','HS','HS','HS','HS','HS','HS','IIfx','IIfx')
  paired<-c(FALSE,TRUE,TRUE,TRUE,TRUE,TRUE,TRUE,FALSE,FALSE,FALSE,FALSE)
  pdata<-data.frame(condition=conds,replicate=replicate,type=type,individual=individual,paired=paired)
  rownames(pdata)<-c('42s1_nrml','RB494N','RB494T','RB495N','RB495T','RB498N','RB498T','RB517T','RB525T','WERI','Y79')
  # NOTE(review): assumes the rowData carries a tx_id column -- true for the
  # refGene-derived GR, but the genome/GFF helpers set id/name instead; verify.
  rownames(counts)<-elementMetadata(rowData(olap))$tx_id
  #at least minCount from the paired lanes
  countsAboveThreshold<-subset(counts,rowSums(counts[,row.names(pdata[which(pdata$paired==TRUE),])])>=minCount)
  #subset(counts,rowSums(counts[-1])>minCount)
  countsMatrix<-as.matrix(countsAboveThreshold)
  colnames(countsMatrix)<-colnames(countsAboveThreshold)
  cds <- newCountDataSet( countsMatrix, pdata$condition )
  list(bamcounts=bamcounts,counts=counts,cds=cds,countsMatrix=countsMatrix)
}
# Variant of getCounts() that takes an explicit summarizeOverlaps counting
# 'mode' and skips the technical-replicate merging (that logic is preserved
# below as comments).  Returns list(counts, cds, countsMatrix).
getCounts2 <- function (GR, bamPaths, bamSamples, samples, minCount, mode) {
  bfl <- BamFileList(bamPaths)
  olap <- summarizeOverlaps(GR, bfl, mode = mode)
  # BUG FIX: the count table was originally assigned to 'precounts', but every
  # subsequent statement referenced 'counts', which was never defined in this
  # function (so it either errored or silently picked up a global 'counts').
  counts <- as.data.frame(assays(olap)$counts)
  colnames(counts) <- samples
  # Technical-replicate merging and pdata construction, disabled in this
  # variant but kept for reference (see getCounts for the live version):
  # counts$WERI<-counts[,'31s8_WERI']+counts[,'36s2_WERI']+...
  # conds<-c('N','N','T','N','T','N','T','T','T','T','T')
  # individual<-c(1,2,2,3,3,4,4,5,6,7,8)
  # replicate<-c(1,1,2,1,2,1,2,2,2,2,2)
  # type<-c('IIfx','HS','HS','HS','HS','HS','HS','HS','HS','IIfx','IIfx')
  # paired<-c(FALSE,TRUE,TRUE,TRUE,TRUE,TRUE,TRUE,FALSE,FALSE,FALSE,FALSE)
  # pdata<-data.frame(condition=conds,replicate=replicate,type=type,individual=individual,paired=paired)
  # rownames(pdata)<-c('42s1_nrml','RB494N','RB494T','RB495N','RB495T','RB498N','RB498T','RB517T','RB525T','WERI','Y79')
  # NOTE(review): assumes rowData carries a tx_id column -- verify for the GR
  # variant being counted (the genome/GFF helpers set id/name instead).
  rownames(counts) <- elementMetadata(rowData(olap))$tx_id
  # NOTE(review): 'pdata' is NOT defined in this function (its construction is
  # commented out above); it must exist in the calling environment with
  # 'paired' and 'condition' columns -- confirm before use.
  # Keep only features with at least minCount reads across the paired lanes.
  countsAboveThreshold <- subset(counts,
    rowSums(counts[, row.names(pdata[which(pdata$paired == TRUE), ])]) >= minCount)
  countsMatrix <- as.matrix(countsAboveThreshold)
  colnames(countsMatrix) <- colnames(countsAboveThreshold)
  cds <- newCountDataSet(countsMatrix, pdata$condition)
  list(counts = counts, cds = cds, countsMatrix = countsMatrix)
}
doStats <- function (cds, fits, method, sharingMode) {
  ## Run the DESeq negative-binomial test of condition "T" versus "N".
  ##
  ## Args:
  ##   cds:         a CountDataSet (e.g. from getCounts/getCounts2).
  ##   fits:        fitType passed to estimateDispersions ("local", ...).
  ##   method:      dispersion-estimation method ("pooled", ...).
  ##   sharingMode: dispersion sharing mode ("fit-only", "maximum", ...).
  ##
  ## Side effect: writes the full result table to "deseqResults.csv" in the
  ## current working directory.
  ## Returns: the nbinomTest result data frame.
  normalized <- estimateSizeFactors(cds)
  dispersed  <- estimateDispersions(normalized, fitType = fits,
                                    method = method, sharingMode = sharingMode)
  testResult <- nbinomTest(dispersed, "N", "T")
  write.csv(testResult, file = "deseqResults.csv")
  testResult
}
|
e3c663133388a1e21e5338db5c1868bbe2063d19
|
6f3de667027be0523a1c79df9506e07872e208b1
|
/srfc_life_cycle.R
|
5a72d7e57c9ce478fedd56f0bab72fddfc54a637
|
[] |
no_license
|
paulcarvalho/diagram
|
07d75693a68944618c081181b48a68c74cdfe20d
|
b1ec00f6594fbdbe8af90d89f812cc821eeff329
|
refs/heads/main
| 2023-06-01T05:58:52.797217
| 2021-06-21T16:12:48
| 2021-06-21T16:12:48
| 375,136,615
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,451
|
r
|
srfc_life_cycle.R
|
# Sacramento River fall-run Chinook salmon life cycle diagram
library(DiagrammeR)

# Create the first graph.
# Nodes: a juvenile stage ('J') plus ocean ('O') and spawner ('S') stages for
# ages 2-5, mirrored on the positive and negative x side of the diagram.
nodes_1 <- create_node_df(n = 17,
                          x = c(0, 2, 4, 6, 8, 3, 5, 7, 9,
                                -2, -4 , -6, -8, -3, -5, -7, -9),
                          y = c(1, 1, 1, 1, 1, 3, 3, 3, 3,
                                1, 1, 1, 1, 3, 3, 3, 3),
                          label = c('J', 'O₂', 'O₃', 'O₄', 'O₅',
                                    'S₂', 'S₃', 'S₄', 'S₅',
                                    'O₂', 'O₃', 'O₄', 'O₅',
                                    'S₂', 'S₃', 'S₄', 'S₅'),
                          style = FALSE,
                          color = 'black',
                          fontsize = 50,
                          width = 1)
# Edges: stage-to-stage progression on each side plus the closing edge back
# to the juvenile node.
edges_1 <- create_edge_df(from = c(1:4, 2:5, 1, 10:12, 10:13, 15),
                          to = c(2:5, 6:9, 10, 11:13, 14:17, 1),
                          color = 'black',
                          arrowsize = 2,
                          concentrate = TRUE)
graph1 <- create_graph(nodes_df = nodes_1,
                       edges_df = edges_1) %>%
  add_global_graph_attrs(attr = 'overlap', value = FALSE, attr_type = 'graph')
render_graph(graph1)
# BUG FIX: an unresolved git merge conflict (<<<<<<< HEAD ... =======
# "test conflict" ... >>>>>>>) followed this script; the bare words
# "test conflict" were a syntax error that made the file unparseable.
# The conflict markers and both conflicting fragments (throwaway comments
# on one side, the invalid text on the other) have been removed.
|
b295bb483a5986e6c2fac0c720ef6e6c949c8ab1
|
2593252f36443bc674855110e0c17b5b6bd3561d
|
/epigeneticX/blood_pipeline_QC.R
|
6f134a19ddcff58b79ecf09427c226dc1546a610
|
[] |
no_license
|
zhenyisong/core.facility
|
045b0f3eca0694d6013cee7e99715a301e20f63e
|
1df864d0ee9ba89e7217490fb7940ba60345a4bd
|
refs/heads/master
| 2021-06-30T05:54:24.338355
| 2019-03-31T11:39:40
| 2019-03-31T11:39:40
| 115,390,474
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,778
|
r
|
blood_pipeline_QC.R
|
# @author Yisong Zhen
# @since 2018-03-22
# @update 2018-04-12
# @parent blood_pipeline_QC.sh
#---
# ChIRP-seq QC for the rox2 analysis: compare the peak set produced by the
# published protocol (bowtie1 + macs14) with two in-house variants
# (bwa + macs2) by intersecting their filtered peaks.
#---
pkgs <- c( 'tidyverse', 'GenomicRanges',
           'ChIPseeker', 'rtracklayer',
           'GenomicAlignments', 'BiocParallel',
           'Rsamtools','magrittr', 'DESeq2',
           'stringr', 'JASPAR2016', 'TFBSTools',
           'seqLogo', 'RSQLite', 'DBI',
           'TxDb.Dmelanogaster.UCSC.dm6.ensGene',
           'ggbio', 'ChIPpeakAnno',
           'BSgenome.Dmelanogaster.UCSC.dm6')
# NOTE(review): require() returns FALSE instead of erroring when a package is
# missing; the logical results collected in load.lib are never checked.
load.lib <- lapply(pkgs, require, character.only = TRUE)
replication.rox2.ChIRP.path <- file.path(
    '/wa/zhenyisong/results/chenlab/songli/pipelineQC/bowtie1')
setwd(replication.rox2.ChIRP.path)
#---
# This is the replication result for the Mol. Cell data,
# using bowtie1 and macs14.
# Peaks are filtered on fold enrichment, correlation and average coverage,
# converted to GRanges, and restricted to widths >= 2300 bp.
#---
rox2.defacto.pearson.results <- read.delim( 'merge_peaks.xls.corr.xls',
                                            header = TRUE, sep = '\t',
                                            fill = TRUE, comment.char = '#',
                                            stringsAsFactors = FALSE) %>%
                                filter(fold_enrichment >= 2) %>%
                                filter(correlation >= 0.3) %>%
                                filter(aver_coverage >= 1.5) %$%
                                { GRanges(
                                    seqname = as.character(chr),
                                    ranges = IRanges( start = start,
                                                      end = end ) ) } %>%
                                .[width(.) >= 2300]
#---
# This result is from the bwa program with my own understanding and own
# protocol, using macs2.
# This method does not replicate the published result exactly, so the
# overlap with the de-facto peak set is checked below.
#---
rox2.change.ChIRP.path <- file.path(
    '/wa/zhenyisong/results/chenlab/songli/pipelineQC/bwa')
setwd(rox2.change.ChIRP.path)
rox2.private.pearson.results <- read.delim( 'rox2_peaks.xls.corr.xls',
                                            header = TRUE, sep = '\t',
                                            fill = TRUE, comment.char = '#',
                                            stringsAsFactors = FALSE) %>%
                                filter(fold_enrichment >= 2) %>%
                                filter(correlation >= 0.3) %>%
                                filter(aver_coverage >= 1.5) %$%
                                { GRanges(
                                    seqname = as.character(chr),
                                    ranges = IRanges( start = start,
                                                      end = end ) ) } %>%
                                .[width(.) >= 2300]
# Full (unfiltered) macs2 peak table, with the peak statistics kept as
# GRanges metadata columns.
rox2.macs2.xls.results <- read.delim( file = 'rox2_peaks.xls',
                                      header = TRUE, sep = '\t',
                                      fill = TRUE, comment.char = '#',
                                      stringsAsFactors = FALSE) %$%
                          { GRanges(
                              seqname = chr,
                              ranges = IRanges( start = start,
                                                end = end),
                              peak.length = length,
                              abs.summit = abs_summit,
                              pileup = pileup,
                              log.pvalue = X.log10.pvalue.,
                              foldChange = fold_enrichment,
                              logqvalue = X.log10.qvalue.,
                              macs2.name = name ) }
#---
# The result of this step confirms that the two protocols,
# macs2 + bwa and macs14 + bowtie1, are comparable and interchangeable.
# (suppressWarnings hides findOverlaps warnings -- presumably about
# mismatched sequence levels; confirm before relying on the counts.)
#---
two.studies <- suppressWarnings( findOverlaps( query = rox2.defacto.pearson.results,
                                               subject = rox2.macs2.xls.results,
                                               type = 'within') )
#---
# The results below use the minor modification: bwa mapping strategy and
# macs2 peak calling. Other processing steps follow Ke Qun's protocol.
# Note the looser filters here (fold_enrichment >= 1, aver_coverage >= 1).
#---
rox2.minor.pearson.results <- read.delim( file = 'merge_z_peaks.xls.corr.xls',
                                          header = TRUE, sep = '\t',
                                          fill = TRUE, comment.char = '#',
                                          stringsAsFactors = FALSE) %>%
                              filter(fold_enrichment >= 1) %>%
                              filter(correlation >= 0.3) %>%
                              filter(aver_coverage >= 1) %$%
                              { GRanges(
                                  seqname = as.character(chr),
                                  ranges = IRanges( start = start,
                                                    end = end ) ) } %>%
                              .[width(.) >= 2300]
# Unfiltered table for the minor-modification run (loaded for inspection;
# not used further below).
rox2.minor.xls.results <- read.delim( file = 'merge_z_peaks.xls',
                                      header = TRUE, sep = '\t',
                                      fill = TRUE, comment.char = '#',
                                      stringsAsFactors = FALSE) %$%
                          { GRanges(
                              seqname = chr,
                              ranges = IRanges( start = start,
                                                end = end),
                              peak.length = length,
                              abs.summit = abs_summit,
                              pileup = pileup,
                              log.pvalue = X.log10.pvalue.,
                              foldChange = fold_enrichment,
                              logqvalue = X.log10.qvalue.,
                              macs2.name = name ) }
# Overlap of the de-facto peaks with the minor-modification peaks
# (any overlap of at least 50 bp); result printed, not stored.
suppressWarnings( findOverlaps( query = rox2.defacto.pearson.results,
                                subject = rox2.minor.pearson.results,
                                type = 'any', minoverlap = 50) )
|
d77822ee14d77d1a03c3d24ed9715530b1c50513
|
b6a153835bad8716f08f12d572d9ee32c72238dd
|
/Scrape PCS Rider Attrs.R
|
de823e872f3543fde7a41bd174bd3a6d63781bb7
|
[] |
no_license
|
jfontestad/p-c
|
7213ef310f932413997e8c06f687610a9f3a2c55
|
23d0b3449c621d53c93bfb32ff38583da335646a
|
refs/heads/master
| 2023-09-05T03:35:25.580973
| 2021-11-20T13:32:41
| 2021-11-20T13:32:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,011
|
r
|
Scrape PCS Rider Attrs.R
|
# Scrape rider attributes (date of birth, weight, height) from
# procyclingstats.com rankings pages and store them in a MySQL table.

library(tidyverse)
library(lubridate)
library(rvest)
library(RMySQL)

# SECURITY(review): database credentials are hard-coded in plain text here
# (and again before the final write); move them to environment variables or
# a config file outside version control.
con <- dbConnect(MySQL(),
                 host='localhost',
                 dbname='cycling',
                 user='jalnichols',
                 password='braves')
#
LIMIT = 200
# Each (DATES1[k], DATES2[k]) pair is a pair of ranking-date ids defining one
# ranking window to scrape.
DATES1 = c(31759, 12408, 1951, 34621, 1951)
DATES2 = c(43578, 31759, 12408, 1951, 34621)
PAGES = seq(0, 1000, 200)   # 6 result pages of LIMIT riders each
ITERS = length(DATES1) * length(PAGES)

# BUG FIX: this was vector("list", length(ITERS)), which preallocates a
# length-1 list because length() of the scalar ITERS is 1 (the list then had
# to grow on every iteration). Preallocate one slot per iteration instead.
data_list <- vector("list", ITERS)

for(i in seq_len(ITERS)) {
  # Map the flat iteration index onto a (date pair, page) combination:
  # floor((i-1)/6)+1 walks the 5 date pairs, (i %% 6)+1 cycles the 6 pages.
  DATE1 = DATES1[[floor((i-1) / 6)+1]]
  DATE2 = DATES2[[floor((i-1) / 6)+1]]
  PAGE = PAGES[[(i %% 6)+1]]

  pg <- paste0('https://www.procyclingstats.com/rankings.php?id=',
               DATE1,
               '&id=',
               DATE2,
               '&nation=&team=&page=',
               PAGE,
               '&prev_rnk_days=1&younger=&older=&limit=',
               LIMIT,
               '&filter=Filter&morefilters=0') %>%
    read_html()

  # Rider profile hrefs from the ranking table...
  d <- pg %>%
    html_nodes("tbody") %>%
    html_nodes("tr") %>%
    html_nodes("td") %>%
    html_nodes("a") %>%
    html_attr(name = "href") %>%
    enframe(name = NULL)

  # ...and the matching link texts (rider names), in the same order.
  d2 <- pg %>%
    html_nodes("tbody") %>%
    html_nodes("tr") %>%
    html_nodes("td") %>%
    html_nodes("a") %>%
    html_text() %>%
    enframe(name = NULL)

  # Keep only rider links and build absolute profile URLs.
  data_list[[i]] <- cbind(d, d2 %>% rename(rider = value)) %>%
    filter(str_detect(value, "rider/")) %>%
    mutate(rider_url = paste0('https://www.procyclingstats.com/', value),
           rider = str_trim(rider)) %>%
    select(rider_url, rider)
}
#
riders_to_scrape <- bind_rows(data_list) %>%
  unique()

# Fetch each rider's profile page and keep the raw text of the info <div>.
rider_data_list <- vector("list", length(riders_to_scrape$rider_url))
for(r in seq_along(riders_to_scrape$rider_url)) {
  pg <- riders_to_scrape$rider_url[[r]] %>%
    read_html()
  rider_data_list[[r]] <- pg %>%
    html_nodes('div.rdr-info') %>%
    html_text()
  Sys.sleep(runif(1, 1, 5))  # polite randomized delay between requests
  print(r)                   # progress indicator
}

# Parse date of birth, weight and height out of the raw profile text by
# locating the fixed field labels.
rider_attr_list <- vector("list", length(rider_data_list))
for(x in seq_along(rider_data_list)) {
  stg <- rider_data_list[[x]]
  rider_attr_list[[x]] <- tibble(
    dob = str_trim(str_sub(stg, str_locate(stg, "Date of birth:")[[1]] + 14, str_locate(stg, "Nationality")[[1]] - 5)),
    weight = str_trim(str_sub(stg, str_locate(stg, "Weight:")[[2]] + 1, str_locate(stg, "Height")[[1]] - 1)),
    height = str_trim(str_sub(stg, str_locate(stg, "Height:")[[2]] + 1, str_locate(stg, "Height:")[[2]] + 5))
  )
  print(nchar(stg))
}
#
# Attach rider names and convert "NN kg" / "N.NN m" strings to numerics.
rider_attributes <- bind_rows(rider_attr_list) %>%
  cbind(rider = riders_to_scrape %>%
          select(rider)) %>%
  mutate(weight = as.numeric(str_trim(str_replace(weight, "kg", ""))),
         height = as.numeric(str_trim(str_replace(height, " m", ""))))
#
# Re-establish the connection before writing (see SECURITY note above).
con <- dbConnect(MySQL(),
                 host='localhost',
                 dbname='cycling',
                 user='jalnichols',
                 password='braves')
#
dbWriteTable(con, "rider_attributes", rider_attributes)
|
bdc22a67d5d919585126e6e12085ead355da0dfa
|
c8a19968f69bc99fa6dd6deae56fc4f77c672275
|
/getting_and_cleaning_data/quiz3.r
|
47b04803257d86664bb013e1fdf536a423237d69
|
[] |
no_license
|
rodolfoksveiga/courses
|
9d77f0deadcc9e1edf43da835beff3fdbc874740
|
a6d314ab4381121f5683bb6b4465cfd13fac36cc
|
refs/heads/master
| 2023-06-05T02:27:38.076022
| 2021-06-23T12:50:08
| 2021-06-23T12:50:08
| 288,047,890
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,256
|
r
|
quiz3.r
|
# set environment ####
pkgs = c('dplyr', 'jpeg')
lapply(pkgs, library, character.only = TRUE)
setwd('~/git/courses/getting_and_cleaning_data/')

# question 1 ####
# Row indices of households on >= 10 acres (ACR == 3) that sold >= $10k of
# agricultural products (AGS == 6).
df1 = read.csv('https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2Fss06hid.csv')
index = which(df1$ACR == 3 & df1$AGS == 6)

# question 2 ####
# 30th and 80th quantiles of the raw JPEG data.
file_url2 = 'https://d396qusza40orc.cloudfront.net/getdata%2Fjeff.jpg'
file_name2 = 'jeff.jpg'
download.file(file_url2, file_name2)
image2 = readJPEG(file_name2, native = TRUE)
quants = quantile(image2, probs = c(0.3, 0.8))

# question 3, 4 and 5 ####
# Merge GDP and education data on country code, rank by 2012 GDP.
gdp = read.csv('https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2FGDP.csv')
educ = read.csv('https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2FEDSTATS_Country.csv')
df3 = merge(gdp, educ, by.x = 'X', by.y = 'CountryCode', all = FALSE)
# NOTE(review): if the GDP column contains thousands separators this
# as.numeric() coerces those entries to NA -- verify against the raw file.
df3$Gross.domestic.product.2012 = as.numeric(df3$Gross.domestic.product.2012)
df3 = arrange(df3, desc(Gross.domestic.product.2012))
# BUG FIX: the next two lines previously referenced the undefined object
# 'df' ("df$Gross..." and "df[13, ...]") instead of 'df3', so the script
# failed here (or silently used a stale global 'df').
df3 = df3[!is.na(df3$Gross.domestic.product.2012), ]
country_13th = df3[13, 'Long.Name']
df3$Income.Group = factor(df3$Income.Group)
# Mean GDP rank within each income group.
avgs = tapply(df3$Gross.domestic.product.2012, df3$Income.Group, mean)
# Cross-tabulate GDP quintile bins against income group and pull the count of
# "Lower middle income" countries in the top bin.
df3$Cut = cut(df3$Gross.domestic.product.2012, 5)
lmi = table(df3$Cut, df3$Income.Group)[1, 'Lower middle income']
|
7be203c2cbf30cfe9a8dfe9085987b7107e287fc
|
ca25692e5f1e3c1f63c59e37fd92ffcdc6c78412
|
/code/50_plotting_state_random_effects.R
|
7086e6b0c3b3ac26052771e65736349b6b1448cb
|
[] |
no_license
|
mkiang/decomposing_inequality
|
3a06f8d86c1eddc161e0c83bbfaa092cde7285f8
|
b3431e499572aeb2b11bfd2f7a2ea82d84ecfbb1
|
refs/heads/master
| 2020-09-06T12:19:47.116124
| 2019-11-09T06:40:55
| 2019-11-09T06:40:55
| 220,422,411
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,934
|
r
|
50_plotting_state_random_effects.R
|
## Plot the posterior distributions of the state random effects (nu) for two
## fitted models as a dodged point-and-interval chart, saved as a PDF.
## Imports ----
library(magrittr)
library(rstan)
library(tidyverse)
library(matrixStats)
source('./code/helpers/99_helper_functions.R')
## Load data ----
## NOTE(review): these .RData files are assumed to provide m1_samps and
## m2_samps (extracted posterior samples) -- confirm against the upstream
## extraction scripts.
load('./data_working/model1_extracted_samples.RData')
load('./data_working/model2_extracted_samples.RData')
fips_idx_map <- readRDS('./data_working/fips_to_dummy_mappings.RDS')
## Make a new summary dataframe ----
## summarize_by_columns() comes from 99_helper_functions.R; rename its column
## index (c_idx) to a state index (s_idx) and tag each row with its model.
m1_nu <- summarize_by_columns(m1_samps$nu) %>%
    dplyr::rename(s_idx = c_idx) %>%
    dplyr::mutate(model = "Model 1")
m2_nu <- summarize_by_columns(m2_samps$nu) %>%
    dplyr::rename(s_idx = c_idx) %>%
    dplyr::mutate(model = "Model 2")
## Stack both models, attach state FIPS codes / abbreviations, and order the
## model factor so "Model 2" is the first level in the dodged plot.
nu_df <- rbind(m1_nu, m2_nu) %>%
    dplyr::left_join(fips_idx_map %>%
                         dplyr::select(fips_st, s_idx, st_abbr) %>%
                         dplyr::distinct(),
                     by = "s_idx") %>%
    dplyr::mutate(model = factor(
        model,
        levels = c("Model 2", "Model 1"),
        ordered = TRUE
    ))
## One point (posterior mean, avg) with an interval (p025-p975; presumably
## the 2.5th/97.5th percentiles from summarize_by_columns) per state, flipped
## so states run down the y axis.
nu_plot <- ggplot2::ggplot(nu_df,
                           ggplot2::aes(
                               x = st_abbr,
                               y = avg,
                               ymax = p975,
                               ymin = p025,
                               color = model
                           )) +
    ggplot2::geom_hline(yintercept = 0) +
    ggplot2::geom_point(position = ggplot2::position_dodge(.5)) +
    ggplot2::geom_linerange(position = ggplot2::position_dodge(.5)) +
    ggplot2::coord_flip() +
    ggplot2::theme_minimal() +
    ggplot2::theme(legend.position = c(0, 1),
                   legend.justification = c(0, 1)) +
    ggplot2::scale_color_brewer(NULL, palette = "Set1") +
    ggplot2::labs(title = "Posterior distribution of\nstate random effects (nu)",
                  x = NULL, y = NULL)
ggplot2::ggsave(
    nu_plot,
    filename = './plots/nu_posterior_distribution.pdf',
    height = 7,
    width = 3,
    scale = 1.25
)
|
df42a7dd82c5eca9f1adf52a3ec590ddcd69d1ec
|
0d1ef2cb3818bcadd602a0f46cbfec7ed55a9aef
|
/h2o_3.10.4.4/h2o/man/h2o.clusterInfo.Rd
|
c91a41ce1503c3b5c0cbd656f824cd80c97464e5
|
[] |
no_license
|
JoeyChiese/gitKraken_test
|
dd802c5927053f85afba2dac8cea91f25640deb7
|
3d4de0c93a281de648ae51923f29caa71f7a3342
|
refs/heads/master
| 2021-01-19T22:26:29.553132
| 2017-04-20T03:31:39
| 2017-04-20T03:31:39
| 88,815,763
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 199
|
rd
|
h2o.clusterInfo.Rd
|
% Generated by roxygen2 (4.0.2): do not edit by hand
\name{h2o.clusterInfo}
\alias{h2o.clusterInfo}
\title{Print H2O cluster info}
\usage{
h2o.clusterInfo()
}
\description{
Print H2O cluster info
}
|
4b054ab8436bf3cd9a43b2629ca209f97da9fbee
|
c3f087b8e2e2331b4d4946b722d8357cef1b8fc7
|
/man/setParameters.Rd
|
60ee64af99fd31e0343868a17888deee67f2c88c
|
[
"LicenseRef-scancode-unknown",
"Artistic-2.0"
] |
permissive
|
jpahle/CoRC
|
40f2d27b1adb321ecc93eb087c7f6d516cae94a4
|
103fb7ae2d01538703fc20a0745f8ffa0adea24b
|
refs/heads/master
| 2023-03-17T07:23:55.156712
| 2023-03-12T22:49:13
| 2023-03-12T22:49:13
| 87,799,312
| 5
| 2
|
Artistic-2.0
| 2021-07-21T19:12:51
| 2017-04-10T10:39:08
|
R
|
UTF-8
|
R
| false
| true
| 2,048
|
rd
|
setParameters.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/entity_accessors.R
\name{setParameters}
\alias{setParameters}
\title{Set reaction parameters}
\usage{
setParameters(
key = NULL,
name = NULL,
value = NULL,
mapping = NULL,
data = NULL,
model = getCurrentModel()
)
}
\arguments{
\item{key}{Identify which reaction parameter to edit by specifying it's key, as string.
Also supports fragments of keys, if uniquely matching one reaction parameter.}
\item{name}{Parameter is deprecated.}
\item{value}{Value to set, as numeric.}
\item{mapping}{Key of global quantity to map to, as string.
Also supports fragments of keys, if uniquely matching one global quantity.}
\item{data}{A data frame as given by \code{\link{getParameters}} which will be applied before the other arguments.}
\item{model}{A model object.}
}
\description{
\code{setParameters} applies given values to reaction parameters of the model depending on the \code{key} argument.
}
\details{
Use the \code{key} argument to specify which reaction parameter to modify and any of the other arguments to specify the value to set.
The function is fully vectorized.
If a \code{NA} value is supplied, the model value is kept unchanged.
The \href{https://jpahle.github.io/CoRC/articles/entity_management.html}{online article on managing model entities} provides some further context.
}
\seealso{
\code{\link{getParameters}} \code{\link{getParameterReferences}}
Other reaction functions:
\code{\link{clearCustomKineticFunctions}()},
\code{\link{deleteKineticFunction}()},
\code{\link{deleteReaction}()},
\code{\link{entity_finders}},
\code{\link{getParameterReferences}()},
\code{\link{getParameters}()},
\code{\link{getReactionMappings}()},
\code{\link{getReactionReferences}()},
\code{\link{getReactions}()},
\code{\link{getValidReactionFunctions}()},
\code{\link{newKineticFunction}()},
\code{\link{newReaction}()},
\code{\link{setReactionFunction}()},
\code{\link{setReactionMappings}()},
\code{\link{setReactions}()}
}
\concept{reaction functions}
|
efb70bed67abb91b7f13e0327c0511d163291ce5
|
be043b7fe4d683b44246fd7bb4045885c5205402
|
/plot2.R
|
b322fa21df8d080ea1b6d3c642958d4683bf32e3
|
[] |
no_license
|
mkschle/ExData_Plotting1
|
ae866e327d807afeb400ef84e7e767ab9adc1539
|
00d6689c9ceefa0045c38fa15cfb2819bd5b8151
|
refs/heads/master
| 2021-05-10T09:34:40.332412
| 2018-01-25T17:14:37
| 2018-01-25T17:14:37
| 118,928,675
| 0
| 0
| null | 2018-01-25T15:12:19
| 2018-01-25T15:12:19
| null |
UTF-8
|
R
| false
| false
| 1,055
|
r
|
plot2.R
|
# plot2.R -- line plot of Global Active Power for 1-2 Feb 2007 from the UCI
# household power consumption data set, written to plot2.png.

# Download and extract the raw data set.
data_url <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
download.file(data_url, destfile = "data.zip")
unzip("data.zip")
power_data <- read.table("household_power_consumption.txt", header = TRUE,
                         na.strings = "?", sep = ";", stringsAsFactors = FALSE)

# Combine the separate date and time columns into a single POSIXct timestamp
# (day/month/year ordering in the raw file).
power_data$Date_Time <- as.POSIXct(paste(power_data$Date, power_data$Time),
                                   format = "%d/%m/%Y %H:%M:%S")

# Keep only 1-2 February 2007 (half-open window: >= Feb 1, < Feb 3).
date_window <- as.POSIXct(c("1/2/2007", "3/2/2007"), format = "%d/%m/%Y")
in_window <- power_data$Date_Time >= date_window[1] &
  power_data$Date_Time < date_window[2]
# which() drops NA timestamps, matching subset()'s NA handling.
feb_data <- power_data[which(in_window), ]

# Render the line plot to a PNG device.
png(file = "plot2.png")
plot(x = feb_data$Date_Time, y = feb_data$Global_active_power,
     type = "l", ylab = "Global Active Power (kilowatts)", xlab = "")
dev.off()
|
b776f0e28d392df5145d4e89fef228dbcabd28c8
|
687f1e6cb6665509677d644a8fbbb4cde4846d57
|
/R/anovaSStypes.R
|
20e8acbd46566ada210b78c0b470fca1e10dfa00
|
[] |
no_license
|
ramnathv/RExRepos
|
75cbb338725af8189dcce6cfeb723489786cfbc0
|
e0edd9d2272ae7290a94b64c62af8acb1e675848
|
refs/heads/master
| 2016-08-03T00:32:11.574993
| 2012-08-14T14:09:03
| 2012-08-14T14:09:03
| 5,886,181
| 1
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,056
|
r
|
anovaSStypes.R
|
## Demonstration of Type I (sequential), Type II, and Type III sums of
## squares for an unbalanced two-way ANOVA. The '## @knitr' markers are
## chunk labels consumed by knitr and must not be renamed.
## @knitr unnamed-chunk-1
wants <- c("car")
has <- wants %in% rownames(installed.packages())
if(any(!has)) install.packages(wants[!has])
## @knitr unnamed-chunk-2
## Build an unbalanced 3x3 design: gPQ holds the DV values for cell (P, Q);
## the unequal cell sizes are what makes the SS types differ.
P <- 3
Q <- 3
g11 <- c(41, 43, 50)
g12 <- c(51, 43, 53, 54, 46)
g13 <- c(45, 55, 56, 60, 58, 62, 62)
g21 <- c(56, 47, 45, 46, 49)
g22 <- c(58, 54, 49, 61, 52, 62)
g23 <- c(59, 55, 68, 63)
g31 <- c(43, 56, 48, 46, 47)
g32 <- c(59, 46, 58, 54)
g33 <- c(55, 69, 63, 56, 62, 67)
dfMD <- data.frame(IV1=factor(rep(1:P, c(3+5+7, 5+6+4, 5+4+6))),
                   IV2=factor(rep(rep(1:Q, P), c(3,5,7, 5,6,4, 5,4,6))),
                   DV =c(g11, g12, g13, g21, g22, g23, g31, g32, g33))
## @knitr unnamed-chunk-3
xtabs(~ IV1 + IV2, data=dfMD)
## @knitr unnamed-chunk-4
## Type I SS are order-dependent: the two factor orders give different tables.
anova(lm(DV ~ IV1 + IV2 + IV1:IV2, data=dfMD))
anova(lm(DV ~ IV2 + IV1 + IV1:IV2, data=dfMD))
## @knitr unnamed-chunk-5
## Type I SS reproduced by hand via nested model comparisons.
SS.I1 <- anova(lm(DV ~ 1, data=dfMD),
               lm(DV ~ IV1, data=dfMD))
SS.I2 <- anova(lm(DV ~ IV1, data=dfMD),
               lm(DV ~ IV1+IV2, data=dfMD))
SS.Ii <- anova(lm(DV ~ IV1+IV2, data=dfMD),
               lm(DV ~ IV1+IV2 + IV1:IV2, data=dfMD))
## @knitr unnamed-chunk-6
SS.I1[2, "Sum of Sq"]
SS.I2[2, "Sum of Sq"]
SS.Ii[2, "Sum of Sq"]
## @knitr unnamed-chunk-7
## Type I SS components add up to the total model SS.
SST <- anova(lm(DV ~ 1, data=dfMD),
             lm(DV ~ IV1*IV2, data=dfMD))
SST[2, "Sum of Sq"]
SS.I1[2, "Sum of Sq"] + SS.I2[2, "Sum of Sq"] + SS.Ii[2, "Sum of Sq"]
## @knitr unnamed-chunk-8
## Type II SS via car::Anova.
library(car)
Anova(lm(DV ~ IV1*IV2, data=dfMD), type="II")
## @knitr unnamed-chunk-9
## Type II SS by hand: each main effect is tested against the model
## containing the other main effect (no interaction).
SS.II1 <- anova(lm(DV ~ IV2, data=dfMD),
                lm(DV ~ IV1+IV2, data=dfMD))
SS.II2 <- anova(lm(DV ~ IV1, data=dfMD),
                lm(DV ~ IV1+IV2, data=dfMD))
SS.IIi <- anova(lm(DV ~ IV1+IV2, data=dfMD),
                lm(DV ~ IV1+IV2+IV1:IV2, data=dfMD))
## @knitr unnamed-chunk-10
SS.II1[2, "Sum of Sq"]
SS.II2[2, "Sum of Sq"]
SS.IIi[2, "Sum of Sq"]
## @knitr unnamed-chunk-11
## Unlike Type I, the Type II components do not sum to the total model SS.
SST <- anova(lm(DV ~ 1, data=dfMD),
             lm(DV ~ IV1*IV2, data=dfMD))
SST[2, "Sum of Sq"]
SS.II1[2, "Sum of Sq"] + SS.II2[2, "Sum of Sq"] + SS.IIi[2, "Sum of Sq"]
## @knitr unnamed-chunk-12
# options(contrasts=c(unordered="contr.sum", ordered="contr.poly"))
## @knitr unnamed-chunk-13
# options(contrasts=c(unordered="contr.treatment", ordered="contr.poly"))
## @knitr unnamed-chunk-14
## Type III SS requires effect (sum-to-zero) coding, set per-factor here.
fitIII <- lm(DV ~ IV1 + IV2 + IV1:IV2, data=dfMD,
             contrasts=list(IV1=contr.sum, IV2=contr.sum))
## @knitr unnamed-chunk-15
library(car)
Anova(fitIII, type="III")
## @knitr unnamed-chunk-16
# A: lm(DV ~ IV2 + IV1:IV2) vs. lm(DV ~ IV1 + IV2 + IV1:IV2)
## @knitr unnamed-chunk-17
# B: lm(DV ~ IV1 + IV1:IV2) vs. lm(DV ~ IV1 + IV2 + IV1:IV2)
## @knitr unnamed-chunk-18
## Type III SS equivalently obtained from drop1().
drop1(fitIII, ~ ., test="F")
## @knitr unnamed-chunk-19
## NOTE(review): this chunk appears to intentionally repeat chunk 18 (the
## source document shows the same call twice).
drop1(fitIII, ~ ., test="F")
## @knitr unnamed-chunk-20
## Detach packages loaded (directly or as dependencies) by this script.
try(detach(package:car))
try(detach(package:nnet))
try(detach(package:MASS))
|
151db5326f49a3b3054dce210e32c58c03c0d534
|
9aafde089eb3d8bba05aec912e61fbd9fb84bd49
|
/codeml_files/newick_trees_processed/951_0/rinput.R
|
f6a8b3206452b8ba3a8523c2d6a6efeefafffcbf
|
[] |
no_license
|
DaniBoo/cyanobacteria_project
|
6a816bb0ccf285842b61bfd3612c176f5877a1fb
|
be08ff723284b0c38f9c758d3e250c664bbfbf3b
|
refs/heads/master
| 2021-01-25T05:28:00.686474
| 2013-03-23T15:09:39
| 2013-03-23T15:09:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 133
|
r
|
rinput.R
|
# Convert the rooted Newick tree in 951_0.txt to its unrooted form and write
# it back out as 951_0_unrooted.txt.
library(ape)
rooted_tree <- read.tree("951_0.txt")
unrooted_tree <- unroot(rooted_tree)
write.tree(unrooted_tree, file="951_0_unrooted.txt")
|
11255ef050b29703c2c51faadd2f5078816d8aaa
|
0e5313e368f15aeffd5426e96f15ddfc7cbfc05b
|
/scripts/rangeSize/OBIS_explore.R
|
a6b0138661cfe0f63e8273d9efccee25ad3eb507
|
[] |
no_license
|
baumlab/open-science-project
|
144346d1f7befc82f8eb159542a8b2832363f7ab
|
7112ce9d6739c3f8a50da9f0aa028ac9cb1c90c8
|
refs/heads/master
| 2020-07-16T13:17:30.400656
| 2018-02-07T13:44:44
| 2018-02-07T13:44:44
| 73,947,077
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,106
|
r
|
OBIS_explore.R
|
# Created by Jamie McDevitt-Irwin
# Created on 16-October-2017
# Purpose: this code explores the OBIS range size (EOO) data, relates range
# size to trade volume, and appends range size to the trophic-group data.
# setwd("~/Desktop/Research/open-science-project")
# setwd("~/Documents/git-jpwrobinson/open-science-project")
# setwd("~/Documents/git_repos/open-science-project")
setwd("~/Documents/git_repos/open-science-project")
library("ggplot2"); library(dplyr)
theme_set(theme_bw())
# clear my environment
# NOTE(review): rm(list=ls()) in a script is an anti-pattern -- it wipes the
# caller's whole workspace; consider removing it.
rm(list=ls())
load("data/trade_with_obis_eoo.Rdata") # "trade.obis"
ls()
head(trade.obis)
dim(trade.obis) #2147,7
#####################################################
# WITHOUT TROPHIC DATA
# Average total by year and then sum by country
#####################################################
# want to plot range size ~ total volume, but need to deal with different years and different countries
a<-aggregate(Total ~ Taxa+ EOO + Exporter.Country, data=trade.obis, mean, na.action=NULL)
head(a)
dim(a) #2147 by 4 # doesn't change the dimensions? # maybe there are no different years with the same taxa in the same country?
trade.obis.mean <- aggregate(Total ~ Taxa + EOO, data = a, sum) # sum these averages across countries
head(trade.obis.mean)
dim(trade.obis.mean) # 1445 by 3 so there are 1445 species with range sizes and volumes
tail(trade.obis.mean)
# there doesn't seem to be any different years within the same country for one species
# nick and i confirmed with the code below
t <-trade.obis %>% group_by(Taxa, Exporter.Country) %>% summarise(a=sum(n()))
t <-data.frame(t)
summary(t)
# also confirmed here: count distinct years per (country, taxon) pair
y<-aggregate(YEAR ~ Exporter.Country + Taxa, trade.obis, function(x)length(unique(x)))
unique(y$YEAR)
# but why? this seems weird
#####################################################
# Plots!
#####################################################
# trade volume by eoo
pdf(file='figures/eoo_trade_volume_2008_09_11.pdf', height=11, width=9)
ggplot(trade.obis.mean, aes(EOO, log10(Total))) + geom_point() + ylab('log10 trade volume') + geom_smooth(method=lm, se=FALSE)
dev.off()
# there are lots of species that got a zero range size in the EOO calculation, why and should we drop them?
#####################################################
## James looking at export diversity
library(dplyr); library(visreg); library(lme4)
# div = number of distinct exporter countries per taxon
trade.obis<-trade.obis %>% group_by(Taxa) %>% mutate(div=length(unique(Exporter.Country)))
hist(trade.obis$div)
# Poisson GLM of export diversity on scaled range size and latitude
m<-glm(div ~ scale((EOO)) + scale(decimalLatitude), trade.obis, family=poisson)
summary(m)
visreg(m)
hist(resid(m))
# WITH TROPHIC DATA
# plot by trophic group and range size
trade<-read.csv(file='data/trade_with_trophic_group.csv')
head(trade)
# merge with the obis data (first EOO per taxon via match)
trade$EOO<-trade.obis$EOO[match(trade$Taxa, trade.obis$Taxa)]
head(trade)
dim(trade)
# Match range size with the trade.trophic data
#####################################################
trade.trophic<-read.csv(file='data/trade_with_trophic_group.csv')
head(trade.trophic)
trade.trophic$EOO<-trade.obis$EOO[match(trade.trophic$Taxa, trade.obis$Taxa)]
trade.trophic$range<-ifelse(is.na(trade.trophic$EOO), 'NO', 'YES')
trade.trophic$range
head(trade.trophic)
dim(trade.trophic) #393, 10
str(trade.trophic)
# subset out species without range size data
# NOTE(review): the trailing empty argument in filter(..., ) is tolerated by
# dplyr's dynamic dots but should be removed.
trade.trophic.obis <- dplyr::filter(trade.trophic, range=="YES", )
head(trade.trophic.obis)
head(trade.trophic.obis$range) # now only "YES"
dim(trade.trophic.obis) #393 by 10
str(trade.trophic.obis)
# all of the trophic groups also had range sizes
#####################################################
# Average total by year and then sum by country
#####################################################
# want to plot range size ~ total volume, but need to deal with different years and different countries
a<-aggregate(Total ~ Taxa+ EOO + Exporter.Country, data=trade.trophic.obis, mean, na.action=NULL)
head(a)
dim(a) # 393 by 4 # doesn't change the dimensions? # maybe there are no different years with the same taxa in the same country?
trade.trophic.obis.mean <- aggregate(Total ~ Taxa + EOO, data = a, sum) # sum these averages across countries
head(trade.trophic.obis.mean)
dim(trade.trophic.obis.mean) # 244 by 3 so there are 244 species with range sizes and volumes
tail(trade.trophic.obis.mean)
# there doesn't seem to be any different years within the same country for one species
# nick and i confirmed with the code below
t <-trade.trophic.obis %>% group_by(Taxa, Exporter.Country) %>% summarise(a=sum(n()))
t <-data.frame(t)
summary(t)
# but why? this seems weird
#####################################################
# Plots!
#####################################################
# trade.trophic volume by range size, coloured by trophic group
pdf(file='figures/EOO_trade.trophic_volume_2008_09_11.pdf', height=11, width=9)
ggplot(trade.trophic.obis, aes(EOO, log10(Total), col=TROPHIC)) + geom_point() + ylab('log10 trade volume') + geom_smooth(method=lm, se=FALSE)
dev.off()
# this way each data point is one species and one range size, averaged by years and summed by countries
#####################################################
|
b415419a909a4be430d4b51129e1be57c7886aea
|
634b04b41470149ff9e41850c9c6b8b7b085634f
|
/man/assert_pos.Rd
|
470d65c410c8559c41a64b5a9b1bd52f0c28b5bb
|
[
"MIT"
] |
permissive
|
KeithJF82/vectorpower
|
f04fa79322e0ac551b56df13c90a2cbc120e20dc
|
2f224b79f2db6226bc686b791253c06cb2b443df
|
refs/heads/master
| 2023-06-21T21:21:26.338743
| 2023-06-08T09:31:20
| 2023-06-08T09:31:20
| 212,610,822
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 812
|
rd
|
assert_pos.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/helper_functions.R
\name{assert_pos}
\alias{assert_pos}
\title{x is positive (with or without zero allowed)}
\usage{
assert_pos(
x,
zero_allowed = TRUE,
message1 = "\%s must be greater than or equal to zero",
message2 = "\%s must be greater than zero",
name = deparse(substitute(x))
)
}
\arguments{
\item{x}{Value to check if positive}
\item{zero_allowed}{Indicator (TRUE/FALSE) of whether zero is allowed}
\item{message1}{Message to output if x is negative (when zero allowed)}
\item{message2}{Message to output if x is not positive (when zero disallowed)}
\item{name}{Name}
}
\description{
Check that value x is positive
}
\details{
Check that x is positive (zero allowed or disallowed using zero_allowed parameter)
}
|
8f3d8453c02402b2cde06b5a34a3f52dced1eaa8
|
2a9668232a4e5757d2166f7f23b1f3ea740dc343
|
/analysis/shiny-app/shiny_ui.R
|
ba4be6f72284b3e521a1ad464233b77cbf7ed5ce
|
[
"CC0-1.0"
] |
permissive
|
analytics-ufcg/obras-pb
|
b5b6e9d551d1e395f83699a889e53ef11aa9607f
|
47b33a17180547b1f76ee61d8ec676d884bdeac9
|
refs/heads/master
| 2021-05-07T14:30:01.875163
| 2018-06-18T18:42:48
| 2018-06-18T18:42:48
| 109,882,326
| 1
| 0
| null | 2018-07-23T19:13:53
| 2017-11-07T19:40:12
|
HTML
|
UTF-8
|
R
| false
| false
| 8,443
|
r
|
shiny_ui.R
|
# Interface do usuário
sidebar.width <- 240
ui <- dashboardPage(
title = "Sagres obras",
skin = "purple",
dashboardHeader(
title = span(
img(src = "sagres-obras.png")
),
titleWidth = sidebar.width
),
dashboardSidebar(
sidebarMenu(
menuItem("Obras georreferenciadas", tabName = "obras-georref", icon = icon("map-marker")),
menuItem("Custos efetivos", icon = icon("building"), tabName = "tipos-obras")
),
width = sidebar.width
),
dashboardBody(
tags$head(tags$style(HTML('.box-header {min-height: 35px;} .selectize-dropdown {z-index: 2000;}'))),
tags$head(tags$link(rel="shortcut icon", href="tce-cropped.png")),
tags$head(tags$link(rel="shortcut icon", href="sagres-obras.png")),
tags$head(
HTML("<script>
var socket_timeout_interval
var n = 0
$(document).on('shiny:connected', function(event) {
socket_timeout_interval = setInterval(function(){
Shiny.onInputChange('count', n++)
}, 15000)
});
$(document).on('shiny:disconnected', function(event) {
clearInterval(socket_timeout_interval)
});
</script>")
),
tabItems(
tabItem(
tabName = "obras-georref",
fluidRow(
height = "100vh",
column(width = 12,
box(width = NULL,
solidHeader = TRUE,
collapsible = FALSE,
h2("Painel I - Obras georreferenciadas"),
tags$p("Este painel fornece dados sumarizados e permite ter uma visão geral de quais gestões municipais mais
georreferenciam suas obras, onde é possível clicar nos municípios para obter informações mais detalhadas
sobre o mesmo. É possível ainda filtrar as obras por microrregião/mesorregião e pelo ano das obras,
permitindo assim que sejam realizadas análises históricas sobre os municípios. O painel conta ainda
com um ranking que filtra as obras através dos filtros selecionados e mostra os 25 municípios que
possuem mais obras georreferenciadas. Por fim, é possível também visualizar os dados de forma relativa
ou absoluta."),
tags$i("Nota: O georreferenciamento é uma forma de tornar as coordenadas conhecidas num dado sistema de referência,
onde descreve um ou mais pares de latitude e longitude do local, sendo assim possível localizar em um mapa.")
),
box(width = NULL,
solidHeader = TRUE,
collapsible = TRUE,
radioButtons("select_tipo_localidade_georref", "Tipo de localidade:",
c("Município" = "municipio",
"Microrregião" = "microrregiao",
"Mesorregião" = "mesorregiao"),
inline = TRUE
),
radioButtons("select_tipo_representacao_georref", "Valor representado:",
c("Relativo" = "relativo",
"Absoluto" = "absoluto"),
inline = TRUE
),
selectInput("select_localidade_georref", label = h3("Selecione a localidade"),
choices = c("Todos", localidades.georref$nome.x),
selected = cidade.default(localidades.georref, "nome.x"))
)
),
column(width = 12,
box(width = NULL,
collapsible = TRUE,
leafletOutput("mapa_georref", height = "50vh")
),
box(width = NULL,
collapsible = TRUE,
dygraphOutput("dygraph_georref", height = "25vh")
),
box(width = "50vh",
collapsible = TRUE,
plotOutput("ranking_georref", height = "57vh")
)
)
)
),
tabItem(
tabName = "tipos-obras",
fluidRow(
column(width = 12,
box(width = NULL,
solidHeader = TRUE,
collapsible = FALSE,
h2("Painel II - Custo efetivo das obras"),
tags$p("Este painel apresenta dados sobre o custo das obras por unidade de medida construída dos mais
diversos tipos de obras realizadas no estado, permitindo uma fácil análise e detecção de anomalias
nos custos das obras, sendo possível clicar nos municípios para obter informações mais detalhadas
sobre o mesmo. É possível filtrar as obras por microrregião/mesorregião e pelo ano das mesmas,
permitindo assim que sejam realizadas análises históricas sobre os municípios. O painel conta
ainda com um ranking que filtra as obras através dos filtros selecionados e mostra os 25 municípios
que possuem o melhor custo efetivo. Por fim, é possível também selecionar o tipo de obra que
se deseja analisar.")
),
box(width = NULL,
solidHeader = TRUE,
collapsible = TRUE,
radioButtons("select_tipo_localidade_tipo_obra", "Tipo de localidade:",
c("Município" = "municipio",
"Microrregião" = "microrregiao",
"Mesorregião" = "mesorregiao"),
inline = TRUE
),
selectInput("select_tipo_obra", label = h3("Selecione o tipo da obra"),
choices = get.top.10.tipo.obra(custo.efetivo.obras),
selected = tipo.obra.selecionada),
selectInput("select_localidade_tipo_obra", label = h3("Selecione a localidade"),
choices = localidades.custo.efetivo$nome,
selected = localidade.selecionada.tipo.obra)
),
box(width = NULL,
collapsible = TRUE,
leafletOutput("mapa_tipo_obra", height = "50vh")
),
box(width = NULL,
collapsible = TRUE,
dygraphOutput("dygraph_tipo_obra", height = "25vh")
)
),
column(width = 12,
box(width = NULL,
collapsible = TRUE,
plotOutput("ranking_tipo_obra", height = "51vh")
)
),
column(width = 12,
box(width = NULL,
collapsible = TRUE,
DT::dataTableOutput("obras_custo_efetivo")
)
)
)
)
)
)
)
|
b58a1c1be143b6637578481ad7ecc089c1f1cad4
|
3070367b785de76331a3a89682cfba01517e81d5
|
/hsownd.R
|
2929edf8c2ab1f593f7eba645a848b2d8c60a99a
|
[] |
no_license
|
nezzag1/RM01A_LE482_2018
|
24ac13750b189836c3145da7aad9a6d800aa2408
|
34604666725b9a0758c279be763729724ab235d3
|
refs/heads/master
| 2021-09-04T12:58:38.789794
| 2018-01-18T23:21:50
| 2018-01-18T23:21:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,484
|
r
|
hsownd.R
|
#RM01 Coursework - Testing whether the housing wealth effect is
#altered by housing ownership
library(lmtest)
library(xlsx)
library(ggplot2)
setwd("C:/Users/Neil/OneDrive/Documents/MPhil Environmental Policy/RM01")
#load and prepare data
df1 <- read.xlsx('RM01_OptionA_data.xlsx', sheetIndex = 1)
df1 <- subset(df1, select = c(FUELANNUAL, HSVAL, HSOWND,
HHINCOME, SAVINGSINCOME, HSBEDS, HHSIZE,
NKIDS_DV, HEATCH, FUELDUEL, SAVE, AGE, FINFUT,
ENVTEND))
df1 <- df1[complete.cases(df1),]
#in this subset there are 2335 observations
#run existing model without interaction term or dummy variable
mod7 <- lm(FUELANNUAL ~ HSVAL + HSOWND + log(HHINCOME) + SAVINGSINCOME
+ HSBEDS + HHSIZE + NKIDS_DV + HEATCH + FUELDUEL
+ SAVE + AGE + FINFUT + ENVTEND, data = df1)
#now we run the model with interaction terms between HSOWND and the other terms
mod8 <- lm(FUELANNUAL ~ HSOWND * (HSVAL + log(HHINCOME) + SAVINGSINCOME
+ HSBEDS + HHSIZE + NKIDS_DV + HEATCH + FUELDUEL
+ SAVE + AGE + FINFUT + ENVTEND), data = df1)
#run a partial F test to see if the dummy term and its interactions are
#jointly significant
Fstat <- ((sum(mod7$residuals^2) - sum(mod8$residuals^2))/12)/(sum(mod8$residuals^2)/(2335-27-1))
pval <- 1- pf(Fstat,12,1448-27-1)
#and run an ANOVA test
anova(mod7,mod8)
#summary of mod8
summary(mod8)
|
eb458f85c063163c88857ab3d383604166dfc4fa
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/sim1000G/examples/getCMfromBP.Rd.R
|
4b828c831565e5b3bb6ed0d220a24536801592b5
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 572
|
r
|
getCMfromBP.Rd.R
|
library(sim1000G)
### Name: getCMfromBP
### Title: Converts centimorgan position to base-pair. Return a list of
### centimorgan positions that correspond to the bp vector (in
### basepairs).
### Aliases: getCMfromBP
### ** Examples
library("sim1000G")
examples_dir = system.file("examples", package = "sim1000G")
vcf_file = sprintf("%s/region.vcf.gz", examples_dir)
vcf = readVCF( vcf_file, maxNumberOfVariants = 100,
min_maf = 0.12)
# For realistic data use the function downloadGeneticMap
generateUniformGeneticMap()
getCMfromBP(seq(1e6,100e6,by=1e6))
|
25ad02369b9ebcf9b590b50336aa912656f5445f
|
d2eb97d81317dc5d774a8119bd517b6936bfe5bd
|
/man/segplots.Rd
|
f50566a45add5383eaaa9e5ac5a8584201227f92
|
[] |
no_license
|
nverno/seedsub
|
91cf34cdd559380e9e412908fdffa7fa77c4d37e
|
c0a0243f465f686572a75fa37ff7f92e1bec551d
|
refs/heads/master
| 2021-01-10T05:48:28.702693
| 2016-03-12T05:46:14
| 2016-03-12T05:46:14
| 50,882,227
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 916
|
rd
|
segplots.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/segplots.R
\docType{data}
\name{segplots}
\alias{segplots}
\title{Contour segments plot-level data.}
\format{An object of class \code{data.table} (inherits from
\code{data.frame}) with 157 rows and 13 columns.
\itemize{
\item PID: Plot identifier, this is determined by unique combinations of \code{CONTNAM} and \code{STPACE}.
\item CONTNAM: Contour name
\item STPACE: Beginning pace of a segment.
\item ELEVCL: Elevation class
\item ASPCL: Aspect class
\item SOILCL[1-2]: Soil class
\item HISTO: Soil type.
\item SPODO: Soil type.
\item INCEP: Soil type.
\item MINER: ?
\item ROCK: Soil type
\item PHISTO: Soil type.
}}
\usage{
segplots
}
\description{
For more details, see \code{segdata}. This data links to
individual-level data in \code{segplants} by the `PID` variable.
}
\keyword{datasets}
|
ddccf0aa278bf74edf7cf7a27e1af9058e4660fa
|
fc683f8ce6f3eb8c4ef6f0309f3c7bb7bf8eef60
|
/xgboost.R
|
33052db04f52ae83c0f04051d7a832c805b666db
|
[] |
no_license
|
lolgein/Machine-Learning
|
6976ce6e13642ce701fc6b76a0a965a6560fe9f3
|
c1861d5f4fdfbf80ad9df15815a790c31b147324
|
refs/heads/master
| 2021-01-18T20:44:56.355064
| 2017-04-02T13:47:24
| 2017-04-02T13:47:24
| 86,987,226
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,414
|
r
|
xgboost.R
|
# Gradient Boosting (Ensemble of weak decisions trees) of the package xgboost
library(xgboost)
# Set parameters, seed, best logloss and index
best_param = list()
best_seednumber = 1234
best_logloss = Inf
best_logloss_index = 0
mydataMatrix = data.matrix(train_data)
mydataMatrix[,15] = mydataMatrix[,15] - 1
dtrain = xgb.DMatrix(data = mydataMatrix[,1:14], label = mydataMatrix[,15],missing = NA)
# randomized parameter search
for (iter in 1:100) {
param <- list(objective = "multi:softprob",
eval_metric = "mlogloss",
num_class = 3,
max_depth = sample(4:10, 1),
eta = runif(1, .01, .3),
gamma = runif(1, 0.0, 0.2),
subsample = runif(1, .5, .8),
colsample_bytree = runif(1, .5, .9),
min_child_weight = sample(1:40, 1),
max_delta_step = 1
)
cv.nround = 1000
cv.nfold = 10
seed.number = sample.int(10000, 1)[[1]]
set.seed(seed.number)
mdcv <- xgb.cv(data=dtrain, params = param,
nfold=cv.nfold, nrounds=cv.nround,
verbose = T, early.stopping.rounds=8, maximize=FALSE)
min_logloss = min(mdcv$evaluation[, test_mlogloss_mean])
min_logloss_index = which.min(mdcv$evaluation[, test_mlogloss_mean])
# save best parameter and iteration as index
if (min_logloss < best_logloss) {
best_logloss = min_logloss
best_logloss_index = min_logloss_index
best_seednumber = seed.number
best_param = param
}
print(iter)
print(best_logloss)
}
# Creates plot of cross validation results of model training
plot(mdcv$evaluation_log$iter,mdcv$evaluation_log$train_mlogloss_mean,type="l",col="red"
,xlab="Epochs",ylab = "logloss mean",
main = "Cross-validation results of constructing model\n with 1000 iterations")
lines(mdcv$evaluation_log$iter,mdcv$evaluation_log$test_mlogloss_mean,col="green")
legend(850,1.0, c("Test","Train"),lty=c(1,1),lwd=c(2.5,2.5),col=c("green","red"))
train_data$interest_level = factor(train_data$interest_level)
train_data$neighborhoods = as.numeric(train_data$neighborhoods)
trainTask <- makeClassifTask(data = train_data, target = "interest_level")
#Get cross-validation values of finalised model, because not possible with only gxboost package
xg_set <- makeLearner("classif.xgboost", predict.type = "prob")
bbb <- list(
objective = "multi:softprob",
eval_metric = "mlogloss",
nrounds = best_logloss_index,
max_depth = best_param$max_depth,
eta = best_param$eta,
gamma = best_param$gamma,
subsample = best_param$subsample,
colsample_bytree = best_param$colsample_bytree,
min_child_weight = best_param$min_child_weight,
max_delta_step = best_param$max_delta_step
)
xg_new <- setHyperPars(learner = xg_set, par.vals = bbb)
rdesc = makeResampleDesc("CV", iters = 10L)
# Do cross validation with specific parameters to get all folds
r = resample(learner = xg_new, resampling = rdesc, measures = logloss, task = trainTask)
# Train the model with the best parameters
nround = best_logloss_index
set.seed(best_seednumber)
md <- xgb.train(data=dtrain, params=best_param, nrounds=nround, nthread=6)
# Prediction of the test_data
xgboost_pred = matrix(predict(md,data.matrix(test_data)),ncol=3,byrow = T)
colnames(xgboost_pred) = c("high","medium","low")
|
2751bc59e76b1f2555c00ebcd469e8f5c6bb413d
|
f666db90df3146ec868d4d4174de715adf84b3dd
|
/.Rprofile
|
4c2c99cfa8e57459a5a63c1edddbccc5e9a92477
|
[] |
no_license
|
kevyin/home-settings
|
116ecf89c2688ad2f37ec5c599c00116e5e19231
|
e6b0f2c94a23f7b3451a0bffa25f308194ff0cc5
|
refs/heads/master
| 2021-01-23T15:04:08.771973
| 2018-05-01T12:31:38
| 2018-05-01T12:31:38
| 4,710,677
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,493
|
rprofile
|
.Rprofile
|
## see help(Startup) for documentation on ~/.Rprofile and Rprofile.site
#.Library.site <- "/usr/lib/R/site-library"
#.Library.site <- ""
local({
# CRAN mirror
r <- getOption("repos")
# r["CRAN"] <- "http://cran.ms.unimelb.edu.au/"
r["CRAN"] <- "http://mirror.aarnet.edu.au/pub/CRAN/"
options(repos=r)
# There's an AArnet Bioconductor mirror, but only from v2.7, thus R>=2.12
if(as.numeric(R.Version()$minor) >= 12.0)
bm <- "http://mirror.aarnet.edu.au/pub/bioconductor"
else
bm <- "http://bioconductor.org"
options(BioC_mirror=bm)
# set Ncpus to make installing packages faster
switch(Sys.info()[["sysname"]],
Darwin=options(Ncpus=as.numeric(system("sysctl -a hw | grep hw.ncpu: | sed 's/.*: //'", intern=TRUE))),
Linux=options(Ncpus=as.numeric(system("cat /proc/cpuinfo | grep -c processor", intern=TRUE)))
)
# this forces Biobase to use this Bioconductor mirror, which means 'oligo' downloads pd* packages from the mirror
# There's no point evaluating this code prior to R 2.12, as there was no AArnet mirror before then.
if(as.numeric(R.Version()$minor) >= 12.0) {
options(BioC=structure(list(Base = structure(list(urls = structure(list(bioc = getOption("BioC_mirror", default="http://bioconductor.org")), .Names = "bioc"),
use.widgets = FALSE), .Names = c("urls", "use.widgets"), class = "BioCPkg")), .Names = "Base", class = "BioCOptions")
)
}
})
|
0ad7d6e763aab519c7f55bcd1d5cfeef0d099e67
|
45630d190e8f27cbc45821ba0ff9eeb3cbb9f211
|
/Simulation/simulation5_2/2/DDEestimate.R
|
1b1a380a72e104c76ce8fb60ee4b47ac6d4c2307
|
[] |
no_license
|
ecustwy/smcDE
|
822d24281f6be249cf5a2fdbf9885495ae6ba934
|
c9d75fcb162b29e98a0ea82b0319e36d31e8006f
|
refs/heads/master
| 2023-01-15T13:44:44.893025
| 2020-11-23T14:03:20
| 2020-11-23T14:03:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 787
|
r
|
DDEestimate.R
|
gname = c("EstimatedDDE2.eps",sep="")
postscript(gname,width=10,height=5,horizontal = FALSE, onefile = FALSE, paper = "special")
par(mfrow=c(1,2),oma=c(0.2,1.5,0.2,1.5),mar=c(3,2,0.2,2),cex.axis=1,las=1,mgp=c(1,0.5,0),adj=0.5)
plot(output[,2], type = 'l', ylim = c(-8, 30), xlab = expression(X[1]), ylab = '', lwd = 3)
lines(fitted1, lty = 2, col = 2, lwd = 3)
lines(upper1, lty = 3, col =2, lwd = 3)
lines(lower1, lty = 3, col =2, lwd = 3)
plot(output[,3], type = 'l', ylim = c(-60, 600), xlab = expression(X[2]), ylab = '', lwd = 3)
lines(fitted2,lty = 2, col = 2, lwd = 3)
lines(upper2, lty = 3, col =2, lwd = 3)
lines(lower2, lty = 3, col =2, lwd = 3)
legend("topright",c("TRUE","MEAN","CI"),cex=1.2,
col=c(1,2,2),lty = c(1, 2, 3), lwd = 3,pch = 20,bty="n")
dev.off()
|
3bad30b6b2a9dc42b7a41a58943ae7079b34b5a8
|
4e5f8a57cc9e4bca1711d4d32a89cbaa691ee57f
|
/man/node_download.Rd
|
6eee703b59ad7f284d6cdb794a06d22f14c49fcd
|
[
"MIT"
] |
permissive
|
Hong-Sung-Hyun/multilinguer
|
1db6704266b91011f20c31d54acf2ad0dfa9a38c
|
6068e6377e493ad60ec95e874d5cfcee9d619207
|
refs/heads/master
| 2021-06-21T14:37:57.385129
| 2020-02-02T16:52:25
| 2020-02-02T16:52:25
| 254,499,426
| 1
| 0
| null | 2020-04-09T23:27:02
| 2020-04-09T23:27:01
| null |
UTF-8
|
R
| false
| true
| 315
|
rd
|
node_download.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/download_node.R
\name{node_download}
\alias{node_download}
\title{nodejs installer download}
\usage{
node_download(os, dest)
}
\arguments{
\item{os}{Type of OS}
\item{dest}{where to download}
}
\description{
nodejs installer download
}
|
1ca89bc294fd96c71c5ae5cb2dccba332bcb9fc7
|
e791ebe596bd3f12a7554acffa70947839c29c55
|
/preprocessing/get_singer_connection.R
|
655f0dfe7e136bb070ddbedbd6d6da9afc941d8d
|
[] |
no_license
|
ruibai118/Edav-Final-Project-1
|
46ec9db37296f4b45cc0710c580a4f41df131568
|
90d45d1670e8c4efc0d5f5455c0e17c04648df8c
|
refs/heads/master
| 2022-04-02T07:34:49.711801
| 2019-12-13T05:37:26
| 2019-12-13T05:37:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,569
|
r
|
get_singer_connection.R
|
library(tidyverse)
library(spotifyr)
library(readr)
Sys.setenv(SPOTIFY_CLIENT_ID = '2396242d1c2043ae90fa37810b334c56')
Sys.setenv(SPOTIFY_CLIENT_SECRET = 'e972f10fc0384e5683be1f603b869a55')
access_token <- get_spotify_access_token(client_id = Sys.getenv('SPOTIFY_CLIENT_ID'), client_secret = Sys.getenv('SPOTIFY_CLIENT_SECRET'))
year_df <- read.csv("data/clean/yearly_data.csv")
region = c("global","us","gb","ad","ar","at","au","be","bg","bo","br","ca","ch","cl","co","cr","cy","cz","de","dk","do","ec","ee","es","fi","fr","gr","gt","hk","hn","hu","id","ie","il","in","is","it","jp","lt","lu","lv","mc","mt","mx","my","ni","nl","no","nz","pa","pe","ph","pl","pt","py","ro","se","sg","sk","sv","th","tr","tw","uy","vn","za")
for (country in region){
print(country)
artist_list <- unique(as.vector(year_df[year_df$Region==country,]$Artist))
artist_id <- unique(as.vector(year_df[year_df$Region==country,]$Artist_ID))
artist_mat <- matrix(0, nrow = length(artist_list), ncol = length(artist_list),
dimnames = list(artist_list,
artist_list))
if (length(artist_list)!=0){
for (i in 1:length(artist_list)){
Sys.sleep(0.3)
artist <- artist_list[i]
print(artist)
related <- get_related_artists(artist_id[i],authorization = access_token,include_meta_info = FALSE)
for (name in related$name){
if (name %in% artist_list){
artist_mat[artist, name] <-1
}
}
}
}
write.csv(artist_mat,paste("data/clean/singer_connection/",country ,".csv", sep = ""))
}
|
955153295040849a732bb8bae211f2063345a39f
|
b85e454878ccfed1848bf255315123784279f134
|
/plot3.R
|
cc2dede93d7bb7177df7f4ef3ef7ccdb5b00aa68
|
[] |
no_license
|
Wyingli/ExData_Plotting1
|
fb4bd99474ee863a2fb586c3a7b0590defcb6b06
|
bc8e08d5b793d0e126a026edeb037299bf400e9b
|
refs/heads/master
| 2022-12-02T08:29:44.193303
| 2020-08-18T07:39:58
| 2020-08-18T07:39:58
| 286,737,644
| 0
| 0
| null | 2020-08-11T12:23:17
| 2020-08-11T12:23:16
| null |
UTF-8
|
R
| false
| false
| 901
|
r
|
plot3.R
|
data1<-read.table("C:/Users/li/Documents/exdata_data_household_power_consumption/household_power_consumption.txt",header=TRUE,sep=";")
data2<-data1[grepl("1/2/2007|2/2/2007",data1$Date),]
data22<-filter(data2,Date=="1/2/2007"|Date=="2/2/2007")
dates<-data22$Date
times<-data22$Time
x<-paste(dates,times)
a<-strptime(x, "%d/%m/%Y %H:%M:%S")
data5<-mutate(data22,date=a)
data<-select(data5,-(Date:Time))
data$Global_active_power<-as.numeric(unlist(data$Global_active_power))
Sys.setlocale("LC_TIME", "English")
with(data,{
plot(date,Sub_metering_1, type="l",ylab="Global Active Power (kilowatts)", xlab="")
lines(date,Sub_metering_2,col="red")
lines(date,Sub_metering_3,col="blue")
})
legend("topright", col=c("black", "red", "blue"), lty=1, lwd=2, legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
dev.copy(png, file="plot3.png", height=480, width=480)
dev.off()
|
8d62cd4b1331d95d437560a1aea8f8fb88c4d3ad
|
02f053ce70b065724d4a02619fb402adcc0ec997
|
/analysis/boot/boot156.R
|
6c8a8f6c8c9d96c393e777d179ac314a24f65c02
|
[] |
no_license
|
patperry/interaction-proc
|
27950482929240bba55c7d0f2f8c5235d770feea
|
cf8dfd6b5e1d0684bc1e67e012bf8b8a3e2225a4
|
refs/heads/master
| 2021-01-01T06:11:47.125853
| 2012-12-04T20:01:42
| 2012-12-04T20:01:42
| 673,564
| 1
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,742
|
r
|
boot156.R
|
seed <- 156
log.wt <- 0.0
penalty <- 2.8115950178536287e-8
intervals.send <- c()
intervals.recv <- c(56, 112, 225, 450, 900, 1800, 3600, 7200, 14400, 28800, 57600, 115200, 230400, 460800, 921600, 1843200, 3686400, 7372800, 14745600, 29491200, 58982400)
dev.null <- 358759.0022669336
df.null <- 35567
dev.resid <- 224930.59100142674
df.resid <- 35402
df <- 165
coefs <- c(6.58967112997812, 5.9081230706123335, 5.648012844799544, 5.417734225196756, 5.1816434439462, 4.912427763338423, 4.818695336330744, 4.656985716785295, 4.424835500300569, 4.244849353226472, 4.35240649469474, 4.172441391182303, 4.05487843908711, 3.9678661235801305, 3.797288061761219, 3.5479952598319677, 3.260944444363024, 2.9894802959084408, 2.5203226569894412, 2.0257078253718466, 1.51548549488063, 0.8907076832345708, 1.0271597242959263, 0.5469983195454466, 0.29391585011616694, -0.8608859382686036, -0.3664300660853649, 0.9905378227106858, 1.092401870965946, -1.0888997397376945, -2.7121858662439933, -2.011693593823409, -0.33821771437548076, 0.7041851934395976, 1.252718601989205, -1.1428161870816853, -0.5476094710506813, -1.3622559606284186, 0.27309968304968385, -0.8181794423419871, 0.8854287398447436, 0.6853988407255708, -0.7778066685211964, -2.1517451821639817, -0.7079952898110736, -0.8150115141262012, -0.8218860922072699, 0.35690662010646323, 0.49632991168730484, -1.153835236651491, 3.875861427029556e-2, 0.7603898411622644, -2.5018233876082805, 1.676798258131181, 0.8660476499829115, 1.0897711868130222, -2.3317604789685027, 5.4437253182109756e-2, -0.8211649497822479, 1.4022364970782517, 1.3596906693677722, 0.9236875732472705, -1.6573678216268304, -1.3354898217561555, -0.6946797118684982, 0.44734975934832627, 0.3736260409553261, -0.20859376957920486, -1.3911753952682941, -0.6084996635352453, -1.8688042452076548, -0.20024992432183653, 0.5318303616871214, 1.0576353833885948, 0.6598056141919026, -0.6503302222342113, -1.2721128429505153, -1.333357632574788, -8.486298183080268e-3, 0.6901308995975114, 1.2356429306087577, 0.10947274676296755, 0.12333323847134582, -2.139530406803184, -0.9269574960277023, 0.5015835842896944, 1.2162095781693851, 0.40844911233678993, 0.8727727038714551, -1.8219949938406999, 0.671285573051017, 0.6320243204741026, 0.8235067149866822, 0.24744262630271344, 9.7922144516248e-2, 1.4066197972291918, -0.3441889019778916, 0.5277263727994431, 
9.181504268588615e-2, 7.97153245039561e-2, 0.34949689800947586, -0.26255282221628246, 0.9372338391487277, 0.14955023894167566, 0.8090153110325006, 0.8142137329547592, 1.1217015757690159, -0.8763977737648548, -0.29612651324347633, -1.1760001658189445, 0.391176455153309, 0.6225102919013669, 1.57852183281837, -0.7936320775625385, -0.2736422786231171, -1.1959868458535845, 0.8951816752602271, -0.30484901048381546, 0.34756234323865653, 0.6454920406706623, -0.785856973235225, -0.6545624775554573, -1.1710645736888672, -0.5913756621545677, 0.3385050946070128, 0.7592547112535982, -4.275103247221126e-2, 0.905218359293844, -0.7928725534188106, -0.3440240106866375, 0.37146238901134093, 0.8778644973076667, 0.8090133429505336, 0.6040067218922599, -0.1868805592509024, 1.0765450037455861, -0.2922493962085346, 0.9646296948692444, 0.7512849762587608, 0.967536533098471, 0.8049289004066446, -0.6535259725631395, -0.7943969919584365, 0.5978242011371454, 0.5366301966061688, 0.667947463033121, -0.39993771369849546, -0.42469362049472703, -1.9406254518543808, 1.1619850808794756, 0.26126210631882624, 1.1934406419791057, -0.2692942759578836, -9.454906448164065e-2, -3.560773813617298e-2, -1.4749476811032727, -1.1266709768801073, 0.9249526802725555, 1.1108157468299857, -0.118078627378988, 1.5843636503731333, -0.3949790792754003, -4.586804167195068e-2, -5.9652788777572574e-2, 1.263040175505222)
|
c771ae7ca6e2ba4bf3c634b23a7a46819d30eb8c
|
c142155fe76ba283c3ab4e91653a7dfa1bdecd2f
|
/cachematrix.R
|
455827a6cce95adba539322fffb44b41687b666b
|
[] |
no_license
|
prajwal-naik/ProgrammingAssignment2
|
102de9b972adbaf7441856133684f45bf8b46ee6
|
8cd9b087ff87d0e872adb36e9a62fca85348e3f5
|
refs/heads/master
| 2022-12-05T14:09:08.772586
| 2020-08-04T10:16:01
| 2020-08-04T10:16:01
| 284,932,697
| 0
| 0
| null | 2020-08-04T09:17:04
| 2020-08-04T09:17:03
| null |
UTF-8
|
R
| false
| false
| 1,038
|
r
|
cachematrix.R
|
#These function find the inverse of a matrix.
#However, since the processing of inverting a matrix is not resource friendly, the functions tcashe thevalue of the inverse for future use
#This function creates a special "matrix" object that can cache it's inverse
makeCacheMatrix <- function(x = matrix())
{
inv<-matrix()
set<-function(y)
{
x<<-y
inv<<-matrix()
}
get<-function() x
setinverse<-function(inverse) inv<<-inverse
getinverse<-function() inv
list(set=set, get=get, setinverse=setinverse, getinverse=getinverse)
}
#This function checks if the inverse exists in cache. Else it finds it's inverse.
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
inv<-x$getinverse()
if(!all(is.na(inv)))
{
message("getting cached data")
return(inv)
}
data <- x$get()
inv<-solve(data, ...)
x$setinverse(inv)
inv
}
|
688196a9b8d2f7bd4ebe9db5a2b8cf7493ae297a
|
6cab63f138aa37eaeb2366284ee4001496356fa1
|
/Analyses/BlueOregonLogistic.R
|
e58e180d50e94ba8dadbf55d0ba7738333864c18
|
[] |
no_license
|
dackerly/PepperwoodVegPlots
|
a5e5c6ddbcd20a1d5a514c0edf169d3606817cdd
|
4cbe62ab9c12c5dd266ffe1cf40a16155b341e6f
|
refs/heads/master
| 2023-08-09T00:45:48.425990
| 2023-07-24T22:33:34
| 2023-07-24T22:33:34
| 36,311,558
| 8
| 9
| null | 2019-03-27T18:27:42
| 2015-05-26T17:17:19
|
R
|
UTF-8
|
R
| false
| false
| 4,752
|
r
|
BlueOregonLogistic.R
|
# Logistic Regression with QUEDOU,QUEGAR Seedlings #
# Author: Meagan F. Oldfather
# Date Created: 20150530
# I added one line to try a push and commit
# clear workspace
rm(list=ls())
library("RCurl")
library("data.table")
library("picante")
#function to source html
source_https <- function(url, ...) {
# parse and evaluate each .R script
sapply(c(url, ...), function(u) {
eval(parse(text = getURL(u, followlocation = TRUE, cainfo = system.file("CurlSSL", "cacert.pem", package = "RCurl"))), envir = .GlobalEnv)
})
}
# sources in all PW functions
source_https('https://raw.githubusercontent.com/dackerly/PepperwoodVegPlots/master/Analyses/PWfunctions_GitHub.R')
# get plot climate data
clim<-get.clim.pts()
clim$UTM.E<-as.numeric(clim$UTM.E)
clim$UTM.N<-as.numeric(clim$UTM.N)
# get plot info
info<-get.plot.info()
# merge climate and info data
clim<-cbind(clim, info[,4:5])
clim$Slope<-as.numeric(clim$Slope)
# get aggregated by plot, species seedling/juvenile data
SJ15<-plants.by.plot(year = 2015,type = "SEJU")
head(SJ15)
# get aggregated by plot,species tree data
T15<-plants.by.plot(year = 2014,type = "TR")
head(T15)
#subset by species of interest, only currently looking at QUEGAR & QUEDOU though
#species.interest<-c("ARBMEN", "FRACAL", "PSEMEN", "QUEAGR", "QUEDOU", "QUEGAR", "QUEKEL", "UMBCAL")
#SJ15<-subset(SJ15, SJ15$Species%in%species.interest)
#T15<-subset(T15, T15$Species%in%species.interest)
# turn into matrix where all plot,species combination are represented based on counts for seedling/juveniles and basal area for trees
SJ15.mat<-sample2matrix(SJ15[,c("Plot","Total.Number","Species")])
T15.mat<-sample2matrix(T15[,c("Plot","Basal.Area","Species")])
# adding "BA" to tree columns to distinquish from counts
colnames(T15.mat)<-paste(colnames(T15.mat),".BA",sep="")
SJ15.mat$Plot<-rownames(SJ15.mat)
T15.mat$Plot<-rownames(T15.mat)
# merge tree and seedling together
all.mat<-merge(SJ15.mat,T15.mat)
head(all.mat)
# add on clim/info data
all<-merge(all.mat,clim)
# scale basal area and climate data
all.s<-all
all.s[,55]<-as.numeric(all.s[,55])
all.s[,57]<-as.numeric(all.s[,57])
# combine with raw seedling data
all.s<-as.data.frame(cbind(all[,c(1:9)],apply(all.s[,10:ncol(all.s)], 2, scale)))
# logistic regression with QUEDOU seedling = successs and QUEGAR seedling = failure; additive model with QUEDOU basal area, QUEGAR basal area and CWD
m1 <- glm(cbind(QUEDOU, QUEGAR) ~ QUEDOU.BA + QUEGAR.BA + CWD, data=all.s, family=binomial)
summary(m1)
# considered adding in Plot as a random effect, but decided it didn't make sense as only one data point for covariates
#library(lme4)
#m2 <- glmer(cbind(QUEDOU, QUEGAR) ~ QUEDOU.BA + QUEGAR.BA + CWD + (1|Plot), data=all.s, family=binomial)
#summary(m2)
# plot the probability of a Blue Oak seedling with changing CWD, not sure how to interpret these marginal effects of a single predictor... this doesn't feel useful for intrepetation.
pred.CWD<-predict(m1,type="response")
# plot model predictions
plot(all.s$CWD, pred.CWD, xlab="CWD (SD from Mean)",ylab="Blue Oak", ylim=c(0,1), pch=19)
# plot caculated proportion
points(all.s$CWD, (all.s$QUEDOU/(all.s$QUEGAR+all.s$QUEDOU)), col="dodgerblue",pch=19)
newdata<-data.frame(CWD=seq(-2.5,2.5, length.out=100), QUEDOU.BA=rep(0,100), QUEGAR.BA=rep(0,100))
# best fit lines
best.fit.CWD<-predict(m1,newdata=newdata,type="response")
lines(newdata$CWD, best.fit.CWD,col="red")
# add legend
legend("topleft",legend = c("Probability", "Proportion"), col=c("black","dodgerblue"), pch=19)
# looking at general relationships
pairs(all[,c("QUEDOU.BA","QUEGAR.BA")])
pairs(all.s[,c("QUEGAR.BA","CWD")])
pairs(all.s[,c("QUEDOU.BA","CWD")])
pairs(all.s[c("QUEGAR", "CWD")])
pairs(all.s[c("QUEDOU", "CWD")])
pairs(all.s[c("QUEGAR","QUEDOU")])
# linear models of Basal Area and CWD for both species; not significant but inverse effects
summary(lm(QUEDOU.BA~CWD,data=all.s))
summary(lm(QUEGAR.BA~CWD,data=all.s))
# removed Basal Areas from model; effect of CWD flips to positive
m2<- glm(cbind(QUEDOU, QUEGAR) ~ CWD, data=all.s, family=binomial)
summary(m2)
# Looking at tree counts as binomial response (not basal area)
Adult.mat<-sample2matrix(T15[,c("Plot","Count","Species")])
colnames(Adult.mat)<-paste(colnames(Adult.mat),".C",sep="")
Adult.mat$Plot<-rownames(Adult.mat)
all.s.A<- merge(all.s,Adult.mat)
# logistic regression with QUEDOU tree counts = successs and QUEGAR tree counts = failure; additive model with CWD
m3<-glm(cbind(QUEDOU.C, QUEGAR.C) ~ CWD, data=all.s.A, family=binomial)
summary(m3)
# looking at how individually the species tree counts change with CWD
summary(lm(QUEGAR.C+QUEDOU.C~CWD,data=all.s.A))
summary(lm(QUEDOU.C~CWD,data=all.s.A))
summary(lm(QUEGAR.C~CWD,data=all.s.A))
|
7ec21130c9cc0ca6998202964a67692713b8ed28
|
ddd24d6bad77216149d891f5d1fd95ac73f41bcd
|
/man/getEnv.Rd
|
025d64029d70d19b1cd083484adad6e5ec0fbc66
|
[] |
no_license
|
itsaquestion/EasyRedis
|
f276bfbf526dcb1687667291435eed41e90ce384
|
82edfc47eea8f591fb8114c80618ac840fdb7b84
|
refs/heads/master
| 2020-03-30T09:53:57.239860
| 2019-05-16T02:22:08
| 2019-05-16T02:22:08
| 151,097,082
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 305
|
rd
|
getEnv.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/easy_redis_api2.R
\name{getEnv}
\alias{getEnv}
\title{getEnv get env value.}
\usage{
getEnv(x)
}
\arguments{
\item{x}{env value name}
}
\value{
the value. if value == "" then return NULL
}
\description{
getEnv get env value.
}
|
603d2497db914dd8c5cf551cbcedbfaf69cdaed4
|
6a174492297d65704b2788db6b4ae280421d2e29
|
/RShiny_Proj/cancer/global.R
|
46048d869e44f105f41e2a93ec106dc4ec9a3b9d
|
[] |
no_license
|
ericjmeyers1/RShiny_Proj
|
9a9879d5f42ba4c70768c766a8b89cb86dc7ad89
|
4e08d561e9bc9ca4da743ce3de5030d4002f032c
|
refs/heads/master
| 2020-06-29T17:18:22.458580
| 2019-09-23T20:01:10
| 2019-09-23T20:01:10
| 200,576,752
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 480
|
r
|
global.R
|
library(shinydashboard)
library(shiny)
library(DT)
library(googleVis)
library(ggthemes)
library(vcd)
library(dplyr)
library(ggplot2)
options(scipen=20)
cancer <- read.csv(file = './Cancer.csv')
cancer$Age.Groups.Code <- substr(cancer$Age.Groups.Code, 1, 2)
cancer$Age.Groups.Code <- gsub('-', '', cancer$Age.Groups.Code)
cancer$Age.Groups.Code <- as.numeric(cancer$Age.Groups.Code)
cancer_stat <- data.frame(cancer[c(-2, -4, -5, -8, -10)])
#choice <- colnames(cancer_stat)[-1]
|
7212c40293d4c9cd64285306bc6dc84c87c9d4b5
|
ff9eb712be2af2fa24b28ecc75341b741d5e0b01
|
/R/pred.int.norm.k.of.m.on.r.K.R
|
85f43156f2f0369f54da547750a267859eda87f3
|
[] |
no_license
|
alexkowa/EnvStats
|
715c35c196832480ee304af1034ce286e40e46c2
|
166e5445d252aa77e50b2b0316f79dee6d070d14
|
refs/heads/master
| 2023-06-26T19:27:24.446592
| 2023-06-14T05:48:07
| 2023-06-14T05:48:07
| 140,378,542
| 21
| 6
| null | 2023-05-10T10:27:08
| 2018-07-10T04:49:22
|
R
|
UTF-8
|
R
| false
| false
| 2,708
|
r
|
pred.int.norm.k.of.m.on.r.K.R
|
pred.int.norm.k.of.m.on.r.K <-
function (n, df = n - 1, n.mean = 1, k = 1, m = 1, r = 1, delta.over.sigma = 0,
pi.type = c("upper", "lower", "two-sided"), conf.level = 0.95,
K.tol = .Machine$double.eps^(1/2),
integrate.args.list = NULL)
{
if (!is.vector(n, mode = "numeric") || length(n) != 1 ||
!is.vector(df, mode = "numeric") || length(df) != 1 ||
!is.vector(k, mode = "numeric") || length(k) != 1 ||
!is.vector(m, mode = "numeric") || length(m) != 1 ||
!is.vector(n.mean, mode = "numeric") || length(n.mean) !=
1 || !is.vector(r, mode = "numeric") || length(r) !=
1 || !is.vector(delta.over.sigma, mode = "numeric") ||
length(delta.over.sigma) != 1 || !is.vector(conf.level,
mode = "numeric") || length(conf.level) != 1)
stop(paste("'n', 'df', 'k', 'm', 'n.mean', 'r', 'delta.over.sigma',",
"and 'conf.level' must be numeric scalars"))
if (n < 2 || n != trunc(n))
stop("'n' must be an integer greater than or equal to 2")
if (df < 1 || df != trunc(df))
stop("'df' must be an integer greater than or equal to 1")
if (k < 1)
stop("'k' must be greater than or equal to 1")
if (m < 1)
stop("'m' must be greater than or equal to 1")
if (k > m)
stop("'k' must be between 1 and 'm'")
if (n.mean < 1)
stop("'n.mean' must be greater than or equal to 1")
if (r < 1)
stop("'r' must be greater than or equal to 1")
if (!is.finite(delta.over.sigma))
stop("'delta.over.sigma' must be finite")
if (conf.level <= 0 || conf.level >= 1)
stop("'conf.level' must be between 0 and 1")
pi.type <- match.arg(pi.type)
if (k == m & r == 1 & delta.over.sigma == 0) {
K <- predIntNormK(n = n, df = df, n.mean = n.mean, k = m,
method = "exact", pi.type = pi.type, conf.level = conf.level)
}
else {
fcn.to.min <- function(K, n.weird, df.weird, n.mean,
k.weird, m.weird, r.weird, delta.over.sigma, conf.level,
integrate.args.list) {
(conf.level - pred.int.norm.k.of.m.on.r.prob(n = n.weird,
df = df.weird, n.mean = n.mean, K = K, delta.over.sigma = delta.over.sigma,
k = k.weird, m = m.weird, r = r.weird, integrate.args.list = integrate.args.list))^2
}
K <- nlminb(start = 1, objective = fcn.to.min, lower = 0,
control = list(x.tol = K.tol), n.weird = n, df.weird = df,
n.mean = n.mean, k.weird = k, m.weird = m, r.weird = r,
delta.over.sigma = delta.over.sigma, conf.level = conf.level,
integrate.args.list = integrate.args.list)$par
}
K
}
|
2321423f7490c595412c6a44d796274c8fdbe321
|
ca836882bc56c582c31eeda51b1b33eee1a021aa
|
/R scripts/qPCRs/anova and tukey qPCR.R
|
e8787af57b1369f8d5b37f843e61cfcb29d1d9ef
|
[] |
no_license
|
jucapitanio/Software
|
9ccea783da2c81e3b099fda6cb3fdfed8677e211
|
5ca179d6fb54f743c75222e85196220324255de0
|
refs/heads/master
| 2021-01-19T08:23:15.057621
| 2016-05-16T16:25:14
| 2016-05-16T16:25:14
| 43,160,269
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,522
|
r
|
anova and tukey qPCR.R
|
# ANOVA and Tukey for qPCR data
qPCR <- read.csv("stats.csv")
qPCR <- qPCR[1:9,]
pvals <- vector("list",26)
tuknames <- list(c("NUP98-CO", "RHA-CO", "NUP98-DHX9"), c("nada"))
tukresult <- matrix(data = NA, nrow = 3, ncol = 1, byrow = FALSE, dimnames = tuknames)
for (i in 2:26) {
fitanova <- aov(qPCR[,i] ~ qPCR[,1], data=qPCR)
pvals[[i]][[1]] <- colnames(qPCR)[i]
pvals[[i]][[2]] <- summary(fitanova)[[1]][["Pr(>F)"]][1]
tukfit <- TukeyHSD(fitanova)
tukresult <- cbind(tukresult, tukfit$`qPCR[, 1]`)
}
df <- data.frame(matrix(unlist(pvals), nrow=25, byrow=T),stringsAsFactors=FALSE)
write.csv(df, "pvals.csv")
write.csv(tukresult, "tukeyresults.csv")
# ANOVA and Tukey for RIP-qPCR data
setwd("~/Lab Stuff 2015 2nd sem/Experiments/qPCR RNA-IPs results")
RIP <- read.csv("~/Lab Stuff 2015 2nd sem/Experiments/qPCR RNA-IPs results/for stats r.csv")
fitanova <- aov(RIP[,2] ~ RIP[,1], data=RIP)
summary(fitanova)
tukfit <- TukeyHSD(fitanova)
pvals <- vector("list",6)
tuknames <- list(rownames(tukfit$`RIP[, 1]`), c("nada"))
tukresult <- matrix(data = NA, nrow = 15, ncol = 1, byrow = FALSE, dimnames = tuknames)
for (i in 2:6) {
fitanova <- aov(RIP[,i] ~ RIP[,1], data=RIP)
pvals[[i]][[1]] <- colnames(RIP)[i]
pvals[[i]][[2]] <- summary(fitanova)[[1]][["Pr(>F)"]][1]
tukfit <- TukeyHSD(fitanova)
tukresult <- cbind(tukresult, tukfit$`RIP[, 1]`)
}
df <- data.frame(matrix(unlist(pvals), nrow=11, byrow=T),stringsAsFactors=FALSE)
write.csv(df, "pvals.csv")
write.csv(tukresult, "tukeyresults.csv")
|
b47d9c13e6ffee28317afccae89aa4e9453c82bc
|
3c258c7fe3244f4a41dea7d264098ac614eef19a
|
/man/validateFetchedData.Rd
|
15584672feefb2d08de98f5296f619a8e1ebbdef
|
[
"LicenseRef-scancode-warranty-disclaimer",
"CC0-1.0",
"LicenseRef-scancode-public-domain-disclaimer"
] |
permissive
|
USGS-R/repgen
|
379be8577f3effbe7067e2f3dc5b5481ca69999e
|
219615189fb054e3b421b6ffba4fdd9777494cfc
|
refs/heads/main
| 2023-04-19T05:51:15.008674
| 2021-04-06T20:29:38
| 2021-04-06T20:29:38
| 31,678,130
| 10
| 25
|
CC0-1.0
| 2023-04-07T23:10:19
| 2015-03-04T20:24:02
|
R
|
UTF-8
|
R
| false
| true
| 1,236
|
rd
|
validateFetchedData.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils-validation.R
\name{validateFetchedData}
\alias{validateFetchedData}
\title{Validate to ensure it is not null or empty and has all required fields}
\usage{
validateFetchedData(data, name, requiredFields, stopNull = TRUE,
stopMissing = TRUE, stopEmpty = TRUE)
}
\arguments{
\item{data}{the data to check the validity of}
\item{name}{the name to use for the data when logging errors}
\item{requiredFields}{a list of the required fields for this data to be valid}
\item{stopNull}{(optional - default = TRUE) whether or not the function should
throw an error if the data is NULL.}
\item{stopMissing}{(optional - default = TRUE) whether or not the function should
throw an error if the data is missing some required fields.}
\item{stopEmpty}{(optional - default = TRUE) whether or not the function should
throw an error if the data is present but empty.}
}
\description{
Given some data and required fields, will check to ensure
that the supplied data is not null or empty and has all required fields.
Will throw an error if either of these checks fail. Returns TRUE if the
retrieved data is valid with all required fields, returns false otherwise.
}
|
61d7b27860a667806624528a879a9b9b6c14b15e
|
9aae9188b7c460f3ab26dc68a21ac3bce128b1be
|
/part_1/data_cleaning_pt1.R
|
c4644a8d74007f206347657b66a6ad149ef33697
|
[] |
no_license
|
richardsalex/r_cleaning
|
5a4a35644e4b1c9fdd1ccfb4624d3ed7741416e3
|
ce5a78bd2df7e2fcebb9d96b81b7615f232d5954
|
refs/heads/master
| 2020-03-18T05:16:29.863518
| 2018-05-31T23:36:45
| 2018-05-31T23:36:45
| 134,333,973
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,394
|
r
|
data_cleaning_pt1.R
|
###############################################
## Data Cleaning with R (Pt. I) ##
## Alex Richards, NerdWallet (@alexrichards) ##
###############################################
# Much credit for this cleaning exercise goes to Sandhya Kambhampati (@sandhya__k)
# and Caelainn Barr (@caelainnbarr).
###########
## Intro ##
###########
# - What is data cleaning?
# - What do you need to be able to do to clean data?
# - Why R?
###########################
## Install/load packages ##
###########################
# If you don't have them already! In RStudio, check the "packages" pane.
install.packages("tidyverse") # https://www.tidyverse.org/packages/
install.packages("rvest")
# Load them for use.
library(tidyverse)
library(rvest)
# Check/set working directory
#########################
## Getting data into R ##
#########################
# 1. Delimited files
banks <- read_csv("banklist.csv")
# What to do if the readr package guesses the wrong data type?
bank_spec <- cols(
CERT = col_character()
)
# 2. Excel
library(readxl)
same_banks <- read_xlsx('failed_banks.xlsx')
# 3. HTML
# Pull the HTML of a web page into R
# http://espn.go.com/nfl/superbowl/history/winners
sb_page <- read_html('http://espn.go.com/nfl/superbowl/history/winners')
# Let's look at the page in a web browser's developer view.
# Isolate parts of the page that fall within <table> tags
page_tables <- html_nodes(sb_page, 'table')
# Move the table into a data frame
sb_table <- html_table(page_tables[[1]])
############################
## Tweaking the structure ##
############################
# Which rows should be dropped? We can access parts of the data frame by [row, column]
# Pipe it to a View() in RStudio if you want to see how it looks first
sb_table[-(1:2),] %>% View()
# Modify sb_table to drop the necessary rows
sb_table <- sb_table[-(1:2),]
# Check the column names
names(sb_table)
# Update them to be a bit more descriptive
names(sb_table) <- c("numeral", "date", "location", "result")
#######################
## Cleaning the data ##
#######################
# Add a column with Superbowl number as an integer
sb_table$number <- c(1:52)
# Why keep the original?
# Reorder your columns so that "number" appears first (name OR column number works)
sb_table <- sb_table[, c("number", "numeral", "date", "location", "result")]
# R is treating the date as a string; let's fix
# ?as.Date to review formats
sb_table$date <- as.Date(sb_table$date, "%b. %d, %Y")
# Give year its own column using date formats
sb_table$year <- format(sb_table$date, "%Y")
# Let's do the same with month but extract a section of the date
sb_table$month <- substr(sb_table$date, 6, 7)
# Split result into two separate fields based on a delimiter (check first?)
sb_table <- separate(sb_table, result, c("winner", "loser"), sep = ", ", remove = FALSE)
# How to best move scores into their own columns?
# Regular expressions can help when data follows a pattern.
# https://regex101.com/
score_pattern <- "\\d+$"
# Test to see if the regex extraction pattern works
str_extract(sb_table$winner, score_pattern)
# The same pattern can be replaced with an empty string to just have team names
str_replace(sb_table$winner, score_pattern, "")
# We end up with a hanging space, but can add an operation to trim this off
str_replace(sb_table$winner, score_pattern, "") %>% str_trim(side = "right")
# Stitch these steps together to add score columns and remove score from teams
sb_table$winner_score <- str_extract(sb_table$winner, score_pattern)
sb_table$loser_score <- str_extract(sb_table$loser, score_pattern)
sb_table$winner <- str_replace(sb_table$winner, score_pattern, "") %>% str_trim(side = "right")
sb_table$loser <- str_replace(sb_table$loser, score_pattern, "") %>% str_trim(side = "right")
# Switch scores to a numeric type
sb_table$winner_score <- as.numeric(sb_table$winner_score)
sb_table$loser_score <- as.numeric(sb_table$loser_score)
# Did ESPN always put the winners first?
sb_table[sb_table$winner_score <= sb_table$loser_score,]
# Reorder and drop some unnecessary fields
sb_table <- sb_table[, c("number", "numeral", "date", "year", "month", "location", "winner", "winner_score", "loser", "loser_score")]
# Export a CSV copy
write_csv(sb_table, "sb_data.csv")
# What's the advantage in R over a traditional GUI like Excel?
# Notice anything else we could change?
|
c8581aee64eded2a3ed022f4bec9e731820ecc1f
|
edfdfa035aa28d3acffa1a65ca9ea40bb8fd01e2
|
/Get_SPR.R
|
bfd568ebda4a816f19545c963ce5fff493af87c0
|
[] |
no_license
|
chantelwetzel-noaa/Ch3_DataLoss
|
4da7bb569d7b8fd6b2cf7c93de7c6ad54a613273
|
b020669f3f681ca2de8c46b76206bdb27a65dcd0
|
refs/heads/master
| 2021-03-27T14:44:26.747508
| 2019-08-07T22:21:21
| 2019-08-07T22:21:21
| 19,943,691
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,470
|
r
|
Get_SPR.R
|
drive <-"F"
LH <- "flatfish"
SR <- "BH" #"Shep"
nsim <- NSIM <- 1
start.survey <- 1
require(compiler)
#Source in the life-history parameter values
source(paste(drive,":/PhD/Chapter3/code/LH_parameter_values.R",sep=""))
#Source in external functions
source(paste(drive,":/PhD/Chapter3/code/Functions.R",sep=""))
fishery.yrs <- 1000 #add one year for following year analysis
setup.yrs <- fishery.yrs
total.yrs <- fishery.yrs
years <- 1:fishery.yrs
#Arrays----------------------------------------------------------------------------------------------------
age.bins <- seq(1,num.ages,1)
numbers <- array(0,dim=c(total.yrs,ages,sexes)) ; rownames(numbers)<-years
biomass <- array(0,dim=c(total.yrs,ages,sexes))
#Matrix Storage for output to R list
SSB.matrix <- array(NA,dim=c(total.yrs,NSIM))
depl.matrix <- array(NA,dim=c(total.yrs,NSIM))
Ry.matrix <- array(NA,dim=c(total.yrs,NSIM))
rec.matrix <- array(NA,dim=c(total.yrs,NSIM))
vul.total.matrix <- array(NA,dim=c(total.yrs,NSIM))
index.expect.matrix <- array(NA,dim=c(total.yrs,NSIM))
vul.total.obs.matrix <- array(NA,dim=c(total.yrs,NSIM))
f.values.matrix <- array(NA,dim=c((total.yrs-1),NSIM))
fore.catch.matrix <- array(NA,dim=c((total.yrs+1),NSIM))
true.ofl.matrix <- array(NA,dim=c((total.yrs-1),NSIM))
vul.total.obs <- matrix(0, total.yrs, 1)
rownames(vul.total.obs) <- years
index.expect <- matrix(0,total.yrs,1) ; rownames(index.expect) <- years
survey.catch.age.len <- array(NA,c(total.yrs,ages, length(len.step), sexes))
Ry <- matrix(0,total.yrs,1); rownames(Ry)<-years
SSB<-matrix(0,total.yrs,1) ; rownames(SSB)<-years
catch.at.age <- array(NA, dim=c(total.yrs, ages, sexes)) ; rownames(catch.at.age) <- years
catch.at.len <- array(NA, dim=c(total.yrs, length(len.step), sexes))
f.values <- rep(0,(total.yrs-1))
catch.wght.values <- rep(0,(total.yrs-1))
z.rate <- array(NA,dim = c(total.yrs, ages, sexes)) ; rownames(z.rate) <- years
R0 = 2000 ; steep = 1
#Recruits Spawning biomass Vulnerable biomas---------------------------------------------------
Update_Dynamics <- function(f, biology)
{
UpdateDyn <- list()
#Virgin Population Structure ----------------------------------------------------------------------------------------------------
Ry[1] <- R0 / 2
numbers[1,1:(ages-1),] <- (R0 / 2)* exp(-m * (0:(ages-2)))
numbers[1,ages,] <- numbers[1,ages-1,] * exp( -m ) / (1 - exp(-m))
catch.wght <- numeric(fishery.yrs)
#Virgin Biomass By Age
SSB0 <- SSB[1] <- sum(numbers[1,,1] * biology$fecund)
for(y in 1:(fishery.yrs -1)) {
z.m <- (1 - exp(-(m + biology$selec.age.m * f))) / (m + biology$selec.age.m * f)
z.f <- (1 - exp(-(m + biology$selec.age.f * f))) / (m + biology$selec.age.f * f)
#Catch at Age
catch.at.age.f <- f * (numbers[y,,1] * biology$selec.age.f) * z.f
catch.at.age.m <- f * (numbers[y,,2] * biology$selec.age.m) * z.m
#Catch At Length
mid.temp.f <- numbers[y,,1] * z.f
mid.temp.m <- numbers[y,,2] * z.m
catch.at.len.f <- ((biology$mid.phi.f * biology$selec[,1]) %*% (mid.temp.f))
catch.at.len.m <- ((biology$mid.phi.m * biology$selec[,2]) %*% (mid.temp.m))
#Catch in Weight by Sex, mid.wght (41X2) calculated in the GetWght() function
catch.wght[y] <- f * (sum(biology$mid.wght.at.len[,1] * catch.at.len.f) +
sum(biology$mid.wght.at.len[,2] * catch.at.len.m))
# survival at age by gender
S.f <- exp(-(m + biology$selec.age.f * f))
S.m <- exp(-(m + biology$selec.age.m * f))
#Update the numbers and remove the catch by applying the solved for f value
numbers[y+1, 2:ages, 1] <- numbers[y, 1:(ages-1), 1] * S.f[1:(ages-1)]
numbers[y+1, 2:ages, 2] <- numbers[y, 1:(ages-1), 2] * S.m[1:(ages-1)]
numbers[y+1, ages, 1] <- numbers[y+1, ages, 1] + numbers[y, ages, 1] * exp(-m - biology$selec.age.f[ages] * f)
numbers[y+1, ages, 2] <- numbers[y+1, ages, 2] + numbers[y, ages, 2] * exp(-m - biology$selec.age.m[ages] * f)
SSB[y+1] <- sum(numbers[y+1, 2:ages, 1] * biology$fecund[2:ages])
#Expected (and then realized) recruitment
Ry[y+1] <- (4 * steep * ( R0 / 2 ) * SSB[y+1]) / (SSB0 * (1 - steep) + SSB[y+1] * (5 * steep - 1))
numbers[y+1,1,] <- Ry[y+1]
} #closes yearly loop
UpdateDyn[[1]] <- NULL#f.values
UpdateDyn[[2]] <- NULL#z.rate
UpdateDyn[[3]] <- catch.wght
UpdateDyn[[4]] <- catch.at.age
UpdateDyn[[5]] <- catch.at.len
UpdateDyn[[6]] <- numbers
UpdateDyn[[7]] <- SSB
UpdateDyn[[8]] <- Ry
names(UpdateDyn) <- c("f.values","z.rate","catch.wght.values","catch.at.age","catch.at.len","numbers","SSB","Ry")
return(UpdateDyn)
}
if (LH == "flatfish") {
f<-uniroot(function(f)(Update_Dynamics(f, biology=Get_Biology())$SSB[fishery.yrs]/
Update_Dynamics(f, biology=Get_Biology())$SSB[1])-0.30,lower=0,upper=4,tol=0.0001) }
if (LH == "rockfish") {
f<-uniroot(function(f)(Update_Dynamics(f, biology=Get_Biology())$SSB[fishery.yrs]/
Update_Dynamics(f,biology=Get_Biology())$SSB[1])-0.50,lower=0,upper=4,tol=0.0001) }
out = Update_Dynamics(f$root, biology=Get_Biology())
steep = 0.60
f.values <- seq(0,0.40,0.01)
catch <- numeric(length(f.values))
for (i in 1:length(f.values)) {
catch[i] <- Update_Dynamics(f.values[i], biology=Get_Biology())$catch.wght[fishery.yrs-1]
}
plot(f.values,catch)
|
5cd4da85d397eb97e016ed61412f121a899769f7
|
6222f8aba75f7adbb2793a488e4a28b704cfab23
|
/cvirus2.R
|
33ea6e89c8d94de9115e0c8cc110c039cb90f11d
|
[] |
no_license
|
anovitt/C_Virus
|
5816983ab930bfe1317337574cedce26902d5fd9
|
5bdc5d42fa22e906fd2e3aa3e3a735e3684061fc
|
refs/heads/master
| 2021-07-12T22:59:00.405937
| 2021-03-21T11:13:33
| 2021-03-21T11:13:33
| 242,705,867
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 70,199
|
r
|
cvirus2.R
|
library(data.table)
library(ggplot2)
library(lubridate)
library(leaflet)
library(tidyverse)
library(deSolve)
library(directlabels)
# Load data
source("R/2019-coronavirus-dataset-01212020-01262020/cvirusDataLoad.R")
#####
# Plot by all states confirmed cases
#####
notIn <- c('Princess', 'Guam', 'Samoa', 'Islands','Rico')
datUS %>%
filter(!grepl(paste(notIn, collapse="|"),Province_State)) %>%
mutate(Existing = Confirmed - Deaths - Recovered) %>%
arrange(Province_State) %>%
ggplot(aes(y=Province_State,x=Confirmed)) +
geom_line() +
geom_point(shape = 1)
datUS %>%
filter(!grepl(paste(notIn, collapse="|"),Province_State)) %>%
mutate(Existing = Confirmed - Deaths - Recovered) %>%
arrange(Province_State) %>%
ggplot(aes(y=Province_State,x=Deaths)) +
geom_line() +
geom_point(shape = 1)
# nomalized by state population
datUS %>%
filter(!grepl(paste(notIn, collapse="|"),Province_State)) %>%
inner_join(select(usStatePopulation,`Geographic Area`,'2019'), by = c('Province_State' = 'Geographic Area')) %>%
mutate(`2019` = as.numeric(gsub(",","",`2019`,fixed=TRUE))) %>%
rename(Pop = `2019`) %>%
ungroup() %>%
mutate(conf_per_million = (Confirmed/Pop)*1000000,
deaths_per_million = (Deaths/Pop)*1000000) %>%
ggplot(aes(y=Province_State,x=conf_per_million)) +
geom_line() +
geom_point(shape = 1)
top_125_Country =
dat %>%
filter(Last_Update == max(Last_Update)) %>%
filter(!grepl('US',`Country_Region`) & !grepl('Other',`Country_Region`) |
`Country_Region` == 'US' & !grepl('County',`Province_State`) & !grepl("^[[:upper:]]+$",str_sub(`Province_State`,-2,-1))) %>%
group_by(`Country_Region`) %>%
summarise(Confirmed = sum(Confirmed),
Deaths = sum(Deaths),
Recovered = sum(Recovered),
Existing = sum(Existing)) %>%
top_n(125,Confirmed) %>%
pull(`Country_Region`)
dat %>%
filter(Country_Region %in% top_125_Country & !grepl('County',`Province_State`) &
!grepl("^[[:upper:]]+$",str_sub(`Province_State`,-2,-1))) %>%
filter(Last_Update > '2020-03-15') %>%
group_by(`Country_Region`, Last_Update) %>%
summarise(Confirmed = sum(Confirmed),
Deaths = sum(Deaths),
Recovered = sum(Recovered),
Existing = sum(Existing)) %>%
arrange(Country_Region) %>%
ggplot(aes(y=Country_Region,x=Confirmed)) +
geom_line() +
geom_point(shape = 1) +
coord_flip()+
theme(axis.text.x = element_text(angle = 90))
# US testing rate normalized vs. confirmed cases
rateLines <- data.table(slope = c(2,5,10,20,50,100),intercept = c(0,0,0,0,0,0))
# Confirmed per million vs. test per million, rolling seven day average.
datUS %>%
filter(!grepl(paste(notIn, collapse="|"),Province_State)) %>%
mutate(Last_Update = date(Last_Update)) %>%
group_by(Province_State) %>%
mutate(Rate_Confirmed = Confirmed - lag(Confirmed,default = 0),
Rate_Test = People_Tested - lag(People_Tested,default = 0)) %>%
mutate(Rate_Confirmed = zoo::rollapply(Rate_Confirmed,5,mean,align='right',fill=0),
Rate_Test = zoo::rollapply(Rate_Test,5,mean,align='right',fill=0)) %>%
select(Province_State,Last_Update,Confirmed,People_Tested,Rate_Confirmed,Rate_Test) %>%
inner_join(select(usStatePopulation,`Geographic Area`,'2019'), by = c('Province_State' = 'Geographic Area')) %>%
mutate(`2019` = as.numeric(gsub(",","",`2019`,fixed=TRUE))) %>%
rename(Pop = `2019`) %>%
ungroup() %>%
mutate(conf_per_million = (Rate_Confirmed/Pop)*1000000,
test_per_million = (Rate_Test/Pop)*1000000) %>%
filter(Last_Update == Sys.Date()-1 |Last_Update == '2020-06-01' |Last_Update == '2020-05-01' |
Last_Update == '2020-07-01' | Last_Update =='2020-08-01'| Last_Update =='2020-09-01'| Last_Update =='2020-10-01',
test_per_million >0 & test_per_million <40000,
conf_per_million >= 0) %>%
ggplot(aes(x=conf_per_million,y=test_per_million,color=Province_State)) +
geom_point() +
geom_text(aes(x=conf_per_million,y=test_per_million,label=Province_State),nudge_x = 5, nudge_y = 75) +
geom_abline(data = rateLines,aes(slope = slope,intercept = intercept),linetype = 2, color = 'grey') +
facet_grid( .~Last_Update) +
theme(legend.position = "none")
datUS %>%
filter(!grepl(paste(notIn, collapse="|"),Province_State)) %>%
mutate(Last_Update = date(Last_Update)) %>%
group_by(Province_State) %>%
mutate(Rate_Confirmed = Confirmed - lag(Confirmed,default = 0),
Rate_Test = People_Tested - lag(People_Tested,default = 0)) %>%
mutate(Rate_Confirmed = zoo::rollapply(Rate_Confirmed,5,mean,align='right',fill=0),
Rate_Test = zoo::rollapply(Rate_Test,5,mean,align='right',fill=0)) %>%
select(Province_State,Last_Update,Confirmed,People_Tested,Rate_Confirmed,Rate_Test) %>%
inner_join(select(usStatePopulation,`Geographic Area`,'2019'), by = c('Province_State' = 'Geographic Area')) %>%
mutate(`2019` = as.numeric(gsub(",","",`2019`,fixed=TRUE))) %>%
rename(Pop = `2019`) %>%
ungroup() %>%
mutate(conf_per_million = (Rate_Confirmed/Pop)*1000000,
test_per_million = (Rate_Test/Pop)*1000000) %>%
filter(Last_Update == Sys.Date()-1,
test_per_million >0 & test_per_million <40000,
conf_per_million >= 0) %>%
ggplot(aes(x=conf_per_million,y=test_per_million,color=Province_State)) +
geom_point() +
geom_text(aes(x=conf_per_million,y=test_per_million,label=Province_State),nudge_x = 5, nudge_y = 75) +
scale_y_continuous(limits = c(0,10000)) +
geom_abline(data = rateLines,aes(slope = slope,intercept = intercept),linetype = 2, color = 'grey') +
facet_grid( .~Last_Update) +
theme(legend.position = "none")
In <- c('Texas', 'Florida', 'Alabama', 'Georgia','California','Louisiana','Mississippi')
datUS %>%
filter(grepl(paste(In, collapse="|"),Province_State)) %>%
mutate(Last_Update = date(Last_Update)) %>%
group_by(Province_State) %>%
mutate(Rate_Confirmed = Confirmed - lag(Confirmed,default = 0),
Rate_Test = People_Tested - lag(People_Tested,default = 0)) %>%
mutate(Rate_Confirmed = zoo::rollapply(Rate_Confirmed,5,mean,align='right',fill=0),
Rate_Test = zoo::rollapply(Rate_Test,5,mean,align='right',fill=0)) %>%
select(Province_State,Last_Update,Confirmed,People_Tested,Rate_Confirmed,Rate_Test) %>%
inner_join(select(usStatePopulation,`Geographic Area`,'2019'), by = c('Province_State' = 'Geographic Area')) %>%
mutate(`2019` = as.numeric(gsub(",","",`2019`,fixed=TRUE))) %>%
rename(Pop = `2019`) %>%
ungroup() %>%
mutate(conf_per_million = (Rate_Confirmed/Pop)*1000000,
test_per_million = (Rate_Test/Pop)*1000000) %>%
filter(Last_Update == Sys.Date()-1 |Last_Update == '2020-06-01' |Last_Update == '2020-05-01' |
Last_Update == '2020-07-01' | Last_Update =='2020-08-01'| Last_Update =='2020-09-01'|
Last_Update =='2020-10-01',
test_per_million >0) %>%
ggplot(aes(x=conf_per_million,y=test_per_million,color=Province_State)) +
geom_point() +
geom_text(aes(x=conf_per_million,y=test_per_million,label=Province_State),nudge_x = 5, nudge_y = 75) +
geom_abline(data = rateLines,aes(slope = slope,intercept = intercept),linetype = 2, color = 'grey') +
facet_grid( .~Last_Update) +
theme(legend.position = "none")
### Infection rate plots
dat %>%
filter(`Country_Region` == 'US' & !grepl('County',`Province_State`) &
!grepl("^[[:upper:]]+$",str_sub(`Province_State`,-2,-1)) &
Last_Update > '2020-03-09') %>%
group_by( Last_Update) %>%
summarise_if(is.numeric,sum) %>%
select(Last_Update,Confirmed,Deaths,Recovered,Existing) %>%
mutate(Rate_Confirmed = Confirmed - lag(Confirmed,default = 0),
Rate_Deaths = Deaths - lag(Deaths,default = 0),
Country_Region = 'USA')%>%
ggplot(aes(x = Last_Update))+
geom_line(aes(y=Rate_Confirmed, color = "Infections_per_day"))+
geom_line(aes(y=Rate_Deaths, color = "Deaths_per_day"))+
geom_point(size = I(3), shape = 1, aes(y=Rate_Confirmed, color = "Infections_per_day"))+
geom_point(size = I(3), shape = 1,aes(y=Rate_Deaths, color = "Deaths_per_day"))+
facet_grid(.~Country_Region) +
ylab(label="Count")+
xlab(label="Date")+
theme(legend.justification=c(1,0), legend.position=c(0.25,0.75))+
theme(legend.title=element_text(size=12,face="bold"),
legend.background = element_rect(fill='#FFFFFF',
size=0.5,linetype="solid"),
legend.text=element_text(size=10),
legend.key=element_rect(colour="#FFFFFF",
fill='#C2C2C2',
size=0.25,
linetype="solid"))+
scale_colour_manual("Compartments",
breaks=c("Infections_per_day","Existing","Recovered","Deaths_per_day"),
values=c("green","red","blue","black"))+
labs(title = "Coronavirus(2019-nCoV)",
subtitle = paste("United States Of America",sep =" "))
dat %>%
filter(`Country_Region` == 'India' &
Last_Update > '2020-03-09') %>%
group_by( Last_Update) %>%
summarise_if(is.numeric,sum) %>%
select(Last_Update,Confirmed,Deaths,Recovered,Existing) %>%
mutate(Rate_Confirmed = Confirmed - lag(Confirmed,default = 0),
Country_Region = 'India')%>%
mutate(Roll_Rate_Confirmed = zoo::rollmean(Rate_Confirmed,7,mean,align='right',fill=Rate_Confirmed)) %>%
ggplot(aes(x = Last_Update))+
geom_line(aes(y=Rate_Confirmed, color = "Infections_per_day"))+
geom_line(aes(y=Roll_Rate_Confirmed, color = "Rolling_Infrections_per_day"))+
geom_point(size = I(3), shape = 1, aes(y=Rate_Confirmed, color = "Infections_per_day"))+
#geom_point(size = I(3), shape = 1,aes(y=Roll_Rate_Confirmed, color = "Rolling_Infrections_per_day"))+
facet_grid(.~Country_Region) +
ylab(label="Count")+
xlab(label="Date")+
theme(legend.justification=c(1,0), legend.position=c(0.25,0.75))+
theme(legend.title=element_text(size=12,face="bold"),
legend.background = element_rect(fill='#FFFFFF',
size=0.5,linetype="solid"),
legend.text=element_text(size=10),
legend.key=element_rect(colour="#FFFFFF",
fill='#C2C2C2',
size=0.25,
linetype="solid"))+
scale_colour_manual("Compartments",
breaks=c("Infections_per_day","Existing","Recovered","Rolling_Infrections_per_day"),
values=c("green","red","blue","black"))+
labs(title = "Coronavirus(2019-nCoV)",
subtitle = paste("India",sep =" "))
# Death rate plot
dat %>%
filter(`Country_Region` == 'US' & !grepl('County',`Province_State`) &
!grepl("^[[:upper:]]+$",str_sub(`Province_State`,-2,-1)) &
Last_Update > '2020-03-09') %>%
group_by( Last_Update) %>%
summarise_if(is.numeric,sum) %>%
select(Last_Update,Confirmed,Deaths,Recovered,Existing) %>%
mutate(Rate_Confirmed = Confirmed - lag(Confirmed,default = 0),
Rate_Deaths = Deaths - lag(Deaths,default = 0),
Country_Region = 'USA')%>%
ggplot(aes(x = Last_Update))+
#geom_line(aes(y=Rate_Confirmed, color = "Infections_per_day"))+
geom_line(aes(y=Rate_Deaths, color = "Deaths_per_day"))+
#geom_point(size = I(3), shape = 1, aes(y=Rate_Confirmed, color = "Infections_per_day"))+
geom_point(size = I(3), shape = 1,aes(y=Rate_Deaths, color = "Deaths_per_day"))+
facet_grid(.~Country_Region) +
ylab(label="Count")+
xlab(label="Date")+
theme(legend.justification=c(1,0), legend.position=c(0.25,0.75))+
theme(legend.title=element_text(size=12,face="bold"),
legend.background = element_rect(fill='#FFFFFF',
size=0.5,linetype="solid"),
legend.text=element_text(size=10),
legend.key=element_rect(colour="#FFFFFF",
fill='#C2C2C2',
size=0.25,
linetype="solid"))+
scale_colour_manual("Compartments",
breaks=c("Infections_per_day","Existing","Recovered","Deaths_per_day"),
values=c("green","red","blue","black"))+
labs(title = "Coronavirus(2019-nCoV)",
subtitle = paste("United States Of America",sep =" "))
# Plots of various countries and states
plotDat('US','California')
plotDatPhase('US','California')
plotDat('US','New Jersey')
plotDatPhase('US','New Jersey')
plotDat('US','New York')
plotDatPhase('US','New York')
plotDat('US','Michigan')
plotDatPhase('US','Michigan')
plotDat('US','Ohio')
plotDatPhase('US','Ohio')
plotDat('US', 'Washington')
plotDatPhase('US','Washington')
plotDat('US', 'Illinois')
plotDatPhase('US','Illinois')
plotDat('US', 'Louisiana')
plotDatPhase('US', 'Louisiana')
plotDat('US', 'Georgia')
plotDatPhase('US','Georgia')
plotDat('US', 'Texas')
plotDatPhase('US','Texas')
plotDat('US', 'Colorado')
plotDatPhase('US','Colorado')
plotDat('US', 'Florida')
plotDatPhase('US', 'Florida')
plotDat('US', 'Alabama')
plotDatPhase('US', 'Alabama')
plotDat('US', 'Mississippi')
plotDatPhase('US', 'Mississippi')
plotDat('US', 'Indiana')
plotDatPhase('US', 'Indiana')
plotDat('US', 'Wisconsin')
plotDatPhase('US', 'Wisconsin')
plotDat('US', 'Arkansas')
plotDatPhase('US', 'Arkansas')
plotDat('US', 'Arizona')
plotDatPhase('US', 'Arizona')
plotDat('US', 'North Carolina')
plotDatPhase('US', 'North Carolina')
plotDat('US', 'South Carolina')
plotDatPhase('US', 'South Carolina')
plotDat('US', 'North Dakota')
plotDatPhase('US', 'North Dakota')
plotDat('US', 'South Dakota')
plotDatPhase('US', 'South Dakota')
# USA
plotDatContryUSA()
#China
plotDatContry2('China')
plotDat('China','Shanghai')
plotDatPhase('China','Shanghai')
plotDat('China','Beijing')
plotDatPhase('China','Beijing')
plotDat('Korea','')
plotDatPhase('Korea','')
plotDat('Iceland','')
plotDatPhase('Iceland','')
plotDatContry2('Sweden')
plotDatPhaseCountry('Sweden','')
plotDatContry2('Canada')
plotDatPhaseCountry('Canada','')
plotDatContry2('Mexico')
plotDatPhaseCountry('Mexico','')
plotDatContry2('Brazil')
plotDatPhaseCountry('Brazil','')
plotDatContry2('Germany')
plotDatPhaseCountry('Germany','')
plotDatContry2('Spain')
plotDatPhaseCountry('Spain','')
plotDatContry2('France')
plotDatPhaseCountry('France','')
plotDatContry('New Zealand')
plotDatPhase('New Zealand','')
plotDatContry2('Philippines')
plotDatPhaseCountry('Philippines','')
# Hubei
plotDat('China','Hubei')
# Japan
plotDatContry2('Japan')
plotDatPhase('Japan','')
# Italy
plotDatContry2('Italy')
plotDatPhase('Italy','')
# Morocco
plotDatContry2('Morocco')
plotDatContry2('India')
plotDatPhase('India','')
# UK
plotDatContry2('United Kingdom')
# Austrilia
plotDatContry2('Australia')
plotDatPhase('Australia','')
# plot of top 10 states and provinces
# get the names of the 10 provinces
# Rank Chinese provinces by confirmed count on the latest report date.
top_10 =
  dat %>%
  filter(grepl('China',`Country_Region`) ) %>%
  filter(Last_Update == max(Last_Update)) %>%
  top_n(10,Confirmed) %>%
  pull(`Province_State`)
dat%>%
  filter(grepl('China',`Country_Region`) & `Province_State` %in% top_10) %>%
  ggplot(aes(x = Last_Update, y= Confirmed, color = `Province_State`))+
  geom_line()+
  geom_point(size = I(3), shape = 1)+
  ylab(label="Count")+
  xlab(label="Date")+
  labs(title = "Coronavirus(2019-nCoV)",
       subtitle = "Top 10 Confirmed Cases By Province, China")
# top 10 US states
# The grepl filters drop sub-state rows from the older JHU format:
# county-level rows, cruise-ship ("Princess") rows, and rows whose
# Province_State ends in a two-letter uppercase code ("King County, WA"
# style entries) -- presumably to avoid double counting; TODO confirm.
top_10_US_State =
  dat %>%
  filter(`Country_Region` == 'US'& !grepl('County',`Province_State`)& !grepl('Princess',`Province_State`)& !grepl("^[[:upper:]]+$",str_sub(`Province_State`,-2,-1))) %>%
  filter(Last_Update == max(Last_Update)) %>%
  #group_by(`Province_State`) %>%
  #filter(grepl('US',`Country/Region`) & !grepl('County',`Province/State`) )%>%
  top_n(10,Confirmed) %>%
  pull(`Province_State`)
dat %>%
  filter(grepl('US',`Country_Region`) & `Province_State` %in% top_10_US_State & !grepl('County',`Province_State`) & !grepl('Princess',`Province_State`)& !grepl("^[[:upper:]]+$",str_sub(`Province_State`,-2,-1))) %>%
  filter(Last_Update > '2020-03-09') %>%
  ggplot(aes(x = Last_Update, y= Confirmed, color = `Province_State`))+
  geom_line()+
  geom_point(size = I(3), shape = 1)+
  geom_dl(aes(label = Province_State), method = list(dl.trans(x = x -1,y=y+.25), "last.points", cex = 0.8)) +
  ylab(label="Count")+
  xlab(label="Date")+
  labs(title = "Coronavirus(2019-nCoV)",
       subtitle = "Top 10 Confirmed Cases By State, United States")
# Same states, but the Existing (active) compartment.
dat %>%
  filter(grepl('US',`Country_Region`) & `Province_State` %in% top_10_US_State & !grepl('County',`Province_State`) & !grepl('Princess',`Province_State`)& !grepl("^[[:upper:]]+$",str_sub(`Province_State`,-2,-1))) %>%
  filter(Last_Update > '2020-03-09') %>%
  ggplot(aes(x = Last_Update, y= Existing, color = `Province_State`))+
  geom_line()+
  geom_point(size = I(3), shape = 1)+
  geom_dl(aes(label = Province_State), method = list(dl.trans(x = x -1,y=y+.25), "last.points", cex = 0.8)) +
  ylab(label="Count")+
  xlab(label="Date")+
  labs(title = "Coronavirus(2019-nCoV)",
       subtitle = "Top 10 Existing Cases By State, United States")
# top 10 US states
# Same ranking idea, keyed on cumulative deaths instead of confirmed.
top_10_US_Deaths =
  dat %>%
  filter(`Country_Region` == 'US'& !grepl('County',`Province_State`)& !grepl('Princess',`Province_State`)& !grepl("^[[:upper:]]+$",str_sub(`Province_State`,-2,-1))) %>%
  filter(Last_Update == max(Last_Update)) %>%
  #group_by(`Province_State`) %>%
  #filter(grepl('US',`Country/Region`) & !grepl('County',`Province/State`) )%>%
  top_n(10,Deaths) %>%
  pull(`Province_State`)
dat %>%
  filter(grepl('US',`Country_Region`) & `Province_State` %in% top_10_US_Deaths & !grepl('County',`Province_State`) & !grepl('Princess',`Province_State`)& !grepl("^[[:upper:]]+$",str_sub(`Province_State`,-2,-1))) %>%
  filter(Last_Update > '2020-03-16') %>%
  ggplot(aes(x = Last_Update, y= Deaths, color = `Province_State`))+
  geom_line()+
  geom_point(size = I(3), shape = 1)+
  geom_dl(aes(label = Province_State), method = list(dl.trans(x = x -1,y=y+.25), "last.points", cex = 0.8)) +
  ylab(label="Count")+
  xlab(label="Date")+
  labs(title = "Coronavirus(2019-nCoV)",
       subtitle = "Top 10 Deaths Cases By State, United States")
# Shanghai and Beijing only: cumulative confirmed cases over time.
dat %>%
  filter(grepl('China',`Country_Region`) & `Province_State` %in% c('Shanghai','Beijing')) %>%
  group_by(`Province_State`) %>%
  ggplot(aes(x = Last_Update, y= Confirmed, color = `Province_State`))+
  geom_line()+
  geom_point(size = I(3), shape = 1)+
  ylab(label="Count")+
  xlab(label="Date")+
  labs(title = "Wuhan Coronavirus(2019-nCoV)",
       subtitle = "Confirmed Cases Shanghai and Beijing")
# Same two cities, Existing (active) cases.
dat %>%
  filter(grepl('China',`Country_Region`) & `Province_State` %in% c('Shanghai','Beijing')) %>%
  group_by(`Province_State`) %>%
  ggplot(aes(x = Last_Update, y= Existing, color = `Province_State`))+
  geom_line()+
  geom_point(size = I(3), shape = 1)+
  ylab(label="Count")+
  xlab(label="Date")+
  labs(title = "Wuhan Coronavirus(2019-nCoV)",
       # Fixed copy-paste error: this plot maps y = Existing, not Confirmed.
       subtitle = "Existing Cases Shanghai and Beijing")
#USA Brazil India
# Stack per-day national totals for Brazil, India and the US, then facet a
# four-compartment plot (Confirmed / Existing / Recovered / Deaths) by country.
# The US branch keeps only state-level rows (drops county and "City, ST" rows)
# to avoid double counting; each country's series starts at a different date.
rbind(
  dat %>%
    filter(Country_Region == 'Brazil',
           Last_Update > '2020-04-01') %>%
    group_by(Last_Update) %>%
    summarise_if(is.numeric,sum) %>%
    select(Last_Update,Confirmed,Deaths,Recovered,Existing) %>%
    mutate(Rate_Confirmed = Confirmed - lag(Confirmed,default = 0),
           Rate_Deaths = Deaths - lag(Deaths,default = 0),
           Country_Region = 'Brazil'),
  dat %>%
    filter(Country_Region == 'India',
           Last_Update > '2020-04-01') %>%
    group_by(Last_Update) %>%
    summarise_if(is.numeric,sum) %>%
    select(Last_Update,Confirmed,Deaths,Recovered,Existing) %>%
    mutate(Rate_Confirmed = Confirmed - lag(Confirmed,default = 0),
           Rate_Deaths = Deaths - lag(Deaths,default = 0),
           Country_Region = 'India'),
  dat %>%
    filter(`Country_Region` == 'US' & !grepl('County',`Province_State`) &
             !grepl("^[[:upper:]]+$",str_sub(`Province_State`,-2,-1)) &
             Last_Update > '2020-03-09') %>%
    group_by( Last_Update) %>%
    summarise_if(is.numeric,sum) %>%
    select(Last_Update,Confirmed,Deaths,Recovered,Existing) %>%
    mutate(Rate_Confirmed = Confirmed - lag(Confirmed,default = 0),
           Rate_Deaths = Deaths - lag(Deaths,default = 0),
           Country_Region = 'USA')
) %>%
  ggplot(aes(x = Last_Update))+
  geom_line(aes(y=Confirmed, color = "Infected"))+
  geom_line(aes(y=Existing, color = "Existing"))+
  geom_line(aes(y=Recovered, color = "Recovered"))+
  geom_line(aes(y=Deaths, color = "Deaths"))+
  geom_point(size = I(3), shape = 1, aes(y=Confirmed, color = "Infected"))+
  geom_point(size = I(3), shape = 1,aes(y=Existing, color = "Existing"))+
  geom_point(size = I(3), shape = 1,aes(y=Recovered, color = "Recovered"))+
  geom_point(size = I(3), shape = 1,aes(y=Deaths, color = "Deaths"))+
  facet_grid(.~Country_Region) +
  ylab(label="Count")+
  xlab(label="Date")+
  theme(legend.justification=c(1,0), legend.position=c(0.25,0.75))+
  theme(legend.title=element_text(size=12,face="bold"),
        legend.background = element_rect(fill='#FFFFFF',
                                         size=0.5,linetype="solid"),
        legend.text=element_text(size=10),
        legend.key=element_rect(colour="#FFFFFF",
                                fill='#C2C2C2',
                                size=0.25,
                                linetype="solid"))+
  scale_colour_manual("Compartments",
                      breaks=c("Infected","Existing","Recovered","Deaths"),
                      values=c("green","red","blue","black"))+
  labs(title = "Coronavirus(2019-nCoV)",
       subtitle = paste("United States Of America, India, Brazil",sep =" "))
# Look only at deaths
# Same US aggregation as above, but only the Deaths series is drawn; the
# unused geoms are kept commented out for quick toggling.
dat %>%
  filter(`Country_Region` == 'US' & !grepl('County',`Province_State`) &
           !grepl("^[[:upper:]]+$",str_sub(`Province_State`,-2,-1)) &
           Last_Update > '2020-03-09') %>%
  group_by( Last_Update) %>%
  summarise_if(is.numeric,sum) %>%
  select(Last_Update,Confirmed,Deaths,Recovered,Existing) %>%
  mutate(Rate_Confirmed = Confirmed - lag(Confirmed,default = 0),
         Rate_Deaths = Deaths - lag(Deaths,default = 0),
         Country_Region = 'USA') %>%
  ggplot(aes(x = Last_Update))+
  #geom_line(aes(y=Confirmed, color = "Infected"))+
  #geom_line(aes(y=Existing, color = "Existing"))+
  #geom_line(aes(y=Recovered, color = "Recovered"))+
  geom_line(aes(y=Deaths, color = "Deaths"))+
  #geom_point(size = I(3), shape = 1, aes(y=Confirmed, color = "Infected"))+
  #geom_point(size = I(3), shape = 1,aes(y=Existing, color = "Existing"))+
  #geom_point(size = I(3), shape = 1,aes(y=Recovered, color = "Recovered"))+
  geom_point(size = I(3), shape = 1,aes(y=Deaths, color = "Deaths"))+
  facet_grid(.~Country_Region) +
  ylab(label="Count")+
  xlab(label="Date")+
  theme(legend.justification=c(1,0), legend.position=c(0.25,0.75))+
  theme(legend.title=element_text(size=12,face="bold"),
        legend.background = element_rect(fill='#FFFFFF',
                                         size=0.5,linetype="solid"),
        legend.text=element_text(size=10),
        legend.key=element_rect(colour="#FFFFFF",
                                fill='#C2C2C2',
                                size=0.25,
                                linetype="solid"))+
  scale_colour_manual("Compartments",
                      breaks=c("Infected","Existing","Recovered","Deaths"),
                      values=c("green","red","blue","black"))+
  labs(title = "Coronavirus(2019-nCoV)",
       subtitle = paste("United States Of America",sep =" "))
# Per-day new infections and deaths for Brazil, the US and India, faceted by
# country. Rate_* columns are first differences of the cumulative totals.
rbind(
  dat %>%
    filter(Country_Region == 'Brazil',
           Last_Update > '2020-04-01') %>%
    group_by(Last_Update) %>%
    summarise_if(is.numeric,sum) %>%
    select(Last_Update,Confirmed,Deaths,Recovered,Existing) %>%
    mutate(Rate_Confirmed = Confirmed - lag(Confirmed,default = 0),
           Rate_Deaths = Deaths - lag(Deaths,default = 0),
           Country_Region = 'Brazil'),
  dat %>%
    filter(`Country_Region` == 'US' & !grepl('County',`Province_State`) &
             !grepl("^[[:upper:]]+$",str_sub(`Province_State`,-2,-1)) &
             Last_Update > '2020-03-09') %>%
    group_by( Last_Update) %>%
    summarise_if(is.numeric,sum) %>%
    select(Last_Update,Confirmed,Deaths,Recovered,Existing) %>%
    mutate(Rate_Confirmed = Confirmed - lag(Confirmed,default = 0),
           Rate_Deaths = Deaths - lag(Deaths,default = 0),
           Country_Region = 'USA'),
  dat %>%
    filter(Country_Region == 'India',
           Last_Update > '2020-04-01') %>%
    group_by(Last_Update) %>%
    summarise_if(is.numeric,sum) %>%
    select(Last_Update,Confirmed,Deaths,Recovered,Existing) %>%
    mutate(Rate_Confirmed = Confirmed - lag(Confirmed,default = 0),
           Rate_Deaths = Deaths - lag(Deaths,default = 0),
           Country_Region = 'India')
) %>%
  ggplot(aes(x = Last_Update))+
  geom_line(aes(y=Rate_Confirmed, color = "Infections_per_day"))+
  geom_line(aes(y=Rate_Deaths, color = "Deaths_per_day"))+
  geom_point(size = I(3), shape = 1, aes(y=Rate_Confirmed, color = "Infections_per_day"))+
  geom_point(size = I(3), shape = 1,aes(y=Rate_Deaths, color = "Deaths_per_day"))+
  facet_grid(.~Country_Region) +
  ylab(label="Count")+
  xlab(label="Date")+
  theme(legend.justification=c(1,0), legend.position=c(0.25,0.75))+
  theme(legend.title=element_text(size=12,face="bold"),
        legend.background = element_rect(fill='#FFFFFF',
                                         size=0.5,linetype="solid"),
        legend.text=element_text(size=10),
        legend.key=element_rect(colour="#FFFFFF",
                                fill='#C2C2C2',
                                size=0.25,
                                linetype="solid"))+
  # NOTE(review): the "Existing"/"Recovered" breaks are never mapped by any
  # geom here; only the *_per_day series appear in the legend.
  scale_colour_manual("Compartments",
                      breaks=c("Infections_per_day","Existing","Recovered","Deaths_per_day"),
                      values=c("green","red","blue","black"))+
  labs(title = "Coronavirus(2019-nCoV)",
       # Fixed: the plot facets three countries, not just the US.
       subtitle = paste("United States Of America, India, Brazil",sep =" "))
# plot top 20 countries' confirmed counts.
# Rank countries by confirmed cases on the latest report date. For the US,
# only state-level rows are kept (no county / "City, ST" rows) to avoid
# double counting; "Others" (cruise-ship) rows are dropped.
top_20_Country =
  dat %>%
  filter(Last_Update == max(Last_Update)) %>%
  filter(!grepl('US',`Country_Region`) & !grepl('Other',`Country_Region`) |
           `Country_Region` == 'US' & !grepl('County',`Province_State`) & !grepl("^[[:upper:]]+$",str_sub(`Province_State`,-2,-1))) %>%
  group_by(`Country_Region`) %>%
  summarise(Confirmed = sum(Confirmed),
            Deaths = sum(Deaths),
            Recovered = sum(Recovered),
            Existing = sum(Existing)) %>%
  top_n(20,Confirmed) %>%
  pull(`Country_Region`)
dat %>%
  filter(Country_Region %in% top_20_Country & !grepl('County',`Province_State`) &
           !grepl("^[[:upper:]]+$",str_sub(`Province_State`,-2,-1))) %>%
  filter(Last_Update > '2020-03-15') %>%
  group_by(`Country_Region`, Last_Update) %>%
  summarise(Confirmed = sum(Confirmed),
            Deaths = sum(Deaths),
            Recovered = sum(Recovered),
            Existing = sum(Existing)) %>%
  ggplot(aes(x = Last_Update, color = Country_Region))+
  geom_line(aes(y= Confirmed)) +
  geom_point(size = I(3), shape = 1,aes(y= Confirmed))+
  geom_dl(aes(label = Country_Region,y=Confirmed), method = list(dl.trans(x = x -1,y=y+.25), "last.points", cex = 0.8)) +
  ylab(label="Count")+
  xlab(label="Date")+
  labs(title = "Coronavirus(2019-nCoV)",
       # Fixed: the ranking above takes the top 20, not the top 10.
       subtitle = "Top 20 Confirmed Cases")
# plot top 10 existing cases
# Same country-ranking recipe as above, keyed on Existing (active) cases.
top_10_Country_Existing =
  dat %>%
  filter(Last_Update == max(Last_Update)) %>%
  filter(!grepl('US',`Country_Region`) & !grepl('Other',`Country_Region`) |
           `Country_Region` == 'US' & !grepl('County',`Province_State`) & !grepl("^[[:upper:]]+$",str_sub(`Province_State`,-2,-1))) %>%
  group_by(`Country_Region`) %>%
  summarise(Confirmed = sum(Confirmed),
            Deaths = sum(Deaths),
            Recovered = sum(Recovered),
            Existing = sum(Existing)) %>%
  top_n(10,Existing) %>%
  pull(`Country_Region`)
dat %>%
  filter(Country_Region %in% top_10_Country_Existing & !grepl('County',`Province_State`) &
           !grepl("^[[:upper:]]+$",str_sub(`Province_State`,-2,-1))) %>%
  filter(Last_Update > '2020-03-15') %>%
  group_by(`Country_Region`, Last_Update) %>%
  summarise(Confirmed = sum(Confirmed),
            Deaths = sum(Deaths),
            Recovered = sum(Recovered),
            Existing = sum(Existing)) %>%
  ggplot(aes(x = Last_Update, color = Country_Region))+
  geom_line(aes(y= Existing)) +
  geom_point(size = I(2), shape = 1,aes(y= Existing))+
  geom_dl(aes(label = Country_Region, y=Existing), method = list(dl.trans(x = x -1,y=y+.25), "last.points", cex = 0.8)) +
  ylab(label="Count")+
  xlab(label="Date")+
  labs(title = "Coronavirus(2019-nCoV)",
       subtitle = "Top 10 Existing Cases ")
###
# Plot total case Coronavirus vs. flu.
# Global daily totals; numDays is the cumulative day count since the first
# report (lubridate duration in seconds, divided by 86400).
datCVirFlu <-
  dat %>%
  group_by(`Last_Update`) %>%
  summarise(Confirmed = sum(Confirmed),
            Deaths = sum(Deaths),
            Recovered = sum(Recovered),
            Existing = sum(Existing)) %>%
  mutate(numDays = as.duration(lag(Last_Update,1)%--%Last_Update)) %>%
  mutate(numDays = ifelse(is.na(numDays),0,numDays/86400)) %>%
  mutate(numDays = cumsum(numDays))
# datFluAll (flu season cumulative positives) is built earlier in this
# script -- not visible in this chunk; presumably CDC data, TODO confirm.
datFluAlla <-
  datFluAll %>%
  select(Confirmed = cSumPositive,numDays) %>%
  mutate(type = rep("Flu (A and B) ",times = nrow(datFluAll)))
datCVirFlu %>%
  select(Confirmed,numDays) %>%
  mutate(type = rep("Coronavirus",times = nrow(datCVirFlu))) %>%
  rbind(datFluAlla) %>%
  ggplot(aes(x = numDays,y=Confirmed,color=type))+
  geom_line()+
  geom_point(size = I(3), shape = 1)+
  ylab(label="Count")+
  xlab(label="Date")+
  theme(legend.justification=c(1,0), legend.position=c(0.25,0.75))+
  theme(legend.title=element_text(size=12,face="bold"),
        legend.background = element_rect(fill='#FFFFFF',
                                         size=0.5,linetype="solid"),
        legend.text=element_text(size=10),
        legend.key=element_rect(colour="#FFFFFF",
                                fill='#C2C2C2',
                                size=0.25,
                                linetype="solid"))+
  labs(title = "Coronavirus(2019-nCoV)",
       subtitle = "Global")
######
# lat lon for the provinces
# Hard-coded centroid coordinates for the Chinese provinces, matching the
# Province_State names in `dat`. Intended for joining onto the map data
# (NOTE(review): the left_join using this table is commented out in the
# leaflet pipelines below, which instead assume lng/lat already exist in dat).
provinceCoord <- data.table(Province = c('Shanghai',
                                         'Beijing','Guangdong','Hubei','Tianjin','Chongqing',
                                         'Liaoning','Sichuan','Jiangsu','Guizhou','Heilongjiang',
                                         'Jilin','Zhejiang','Yunnan','Shanxi','Shandong',
                                         'Fujian','Hunan','Gansu','Hebei','Guangxi',
                                         'Xinjiang','Hainan','Anhui','Inner Mongolia',
                                         'Qinghai','Ningxia','Tibet','Shaanxi','Henan','Jiangxi'
),
# Longitudes, in the same order as Province above.
lng = c(121.458056, 116.388869,113.25,114.266667,117.176667,106.552778,123.432778,
        104.066667,118.777778,106.716667,126.65,125.322778,120.161419,102.718333,
        112.560278,116.997222,119.306111,112.966667,103.839868,114.478611,108.316667,
        87.630506,110.341667,117.280833,111.652222,101.75739,106.273056,91.1,108.928611,
        113.648611, 115.883333),
# Latitudes, same ordering.
lat = c(31.222222, 39.928819,23.116667, 30.583333,39.142222,29.562778,41.792222,30.666667,
        32.061667,26.583333,45.75,43.88,30.29365,25.038889,37.869444,36.668333,26.061389,
        28.2,36.057006,38.041389,22.816667,43.807347,20.045833,31.863889, 40.810556,
        36.625541,38.468056,29.65,34.258333,34.757778,28.683333))
# Leaflet bubble map: latest confirmed count per Chinese province.
# Circle radius is log-scaled; hover label = count, click popup = province.
dat %>%
  filter(grepl('China',`Country_Region`)) %>%
  group_by(`Province_State`) %>%
  filter(`Last_Update` == max(`Last_Update`)) %>%
  #left_join(provinceCoord, by = c('Province_State' = 'Province')) %>%
  select(`Province_State`,lng,lat,Confirmed) %>%
  leaflet() %>%
  addTiles() %>%
  addCircles(lng = ~lng, lat = ~lat, weight = 1,
             radius = ~log(Confirmed) * 20000,
             popup = ~`Province_State`,
             label = ~Confirmed) %>%
  addPopups(lng=121.4, lat=50, paste(sep = "<br/>", "Scroll over the circle to see the confirmed count","Click the circle to see the province name"),
            options = popupOptions(closeButton = FALSE)) %>%
  # Fixed: `dat$`Last Update`` referenced a non-existent column (the column is
  # Last_Update), so max() ran on NULL and the popup showed -Inf; also closed
  # the unbalanced "(2019-nCoV" in the heading.
  addPopups(lng=90, lat=50, paste(sep = "<br/>", "<b>Coronavirus(2019-nCoV)</b>","Confirmed Infection Counts By Province",max(dat$Last_Update)),
            options = popupOptions(closeButton = FALSE))
## existing cases map
# Same bubble map as above, but sized/labelled by Existing (active) cases.
dat %>%
  filter(grepl('China',`Country_Region`)) %>%
  group_by(`Province_State`) %>%
  filter(`Last_Update` == max(`Last_Update`)) %>%
  #left_join(provinceCoord, by = c('Province_State' = 'Province')) %>%
  select(`Province_State`,lng,lat,Existing) %>%
  leaflet() %>%
  addTiles() %>%
  addCircles(lng = ~lng, lat = ~lat, weight = 1,
             radius = ~log(Existing) * 20000,
             popup = ~`Province_State`,
             label = ~Existing) %>%
  addPopups(lng=121.4, lat=50, paste(sep = "<br/>", "Scroll over the circle to see the existing count","Click the circle to see the province name"),
            options = popupOptions(closeButton = FALSE)) %>%
  # Fixed: column is Last_Update (the backticked `Last Update` was NULL ->
  # max() gave -Inf); also closed the unbalanced "(2019-nCoV" heading.
  addPopups(lng=90, lat=50, paste(sep = "<br/>", "<b>Coronavirus(2019-nCoV)</b>","Existing Infection Counts By Province",max(dat$Last_Update)),
            options = popupOptions(closeButton = FALSE))
######### SEIR model to actual data
## Model 1 returns a plot of the cumulative model results to match the Johns Hopkins University data.
## Model 2 will take the Johns Hopkins data and compute cases per day.
# Fit-by-eye SEIR (Susceptible/Exposed/Infected/Recovered) simulation plotted
# against the actual data held in the global `dat`.
#
# Args:
#   t          : number of days to simulate.
#   b          : beta  -- infectious contacts per day.
#   g          : gamma -- 1 / infectious period (days).
#   c          : eta   -- 1 / incubation period (days). (Shadows base::c as a
#                value, but calls to c(...) still resolve to the function.)
#   population : assumed susceptible population size.
#   infect     : initially infected head count.
#   exposed    : initially exposed (incubating) head count.
#   country, province : patterns grepl-matched against `dat` columns.
#   start_day  : actual data on/before this date is dropped.
#
# Side effects: prints the tail of the combined table and a faceted ggplot
# comparing model output with the actual data.
# Returns: a data.table stacking model output and actual data; the `type`
# column ("Model" / "Coronavirus_Data") distinguishes the two.
SEIR.model.incubation.pop.cumsum.US <- function(t, b, g, c ,
                                                population = 100000, infect = 100, exposed = 1000,
                                                country = 'US', province = 'New York' ,
                                                start_day = '2020-01-22'){
  # Compartments are simulated as fractions of the population.
  init <- c(S=(population-infect-exposed)/population,I=infect/population,R=0, E=exposed/population)
  parameters <- c(bet=b,gamm=g,eta=c)
  # One time step per day (t/(1*length(1:t)) == 1 for whole-number t).
  time <- seq(0,t,by=t/(1*length(1:t)))
  # Standard SEIR right-hand side for deSolve::ode.
  eqn <- function(time,state,parameters){
    with(as.list(c(state,parameters)),{
      dS <- -bet*S*I
      dI <- eta*E-gamm*I
      dR <- gamm*I
      dE <- bet*S*I -eta*E
      return(list(c(dS,dI,dR,dE)))})}
  out<-ode(y=init,times=time,eqn,parms=parameters)
  out.dt<-as.data.table(out)
  # Scale fractions back to head counts. ("Infeceted" typo fixed; the column
  # is internal -- it is renamed to Confirmed in the select() below. The
  # "Suspected" spelling is kept because it is part of the returned table.)
  out.dt[,Suspected := S*population]
  out.dt[,Infected := I*population]
  out.dt[,Recovered := R*population]
  out.dt[,Exposed := E*population]
  out.dt[,Population := Suspected + Infected + Recovered + Exposed]
  # Actual data for the requested country/province, with a cumulative
  # day counter (numDays) so it lines up with the model's time axis.
  datAct <-
    dat %>%
    filter(grepl(country,`Country_Region`) & grepl(province,`Province_State`)) %>%
    #filter(Country_Region == 'US' & Province_State == 'Michigan') %>%
    group_by(`Province_State`) %>%
    filter(Last_Update > start_day) %>%
    mutate(Date = ymd(Last_Update)) %>%
    arrange(Date) %>%
    mutate(numDays = as.numeric(c(diff(Date),1))) %>%
    as.data.table() %>%
    mutate(numDays = cumsum(numDays)) %>%
    select(Confirmed,Recovered,numDays) %>%
    as.data.table()
  # Pad the actual data with the columns present in the model output so the
  # two tables can be row-bound by name.
  datAct =
    datAct %>%
    mutate(type = rep("Coronavirus_Data",times = nrow(datAct)),
           Suspected = rep(0,times = nrow(datAct)),
           Exposed = rep(0,times = nrow(datAct)),
           Population = rep(population,times = nrow(datAct)),
           Confirmed_sum = Confirmed,
           Recovered_sum = Recovered,
           Exposed_sum = Exposed) %>%
    as.data.table()
  out.dt =
    out.dt %>%
    # Cumulative confirmed = everyone who has left both S and E.
    mutate(Confirmed_sum = Population - Suspected - Exposed) %>%
    mutate(type = rep('Model',times = nrow(out.dt)),
           Recovered_sum = Recovered,
           Exposed_sum = Exposed) %>%
    select(numDays = time, Suspected, Confirmed = Infected ,Recovered, Exposed, Population, type, Confirmed_sum, Recovered_sum,Exposed_sum ) %>%
    rbind(datAct)
  print(tail(out.dt))
  title <- paste("Coronavirus(2019-nCoV) SEIR Model: Basic vs. Actual Data",country,province,sep=" ")
  subtit <- bquote(list(beta==.(parameters[1]),~gamma==.(round(parameters[2],3)),~eta==.(round(parameters[3],3))))
  res<-ggplot(out.dt,aes(x=numDays))+
    ggtitle(bquote(atop(bold(.(title)),atop(bold(.(subtit))))))+
    geom_line(size = I(1), aes(y=Confirmed_sum,colour="Confirmed"))+
    geom_line(aes(y=Recovered_sum,colour="Recovered"))+
    geom_line(aes(y=Exposed_sum,colour="Incubation"))+
    geom_line(aes(y=Confirmed,colour="Infected"))+
    ylab(label="Count")+
    xlab(label="Time (days)")+
    facet_grid(type~.)+
    theme(legend.justification=c(1,0), legend.position=c(.125,0.75))+
    theme(legend.title=element_text(size=12,face="bold"),
          legend.background = element_rect(fill='#FFFFFF',
                                           size=0.5,linetype="solid"),
          legend.text=element_text(size=10),
          legend.key=element_rect(colour="#FFFFFF",
                                  fill='#C2C2C2',
                                  size=0.25,
                                  linetype="solid"))+
    # NOTE(review): "Susceptible" is listed as a break but no geom maps it.
    scale_colour_manual("Compartments",
                        breaks=c("Susceptible","Confirmed","Recovered","Incubation","Infected"),
                        values=c("blue","red","green","black","orange"))
  print(res)
  return(out.dt)
}
# beta is the number infection contacts per day.
# gamma is 1 over the duration of the infection.
# e is 1 over the incubation period
# Hand-tuned model runs compared against New York, Hubei and Michigan data.
SEIR.model.incubation.pop.cumsum.US(100,.50,1/14,1/14,infect = 5000, exposed = 900,population = 400000,country = 'US',province = 'New York', start_day = '2020-03-10' )
SEIR.model.incubation.pop.cumsum.US(100,2.5,1/14,1/14,population = 75000,country = 'China',province = 'Hubei')
SEIR.model.incubation.pop.cumsum.US(120,.30,1/14,1/14,infect = 150,
                                    exposed = 350,population = 80000,country = 'US',province = 'Michigan', start_day = '2020-03-10' )
# fft of rate of increase
# Daily new US confirmed cases as a plain numeric vector -- this is the
# input signal for the FFT analysis below.
rateConf =
  dat %>%
  filter(`Country_Region` == 'US' & !grepl('County',`Province_State`) &
           !grepl("^[[:upper:]]+$",str_sub(`Province_State`,-2,-1)) &
           Last_Update > '2020-03-09') %>%
  group_by( Last_Update) %>%
  summarise_if(is.numeric,sum) %>%
  select(Last_Update,Confirmed,Deaths,Recovered,Existing) %>%
  mutate(Rate_Confirmed = Confirmed - lag(Confirmed,default = 0),
         Rate_Deaths = Deaths - lag(Deaths,default = 0),
         Country_Region = 'USA') %>%
  pull(Rate_Confirmed)
# Time-domain view of the daily-new-cases signal.
s1 <- rateConf
samp.rate <- 1  # one sample per day
# Fixed off-by-one: the original `0:(length(s1-1))` is 0:length(s1) (s1-1 has
# the same length as s1), giving one extra time point and forcing data.table
# to recycle the amplitude column with a warning.
timeArray <- (0:(length(s1) - 1)) / samp.rate
timeDt <- data.table(tm = timeArray, amp = s1)
# (Removed the no-op `timeDt[, tm := tm ]` from the original.)
#timeDt[amp < 0.4 & amp > -0.4,]
plottime <-
  ggplot(data = timeDt, aes(x=tm,y=amp))+
  geom_point(size = I(0)) +  # NOTE(review): size-0 points are invisible -- confirm intended
  xlab(label="time (days)")+
  ylab(label="Confirmed Cases" )+
  labs(title= "Confirmed Cases USA vs Time: ")+
  theme(plot.title = element_text(lineheight = 0.8, face="bold"))
plottime
# Frequency content
# Single-sided power spectrum of the daily-new-cases signal (standard
# periodogram recipe: keep the unique half of the FFT, normalize, square,
# and double the non-DC/non-Nyquist bins).
n <- length(timeDt$amp) #number of samples
p <- fft(timeDt$amp) #fft
nUniquePts <- ceiling((n+1)/2)
p <- p[1:nUniquePts] #select just the first half since the second half
# is a mirror image of the first
p <- abs(p)  #take the absolute value, or the magnitude
p <- p / n   #scale by the number of points so that
# the magnitude does not depend on the length
# of the signal or on its sampling frequency
p <- p^2  # square it to get the power
# multiply by two (see technical document for details)
# odd nfft excludes Nyquist point
if (n %% 2 > 0){
  p[2:length(p)] <- p[2:length(p)]*2 # we've got odd number of points fft
} else {
  p[2: (length(p) -1)] <- p[2: (length(p) -1)]*2 # we've got even number of points fft
}
freqArray <- (0:(nUniquePts-1)) * (samp.rate / n) # create the frequency array
freq_Data_Table <- data.table(Frequency = freqArray,Power = p)
# The Power filter just trims the dominant low-frequency bins so the rest
# of the spectrum is visible.
plot_FFT <-
  ggplot(data = freq_Data_Table[Power < 2.5*10^8,],aes(x=Frequency,y=Power))+
  geom_point(size=I(3)) +
  #geom_text(data = freq_Data_Table[Power < 1e-06 &Power > 1.5e-07 & Frequency < 2000,],nudge_x = 10,aes(x=Frequency,y=Power,label = round(Frequency,0)))+
  xlab(label="Frequency (1/Day)")+
  ylab(label="Power " )+
  labs(title= "Confirmed Rate FFT: ")+  # typo "Confimred" fixed
  theme(plot.title = element_text(lineheight = 0.8, face="bold"))
plot_FFT
# Draw the magnitude spectrum of an FFT result as a stem ("h") plot.
#
# X.k     : complex vector as returned by fft().
# xlimits : x-axis range; defaults to the full bin-index range.
#
# Every bin past DC is doubled, folding the mirrored upper half of the
# spectrum into a single-sided display (same scaling as the power-spectrum
# code earlier in this script).
plot.frequency.spectrum <- function(X.k, xlimits=c(0,length(X.k))) {
  n.bins <- length(X.k)
  spec <- cbind(0:(n.bins - 1), Mod(X.k))
  # Double all bins except the first (DC) one.
  fold <- 2:n.bins
  spec[fold, 2] <- 2 * spec[fold, 2]
  plot(spec, t="h", lwd=2, main="",
       xlab="Frequency (Hz)", ylab="Strength",
       xlim=xlimits, ylim=c(0, max(Mod(spec[, 2]))))
}
# Stem plot of the daily-rate spectrum, limited to the first 50 bins.
plot.frequency.spectrum(fft(rateConf), xlimits=c(0,50))
# NOTE(review): stats::spectrum() expects a time series, but this passes the
# complex FFT output -- verify this call; it likely errors or is meaningless.
spectrum(fft(rateConf))
# Plot the i-th harmonic
# Xk: the frequencies computed by the FFT
# i: which harmonic
# ts: the sampling time points
# acq.freq: the acquisition rate
plot.harmonic <- function(Xk, i, ts, acq.freq, color="red") {
  # Zero every frequency bin except the i-th harmonic (bin i+1, since
  # bin 1 holds the DC component).
  isolated <- replace(rep(0, length(Xk)), i + 1, Xk[i + 1])
  # get.trajectory() (defined elsewhere in this script) reconstructs the
  # time-domain signal from the isolated bin.
  traj <- get.trajectory(isolated, ts, acq.freq = acq.freq)
  points(ts, traj, type = "l", col = color)
}
# Chi Square Test for Recovered cases and deaths
# Build a country x outcome contingency table from the latest report date.
# NOTE(review): top_125_Country is defined earlier in this script (not
# visible in this chunk) -- confirm it exists before running this section.
datChiSq <-
  dat %>%
  filter(Country_Region %in% top_125_Country & !grepl('County',`Province_State`) &
           !grepl("^[[:upper:]]+$",str_sub(`Province_State`,-2,-1))) %>%
  filter(Last_Update == max(Last_Update)) %>%
  group_by(`Country_Region`) %>%
  summarise(Deaths = sum(Deaths),
            Recovered = sum(Recovered),
            Confirmed = sum(Confirmed)) %>%
  as.matrix() %>%
  t()
# Rows of datChiSq after t(): 1 = country name, 2 = Deaths, 3 = Recovered,
# 4 = Confirmed.
M <- as.table(rbind(as.numeric(datChiSq[2,]), as.numeric(datChiSq[3,]) , as.numeric(datChiSq[4,])))
dimnames(M) <- list(Outcome = c("Deaths", "Recovered","Confirmed"),
                    Country = datChiSq[1,])
(Xsq <- chisq.test(M)) # Prints test summary
Xsq$observed # observed counts (same as M)
round(Xsq$expected,0) # expected counts under the null
(Xsq$observed - round(Xsq$expected,0))
Xsq$residuals # Pearson residuals
Xsq$stdres # standardized residuals
# Deaths / Confirmed ratio per country.
round(Xsq$observed[1,]/Xsq$observed[3,],3)
# Chi square test for US states
# Drop cruise ships and non-state territories.
notIn <- c('Princess', 'Guam', 'Samoa', 'Islands','Rico')
datChiSq <-
  dat %>%
  filter(Country_Region == 'US' & Last_Update == max(Last_Update) & !grepl(paste(notIn, collapse="|"), Province_State)) %>%
  select(Province_State,Deaths,Recovered,Confirmed) %>%
  as.matrix() %>%
  t()
M <- as.table(rbind(as.numeric(datChiSq[2,]), as.numeric(datChiSq[3,]) , as.numeric(datChiSq[4,])))
dimnames(M) <- list(Outcome = c("Deaths", "Recovered","Confirmed"),
                    Country = datChiSq[1,])
(Xsq <- chisq.test(M)) # Prints test summary
Xsq$observed # observed counts (same as M)
round(Xsq$expected,0) # expected counts under the null
(Xsq$observed - round(Xsq$expected,0))
Xsq$residuals # Pearson residuals
Xsq$stdres # standardized residuals
round(Xsq$observed[1,]/Xsq$observed[3,],3)
# Per-state death rate (Deaths / Confirmed) scatter.
as.data.table(t(M)) %>%
  pivot_wider(names_from = 'Outcome', values_from = 'N') %>%
  mutate(D_Rate = Deaths / Confirmed) %>%
  ggplot(aes(x = Country, y = D_Rate)) +
  geom_point() +
  theme(axis.text.x = element_text(angle = 90))
###############################
# Case increase rate
##############################
# subset to USA
# States/rows excluded from the per-state rate analysis (ships, territories).
notIn <- c('Princess', 'Guam', 'Samoa', 'Islands','Rico')
# Top 15 states by daily new confirmed cases per million residents, using a
# 4-day right-aligned rolling mean of the cumulative counts before differencing.
datRateTop15 <-
  dat %>%
  filter(Country_Region == 'US' & !grepl(paste(notIn, collapse="|"), Province_State)& !grepl("^[[:upper:]]+$",str_sub(`Province_State`,-2,-1))) %>%
  select(Province_State,Deaths,Recovered,Confirmed,Last_Update) %>%
  group_by(Province_State) %>%
  mutate(Confirmed = zoo::rollapply(Confirmed,4,mean,align='right',fill=0),
         Deaths = zoo::rollapply(Deaths,4,mean,align='right',fill=0),
         Rate_Confirmed = Confirmed - lag(Confirmed,default = 0),
         Rate_Deaths = Deaths - lag(Deaths,default = 0),
         Accel_Confirmed = Rate_Confirmed - lag(Rate_Confirmed,default = 0),
         Accel_Deaths = Rate_Deaths - lag(Rate_Deaths,default = 0)) %>%
  #mutate(Rate_Confirmed = zoo::rollapply(Rate_Confirmed,4,mean,align='right',fill=0),
  #Accel_Confirmed = zoo::rollapply(Accel_Confirmed,4,mean,align='right',fill=0),
  #Rate_Deaths = zoo::rollapply(Rate_Deaths,4,mean,align='right',fill=0),
  #Accel_Deaths = zoo::rollapply(Accel_Deaths,4,mean,align='right',fill=0)) %>%
  filter(Last_Update == max(Last_Update)) %>%
  # usStatePopulation (2019 census estimates) is loaded earlier in the script.
  inner_join(select(usStatePopulation,`Geographic Area`,'2019'), by = c('Province_State' = 'Geographic Area')) %>%
  mutate(`2019` = as.numeric(gsub(",","",`2019`,fixed=TRUE))) %>%
  rename(Pop = `2019`) %>%
  ungroup() %>%
  mutate(Rate_Confirmed_per_million = (Rate_Confirmed/Pop)*1000000,
         Accel_Confirmed_per_million = (Accel_Confirmed/Pop)*1000000,
         # Fixed inversion: the name means deaths per confirmed case, and the
         # D_Rate computed earlier uses Deaths/Confirmed; the original had
         # Confirmed/Deaths. (Column is not used downstream of this pipeline.)
         Death_per_Conf = Deaths/Confirmed) %>%
  filter(Rate_Confirmed > 0 & Accel_Confirmed >0) %>%
  arrange(desc(Rate_Confirmed_per_million)) %>%
  top_n(15,Rate_Confirmed_per_million) %>%
  pull(Province_State)
# Plot the per-million daily rate for the top-15 states selected above.
dat %>%
  filter(Country_Region == 'US' & !grepl(paste(notIn, collapse="|"), Province_State)& !grepl("^[[:upper:]]+$",str_sub(`Province_State`,-2,-1))) %>%
  select(Province_State,Deaths,Recovered,Confirmed,Last_Update) %>%
  filter(Province_State %in% datRateTop15) %>%
  group_by(Province_State) %>%
  mutate(Confirmed = zoo::rollapply(Confirmed,4,mean,align='right',fill=0),
         Deaths = zoo::rollapply(Deaths,4,mean,align='right',fill=0),
         Existing = Confirmed - Deaths - Recovered,
         Rate_Existing = Existing - lag(Existing,default = 0),
         Rate_Confirmed = Confirmed - lag(Confirmed,default = 0),
         Rate_Deaths = Deaths - lag(Deaths,default = 0)) %>%
  filter(Last_Update > '2020-06-01') %>%
  inner_join(select(usStatePopulation,`Geographic Area`,'2019'), by = c('Province_State' = 'Geographic Area')) %>%
  mutate(`2019` = as.numeric(gsub(",","",`2019`,fixed=TRUE))) %>%
  rename(Pop = `2019`) %>%
  ungroup() %>%
  mutate(Rate_Confirmed_per_million = (Rate_Confirmed/Pop)*1000000,
         # NOTE(review): Confirmed/Deaths is the inverse of what this name
         # says (and of D_Rate = Deaths/Confirmed above); unused downstream.
         Death_per_Conf = Confirmed/Deaths) %>%
  ggplot(aes(x = Last_Update,color = Province_State)) +
  geom_line(aes(y=Rate_Confirmed_per_million))+
  geom_point(aes(y=Rate_Confirmed_per_million)) +
  geom_dl(aes(label = Province_State, y=Rate_Confirmed_per_million), method = list(dl.trans(x = x -1,y=y+.25), "last.points", cex = 0.8))
###
# rolling 4 day average filtered for rate and acceleration > 0
###
# Table (printed, not assigned): latest rolling-mean rate/acceleration per
# state, sorted by per-million daily rate.
dat %>%
  filter(Country_Region == 'US' & !grepl(paste(notIn, collapse="|"), Province_State)& !grepl("^[[:upper:]]+$",str_sub(`Province_State`,-2,-1))) %>%
  select(Province_State,Deaths,Recovered,Confirmed,Last_Update) %>%
  group_by(Province_State) %>%
  mutate(
    Rate_Confirmed = Confirmed - lag(Confirmed,default = 0),
    Rate_Deaths = Deaths - lag(Deaths,default = 0),
    Accel_Confirmed = Rate_Confirmed - lag(Rate_Confirmed,default = 0),
    Accel_Deaths = Rate_Deaths - lag(Rate_Deaths,default = 0)) %>%
  mutate(Rate_Confirmed = zoo::rollapply(Rate_Confirmed,4,mean,align='right',fill=0),
         Accel_Confirmed = zoo::rollapply(Accel_Confirmed,4,mean,align='right',fill=0),
         Rate_Deaths = zoo::rollapply(Rate_Deaths,4,mean,align='right',fill=0),
         Accel_Deaths = zoo::rollapply(Accel_Deaths,4,mean,align='right',fill=0)) %>%
  filter(Last_Update == max(Last_Update)) %>%
  inner_join(select(usStatePopulation,`Geographic Area`,'2019'), by = c('Province_State' = 'Geographic Area')) %>%
  mutate(`2019` = as.numeric(gsub(",","",`2019`,fixed=TRUE))) %>%
  rename(Pop = `2019`) %>%
  ungroup() %>%
  mutate(Rate_Confirmed_per_million = (Rate_Confirmed/Pop)*1000000,
         Accel_Confirmed_per_million = (Accel_Confirmed/Pop)*1000000,
         # NOTE(review): inverted relative to its name; see comment above.
         Death_per_Conf = Confirmed/Deaths) %>%
  filter(Rate_Confirmed > 0 & Accel_Confirmed >0) %>%
  arrange(desc(Rate_Confirmed_per_million))
# Top 15 states by *acceleration* (day-over-day change in the daily rate)
# of confirmed cases per million, smoothed with a 4-day rolling mean.
datAccelTop15 <-
  dat %>%
  filter(Country_Region == 'US' & !grepl(paste(notIn, collapse="|"), Province_State)& !grepl("^[[:upper:]]+$",str_sub(`Province_State`,-2,-1))) %>%
  select(Province_State,Deaths,Recovered,Confirmed,Last_Update) %>%
  group_by(Province_State) %>%
  mutate(
    Rate_Confirmed = Confirmed - lag(Confirmed,default = 0),
    Rate_Deaths = Deaths - lag(Deaths,default = 0),
    Accel_Confirmed = Rate_Confirmed - lag(Rate_Confirmed,default = 0),
    Accel_Deaths = Rate_Deaths - lag(Rate_Deaths,default = 0)) %>%
  mutate(Rate_Confirmed = zoo::rollapply(Rate_Confirmed,4,mean,align='right',fill=0),
         Accel_Confirmed = zoo::rollapply(Accel_Confirmed,4,mean,align='right',fill=0),
         Rate_Deaths = zoo::rollapply(Rate_Deaths,4,mean,align='right',fill=0),
         Accel_Deaths = zoo::rollapply(Accel_Deaths,4,mean,align='right',fill=0)) %>%
  filter(Last_Update == max(Last_Update)) %>%
  inner_join(select(usStatePopulation,`Geographic Area`,'2019'), by = c('Province_State' = 'Geographic Area')) %>%
  mutate(`2019` = as.numeric(gsub(",","",`2019`,fixed=TRUE))) %>%
  rename(Pop = `2019`) %>%
  ungroup() %>%
  mutate(Rate_Confirmed_per_million = (Rate_Confirmed/Pop)*1000000,
         Accel_Confirmed_per_million = (Accel_Confirmed/Pop)*1000000,
         # Fixed inversion: deaths per confirmed case, matching the name and
         # the D_Rate definition earlier (original had Confirmed/Deaths).
         Death_per_Conf = Deaths/Confirmed) %>%
  filter(Rate_Confirmed > 0 & Accel_Confirmed >0) %>%
  arrange(desc(Accel_Confirmed_per_million)) %>%
  top_n(15,Accel_Confirmed_per_million) %>%
  pull(Province_State)
datAccelTop15
# Daily new confirmed cases (4-day rolling mean) for every US state.
# Fixed: the rates were computed without group_by(Province_State), so lag()
# and rollapply() ran across state boundaries; the stray ungroup() below and
# every sibling pipeline in this section show per-state grouping was intended.
dat %>%
  filter(Country_Region == 'US' & !grepl(paste(notIn, collapse="|"), Province_State)& !grepl("^[[:upper:]]+$",str_sub(`Province_State`,-2,-1))) %>%
  select(Province_State,Deaths,Recovered,Confirmed,Last_Update) %>%
  group_by(Province_State) %>%
  mutate(
    Rate_Confirmed = Confirmed - lag(Confirmed,default = 0),
    Rate_Deaths = Deaths - lag(Deaths,default = 0),
    Accel_Confirmed = Rate_Confirmed - lag(Rate_Confirmed,default = 0),
    Accel_Deaths = Rate_Deaths - lag(Rate_Deaths,default = 0)) %>%
  mutate(Rate_Confirmed = zoo::rollapply(Rate_Confirmed,4,mean,align='right',fill=0),
         Accel_Confirmed = zoo::rollapply(Accel_Confirmed,4,mean,align='right',fill=0),
         Rate_Deaths = zoo::rollapply(Rate_Deaths,4,mean,align='right',fill=0),
         Accel_Deaths = zoo::rollapply(Accel_Deaths,4,mean,align='right',fill=0)) %>%
  inner_join(select(usStatePopulation,`Geographic Area`,'2019'), by = c('Province_State' = 'Geographic Area')) %>%
  mutate(`2019` = as.numeric(gsub(",","",`2019`,fixed=TRUE))) %>%
  rename(Pop = `2019`) %>%
  ungroup() %>%
  filter(Last_Update > '2020-06-01') %>%
  ggplot(aes(x = Last_Update,color = Province_State)) +
  geom_line(aes(y=Rate_Confirmed))+
  geom_point(aes(y=Rate_Confirmed)) +
  geom_dl(aes(label = Province_State, y= Rate_Confirmed), method = list(dl.trans(x = x -1,y=y+.25), "last.points", cex = 0.8)) +
  # Trim the y axis so low-rate states do not compress the interesting range.
  scale_y_continuous(limits = c(1200,13000))
# Per-million daily rate for the acceleration-top-15 states.
# Fixed: added the missing group_by(Province_State) (lag/rollapply otherwise
# cross state boundaries -- the existing ungroup() shows grouping was meant),
# and corrected the inverted Death_per_Conf ratio.
dat %>%
  filter(Country_Region == 'US' & !grepl(paste(notIn, collapse="|"), Province_State)& !grepl("^[[:upper:]]+$",str_sub(`Province_State`,-2,-1))) %>%
  select(Province_State,Deaths,Recovered,Confirmed,Last_Update) %>%
  filter(Province_State %in% datAccelTop15) %>%
  group_by(Province_State) %>%
  mutate(
    Rate_Confirmed = Confirmed - lag(Confirmed,default = 0),
    Rate_Deaths = Deaths - lag(Deaths,default = 0),
    Accel_Confirmed = Rate_Confirmed - lag(Rate_Confirmed,default = 0),
    Accel_Deaths = Rate_Deaths - lag(Rate_Deaths,default = 0)) %>%
  mutate(Rate_Confirmed = zoo::rollapply(Rate_Confirmed,4,mean,align='right',fill=0),
         Accel_Confirmed = zoo::rollapply(Accel_Confirmed,4,mean,align='right',fill=0),
         Rate_Deaths = zoo::rollapply(Rate_Deaths,4,mean,align='right',fill=0),
         Accel_Deaths = zoo::rollapply(Accel_Deaths,4,mean,align='right',fill=0)) %>%
  inner_join(select(usStatePopulation,`Geographic Area`,'2019'), by = c('Province_State' = 'Geographic Area')) %>%
  mutate(`2019` = as.numeric(gsub(",","",`2019`,fixed=TRUE))) %>%
  rename(Pop = `2019`) %>%
  ungroup() %>%
  mutate(Rate_Confirmed_per_million = (Rate_Confirmed/Pop)*1000000,
         Accel_Confirmed_per_million = (Accel_Confirmed/Pop)*1000000,
         Death_per_Conf = Deaths/Confirmed) %>%
  filter(Last_Update > '2020-06-01') %>%
  ggplot(aes(x = Last_Update,color = Province_State)) +
  geom_line(aes(y=Rate_Confirmed_per_million))+
  geom_point(aes(y=Rate_Confirmed_per_million)) +
  #geom_line(aes(y=Accel_Confirmed_per_million))+
  #geom_point(aes(y=Accel_Confirmed_per_million)) +
  geom_dl(aes(label = Province_State, y= Rate_Confirmed_per_million), method = list(dl.trans(x = x -1,y=y+.25), "last.points", cex = 0.8))
# States whose smoothed (4-day rolling mean of cumulative counts) case
# acceleration is most negative at the latest report date, i.e. the 15
# states decelerating fastest. Result is printed, not assigned.
dat %>%
filter(Country_Region == 'US' & !grepl(paste(notIn, collapse="|"), Province_State)& !grepl("^[[:upper:]]+$",str_sub(`Province_State`,-2,-1))) %>%
select(Province_State,Deaths,Recovered,Confirmed,Last_Update) %>%
group_by(Province_State) %>%
# Smooth the cumulative series per state before differencing.
mutate(Confirmed = zoo::rollapply(Confirmed,4,mean,align='right',fill=0),
Recovered = zoo::rollapply(Recovered,4,mean,align='right',fill=0),
Deaths = zoo::rollapply(Deaths,4,mean,align='right',fill=0)) %>%
# First differences = daily rates; second differences = acceleration.
mutate(#Existing = Confirmed - Deaths - Recovered,
#Rate_Existing = Existing - lag(Existing,default = 0),
Rate_Confirmed = Confirmed - lag(Confirmed,default = 0),
Rate_Deaths = Deaths - lag(Deaths,default = 0),
#Accel_Existing = Rate_Existing - lag(Rate_Existing,default = 0),
Accel_Confirmed = Rate_Confirmed - lag(Rate_Confirmed,default = 0),
Accel_Deaths = Rate_Deaths - lag(Rate_Deaths,default = 0)) %>%
ungroup() %>%
# Keep only the most recent report, then take the 15 lowest accelerations.
filter(Last_Update == max(Last_Update) ) %>%
arrange(Accel_Confirmed) %>%
top_n(-15,Accel_Confirmed)
# The 15 states with the lowest smoothed daily new-case rate per million
# residents ("most improved"), evaluated at each state's latest report.
# Depends on globals: dat, notIn, usStatePopulation.
datRateBottom15 <-
dat %>%
# US states only: drop territories/ships and county-level rows.
filter(Country_Region == 'US' & !grepl(paste(notIn, collapse="|"), Province_State)& !grepl("^[[:upper:]]+$",str_sub(`Province_State`,-2,-1))) %>%
select(Province_State,Deaths,Recovered,Confirmed,Last_Update) %>%
group_by(Province_State) %>%
# Per-state day-over-day first and second differences.
mutate(
Rate_Confirmed = Confirmed - lag(Confirmed,default = 0),
Rate_Deaths = Deaths - lag(Deaths,default = 0),
Accel_Confirmed = Rate_Confirmed - lag(Rate_Confirmed,default = 0),
Accel_Deaths = Rate_Deaths - lag(Rate_Deaths,default = 0)) %>%
# 4-day right-aligned moving averages to smooth reporting noise.
mutate(Rate_Confirmed = zoo::rollapply(Rate_Confirmed,4,mean,align='right',fill=0),
Accel_Confirmed = zoo::rollapply(Accel_Confirmed,4,mean,align='right',fill=0),
Rate_Deaths = zoo::rollapply(Rate_Deaths,4,mean,align='right',fill=0),
Accel_Deaths = zoo::rollapply(Accel_Deaths,4,mean,align='right',fill=0)) %>%
# Latest report per state (grouped, so max() is per-state here).
filter(Last_Update == max(Last_Update)) %>%
# Attach 2019 population estimates; strip thousands separators.
inner_join(select(usStatePopulation,`Geographic Area`,'2019'), by = c('Province_State' = 'Geographic Area')) %>%
mutate(`2019` = as.numeric(gsub(",","",`2019`,fixed=TRUE))) %>%
rename(Pop = `2019`) %>%
ungroup() %>%
# NOTE(review): Death_per_Conf computes Confirmed/Deaths, i.e. confirmed
# cases per death, despite the name -- confirm which ratio is intended.
mutate(Rate_Confirmed_per_million = (Rate_Confirmed/Pop)*1000000,
Accel_Confirmed_per_million = (Accel_Confirmed/Pop)*1000000,
Death_per_Conf = Confirmed/Deaths) %>%
#filter(Rate_Confirmed < 0 & Accel_Confirmed < 0) %>%
arrange(Rate_Confirmed_per_million) %>%
top_n(-15,Rate_Confirmed_per_million) %>%
pull(Province_State)
datRateBottom15
# Daily new-case trajectories for the 15 most-improved states
# (datRateBottom15), mid-June 2020 onward; extreme one-day swings
# (|new cases| >= 2000, typically reporting corrections) are dropped.
dat %>%
filter(Country_Region == 'US' & !grepl(paste(notIn, collapse="|"), Province_State)& !grepl("^[[:upper:]]+$",str_sub(`Province_State`,-2,-1))) %>%
select(Province_State,Deaths,Recovered,Confirmed,Last_Update) %>%
filter(Province_State %in% datRateBottom15) %>%
group_by(Province_State) %>%
# Active cases and per-state day-over-day first differences.
mutate(Existing = Confirmed - Deaths - Recovered,
Rate_Existing = Existing - lag(Existing,default = 0),
Rate_Confirmed = Confirmed - lag(Confirmed,default = 0),
Rate_Deaths = Deaths - lag(Deaths,default = 0)) %>%
filter(Last_Update > '2020-06-10') %>%
filter(Rate_Confirmed < 2000 & Rate_Confirmed > -2000) %>%
ggplot(aes(x = Last_Update,color = Province_State)) +
geom_line(aes(y=Rate_Confirmed))+
geom_point(aes(y=Rate_Confirmed)) +
# directlabels: label each series at its last point.
geom_dl(aes(label = Province_State, y = Rate_Confirmed), method = list(dl.trans(x = x -1,y=y+.25), "last.points", cex = 0.8))
#########################
# Estimate the number of people in a room to have 50% of a person confirmed
#########################
# Birthday-problem style probability curve.
#
# For each group size k in 1..n (stepped by `stp`), computes the probability
# that at least one "coincidence" occurs when each of k independent trials
# has per-trial match probability `pr` (default 1/365, the classic birthday
# setting). Mirrors the original formulation: the complement of the product
# of P(not exactly one success in i trials) for i = 1..k.
#
# @param n   Maximum group size to evaluate (must be >= 1).
# @param pr  Per-trial probability of a match (default 1/365).
# @param stp Step between evaluated group sizes (default 1).
# @return Numeric vector with one probability per evaluated group size.
bday <- function(n, pr = 1/365, stp = 1){
  pep_buy <- seq(from = 1, to = n, by = stp)
  # P(not exactly one success) over 1..k trials.
  prob <- function(k) 1 - dbinom(1, 1:k, pr)
  # Preallocated functional form instead of growing a vector with c() in a
  # loop (the original was O(n^2) in copies); also avoids reusing the
  # function's own name `bday` for the accumulator.
  vapply(pep_buy, function(k) 1 - prod(prob(k)), numeric(1))
}
# Nationwide totals at the latest report: cumulative confirmed and currently
# active ("existing") cases, used below as the per-person infection rate.
probUsa =
dat %>%
filter(`Country_Region` == 'US' & !grepl('County',`Province_State`) & !grepl("^[[:upper:]]+$",str_sub(`Province_State`,-2,-1))) %>%
filter(Last_Update == max(Last_Update)) %>%
mutate(Existing = Confirmed - Deaths - Recovered) %>%
summarise(total_confirmed = sum(Confirmed),
total_existing = sum(Existing))
# population of the USA is approximately 327.2 million
# For room sizes 1..100, probability (birthday-problem style, via bday())
# that at least one person present is a confirmed / currently-active case.
probContact <- data.table(x= seq(from = 1, to = 100, by = 1))
probContact[ , prob_conf := bday(n = max(x), pr = probUsa$total_confirmed/327200000)]
probContact[ , prob_exist := bday(n = max(x), pr = probUsa$total_existing/327200000)]
# Long format so both curves share one ggplot colour aesthetic.
probContact <- melt(probContact, id.vars = c('x'))
# Largest room size whose probability is still near 50%.
probContact[value >= 0.48 & value <= 0.53,max(x)]
ggplot(data = probContact, aes(x=x,y=value,color = variable))+
geom_line() +
geom_hline(yintercept = 0.5)+
geom_vline(xintercept = c(probContact[value >= 0.499 & value <= 0.501,max(x)],probContact[value >= 0.499 & value <= 0.501,min(x)]))
## Michigan data
# Michigan totals at the latest report date, then the same room-size
# contact-probability curve as the nationwide version above.
# FIX: this pipeline previously summed an `Existing` column it never
# created; the analogous nationwide pipeline (probUsa) derives it with
# mutate() first, so do the same here.
probMichian =
  dat %>%
  filter(`Country_Region` == 'US' & !grepl('County',`Province_State`) & !grepl("^[[:upper:]]+$",str_sub(`Province_State`,-2,-1))) %>%
  filter(Last_Update == max(Last_Update)) %>%
  filter(Province_State == 'Michigan') %>%
  mutate(Existing = Confirmed - Deaths - Recovered) %>%
  summarise(total_confirmed = sum(Confirmed),
            total_existing = sum(Existing))
# population of the Michigan is approximately 9996000 Probablity for 50% chance of contact
probContact <- data.table(x = seq(from = 1, to = 100, by = 1))
probContact[ , prob_conf := bday(n = max(x), pr = probMichian$total_confirmed/9996000)]
probContact[ , prob_exist := bday(n = max(x), pr = probMichian$total_existing/9996000)]
probContact <- melt(probContact, id.vars = c('x'))
# Room size whose contact probability is near 50%.
probContact[value >= 0.48 & value <= 0.51, max(x)]
ggplot(data = probContact, aes(x = x, y = value, color = variable)) +
  geom_line() +
  geom_hline(yintercept = 0.5) +
  geom_vline(xintercept = c(probContact[value >= 0.499 & value <= 0.501, max(x)], probContact[value >= 0.499 & value <= 0.501, min(x)]))
# Michigan daily totals over time (confirmed and active cases per report day).
# FIX: derive `Existing` before summarising -- it was never created in this
# pipeline previously (the nationwide version computes it explicitly).
probMichian2 =
  dat %>%
  filter(`Country_Region` == 'US' & !grepl('County',`Province_State`) & !grepl("^[[:upper:]]+$",str_sub(`Province_State`,-2,-1))) %>%
  filter(Province_State == 'Michigan') %>%
  mutate(Existing = Confirmed - Deaths - Recovered) %>%
  group_by(Last_Update) %>%
  summarise(total_confirmed = sum(Confirmed),
            total_existing = sum(Existing))
#Region Plots
# Re-load the JHU daily-report CSVs at county (Admin2) granularity for the
# map sections below; only US rows are kept.
dat<-data.table()
##load data
files<-list.files("R/2019-coronavirus-dataset-01212020-01262020/csse_covid_19_daily_reports/",full.names=TRUE)
#print(files)
# NOTE(review): the last file in the directory is skipped
# (length(files)-1) -- presumably incomplete/in-progress; confirm.
for(i in 1:(length(files)-1)){
data.temp<-fread(files[i],header =TRUE)
# Only the 12-column schema carries county-level fields; other schemas are
# replaced by a single placeholder row so the loop can continue.
if (ncol(data.temp) == 12) {
data.temp = select(data.temp, `Province_State`, `Country_Region`,`Admin2`,`Last_Update`, `Confirmed`, `Deaths`, `Recovered`, Lat, Long_ )
}else{
data.temp = data.table(Province_State = 'xx', `Country_Region` = 'xx',`Admin2`= 'xx',`Last_Update`= '1900-01-01', `Confirmed` = NA, `Deaths`= NA, `Recovered` = NA , Lat = NA, Long_ =NA)
}
#names(data.temp) <- c('Province_State', 'Country_Region','Last_Update', 'Confirmed', 'Deaths', 'Recovered' )
# Normalise the two timestamp formats used across report files, zero-fill
# missing counts, and derive active ("Existing") cases.
data.temp <-
data.temp %>%
mutate(Last_Update = parse_date_time(Last_Update,c("mdy_HM","ymd_HMS")))%>%
mutate(Last_Update = max(Last_Update),
Confirmed = ifelse(is.na(Confirmed),0,Confirmed),
Deaths = ifelse(is.na(Deaths),0,Deaths),
Recovered = ifelse(is.na(Recovered),0,Recovered)) %>%
mutate(Existing = Confirmed - Deaths - Recovered) %>%
as.data.table()
dat<-rbind(dat,data.temp)
# Re-filtering to US on every iteration keeps the accumulator small.
dat<-
dat %>%
filter(Country_Region == 'US')
}
# Midwest subset with lat/lng column names for leaflet.
# NOTE(review): the rename uses the plyr-style replace-vector form
# c('new' = 'old'); confirm plyr (not dplyr) resolves rename() here,
# otherwise this call errors.
datMidWest =
dat %>%
filter(Province_State == 'Illinois' |Province_State == 'Michigan' | Province_State == 'Indiana' | Province_State == 'Ohio' | Province_State == 'Wisconsin' | Province_State == 'Minnesota') %>%
rename(c('Province_State' = 'Province_State' , 'Country_Region' = 'Country_Region','Admin2' = 'Admin2','lat'= 'Lat','lng'='Long_' )) %>%
mutate(Existing = Confirmed - Deaths - Recovered)
# Leaflet bubble map of the Midwest: blue circles scaled by cumulative
# confirmed cases, red circles by currently-active cases, per county.
datMidWest %>%
  group_by(`Admin2`) %>%
  # Latest report per county only.
  filter(`Last_Update` == max(`Last_Update`)) %>%
  select(`Admin2`, lng, lat, Confirmed, Existing) %>%
  leaflet() %>%
  addTiles() %>%
  addCircles(lng = ~lng, lat = ~lat, weight = 1,
             radius = ~(Confirmed) * 2,
             popup = ~`Admin2`,
             label = ~Confirmed) %>%
  addCircles(lng = ~lng, lat = ~lat, weight = 1,
             radius = ~(Existing) * 1,
             color = 'red') %>%
  addPopups(lng = -80, lat = 45, paste(sep = "<br/>", "Scroll over the circle to see the confirmed count", "Click the circle to see the provice name"),
            options = popupOptions(closeButton = FALSE)) %>%
  # FIX: the title popup referenced dat$`Last Update` (with a space), a
  # column that does not exist -- the loaded column is Last_Update, so the
  # popup showed max() of nothing. Reference the real column.
  addPopups(lng = -80, lat = 47, paste(sep = "<br/>", "<b>Coronavirus(2019-nCoV</b>", "Confirmed Infection Counts Illinois", max(dat$Last_Update)),
            options = popupOptions(closeButton = FALSE))
# Southern-states subset with lat/lng column names for leaflet.
# NOTE(review): plyr-style rename() as above -- confirm which rename() wins.
datSouth =
dat %>%
filter(Province_State == 'Florida' |Province_State == 'Georgia' | Province_State == 'Alabama' | Province_State == 'Mississippi' |
Province_State == 'South Carolina' | Province_State == 'North Carolina'| Province_State == 'Louisiana'
| Province_State == 'Texas' | Province_State == 'Arkansas'| Province_State == 'Tennessee' |
Province_State == 'Arizona' | Province_State == 'New Mexico' | Province_State == 'Oklahoma') %>%
rename(c('Province_State' = 'Province_State' , 'Country_Region' = 'Country_Region','Admin2' = 'Admin2','lat'= 'Lat','lng'='Long_' )) %>%
mutate(Existing = Confirmed - Deaths - Recovered)
# Bubble map: blue = cumulative confirmed, red = currently active, per
# county, at each county's latest report.
datSouth %>%
group_by(`Admin2`) %>%
filter(`Last_Update` == max(`Last_Update`)) %>%
select(`Admin2`,lng,lat,Confirmed,Existing) %>%
leaflet() %>%
addTiles() %>%
addCircles(lng = ~lng, lat = ~lat, weight = 1,
radius = ~(Confirmed) * 3,
popup = ~`Admin2`,
label = ~Confirmed) %>%
addCircles(lng = ~lng, lat = ~lat, weight = 1,
radius = ~(Existing) * 3,
color = 'red')
# United states map
# Nationwide county bubble map: only active ("Existing") cases are visible.
# NOTE(review): the Confirmed circles use radius * 0, i.e. zero-size circles
# kept only for their popup/label -- confirm this is intentional.
dat %>%
rename(c('Province_State' = 'Province_State' , 'Country_Region' = 'Country_Region','Admin2' = 'Admin2','lat'= 'Lat','lng'='Long_' )) %>%
mutate(Existing = Confirmed - Deaths - Recovered) %>%
group_by(`Admin2`) %>%
filter(`Last_Update` == max(`Last_Update`)) %>%
select(`Admin2`,lng,lat,Confirmed,Existing) %>%
leaflet() %>%
addTiles() %>%
addCircles(lng = ~lng, lat = ~lat, weight = 1,
radius = ~(Confirmed) * 0,
popup = ~`Admin2`,
label = ~Confirmed) %>%
addCircles(lng = ~lng, lat = ~lat, weight = 1,
radius = ~(Existing) * 2,
color = 'red')
# Michigan-only county bubble map: blue = confirmed, red = active.
dat %>%
filter(Province_State == 'Michigan' ) %>%
rename(c('Province_State' = 'Province_State' , 'Country_Region' = 'Country_Region','Admin2' = 'Admin2','lat'= 'Lat','lng'='Long_' )) %>%
mutate(Existing = Confirmed - Deaths - Recovered) %>%
group_by(`Admin2`) %>%
filter(`Last_Update` == max(`Last_Update`)) %>%
select(`Admin2`,lng,lat,Confirmed,Existing) %>%
leaflet() %>%
addTiles() %>%
addCircles(lng = ~lng, lat = ~lat, weight = 1,
radius = ~(Confirmed) * 2,
popup = ~`Admin2`,
label = ~Confirmed) %>%
addCircles(lng = ~lng, lat = ~lat, weight = 1,
radius = ~(Existing) * 2,
color = 'red')
#########################################
# Look at number of test vs. infections / infection rate
#########################################
# Load the state-level US daily reports (these carry People_Tested).
files <- list.files("R/2019-coronavirus-dataset-01212020-01262020/csse_covid_19_daily_reports_us/", full.names = TRUE)
datUS <- data.table()
# NOTE(review): the last file is skipped, mirroring the other load loops.
for (i in seq_len(length(files) - 1)) {
  data.temp <- fread(files[i], header = TRUE)
  data.temp <-
    data.temp %>%
    # Normalise the two timestamp formats and zero-fill missing counts.
    mutate(Last_Update = parse_date_time(Last_Update, c("mdy_HM", "ymd_HMS"))) %>%
    mutate(Last_Update = max(Last_Update),
           Confirmed = ifelse(is.na(Confirmed), 0, Confirmed),
           Deaths = ifelse(is.na(Deaths), 0, Deaths),
           Recovered = ifelse(is.na(Recovered), 0, Recovered)) %>%
    mutate(Country_Region = ifelse(grepl('Mainland China', Country_Region), 'China', Country_Region)) %>%
    filter(!is.na(Last_Update)) %>%
    # FIX: these exclusions were previously OR-ed ("|" between negated
    # grepl calls); by De Morgan a row would need to match ALL THREE
    # patterns at once to be dropped, so the filter removed nothing.
    # AND them so a row matching any territory/ship pattern is excluded.
    filter(!grepl('Guam', Province_State) & !grepl('Princess', Province_State) & !grepl('Island', Province_State)) %>%
    filter(Province_State != 'Recovered') %>%
    as.data.table()
  datUS <- rbind(datUS, data.temp)
}
# Number of test for each positive by state
# Tests administered per newly confirmed case per day, Top-15 accel states.
# FIX: the lag-based rate columns were previously computed under
# group_by(Last_Update, Province_State); every (date, state) group holds a
# single row after the summarise, so lag() always returned its default and
# the "rates" were just the cumulative totals. Group by state alone (rows
# sorted by date) to get true day-over-day rates.
datUS %>%
  filter(Province_State %in% datAccelTop15) %>%
  mutate(Existing = Confirmed - Deaths - Recovered) %>%
  group_by(Last_Update, Province_State) %>%
  summarise(Confirmed = sum(Confirmed),
            Deaths = sum(Deaths),
            Recovered = sum(Recovered),
            Existing = sum(Existing),
            Test = sum(People_Tested, na.rm = TRUE)) %>%
  ungroup() %>%
  mutate(Last_Update = date(Last_Update)) %>%
  # Ensure chronological order within each state before differencing.
  arrange(Province_State, Last_Update) %>%
  group_by(Province_State) %>%
  mutate(Rate_Existing = Existing - lag(Existing, default = 0),
         Rate_Confirmed = Confirmed - lag(Confirmed, default = 0),
         Rate_Deaths = Deaths - lag(Deaths, default = 0),
         Rate_Test = Test - lag(Test, default = 0)) %>%
  mutate(Accel_Existing = Rate_Existing - lag(Rate_Existing, default = 0),
         Accel_Confirmed = Rate_Confirmed - lag(Rate_Confirmed, default = 0),
         Accel_Deaths = Rate_Deaths - lag(Rate_Deaths, default = 0),
         Accel_Test = Rate_Test - lag(Rate_Test, default = 0),
         # Tests run for each newly confirmed case that day.
         Test_Confirmed = Rate_Test / Rate_Confirmed) %>%
  ungroup() %>%
  filter(Last_Update > '2020-05-01') %>%
  ggplot(aes(x = Last_Update, color = Province_State)) +
  geom_line(aes(y = Test_Confirmed)) +
  geom_point(aes(y = Test_Confirmed), shape = 1) +
  geom_dl(aes(label = Province_State, y = Test_Confirmed), method = list(dl.trans(x = x - 1, y = y + .25), "last.points", cex = 0.8)) +
  theme(legend.position = "none")
# Cumulative tests (red) vs. cumulative confirmed cases (black), nationwide.
datUS %>%
mutate(Existing = Confirmed - Deaths - Recovered) %>%
group_by(Last_Update) %>%
summarise(Confirmed = sum(Confirmed),
Deaths = sum(Deaths),
Recovered = sum(Recovered),
Existing = sum(Existing),
Test = sum(People_Tested,na.rm = TRUE)) %>%
ungroup() %>%
mutate(Last_Update = date(Last_Update)) %>%
# NOTE(review): grouping by Last_Update leaves one row per group, so every
# lag() below hits its default and the Rate_/Accel_ columns equal the
# cumulative totals / zero. The plot only uses Confirmed and Test, so the
# output is unaffected, but these derived columns are not true rates.
group_by(Last_Update) %>%
mutate(Rate_Existing = Existing - lag(Existing,default = 0),
Rate_Confirmed = Confirmed - lag(Confirmed,default = 0),
Rate_Deaths = Deaths - lag(Deaths,default = 0),
Rate_Test = Test - lag(Test,default = 0)) %>%
mutate(Accel_Existing = Rate_Existing - lag(Rate_Existing,default = 0),
Accel_Confirmed = Rate_Confirmed - lag(Rate_Confirmed,default = 0),
Accel_Deaths = Rate_Deaths - lag(Rate_Deaths,default = 0),
Accel_Test = Rate_Test - lag(Rate_Test,default = 0)) %>%
filter(Last_Update > '2020-05-01') %>%
ggplot(aes(x=Last_Update)) +
geom_point(aes(y=Confirmed))+
geom_point(color = 'red',aes(y=Test)) +
xlab(label = "Date") +
ylab(label = "Cumlative Sum (Confirmed | Test)")
# Patterns identifying non-state rows (territories, cruise ships).
notIn <- c('Princess', 'Guam', 'Samoa', 'Islands','Rico')
# Per-state daily new cases and tests per million residents since May 2020.
# Result is printed, not assigned.
datUS %>%
filter(!grepl(paste(notIn, collapse="|"),Province_State)) %>%
mutate(Last_Update = date(Last_Update)) %>%
group_by(Province_State) %>%
# Per-state day-over-day first differences of the cumulative series.
mutate(Rate_Confirmed = Confirmed - lag(Confirmed,default = 0),
Rate_Test = People_Tested - lag(People_Tested,default = 0)) %>%
select(Province_State,Last_Update,Confirmed,People_Tested,Rate_Confirmed,Rate_Test) %>%
# Attach 2019 population estimates; strip thousands separators.
inner_join(select(usStatePopulation,`Geographic Area`,'2019'), by = c('Province_State' = 'Geographic Area')) %>%
mutate(`2019` = as.numeric(gsub(",","",`2019`,fixed=TRUE))) %>%
rename(Pop = `2019`) %>%
ungroup() %>%
mutate(conf_per_million = (Rate_Confirmed/Pop)*1000000,
test_per_million = (Rate_Test/Pop)*1000000) %>%
filter(Last_Update > '2020-05-01')
###########################################################################################################
|
e8261018de6ab70f22428b76caf83c173d5f329e
|
92d5a6381615b2e46cef6f9f9a13955ad78ed2c7
|
/scripts/CALC_poolrichness_regional.R
|
fed38c6694f6dad85ffdb157bfd001dec137ce5c
|
[
"MIT"
] |
permissive
|
katherinehebert/insular-mammals
|
250d082d9bde1fcc357cd1aeb79e5da64137b762
|
037b9c1dc7bbbf35bcc90cfefb274c63bedbba64
|
refs/heads/main
| 2023-06-04T15:22:18.026892
| 2021-06-24T04:07:48
| 2021-06-24T04:07:48
| 368,992,262
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,778
|
r
|
CALC_poolrichness_regional.R
|
# Script to assemble taxonomic and phylogenetic diversity of the regional pools
# Author: Katherine Hébert
library(dplyr)
# load community matrix of the metacommunity as a whole (all sites x all species)
comms <- readRDS("data/comm_metacomm.RDS")
# load assemblage dispersion fields, one per island group, in the same order
# as the community matrices in `comms` (index-aligned below).
adfs <- list(
readRDS("output/regionalpoolAdr_25p.RDS"),
readRDS("output/regionalpoolAlx_25p.RDS"),
readRDS("output/regionalpoolClf_25p.RDS"),
readRDS("output/regionalpoolSnd_25p.RDS"),
readRDS("output/regionalpoolMdtLB_25p.RDS"),
readRDS("output/regionalpoolMdtOC_25p.RDS"),
readRDS("output/regionalpoolMlk_25p.RDS"),
readRDS("output/regionalpoolMln_25p.RDS"),
readRDS("output/regionalpoolPhl_25p.RDS")
)
# function to extract list of species in the pool
# Build the species pool for one island group: the union of species in the
# local community matrix and species in the assemblage dispersion field.
#
# @param adf  Assemblage dispersion field: a list with a `PAM` matrix whose
#             column names are species ("Genus species", space-separated).
# @param comm Community matrix whose column names are species
#             ("Genus_species", underscore-separated).
# @return Character vector of unique species names, underscore-separated,
#         in order of first appearance (community species first).
get_poollist <- function(adf, comm){
  comm_list <- colnames(comm)
  # Harmonise naming: PAM uses spaces, community matrices use underscores.
  # fixed = TRUE: plain substring replacement, no regex needed.
  adf_list <- gsub(" ", "_", colnames(adf$PAM), fixed = TRUE)
  # Union with duplicates removed (no pipe needed for a single call).
  unique(c(comm_list, adf_list))
}
# get species list in each island pool (adfs and comms are index-aligned)
# Preallocate rather than copying adfs just to overwrite every element;
# seq_along() is safe if the list is ever empty (1:length() is not).
pools <- vector("list", length(adfs))
for (i in seq_along(adfs)) {
  pools[[i]] <- get_poollist(adfs[[i]], comms[[i]])
}
# save pool list here with island name
saveRDS(pools, "output/regionalpool_25p_specieslists.RDS")
# store in a dataframe
df <- data.frame(
  "group" = names(comms),
  # get taxonomic diversity ----
  # lengths() counts each pool's species directly (idiomatic replacement
  # for lapply(..., length) %>% unlist()).
  "sp_rich" = lengths(pools)
)
# Harmonise group labels with the rest of the project.
df$group <- gsub("Ind", "Snd", df$group)
# NOTE(review): positional fix -- assumes the 6th element of comms is the
# MdtOC group; confirm ordering is stable.
df$group[6] <- "MdtOC"
# save
saveRDS(df, "output/regionalpool_25p_diversity.RDS")
# load the diversity data frame (written above by this script)
# FIX: previously read from a hard-coded absolute user path
# (~/Documents/GitHub/IslandMammals/...); use the project-relative path
# this script saved the file to, so it runs on any machine.
regionalpool_25p_diversity <- readRDS("output/regionalpool_25p_diversity.RDS")
colnames(regionalpool_25p_diversity)[2] <- c("tr_25p")
# get phylogenetic diversity ----
# SES-MPD results, one per island group, same group order as above.
sesmpd_25p <- list(
  readRDS("output/regionalpool_25p_sesmpd_Adr.RDS"),
  readRDS("output/regionalpool_25p_sesmpd_Alx.RDS"),
  readRDS("output/regionalpool_25p_sesmpd_Clf.RDS"),
  readRDS("output/regionalpool_25p_sesmpd_Snd.RDS"),
  readRDS("output/regionalpool_25p_sesmpd_MdtLB.RDS"),
  readRDS("output/regionalpool_25p_sesmpd_MdtOC.RDS"),
  readRDS("output/regionalpool_25p_sesmpd_Mlk.RDS"),
  readRDS("output/regionalpool_25p_sesmpd_Mln.RDS"),
  readRDS("output/regionalpool_25p_sesmpd_Phl.RDS")
)
names(sesmpd_25p) <- c("Adr", "Alx", "Clf", "Snd", "MdtLB", "MdtOC", "Mlk", "Mln", "Phl")
# extract the SES-MPD z-score and p-value for each island pool
df2 <- data.frame(
  "group" = names(sesmpd_25p),
  "sesmpd_25p" = NA,
  "sesmpd_25p_p" = NA)
for (i in seq_along(sesmpd_25p)) {
  df2$sesmpd_25p[i] <- sesmpd_25p[[i]]$mpd.obs.z[1]
  df2$sesmpd_25p_p[i] <- sesmpd_25p[[i]]$mpd.obs.p[1]
}
# join to pool diversity (by the shared "group" column)
df <- left_join(regionalpool_25p_diversity, df2)
# save
saveRDS(df, "output/regionalpool_diversity.RDS")
# FIX: write_csv() comes from readr, which this script never attaches;
# qualify the call so it resolves.
readr::write_csv(df, "output/regionalpool_diversity.csv")
|
dc3f6018938ca3e44db867712f385ab8ece65a3b
|
a54451d8d537b7b3c72183e4564de5e4785a8945
|
/R/useLuaVM.R
|
3f01a723e3aaade740fc77675480ee8b14a640e9
|
[
"MIT"
] |
permissive
|
arkraieski/shinyLua
|
ba3a92d69fdf4c2cc717495f19910ed94b490619
|
3b5fa606ecf179ccca81a79b5975815b39cf17ed
|
refs/heads/main
| 2023-02-22T01:02:39.490178
| 2021-02-01T01:52:13
| 2021-02-01T01:52:13
| 334,735,226
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,283
|
r
|
useLuaVM.R
|
#' Embed the Lua VM in the UI of a Shiny app
#'
#' @description This is a setup function that must be called from a Shiny app's UI to
#' add the Lua implementation required for other \code{shinyLua} functions to work.
#'
#' @param rmd Logical; this must be set to \code{TRUE} if you are using \code{shinyLua}
#' in an interactive R Markdown document rather than a Shiny app.
#'
#' @details
#' This function adds 'fengari-web.js', a JavaScript port of the Lua VM, to an app's <head> tag
#' and resource path along with other startup scripts and dependencies. Any scripts in the UI
#' of type \code{"application/lua"} will run in this embedded Lua VM. \code{shinyLua} provides a
#' convenience function, \code{\link{luaScript}}, for inserting Lua scripts into a Shiny UI.
#'
#' @import shiny
#' @export
useLuaVM <- function(rmd = FALSE){
# use the fengari-web.js library included in this package:
# expose the package's bundled JS/Lua assets under the "lib" URL prefix.
addResourcePath("lib", system.file("lib", package = "shinyLua"))
# don't put the script/library inside HTML head if using with R Markdown
if(isTRUE(rmd)){
# NOTE(review): paste() returns a character string, not shiny tag objects;
# confirm this renders (rather than escapes) in R Markdown output.
paste(tags$script(src = "lib/fengari-web.js"),
luaScript(src = "lib/init.lua"), sep = "\n")
} else{
# Inject the Lua VM plus its init script into the app's <head>.
tags$head(tags$script(src = "lib/fengari-web.js"),
luaScript(src = "lib/init.lua"))
}
}
|
0361a97423b87490d66ef5796af641a1c6f2b95e
|
d6af6ce27dee34cca22a3b62ff65f0583391bfb2
|
/man/cohort.Rd
|
357a7f6b37a0b0edb03418d0895a76bff0148aa3
|
[
"MIT"
] |
permissive
|
eringrand/RUncommon
|
8c1339c53f9ca962c4a9b24b4206134864a172df
|
2ff03e918d18420354f7fa19a8732475f80d23e6
|
refs/heads/master
| 2022-12-18T22:13:16.828217
| 2020-09-22T18:52:37
| 2020-09-22T18:52:37
| 110,620,053
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 708
|
rd
|
cohort.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/common_functions.R
\name{cohort}
\alias{cohort}
\title{Cohort Tag}
\usage{
cohort(grade, school_year = "")
}
\arguments{
\item{grade}{Enrolled grade of the student (0 - 12)}
\item{school_year}{School year either in 20XX-YY or 20XX (spring) format.}
}
\description{
Defines a student's cohort given the student's grade and current school year.
If you do not provide a school year the default is the current (spring) year.
e.g If today's date is Jan 10, 2017, the default year will be 2017,
however if the current date is Sep 10, 2017, the year will default to 2018.
}
\examples{
library(RUncommon)
cohort(10)
cohort(10, 2008)
}
|
ebe992ec363fcdf85d3c80a8e4fa365c1a214cf7
|
f097fff6b2db5935e85ffbcc56e04cbd7586846c
|
/plot4.R
|
b5a8ab9b6be759ddcfb647aea89c6ee66fdc87d1
|
[] |
no_license
|
ManelNavarro/ExData_Plotting1
|
76c477ae94fd62322fdb51d5b276a2718a5982b9
|
938fd15ecabeab10891c21800d43ee4ddf09388d
|
refs/heads/master
| 2020-12-14T06:16:56.895208
| 2015-05-10T20:29:34
| 2015-05-10T20:29:34
| 35,292,373
| 0
| 0
| null | 2015-05-08T17:25:34
| 2015-05-08T17:25:34
| null |
UTF-8
|
R
| false
| false
| 3,760
|
r
|
plot4.R
|
##plot4.R
##Multiple Plots: measuring 4 variables across time
##1. Global Active Power
##2. Voltage
##3. Energy sub mettering
##4. Global Reactive Power
##Assumption: base data file is in working folder, already unzipped ("household_power_consumption.txt")
##Base data file NOT pushed to github as it is large (120Mb uncompressed) and it is already available on website
##
##To save memory use and speed up data load, an initial analysis of the file has been performed
##using read.table with "nrows" and "skip" arguments and "head" / "tail" on the result, concluding that:
##1) starting date is 16/12/20007
##2) each day takes approximately 1450 rows
##
##Therefore, data will be loaded only for 6000 rows after skipping initial 65k rows
##(i.e. skipping approximately last 15 days of December and 30 days of January and taking enough rows
##to ensure that first 2 days of February 2007 are included entirely)
##Load packages
# NOTE(review): stringr is loaded but no str_* call appears in this script;
# confirm it is needed.
library(stringr)
##STEP 1) LOAD DATA
##Define colclasses to speed up data load (9 classes = 9 file columns)
readclass<-c("character","character","numeric","numeric","numeric","numeric","numeric","numeric","numeric")
##Load data for the selected rows, keeping original headers and defining "?" as NA as per instructions
# Read just the header row first to recover the original column names.
datanames<-colnames(read.table("household_power_consumption.txt",header=TRUE,nrows=1,sep=";"))
hdata<-read.table("household_power_consumption.txt",
col.names=datanames, #original column names
header=TRUE,skip=65000,nrows=6000, #approx rows required
sep=";",colClasses=readclass,na.strings="?") #separator, classes and NA string
#Transform dates and times into a single POSIXlt timestamp column
hdata$DateTime<-strptime(paste0(hdata$Date,hdata$Time),"%d/%m/%Y%H:%M:%S") #Creates new POSIXlt variable
#Select only the required timeframe: 1-2 February 2007 (half-open interval)
hdata<-hdata[(hdata$DateTime>=strptime("1/2/2007","%d/%m/%Y") & hdata$DateTime<strptime("3/2/2007","%d/%m/%Y")),]
##STEP 2) PLOT DATA
##Set locale time variable to English to replicate exactly the required plot
##(otherwise weekday axis labels are printed in the local language)
LCT<-Sys.getlocale("LC_TIME")
Sys.setlocale("LC_TIME", "C")
#Initialize device
png("plot4.png") #default resolution is 480x480 as required
#Initialize plotting parameters: 2x2 grid of panels
par(mfrow=c(2,2))
#Evaluate all plots with hdata's columns in scope
with(hdata,{
#Plot 1: Global Active Power over time
plot(hdata$DateTime, hdata$Global_active_power,
type="l",
xlab="",
ylab="Global Active Power")
#Plot 2: Voltage over time
plot(DateTime, Voltage,
type="l",
xlab="datetime",
)
#Plot 3: the three energy sub-metering series overlaid
plot(DateTime, Sub_metering_1,
type="l",
xlab="",
ylab="Energy sub metering")
#adds second variable in red
points(DateTime, Sub_metering_2,
col="red",
type="l")
#adds third variable in blue
points(DateTime, Sub_metering_3,
col="blue",
type="l")
#adds legends (series names taken from columns 7:9 of hdata)
legends<-colnames(hdata[,7:9]) #legends vector
lcols<-c("black","red","blue") #legends colors vector
legend("topright",legend=legends,lty=1,col=lcols, bty="n") #creates legends w/o box
#Plot 4: Global Reactive Power over time
plot(DateTime, Global_reactive_power,
type="l",
xlab="datetime",
)
})
#Close device (flushes the PNG to disk)
dev.off()
##Restore the user's original locale time setting
Sys.setlocale("LC_TIME",LCT)
|
53c276fde32a178bdb2e2a336f9d543b4b554773
|
a8def44ca5bb9d5a1435d3d853df90b28ac9c44b
|
/R/Training/histogram/histogram_smallRNA_lengths.R
|
a1448da6afa541135cac0ca96f40a4c05e74c09c
|
[] |
no_license
|
edyscom/dataviz
|
706a15d001bb2da0de9f236cd99df5bcd147ddfe
|
63acf2f045c01b057bf6ba698100138360b3c04f
|
refs/heads/main
| 2023-08-23T01:57:28.191324
| 2021-10-26T06:45:28
| 2021-10-26T06:45:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 415
|
r
|
histogram_smallRNA_lengths.R
|
# Plot a length histogram for each smallRNA category in a BED file,
# four panels per PDF page.
library(histogram)

# BED columns: chrom, start, end, name/category (V1..V4, no header).
mat <- read.table("../KnownGenes.bed", header = FALSE, sep = "\t")
head(mat)
# FIX: the original took levels(mat[,4]); since R 4.0 read.table no longer
# converts strings to factors, so levels() on the character column returns
# NULL and the plotting loop never ran. Derive the category set explicitly;
# sort() reproduces the alphabetical order factor levels would have had.
category <- sort(unique(as.character(mat[, 4])))
# Feature length = end - start (BED half-open coordinates).
length_list <- mat[, 3] - mat[, 2]
pdf("histogram_mm10_smallRNA.pdf")
vector <- as.character(mat[, 4])
par(mfrow = c(2, 2))
for (cate in category) {
  do_list <- vector == cate
  length_subset <- length_list[do_list]
  # NOTE(review): `type` is not a hist() argument -- possibly
  # histogram::histogram() was intended; confirm.
  hist(length_subset, type = "regular", main = cate)
}
dev.off()
|
71d1940b3bdb7013ecf198f0bc54e57c78636bd6
|
06288661e11115193a6032097bc7d06e2b553cff
|
/mapping_clean.R
|
4a48c43d1d226653ec3541cf569c3d01db7a0020
|
[] |
no_license
|
divyargarg/Predicting-Voter-Turnout-for-2020-Presidential-Election-in-Charlotte
|
f4b627e928e297f148a71a73ed9ace5e7986b248
|
d376cba306e8c9adb52106c536f156f9b0c25853
|
refs/heads/master
| 2020-12-13T11:35:27.194749
| 2020-01-16T20:36:41
| 2020-01-16T20:36:41
| 234,404,371
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,159
|
r
|
mapping_clean.R
|
library("rgdal")
library("tmap")
library("readxl")
library("dplyr")
library('reshape2')
#Shapefile for data; dataset for county
# Mecklenburg County NPA (Neighborhood Profile Area) polygons; each QOL
# sheet below is keyed by the NPA id and merged onto these polygons.
meck_county <- readOGR(dsn = "qol-data", layer = "NPA_2014_meck")
summary(meck_county)
plot(meck_county)
#Adding additional information
#Income and demographics
meck_data <- read_excel("qol-data/QOL_Data_October_2018.xls", skip = 1, sheet = 3)
#Education
meck_data1 <- read_excel("qol-data/QOL_Data_October_2018.xls", skip = 1, sheet = 4)
#Community Partcipiation/Voter Participation
meck_data2 <- read_excel("qol-data/QOL_Data_October_2018.xls", skip = 1, sheet = 5)
#Population
meck_data3 <- read_excel("qol-data/QOL_Data_October_2018.xls", skip = 1, sheet = 2)
#Healthcare
meck_data4 <- read_excel("qol-data/QOL_Data_October_2018.xls", skip = 1, sheet = 7)
#Voter Participation Target Set: NPA id plus the five election-year columns
voter_part <- meck_data2[, c(1,10,12,14,16,18)]
#Income shapefile
shp <- merge(meck_county, meck_data, by = "NPA")
#Education shapefile
shp1 <- merge(meck_county, meck_data1, by = "NPA")
#Community Partcipiation/Voter Participation shapefile
shp2 <- merge(meck_county, meck_data2, by = "NPA")
#Population and demographics shapefile
shp3 <- merge(meck_county, meck_data3, by = "NPA")
#Healthcare shapefile
shp4 <- merge(meck_county, meck_data4, by = "NPA")
# Voter-participation-only shapefile used for the election maps below.
vote_shp <- merge(meck_county, voter_part, by = "NPA")
#Section of County by NPA ID
#Issues with 122 & 285?
# Reference map labelling every NPA polygon with its id.
tm_shape(meck_county) +
tm_text("NPA") +
tm_borders()
#INCOME
#Unemployment map (choropleth per NPA)
tm_shape(shp) +
tm_fill("Employment_Rate_2016") +
tm_borders()
#Household income
tm_shape(shp) +
tm_fill("Household_Income_2016") +
tm_borders()
#EDUCATION
#College degrees
tm_shape(shp1) +
tm_fill("Bachelors_Degree_2016") +
tm_borders()
#School attendance
shp1@data$Neighborhood_School_Attendance_2016
tm_shape(shp1) +
tm_fill("Neighborhood_School_Attendance_2016") +
tm_borders()
#POPULATION & DEMOGRAPHICS
#Percentage of Population- Caucasian
head(shp3@data$White_Population_2016)
tm_shape(shp3) +
tm_fill("White_Population_2016") +
tm_borders()
#Percentage of Population- Black
head(shp3@data$Black_Population_2016)
tm_shape(shp3) +
tm_fill("Black_Population_2016") +
tm_borders()
#Mean Age of Population
head(shp3@data$Age_of_Residents_2016)
tm_shape(shp3) +
tm_fill("Age_of_Residents_2016") +
tm_borders()
#HEALTHCARE
#Grocery proximity
shp4@data$Grocery_Proximity_2017
tm_shape(shp4) +
tm_fill("Grocery_Proximity_2017") +
tm_borders()
#Voting Participation Graphics
#Evaluating total percentage
head(vote_shp@data)
# Drop the area column so melt() only carries the election-year columns.
vote_shp@data$ACRES <- NULL
vote_shp1 <- vote_shp
#Melt data to wrap: long format so tm_facets can draw one map per election
vote_shp1@data <- melt(vote_shp@data, id.vars = "NPA")
head(vote_shp1@data)
unique(vote_shp1@data$variable)
#Voting participation for each election (one facet per election year)
tm_shape(vote_shp1) +
tm_fill('value') +
tm_borders() +
tm_facets(by = "variable" )
#Evaluating shifts in year to year participation
#Presidential Elections
head(vote_shp@data)
vote_shp@data$Difference2012_2016 <- vote_shp@data$Voter_Participation_2016 - vote_shp@data$Voter_Participation_2012
#Changes in voter participation in Mecklenburg Co in 2012 & 2016 Elections
tm_shape(vote_shp) +
tm_fill("Difference2012_2016") +
tm_borders()
#Presidential & Congressional Elections
# Pairwise year-over-year differences between consecutive elections.
vote_shp2 <- vote_shp
vote_shp2@data$Difference2014_2016 <- vote_shp2@data$Voter_Participation_2016 - vote_shp2@data$Voter_Participation_2014
vote_shp2@data$Difference2012_2014 <- vote_shp2@data$Voter_Participation_2014 - vote_shp2@data$Voter_Participation_2012
vote_shp2@data$Difference2010_2012 <- vote_shp2@data$Voter_Participation_2012 - vote_shp2@data$Voter_Participation_2010
#Remove columns, then melt data to wrap (keep NPA + the three differences)
vote_shp2@data <- vote_shp2@data[,c(1,8:10)]
head(vote_shp2@data)
vote_shp2@data <- melt(vote_shp2@data, id.vars = "NPA")
head(vote_shp2@data)
unique(vote_shp2@data$variable)
#Changes in participation between Presidential & Congressional Elections
tm_shape(vote_shp2) +
tm_fill('value') +
tm_borders() +
tm_facets(by = "variable" )
|
b348953582f56913f17e14ac632eb3244cdbaacf
|
387f07b6ecb1f1218fc3b78889703bb4bab2f4f5
|
/man/plot_multibars.Rd
|
806e60f2c85ff2cf30488cae18280144040b9355
|
[
"MIT"
] |
permissive
|
JouniVatanen/stools
|
94dee1eb23e1883b782220e8dd1f749dcba895fa
|
e0035fd5aa26dcb06fc7a0b1c4088e7f71f489d0
|
refs/heads/master
| 2023-01-28T21:39:41.085342
| 2023-01-11T07:31:22
| 2023-01-11T07:31:22
| 142,003,341
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,016
|
rd
|
plot_multibars.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot_multibars.R
\name{plot_multibars}
\alias{plot_multibars}
\title{Multiple custom bar plots}
\usage{
plot_multibars(data, row = 1:nrow(data), labels = waiver())
}
\arguments{
\item{data}{dataframe}
\item{row}{select which row to plot. Default: 1:nrow(.)}
\item{labels}{select labels. Default: waiver()}
}
\description{
Create multiple custom bar plots. With purrr::map you can create a list of bar plots.
}
\examples{
# Plot multiple bar plots to same grid
library(gridExtra)
plot_basic <- mtcars \%>\%
dplyr::group_by(cyl) \%>\%
dplyr::summarise_at(dplyr::vars(mpg, disp, hp, wt), ~mean(.)) \%>\%
tidyr::gather(2:ncol(.), key = "question", value = "values") \%>\%
tidyr::spread(cyl, values) \%>\%
{purrr::map(list(1, 2, 3), function(x) plot_multibars(., x))}
do.call("grid.arrange", c(plot_basic, ncol = 2))
# Plot a single bar plot
plot_multibars(mtcars, row = 3)
}
\keyword{ggplot2}
\keyword{multiplot}
\keyword{plot}
|
cc6284a233c85f23f8adaad20cfd59504353ab46
|
6b6e9d8dd2672b35936ba3b06b7f8ac30503778b
|
/man/ltr_age_estimation.Rd
|
7fec67c3ef08af7414d90f903d83523f4d7c2e54
|
[] |
no_license
|
anandksrao/LTRpred
|
4f0d9104bb7706ad4aaea22a202fdf829a20e17c
|
4e4f3a1cfaadfce7a4680f8629d567294c731e48
|
refs/heads/master
| 2020-04-23T00:56:47.445461
| 2019-02-11T17:11:17
| 2019-02-11T17:11:17
| 170,797,492
| 1
| 0
| null | 2019-02-15T03:40:18
| 2019-02-15T03:40:18
| null |
UTF-8
|
R
| false
| true
| 1,834
|
rd
|
ltr_age_estimation.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ltr_age_estimation.R
\name{ltr_age_estimation}
\alias{ltr_age_estimation}
\title{Estimate retrotransposon insertion age in Mya based on 5 prime and 3 prime LTR sequence homology}
\usage{
ltr_age_estimation(ltr_seqs_3_prime, ltr_seqs_5_prime, model = "K80",
mutation_rate = 1.3 * 1e-07)
}
\arguments{
\item{ltr_seqs_3_prime}{file path to a fasta file storing the sequences of the respective 3 prime LTR (e.g. as annotated by \code{\link{LTRpred}}).}
\item{ltr_seqs_5_prime}{file path to a fasta file storing the sequences of the respective 5 prime LTR (e.g. as annotated by \code{\link{LTRpred}}).}
\item{model}{a model as specified in \code{\link[ape]{dist.dna}}: a character string specifying the evolutionary model to be used - must be one of
\code{raw}, \code{N}, \code{TS}, \code{TV}, \code{JC69}, \code{K80} (the default), \code{F81}, \code{K81}, \code{F84}, \code{BH87}, \code{T92},
\code{TN93}, \code{GG95}, \code{logdet}, \code{paralin}.}
\item{mutation_rate}{a mutation rate per site per year. For retrotransposons the default is \eqn{mutation_rate = 1.3 * 10E-8} (Wicker and Keller, 2007).}
}
\description{
This function implements diverse metrics to roughly estimate
the insertion age in Mya based on 5 prime and 3 prime LTR sequence homology.
}
\examples{
\dontrun{
# define file path to fasta file storing 3 prime LTR sequences
ltr_seqs_3_prime <- system.file("Hsapiens_ChrY-ltrdigest_3ltr.fas", package = "LTRpred")
ltr_seqs_5_prime <- system.file("Hsapiens_ChrY-ltrdigest_5ltr.fas", package = "LTRpred")
# estimate insertion age based on 3 prime and 5 prime LTR homology using the K80 model
Hsapiens_ltr_age <- ltr_age_estimation(ltr_seqs_3_prime, ltr_seqs_5_prime)
# look at results
Hsapiens_ltr_age
}
}
\author{
Hajk-Georg Drost
}
|
5044773fbf313a471b023afe9ea264f44e2ba06e
|
549ff637adc9779ac1b8a16a843042e48714aba6
|
/src/Fig2_smc_plot.R
|
5ce3d8a8f24db5669135f9eb5e01f4382e85fa27
|
[
"MIT"
] |
permissive
|
mishaploid/Bo-demography
|
4f4c6c3b1a3e3de01bc38811200ec7a3768e2b3a
|
ce040cb57548e6f2b561701ea57a7f7c75830c4c
|
refs/heads/main
| 2023-09-05T13:36:55.737420
| 2023-08-18T20:17:54
| 2023-08-18T20:17:54
| 175,095,224
| 13
| 2
|
MIT
| 2023-09-08T18:26:23
| 2019-03-11T22:44:34
|
Python
|
UTF-8
|
R
| false
| false
| 2,907
|
r
|
Fig2_smc_plot.R
|
# Figure 2: SMC++ effective-population-size trajectories for Brassica taxa,
# split into three stacked panels (wild/weedy, kales, cultivated).
library(tidyverse)
library(cowplot)
# Color palette: 4 grays for wild Brassica species, 6 greens for kale-type
# morphotypes, then 6 distinct hues for the remaining cultivated types.
cols <- c(colorRampPalette(c("black", "gray"))(4),
colorRampPalette(c("#004949", "#66c2a4", "#ccece6"))(6),
"#490092", "#AA4499", "#DDCC77", "#006ddb", "#882255", "#924900")
names(cols) <- c("Brassica_cretica", "Brassica_incana", "Brassica_rupestris", "Brassica_villosa",
"tronchuda cabbage", "marrow cabbage", "lacinato kale", "perpetual kale", "collards", "curly kale",
"broccoli", "cauliflower", "cabbage", "kohlrabi", "Chinese kale", "Brussels sprouts")
# Common-name -> botanical-variety lookup; used with fct_recode() below to
# relabel SMC++ labels (e.g. "costata") with their common names.
new_names <- c("Brassica oleracea" = "oleracea",
"tronchuda cabbage" = "costata",
"marrow cabbage" = "medullosa",
"perpetual kale" = "ramosa",
"lacinato kale" = "palmifolia",
"curly kale" = "sabellica",
"collards" = "viridis",
"Chinese kale" = "alboglabra",
"cabbage" = "capitata",
"Brussels sprouts" = "gemmifera",
"kohlrabi" = "gongylodes",
"broccoli" = "italica",
"cauliflower" = "botrytis")
# Read SMC++ cross-validation results and assign each label to one of the
# three panel groups, recode labels to common names, and push curly kale last.
smc_results <- read_csv("reports/smc_cv_no_timepoints_results.csv") %>%
mutate(population = ifelse(grepl("Brassica_|oleracea", label),
"wild/weedy",
ifelse(grepl("medullosa|palmifolia|ramosa|sabellica|viridis|costata|alboglabra", label),
"kales",
"cultivated")),
label = fct_recode(label, !!!new_names),
label = fct_relevel(label,
"curly kale", after = Inf))
# split data for each "facet" -- each panel is built from its own subset so
# the legend only shows that panel's taxa (facet_wrap alone would share one legend).
smc_results <- split(smc_results, f = smc_results$population)
# plot for the first "facet" (wild/weedy); p2/p3 below reuse this plot spec
# via the ggplot %+% operator with a different data subset.
p1 <- smc_results$`wild/weedy` %>%
# filter(population %in% "wild/weedy") %>%
ggplot(., aes(x = x,
y = y,
color = label)) +
# NOTE(review): `size` for lines was superseded by `linewidth` in ggplot2 >= 3.4.
geom_line(size = 1.5,
alpha = 0.8) +
scale_color_manual(name = "",
values = cols) +
# Log10 axes (generations ago vs Ne) with 10^x labels and fixed limits so
# all three panels are directly comparable.
scale_x_log10(breaks = scales::trans_breaks("log10", function(x) 10^x, n = 4),
labels = scales::trans_format("log10", scales::math_format(10^.x)),
limits = c(400, 10^5.5)) +
scale_y_log10(breaks = scales::trans_breaks("log10", function(x) 10^x, n = 2),
labels = scales::trans_format("log10", scales::math_format(10^.x)),
limits = c(10^3.2, 10^5.5)) +
facet_wrap(population ~ ., ncol = 1) +
annotation_logticks() +
xlab("Generations ago") +
ylab("Effective population size") +
theme_classic() +
theme(legend.justification = "left")
# Reuse the same plot specification for the other two panels.
p2 <- p1 %+% smc_results$kales
p3 <- p1 %+% smc_results$cultivated
# Stack the three panels vertically with automatic A/B/C labels.
plot_grid(p1, p2, p3,
ncol = 1,
align = "v",
labels = "AUTO")
# ggsave with no plot argument saves the last plot displayed (the grid above).
ggsave("reports/figures/Figure2.png",
height = 8, width = 6)
|
2214173dc8199272ca94ec4995a83297b31a72d9
|
d5be72cb7f0964db9fd98aece7c9f9113d84a3c7
|
/Phase_2/Lazer Cutters/ctl_bot.rd
|
a26e24279efbf2af2ba751306a2c6866871f610c
|
[] |
no_license
|
apimentel1/CatchTheLight
|
85bdfd3da9e336067b0f4448f372ec5ecee13e39
|
68e341a88862424c3f9acde83f7aeb80324b8e13
|
refs/heads/master
| 2020-08-21T10:23:31.706960
| 2019-12-17T23:45:19
| 2019-12-17T23:45:19
| 216,139,567
| 1
| 0
| null | null | null | null |
ISO-8859-1
|
R
| false
| false
| 1,954
|
rd
|
ctl_bot.rd
|
Òp» ¥½
¥½
Òpp±ppé¯ÙpÙpYé¯Ùp p
B©Ð9¯»Ð»¯»ÐI¯»Ð˯»ÄÄIpÛp[é¯Ùpipëé¯ÙÄ«pÝpÝ p]p] päpépppé¯Ùp+pp?é¯Ùp é¯ÙÄ ÄÄ ¹Ä Ä B©ÐÐÐÙ7wÐY7wÐ ¯»Ð¯»Ð)¯»Ð«¯»Ä7Ä¥Ù" {" o±" _S" Mó" ' " '"ó M"S _"± o" {"wå {"wÁ o"w- _"w M"÷ã '"÷Y "÷3ó"÷!S"÷±"÷"÷wå"÷wÁ"÷!w-"÷3w"÷Y÷ã"÷ã÷Y"w÷3"w-÷!"wÁ÷"wå÷"÷"±÷"S÷!"ó÷3" ÷Y" '÷ã" Mw" _w-" owÁ" {wå
ûÑ¥"÷"±÷"S÷!"ó÷3" ÷Y" '÷ã" Mw" _w-" owÁ" {wå" {" o±" _S" Mó" ' " '"ó M"S _"± o" {"wå {"wÁ o"w- _"w M"÷ã '"÷Y "÷3ó"÷!S"÷±"÷"÷wå"÷wÁ"÷!w-"÷3w"÷Y÷ã"÷ã÷Y"w÷3"w-÷!"wÁ÷"wå÷
÷½wÁ" {" o±" _S" Mó" ' " '"ó M"S _"± o" {"wå {"wÁ o"w- _"w M"÷ã '"÷Y "÷3ó"÷!S"÷±"÷"÷wå"÷wÁ"÷!w-"÷3w"÷Y÷ã"÷ã÷Y"w÷3"w-÷!"wÁ÷"wå÷"÷"±÷"S÷!"ó÷3" ÷Y" '÷ã" Mw" _w-" owÁ" {wå/"wå {"wÁ o"w- _"w M"÷ã '"÷Y "÷3ó"÷!S"÷±"÷"÷wå"÷wÁ"÷!w-"÷3w"÷Y÷ã"÷ã÷Y"w÷3"w-÷!"wÁ÷"wå÷"÷"±÷"S÷!"ó÷3" ÷Y" '÷ã" Mw" _w-" owÁ" {wå" {" o±" _S" Mó" ' " '"ó M"S _"± o" {Ññ¥¢¢é¢é¯Ù¢¯ÙdpÔ ©¥¥`
|
a94f9e0417c88361258304a5db125b9be4b14b3e
|
22a04444c55e3ed4bf60f5d22558ac28bff08828
|
/man/ActorMeasures.Rd
|
028f2b99ffad73a8c7bec08a0b3abf48bcb4557d
|
[] |
no_license
|
Sparkoor/multinet
|
af24aa52bad86d9d0b6facca153726fc1eac1b2d
|
9f6d3f76055caebb7f5436c9a3f168d18c9a46e1
|
refs/heads/master
| 2020-08-06T06:14:54.237417
| 2019-07-13T21:40:05
| 2019-07-13T21:40:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,500
|
rd
|
ActorMeasures.Rd
|
\name{Measures: basic}
\alias{Measures: basic}
\alias{degree_ml}
\alias{degree_deviation_ml}
\alias{neighborhood_ml}
\alias{xneighborhood_ml}
\alias{connective_redundancy_ml}
\alias{relevance_ml}
\alias{xrelevance_ml}
\title{
Network analysis measures
}
\description{
These functions compute network analysis measures providing a basic description of the actors in the network.
}
\usage{
degree_ml(mlnetwork,actors=character(0),layers=character(0),mode="all")
degree_deviation_ml(mlnetwork,actors=character(0),
layers=character(0),mode="all")
neighborhood_ml(mlnetwork,actors=character(0),layers=character(0),mode="all")
xneighborhood_ml(mlnetwork,actors=character(0),layers=character(0),mode="all")
connective_redundancy_ml(mlnetwork,actors=character(0),
layers=character(0),mode="all")
relevance_ml(mlnetwork,actors=character(0),layers=character(0),mode="all")
xrelevance_ml(mlnetwork,actors=character(0),layers=character(0),mode="all")
}
\arguments{
\item{mlnetwork}{A multilayer network.}
\item{actors}{An array of names of actors.}
\item{layers}{An array of names of layers.}
\item{mode}{This argument can take values "in", "out" or "all" to count respectively incoming edges, outgoing edges or both.}
}
\value{
\code{degree_ml} returns the number of edges adjacent to the input actor restricted to the specified layers.
\code{degree_deviation_ml} returns the standard deviation of the degree of an actor on the input layers. An actor with the same
degree on all layers will have deviation 0, while an actor with a lot of neighbors on one layer and only a few on another will have
a high degree deviation, showing an uneven usage of the layers (or layers with different densities).
\code{neighborhood_ml} returns the number of actors adjacent to the input actor restricted to the specified layers. \code{xneighborhood_ml} returns the number of actors adjacent to the input actor restricted to the specified layers and not present in the other layers.
\code{connective_redundancy_ml} returns 1 minus neighborhood divided by degree.
\code{relevance_ml} returns the percentage of neighbors present on the specified layers. \code{xrelevance_ml} returns the percentage of neighbors present on the specified layers and not on others.
}
\references{
Berlingerio, Michele, Michele Coscia, Fosca Giannotti, Anna Monreale, and Dino Pedreschi. 2011. "Foundations of Multidimensional Network Analysis." In International Conference on Social Network Analysis and Mining (ASONAM), 485-89. IEEE Computer Society.
Magnani, Matteo, and Luca Rossi. 2011. "The ML-Model for Multi-Layer Social Networks." In International conference on Social Network Analysis and Mining (ASONAM), 5-12. IEEE Computer Society.
}
\examples{
net <- ml_aucs()
# degrees of all actors, considering edges on all layers
degree_ml(net)
# degree of actors U54 and U3, only considering layers work and coauthor
degree_ml(net,c("U54","U3"),c("work","coauthor"),"in")
# an indication of whether U54 and U3 are selectively active only on some layers
degree_deviation_ml(net,c("U54","U3"))
# co-workers of U3
neighborhood_ml(net,"U3","work")
# co-workers of U3 who are not connected to U3 on other layers
xneighborhood_ml(net,"U3","work")
# percentage of neighbors of U3 who are also co-workers
relevance_ml(net,"U3","work")
# redundancy between work and lunch
connective_redundancy_ml(net,"U3",c("work","lunch"))
# percentage of neighbors of U3 who would no longer
# be neighbors by removing this layer
xrelevance_ml(net,"U3","work")
}
|
68319a93cff7310dcfb223a8e4537a56f1d2a075
|
939e31c911886d78e27e45af198a380b618054f3
|
/R/MakeInput_Fn.R
|
2cb253f0216753a63b454d8f311ee7b4d72b1b90
|
[] |
no_license
|
GodinA/spatial_factor_analysis
|
e5c06b05861e77987efa62ea63f2d0d1d96de87e
|
ab9436df59fdfc876dc60a8e6ded76ddd23e072d
|
refs/heads/master
| 2021-01-18T19:01:00.896176
| 2015-02-23T19:11:24
| 2015-02-23T19:11:24
| 36,754,960
| 1
| 0
| null | 2015-06-02T18:59:08
| 2015-06-02T18:59:07
| null |
UTF-8
|
R
| false
| false
| 17,603
|
r
|
MakeInput_Fn.R
|
# MakeInput_Fn
#
# Assemble the four objects needed to build a TMB model for the spatial factor
# analysis: Data (inputs passed to the C++ template, whose layout depends on
# `Version`), Params (starting values, either defaults or carried over from a
# previous fit), Map (parameters to fix, per observation model / anisotropy
# settings), and Random (names of random-effect vectors), plus the spde object.
#
# Arguments:
#   Version      - character; which compiled template version (v18..v24); selects
#                  the Data/Params layout below.
#   Y            - n_fit x n_species count matrix (NAs allowed; flagged via NAind).
#   X            - n_fit x n_p design matrix of covariates.
#   Y_Report     - optional matrix for reporting; zeros of dim(Y) if NULL.
#   LastRun      - optional previous fit (list with $opt$par and $ParBest) used
#                  to seed starting values.
#   Loc          - NOTE(review): accepted but apparently unused in this body -- confirm.
#   isPred       - prediction indicator passed straight into Data.
#   ObsModel     - one of "Poisson","NegBin0","NegBin1","NegBin2","NegBin12","Lognorm_Pois".
#   VaryingKappa - 0/1; whether each factor gets its own log-kappa.
#   n_factors    - number of latent spatial factors.
#   n_stations   - NOTE(review): apparently unused; n_fit = nrow(Y) is used instead -- confirm.
#   Use_REML     - if TRUE, fixed effects are integrated out (added to Random).
#   Aniso        - 0/1; geometric anisotropy on/off.
#   mesh, spde   - INLA mesh and SPDE objects.
#
# NOTE(review): `StartValue` is read below ("Default"/"Last_Run") but is not an
# argument -- it must exist in the calling environment; confirm this is intended.
#
# Returns: list(Map, Random, Params, Data, spde).
MakeInput_Fn <-
function(Version, Y, X, Y_Report=NULL, LastRun=NULL, Loc, isPred, ObsModel, VaryingKappa, n_factors, n_stations, Use_REML, Aniso, mesh, spde){
# Pre-processing in R: Assume 2-Dimensional
Dset = 1:2
# Triangle info
TV = mesh$graph$tv # Triangle to vertex indexing
V0 = mesh$loc[TV[,1],Dset] # V = vertices for each triangle
V1 = mesh$loc[TV[,2],Dset]
V2 = mesh$loc[TV[,3],Dset]
E0 = V2 - V1 # E = edge for each triangle
E1 = V0 - V2
E2 = V1 - V0
# Calculate Areas
# Area of the parallelogram spanned by two edge vectors is |det|; triangle = half.
TmpFn = function(Vec1,Vec2) abs(det( rbind(Vec1,Vec2) ))
Tri_Area = rep(NA, nrow(E0))
for(i in 1:length(Tri_Area)) Tri_Area[i] = TmpFn( E0[i,],E1[i,] )/2 # T = area of each triangle
# Other pre-processing
# NAind flags missing observations (1 = NA) so the template can skip them.
NAind = ifelse( is.na(Y), 1, 0)
if(is.null(Y_Report)) Y_Report = array(0, dim=dim(Y))
n_species = ncol(Y)
n_fit = nrow(Y)
# Data ----
# ErrorDist: 0 = Poisson-family (also Lognorm_Pois), 1 = negative binomial variants.
ErrorDist = as.integer(switch(ObsModel, "Poisson"=0, "NegBin0"=1, "NegBin1"=1, "NegBin2"=1, "NegBin12"=1, "Lognorm_Pois"=1))
# Each template version expects a different Data layout; v21+ add the anisotropy
# inputs (triangle edges/areas, G0_inv), v23+ add Options_Vec for Lognorm_Pois/REML.
if(Version=="spatial_factor_analysis_v18") Data = list(Y=Y, Y_Report=Y_Report, NAind=NAind, isPred=isPred, ErrorDist=ErrorDist, VaryingKappa=as.integer(VaryingKappa), n_species=n_species, n_stations=n_fit, n_factors=n_factors, meshidxloc=mesh$idx$loc-1, n_p=ncol(X), X=X, G0=spde$param.inla$M0, G1=spde$param.inla$M1, G2=spde$param.inla$M2)
if(Version %in% c("spatial_factor_analysis_v19","spatial_factor_analysis_v20") ) Data = list(Pen_Vec=c(0,0), Y=Y, Y_Report=Y_Report, NAind=NAind, isPred=isPred, ErrorDist=ErrorDist, VaryingKappa=as.integer(VaryingKappa), n_species=n_species, n_stations=n_fit, n_factors=n_factors, meshidxloc=mesh$idx$loc-1, n_p=ncol(X), X=X, G0=spde$param.inla$M0, G1=spde$param.inla$M1, G2=spde$param.inla$M2)
if(Version %in% c("spatial_factor_analysis_v21","spatial_factor_analysis_v22") ) Data = list(Aniso=as.integer(Aniso), Pen_Vec=c(0,0), Y=Y, Y_Report=Y_Report, NAind=NAind, isPred=isPred, ErrorDist=ErrorDist, VaryingKappa=as.integer(VaryingKappa), n_species=n_species, n_stations=n_fit, n_factors=n_factors, n_i=mesh$n, meshidxloc=mesh$idx$loc-1, n_p=ncol(X), X=X, n_tri=nrow(mesh$graph$tv), Tri_Area=Tri_Area, E0=E0, E1=E1, E2=E2, TV=TV-1, G0=spde$param.inla$M0, G0_inv=spde$param.inla$M0_inv, G1=spde$param.inla$M1, G2=spde$param.inla$M2)
if(Version %in% c("spatial_factor_analysis_v23","spatial_factor_analysis_v24") ) Data = list(Aniso=as.integer(Aniso), Options_Vec=NA, Pen_Vec=c(0,0), Y=Y, Y_Report=Y_Report, NAind=NAind, isPred=isPred, ErrorDist=ErrorDist, VaryingKappa=as.integer(VaryingKappa), n_species=n_species, n_stations=n_fit, n_factors=n_factors, n_i=mesh$n, meshidxloc=mesh$idx$loc-1, n_p=ncol(X), X=X, n_tri=nrow(mesh$graph$tv), Tri_Area=Tri_Area, E0=E0, E1=E1, E2=E2, TV=TV-1, G0=spde$param.inla$M0, G0_inv=spde$param.inla$M0_inv, G1=spde$param.inla$M1, G2=spde$param.inla$M2)
if(Version %in% c("spatial_factor_analysis_v23","spatial_factor_analysis_v24") ){
# Options_Vec: 0 = not Lognorm_Pois; 1/2 = Lognorm_Pois without/with REML.
if(ObsModel!="Lognorm_Pois") Data[["Options_Vec"]] = as.integer(0)
if(ObsModel=="Lognorm_Pois" & Use_REML==TRUE) Data[["Options_Vec"]] = as.integer(2)
if(ObsModel=="Lognorm_Pois" & Use_REML==FALSE) Data[["Options_Vec"]] = as.integer(1)
}
# Parameters ----
# Branch 1: default starting values (no previous run, or StartValue=="Default").
# Psi_val has length n_factors*n_species - n_factors*(n_factors-1)/2 (lower-
# triangular loadings matrix); overdispersion params start at -2, or -20 when
# the component is effectively switched off for the chosen ObsModel.
if(is.null(LastRun) || StartValue=="Default" ){
if(Version %in% c("spatial_factor_analysis_v18","spatial_factor_analysis_v19") ) Params = list( beta=matrix(0,nrow=ncol(X),ncol=n_species,byrow=TRUE), Psi_val=rep(1,n_factors*n_species-n_factors*(n_factors-1)/2), log_kappa_input=rep(0,ifelse(VaryingKappa==0,1,n_factors)), ln_VarInfl=matrix(-2,nrow=3,ncol=n_species), Omega_input=matrix(rep(0,spde$n.spde*n_factors),ncol=n_factors))
if(Version %in% "spatial_factor_analysis_v20") Params = list( beta=matrix(0,nrow=ncol(X),ncol=n_species,byrow=TRUE), Psi_val=rep(1,n_factors*n_species-n_factors*(n_factors-1)/2), log_kappa_input=rep(0,ifelse(VaryingKappa==0,1,n_factors)), ln_VarInfl_NB1=NA, ln_VarInfl_NB2=NA, ln_VarInfl_ZI=NA, Omega_input=matrix(rep(0,spde$n.spde*n_factors),ncol=n_factors))
if(Version %in% "spatial_factor_analysis_v21") Params = list( ln_H_input=c(0,-10,0), beta=matrix(0,nrow=ncol(X),ncol=n_species,byrow=TRUE), Psi_val=rep(1,n_factors*n_species-n_factors*(n_factors-1)/2), log_kappa_input=rep(0,ifelse(VaryingKappa==0,1,n_factors)), ln_VarInfl_NB1=NA, ln_VarInfl_NB2=NA, ln_VarInfl_ZI=NA, Omega_input=matrix(rep(0,spde$n.spde*n_factors),ncol=n_factors))
if(Version %in% "spatial_factor_analysis_v22") Params = list( ln_H_input=c(0,0), beta=matrix(0,nrow=ncol(X),ncol=n_species,byrow=TRUE), Psi_val=rep(1,n_factors*n_species-n_factors*(n_factors-1)/2), log_kappa_input=rep(0,ifelse(VaryingKappa==0,1,n_factors)), ln_VarInfl_NB1=NA, ln_VarInfl_NB2=NA, ln_VarInfl_ZI=NA, Omega_input=matrix(rep(0,spde$n.spde*n_factors),ncol=n_factors))
if(Version %in% "spatial_factor_analysis_v23") Params = list( ln_H_input=c(0,0), beta=matrix(0,nrow=ncol(X),ncol=n_species,byrow=TRUE), Psi_val=rep(1,n_factors*n_species-n_factors*(n_factors-1)/2), log_kappa_input=rep(0,ifelse(VaryingKappa==0,1,n_factors)), ln_VarInfl_NB1=NA, ln_VarInfl_NB2=NA, ln_VarInfl_ZI=NA, ln_VarInfl_Lognorm=NA, Omega_input=matrix(rep(0,spde$n.spde*n_factors),ncol=n_factors), Lognorm_input=array(0,dim=dim(Data[["Y"]])) )
if(Version %in% "spatial_factor_analysis_v24") Params = list( ln_H_input=c(0,0), beta=matrix(0,nrow=ncol(X),ncol=n_species,byrow=TRUE), Psi_val=rep(1,n_factors*n_species-n_factors*(n_factors-1)/2), log_kappa_input=rep(0,ifelse(VaryingKappa==0,1,n_factors)), ln_VarInfl_NB0=NA, ln_VarInfl_NB1=NA, ln_VarInfl_NB2=NA, ln_VarInfl_ZI=NA, ln_VarInfl_Lognorm=NA, Omega_input=matrix(rep(0,spde$n.spde*n_factors),ncol=n_factors), Lognorm_input=array(0,dim=dim(Data[["Y"]])) )
if(ObsModel=="Poisson"){
if('ln_VarInfl_NB0'%in%names(Params)) Params[['ln_VarInfl_NB0']] = rep(-20,n_species)
Params[['ln_VarInfl_NB1']] = rep(-20,n_species)
Params[['ln_VarInfl_NB2']] = rep(-20,n_species)
Params[['ln_VarInfl_ZI']] = rep(-20,n_species)
if('ln_VarInfl_Lognorm'%in%names(Params)) Params[['ln_VarInfl_Lognorm']] = rep(-20,n_species)
}
if(ObsModel=="NegBin0"){
if('ln_VarInfl_NB0'%in%names(Params)) Params[['ln_VarInfl_NB0']] = rep(-2,n_species)
Params[['ln_VarInfl_NB1']] = rep(-20,n_species)
Params[['ln_VarInfl_NB2']] = rep(-20,n_species)
Params[['ln_VarInfl_ZI']] = rep(-20,n_species)
if('ln_VarInfl_Lognorm'%in%names(Params)) Params[['ln_VarInfl_Lognorm']] = rep(-20,n_species)
}
if(ObsModel=="NegBin1"){
if('ln_VarInfl_NB0'%in%names(Params)) Params[['ln_VarInfl_NB0']] = rep(-20,n_species)
Params[['ln_VarInfl_NB1']] = rep(-2,n_species)
Params[['ln_VarInfl_NB2']] = rep(-20,n_species)
Params[['ln_VarInfl_ZI']] = rep(-20,n_species)
if('ln_VarInfl_Lognorm'%in%names(Params)) Params[['ln_VarInfl_Lognorm']] = rep(-20,n_species)
}
if(ObsModel=="NegBin2"){
if('ln_VarInfl_NB0'%in%names(Params)) Params[['ln_VarInfl_NB0']] = rep(-20,n_species)
Params[['ln_VarInfl_NB1']] = rep(-20,n_species)
Params[['ln_VarInfl_NB2']] = rep(-2,n_species)
Params[['ln_VarInfl_ZI']] = rep(-20,n_species)
if('ln_VarInfl_Lognorm'%in%names(Params)) Params[['ln_VarInfl_Lognorm']] = rep(-20,n_species)
}
if(ObsModel=="NegBin12"){
if('ln_VarInfl_NB0'%in%names(Params)) Params[['ln_VarInfl_NB0']] = rep(-20,n_species)
Params[['ln_VarInfl_NB1']] = rep(-2,n_species)
Params[['ln_VarInfl_NB2']] = rep(-2,n_species)
Params[['ln_VarInfl_ZI']] = rep(-20,n_species)
if('ln_VarInfl_Lognorm'%in%names(Params)) Params[['ln_VarInfl_Lognorm']] = rep(-20,n_species)
}
if(ObsModel=="Lognorm_Pois"){
if('ln_VarInfl_NB0'%in%names(Params)) Params[['ln_VarInfl_NB0']] = rep(-20,n_species)
Params[['ln_VarInfl_NB1']] = rep(-20,n_species)
Params[['ln_VarInfl_NB2']] = rep(-20,n_species)
Params[['ln_VarInfl_ZI']] = rep(-20,n_species)
if('ln_VarInfl_Lognorm'%in%names(Params)) Params[['ln_VarInfl_Lognorm']] = rep(-5,n_species)
}
}
# Branch 2: seed starting values from a previous fit (warm start). Par_last
# holds the last evaluated parameters; Par_best the best ones. Extra elements
# (one more factor column in Omega_input, extra Psi_val entries) are padded
# with defaults so a model with n_factors+1 can start from an n_factors fit.
if(!is.null(LastRun) && StartValue=="Last_Run"){
Par_last = LastRun$opt$par
Par_best = LastRun$ParBest
# names(Save)
if(Version %in% c("spatial_factor_analysis_v18","spatial_factor_analysis_v19") ) Params = list( beta=matrix(Par_last[which(names(Par_last)=="beta")],nrow=ncol(X),ncol=n_species,byrow=TRUE), Psi_val=c(Par_last[which(names(Par_last)=="Psi_val")],rep(1,n_species-n_factors+1)), log_kappa_input=c(Par_last[which(names(Par_last)=="log_kappa_input")],list(NULL,1)[[ifelse(VaryingKappa==0,1,2)]]), ln_VarInfl=Par_last[which(names(Par_last)=="ln_VarInfl")], Omega_input=matrix(c(Par_best[which(names(Par_best)=="Omega_input")],rep(0,spde$n.spde)),ncol=n_factors))
if(Version %in% "spatial_factor_analysis_v20") Params = list( beta=matrix(Par_last[which(names(Par_last)=="beta")],nrow=ncol(X),ncol=n_species,byrow=TRUE), Psi_val=c(Par_last[which(names(Par_last)=="Psi_val")],rep(1,n_species-n_factors+1)), log_kappa_input=c(Par_last[which(names(Par_last)=="log_kappa_input")],list(NULL,1)[[ifelse(VaryingKappa==0,1,2)]]), ln_VarInfl_NB1=NA, ln_VarInfl_NB2=NA, ln_VarInfl_ZI=NA, Omega_input=matrix(c(Par_best[which(names(Par_best)=="Omega_input")],rep(0,spde$n.spde)),ncol=n_factors))
if(Version %in% "spatial_factor_analysis_v21") Params = list( ln_H_input=Par_last[which(names(Par_last)=="ln_H_input")], beta=matrix(Par_last[which(names(Par_last)=="beta")],nrow=ncol(X),ncol=n_species,byrow=TRUE), Psi_val=c(Par_last[which(names(Par_last)=="Psi_val")],rep(1,n_species-n_factors+1)), log_kappa_input=c(Par_last[which(names(Par_last)=="log_kappa_input")],list(NULL,1)[[ifelse(VaryingKappa==0,1,2)]]), ln_VarInfl_NB1=NA, ln_VarInfl_NB2=NA, ln_VarInfl_ZI=NA, Omega_input=matrix(c(Par_best[which(names(Par_best)=="Omega_input")],rep(0,spde$n.spde)),ncol=n_factors))
if(Version %in% "spatial_factor_analysis_v22") Params = list( ln_H_input=Par_best[which(names(Par_best)=="ln_H_input")], beta=matrix(Par_best[which(names(Par_best)=="beta")],nrow=ncol(X),ncol=n_species,byrow=TRUE), Psi_val=c(Par_last[which(names(Par_last)=="Psi_val")],rep(1,n_species-n_factors+1)), log_kappa_input=c(Par_last[which(names(Par_last)=="log_kappa_input")],list(NULL,1)[[ifelse(VaryingKappa==0,1,2)]]), ln_VarInfl_NB1=NA, ln_VarInfl_NB2=NA, ln_VarInfl_ZI=NA, Omega_input=matrix(c(Par_best[which(names(Par_best)=="Omega_input")],rep(0,spde$n.spde)),ncol=n_factors))
if(Version %in% "spatial_factor_analysis_v23") Params = list( ln_H_input=Par_best[which(names(Par_best)=="ln_H_input")], beta=matrix(Par_best[which(names(Par_best)=="beta")],nrow=ncol(X),ncol=n_species,byrow=TRUE), Psi_val=c(Par_last[which(names(Par_last)=="Psi_val")],rep(1,n_species-n_factors+1)), log_kappa_input=c(Par_last[which(names(Par_last)=="log_kappa_input")],list(NULL,1)[[ifelse(VaryingKappa==0,1,2)]]), ln_VarInfl_NB1=NA, ln_VarInfl_NB2=NA, ln_VarInfl_ZI=NA, ln_VarInfl_Lognorm=NA, Omega_input=matrix(c(Par_best[which(names(Par_best)=="Omega_input")],rep(0,spde$n.spde)),ncol=n_factors), Lognorm_input=matrix(Par_best[which(names(Par_best)=="Lognorm_input")],ncol=n_species))
if(Version %in% c("spatial_factor_analysis_v24")) Params = list( ln_H_input=Par_best[which(names(Par_best)=="ln_H_input")], beta=matrix(Par_best[which(names(Par_best)=="beta")],nrow=ncol(X),ncol=n_species,byrow=TRUE), Psi_val=c(Par_last[which(names(Par_last)=="Psi_val")],rep(1,n_species-n_factors+1)), log_kappa_input=c(Par_last[which(names(Par_last)=="log_kappa_input")],list(NULL,1)[[ifelse(VaryingKappa==0,1,2)]]), ln_VarInfl_NB0=NA, ln_VarInfl_NB1=NA, ln_VarInfl_NB2=NA, ln_VarInfl_ZI=NA, ln_VarInfl_Lognorm=NA, Omega_input=matrix(c(Par_best[which(names(Par_best)=="Omega_input")],rep(0,spde$n.spde)),ncol=n_factors), Lognorm_input=matrix(Par_best[which(names(Par_best)=="Lognorm_input")],ncol=n_species))
# As in the default branch, but the active overdispersion component is carried
# over from the previous run rather than reset.
if(ObsModel=="Poisson"){
if('ln_VarInfl_NB0'%in%names(Params)) Params[['ln_VarInfl_NB0']] = rep(-20,n_species)
Params[['ln_VarInfl_NB1']] = rep(-20,n_species)
Params[['ln_VarInfl_NB2']] = rep(-20,n_species)
Params[['ln_VarInfl_ZI']] = rep(-20,n_species)
if('ln_VarInfl_Lognorm'%in%names(Params)) Params[['ln_VarInfl_Lognorm']] = rep(-20,n_species)
}
if(ObsModel=="NegBin0"){
if('ln_VarInfl_NB0'%in%names(Params)) Params[['ln_VarInfl_NB0']] = Par_best[which(names(Par_best)=="ln_VarInfl_NB0")]
Params[['ln_VarInfl_NB1']] = rep(-20,n_species)
Params[['ln_VarInfl_NB2']] = rep(-20,n_species)
Params[['ln_VarInfl_ZI']] = rep(-20,n_species)
if('ln_VarInfl_Lognorm'%in%names(Params)) Params[['ln_VarInfl_Lognorm']] = rep(-20,n_species)
}
if(ObsModel=="NegBin1"){
if('ln_VarInfl_NB0'%in%names(Params)) Params[['ln_VarInfl_NB0']] = rep(-20,n_species)
Params[['ln_VarInfl_NB1']] = Par_best[which(names(Par_best)=="ln_VarInfl_NB1")]
Params[['ln_VarInfl_NB2']] = rep(-20,n_species)
Params[['ln_VarInfl_ZI']] = rep(-20,n_species)
if('ln_VarInfl_Lognorm'%in%names(Params)) Params[['ln_VarInfl_Lognorm']] = rep(-20,n_species)
}
if(ObsModel=="NegBin2"){
if('ln_VarInfl_NB0'%in%names(Params)) Params[['ln_VarInfl_NB0']] = rep(-20,n_species)
Params[['ln_VarInfl_NB1']] = rep(-20,n_species)
Params[['ln_VarInfl_NB2']] = Par_best[which(names(Par_best)=="ln_VarInfl_NB2")]
Params[['ln_VarInfl_ZI']] = rep(-20,n_species)
if('ln_VarInfl_Lognorm'%in%names(Params)) Params[['ln_VarInfl_Lognorm']] = rep(-20,n_species)
}
if(ObsModel=="NegBin12"){
if('ln_VarInfl_NB0'%in%names(Params)) Params[['ln_VarInfl_NB0']] = rep(-20,n_species)
Params[['ln_VarInfl_NB1']] = Par_best[which(names(Par_best)=="ln_VarInfl_NB1")]
Params[['ln_VarInfl_NB2']] = Par_best[which(names(Par_best)=="ln_VarInfl_NB2")]
Params[['ln_VarInfl_ZI']] = rep(-20,n_species)
if('ln_VarInfl_Lognorm'%in%names(Params)) Params[['ln_VarInfl_Lognorm']] = rep(-20,n_species)
}
if(ObsModel=="Lognorm_Pois"){
if('ln_VarInfl_NB0'%in%names(Params)) Params[['ln_VarInfl_NB0']] = rep(-20,n_species)
Params[['ln_VarInfl_NB1']] = rep(-20,n_species)
Params[['ln_VarInfl_NB2']] = rep(-20,n_species)
Params[['ln_VarInfl_ZI']] = rep(-20,n_species)
if('ln_VarInfl_Lognorm'%in%names(Params)) Params[['ln_VarInfl_Lognorm']] = Par_best[which(names(Par_best)=="ln_VarInfl_Lognorm")]
}
# Anisotropy off: reset ln_H_input to the identity parameterization
# (length 3 in v20/v21, length 2 in v22+).
if(Version %in% c("spatial_factor_analysis_v20","spatial_factor_analysis_v21") ){
if(Aniso==0) Params[['ln_H_input']] = c(0,-10,0)
}
if(Version %in% c("spatial_factor_analysis_v22","spatial_factor_analysis_v23","spatial_factor_analysis_v24") ){
if(Aniso==0) Params[['ln_H_input']] = c(0,0)
}
}
# Fix parameters ----
# Map entries set to factor(NA) tell TMB to hold those parameters fixed at
# their starting values; only the components used by ObsModel stay estimable.
Map = list()
if(ObsModel=="Poisson"){
Map[['ln_VarInfl_NB0']] = factor( rep(NA,n_species) )
Map[['ln_VarInfl_NB1']] = factor( rep(NA,n_species) )
Map[['ln_VarInfl_NB2']] = factor( rep(NA,n_species) )
Map[['ln_VarInfl_ZI']] = factor( rep(NA,n_species) )
Map[['ln_VarInfl_Lognorm']] = factor( rep(NA,n_species) )
Map[['Lognorm_input']] = factor( array(NA,dim=dim(Params[['Lognorm_input']])) )
}
if(ObsModel=="NegBin0"){
Map[['ln_VarInfl_ZI']] = factor( rep(NA,n_species) )
Map[['ln_VarInfl_NB1']] = factor( rep(NA,n_species) )
Map[['ln_VarInfl_NB2']] = factor( rep(NA,n_species) )
Map[['ln_VarInfl_Lognorm']] = factor( rep(NA,n_species) )
Map[['Lognorm_input']] = factor( array(NA,dim=dim(Params[['Lognorm_input']])) )
}
if(ObsModel=="NegBin1"){
Map[['ln_VarInfl_NB0']] = factor( rep(NA,n_species) )
Map[['ln_VarInfl_ZI']] = factor( rep(NA,n_species) )
Map[['ln_VarInfl_NB2']] = factor( rep(NA,n_species) )
Map[['ln_VarInfl_Lognorm']] = factor( rep(NA,n_species) )
Map[['Lognorm_input']] = factor( array(NA,dim=dim(Params[['Lognorm_input']])) )
}
if(ObsModel=="NegBin2"){
Map[['ln_VarInfl_NB0']] = factor( rep(NA,n_species) )
Map[['ln_VarInfl_NB1']] = factor( rep(NA,n_species) )
Map[['ln_VarInfl_ZI']] = factor( rep(NA,n_species) )
Map[['ln_VarInfl_Lognorm']] = factor( rep(NA,n_species) )
Map[['Lognorm_input']] = factor( array(NA,dim=dim(Params[['Lognorm_input']])) )
}
if(ObsModel=="NegBin12"){
Map[['ln_VarInfl_NB0']] = factor( rep(NA,n_species) )
Map[['ln_VarInfl_ZI']] = factor( rep(NA,n_species) )
Map[['ln_VarInfl_Lognorm']] = factor( rep(NA,n_species) )
Map[['Lognorm_input']] = factor( array(NA,dim=dim(Params[['Lognorm_input']])) )
}
if(ObsModel=="Lognorm_Pois"){
Map[['ln_VarInfl_NB0']] = factor( rep(NA,n_species) )
Map[['ln_VarInfl_NB1']] = factor( rep(NA,n_species) )
Map[['ln_VarInfl_NB2']] = factor( rep(NA,n_species) )
Map[['ln_VarInfl_ZI']] = factor( rep(NA,n_species) )
}
# Anisotropy / kappa fixing, version-dependent as above.
if(Version %in% c("spatial_factor_analysis_v18","spatial_factor_analysis_v19","spatial_factor_analysis_v20","spatial_factor_analysis_v21") ){
if(Aniso==0) Map[['ln_H_input']] = factor( rep(NA,3) )
if(Aniso==1 & VaryingKappa==0) Map[['log_kappa_input']] = factor( NA )
}
if(Version %in% c("spatial_factor_analysis_v22","spatial_factor_analysis_v23","spatial_factor_analysis_v24") ){
if(Aniso==0) Map[['ln_H_input']] = factor( rep(NA,2) )
}
# Declare random ----
# Spatial factors are always random; v23+ also integrate the lognormal field;
# under REML the fixed effects are integrated out as well.
Random = c("Omega_input")
if(Version %in% c("spatial_factor_analysis_v23","spatial_factor_analysis_v24") ) Random = c(Random, "Lognorm_input")
if(Use_REML==TRUE) Random = c(Random, "beta", "ln_VarInfl_NB0", "ln_VarInfl_NB1", "ln_VarInfl_NB2", "ln_VarInfl_ZI", "ln_VarInfl_Lognorm")
# Return
Return = list("Map"=Map, "Random"=Random, "Params"=Params, "Data"=Data, "spde"=spde)
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.